From 1b1dcef61d739190527eb01380a75e6f91dea7a9 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 30 May 2018 13:34:25 -0700 Subject: [PATCH 001/212] Update master version to 0.12.0 (#768) --- internal/internal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/internal.go b/internal/internal.go index 97abf756c..d67942ccf 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -18,7 +18,7 @@ import "time" // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -const UserAgent = "opencensus-go [0.11.0]" +const UserAgent = "opencensus-go [0.12.0]" // MonotonicEndTime returns the end time at present // but offset from start, monotonically. From b8a6dd9b8d7803b1c25c3ae3c531d8a8f1db7975 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 4 Jun 2018 10:48:40 -0700 Subject: [PATCH 002/212] Fix lint issues (#771) --- plugin/ocgrpc/client.go | 1 + plugin/ocgrpc/stats_common.go | 8 +++++++- plugin/ochttp/trace.go | 2 +- stats/view/worker.go | 2 ++ 4 files changed, 11 insertions(+), 2 deletions(-) diff --git a/plugin/ocgrpc/client.go b/plugin/ocgrpc/client.go index 37d238f14..a6c466ae8 100644 --- a/plugin/ocgrpc/client.go +++ b/plugin/ocgrpc/client.go @@ -31,6 +31,7 @@ type ClientHandler struct { StartOptions trace.StartOptions } +// HandleConn exists to satisfy gRPC stats.Handler. func (c *ClientHandler) HandleConn(ctx context.Context, cs stats.ConnStats) { // no-op } diff --git a/plugin/ocgrpc/stats_common.go b/plugin/ocgrpc/stats_common.go index acb626e12..119bbda9b 100644 --- a/plugin/ocgrpc/stats_common.go +++ b/plugin/ocgrpc/stats_common.go @@ -56,10 +56,16 @@ var ( DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) ) +// Server tags are applied to the context used to process each RPC, as well as +// the measures at the end of each RPC. 
var ( KeyServerMethod, _ = tag.NewKey("grpc_server_method") - KeyClientMethod, _ = tag.NewKey("grpc_client_method") KeyServerStatus, _ = tag.NewKey("grpc_server_status") +) + +// Client tags are applied to measures at the end of each RPC. +var ( + KeyClientMethod, _ = tag.NewKey("grpc_client_method") KeyClientStatus, _ = tag.NewKey("grpc_client_status") ) diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 80ee86c7a..79bbfd193 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -146,7 +146,7 @@ func responseAttrs(resp *http.Response) []trace.Attribute { } } -// HTTPStatusToTraceStatus converts the HTTP status code to a trace.Status that +// TraceStatus converts the HTTP status code to a trace.Status that // represents the outcome as closely as possible. func TraceStatus(httpStatusCode int, statusLine string) trace.Status { var code int32 diff --git a/stats/view/worker.go b/stats/view/worker.go index 2d1e8059c..ba9d7fccb 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -94,6 +94,8 @@ func Unregister(views ...*View) { <-req.done } +// RetrieveData gets a snapshot of the data collected for the the view registered +// with the given name. It is intended for testing only. 
func RetrieveData(viewName string) ([]*Row, error) { req := &retrieveDataReq{ now: time.Now(), From 5897c5ce32247fc8af19c7710abd96e3304fb43c Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 4 Jun 2018 13:11:06 -0700 Subject: [PATCH 003/212] Export a variable with the current version, for use by exporters (#775) See: census-ecosystem/opencensus-go-exporter-stackdriver#7 --- .travis.yml | 1 + exporterutil/version.go | 20 +++++++++ internal/check/version.go | 88 +++++++++++++++++++++++++++++++++++++++ internal/internal.go | 9 +++- 4 files changed, 116 insertions(+), 2 deletions(-) create mode 100644 exporterutil/version.go create mode 100644 internal/check/version.go diff --git a/.travis.yml b/.travis.yml index 5d1d18292..2d6daa6b2 100644 --- a/.travis.yml +++ b/.travis.yml @@ -24,3 +24,4 @@ script: - go vet ./... - go test -v -race $PKGS # Run all the tests with the race detector enabled - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' + - go run internal/check/version.go diff --git a/exporterutil/version.go b/exporterutil/version.go new file mode 100644 index 000000000..09afc4148 --- /dev/null +++ b/exporterutil/version.go @@ -0,0 +1,20 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exporterutil contains common utilities for exporter implementations. +package exporterutil + +// Version is the current release version of OpenCensus in use. 
It is made +// available for exporters to include in User-Agent-like metadata. +var Version = "0.12.0" diff --git a/internal/check/version.go b/internal/check/version.go new file mode 100644 index 000000000..03ef89e69 --- /dev/null +++ b/internal/check/version.go @@ -0,0 +1,88 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Command version checks that the version string matches the latest Git tag. +// This is expected to pass only on the master branch. +package main + +import ( + "bytes" + "fmt" + "log" + "os" + "os/exec" + "sort" + "strconv" + "strings" + + "go.opencensus.io/exporterutil" +) + +func main() { + cmd := exec.Command("git", "tag") + var buf bytes.Buffer + cmd.Stdout = &buf + err := cmd.Run() + if err != nil { + log.Fatal(err) + } + var versions []version + for _, vStr := range strings.Split(buf.String(), "\n") { + if len(vStr) == 0 { + continue + } + versions = append(versions, parseVersion(vStr)) + } + sort.Slice(versions, func(i, j int) bool { + return versionLess(versions[i], versions[j]) + }) + latest := versions[len(versions)-1] + codeVersion := parseVersion("v" + exporterutil.Version) + if !versionLess(latest, codeVersion) { + fmt.Printf("exporterutil.Version is out of date with Git tags. 
Got %s; want %s\n", latest, exporterutil.Version) + os.Exit(1) + } + fmt.Printf("exporterutil.Version is up-to-date: %s\n", exporterutil.Version) +} + +type version [3]int + +func versionLess(v1, v2 version) bool { + for c := 0; c < 3; c++ { + if diff := v1[c] - v2[c]; diff != 0 { + return diff < 0 + } + } + return false +} + +func parseVersion(vStr string) version { + split := strings.Split(vStr[1:], ".") + var ( + v version + err error + ) + for i := 0; i < 3; i++ { + v[i], err = strconv.Atoi(split[i]) + if err != nil { + fmt.Printf("Unrecognized version tag %q: %s\n", vStr, err) + os.Exit(2) + } + } + return v +} + +func (v version) String() string { + return fmt.Sprintf("%d.%d.%d", v[0], v[1], v[2]) +} diff --git a/internal/internal.go b/internal/internal.go index d67942ccf..bef89a920 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -14,11 +14,16 @@ package internal // import "go.opencensus.io/internal" -import "time" +import ( + "fmt" + "time" + + "go.opencensus.io/exporterutil" +) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -const UserAgent = "opencensus-go [0.12.0]" +var UserAgent = fmt.Sprintf("opencensus-go [%s]", exporterutil.Version) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. From 57c74344f14bef266fd8bae3e3b8e67920295263 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 4 Jun 2018 14:16:18 -0700 Subject: [PATCH 004/212] Update version string to 0.13.0 (#776) --- exporterutil/version.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporterutil/version.go b/exporterutil/version.go index 09afc4148..8acdf998e 100644 --- a/exporterutil/version.go +++ b/exporterutil/version.go @@ -17,4 +17,4 @@ package exporterutil // Version is the current release version of OpenCensus in use. It is made // available for exporters to include in User-Agent-like metadata. 
-var Version = "0.12.0" +var Version = "0.13.0" From 0dabe09745469743c19cd5f93a6e77f3ec8490b0 Mon Sep 17 00:00:00 2001 From: JBD Date: Wed, 6 Jun 2018 13:55:54 -0700 Subject: [PATCH 005/212] Move version accessor to the opencensus package (#781) Version returns the OpenCensus library version. It is not an exporter utility. If it is an exporter utility, it shouldn't be in exportutil. In Go, utility packages are organized under an existing package. For example, io/ioutil or net/http/httputil. Fixes #779. --- exporterutil/version.go | 8 +++++++- internal/check/version.go | 8 ++++---- internal/internal.go | 4 ++-- doc.go => opencensus.go | 5 +++++ 4 files changed, 18 insertions(+), 7 deletions(-) rename doc.go => opencensus.go (86%) diff --git a/exporterutil/version.go b/exporterutil/version.go index 8acdf998e..d20109085 100644 --- a/exporterutil/version.go +++ b/exporterutil/version.go @@ -13,8 +13,14 @@ // limitations under the License. // Package exporterutil contains common utilities for exporter implementations. +// +// Deprecated: Don't use this package. package exporterutil +import opencensus "go.opencensus.io" + // Version is the current release version of OpenCensus in use. It is made // available for exporters to include in User-Agent-like metadata. -var Version = "0.13.0" +var Version = opencensus.Version() + +// TODO(jbd): Remove this package at the next release. 
diff --git a/internal/check/version.go b/internal/check/version.go index 03ef89e69..d38a9d514 100644 --- a/internal/check/version.go +++ b/internal/check/version.go @@ -26,7 +26,7 @@ import ( "strconv" "strings" - "go.opencensus.io/exporterutil" + opencensus "go.opencensus.io" ) func main() { @@ -48,12 +48,12 @@ func main() { return versionLess(versions[i], versions[j]) }) latest := versions[len(versions)-1] - codeVersion := parseVersion("v" + exporterutil.Version) + codeVersion := parseVersion("v" + opencensus.Version()) if !versionLess(latest, codeVersion) { - fmt.Printf("exporterutil.Version is out of date with Git tags. Got %s; want %s\n", latest, exporterutil.Version) + fmt.Printf("exporter.Version is out of date with Git tags. Got %s; want %s\n", latest, opencensus.Version()) os.Exit(1) } - fmt.Printf("exporterutil.Version is up-to-date: %s\n", exporterutil.Version) + fmt.Printf("exporter.Version is up-to-date: %s\n", opencensus.Version()) } type version [3]int diff --git a/internal/internal.go b/internal/internal.go index bef89a920..e1d1238d0 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -18,12 +18,12 @@ import ( "fmt" "time" - "go.opencensus.io/exporterutil" + "go.opencensus.io" ) // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go [%s]", exporterutil.Version) +var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. diff --git a/doc.go b/opencensus.go similarity index 86% rename from doc.go rename to opencensus.go index 942ddb8be..8a98fce84 100644 --- a/doc.go +++ b/opencensus.go @@ -14,3 +14,8 @@ // Package opencensus contains Go support for OpenCensus. package opencensus // import "go.opencensus.io" + +// Version is the current release version of OpenCensus in use. 
+func Version() string { + return "0.13.0" +} From 6edeb78af2d9e4f169abb223feaef35da2e45d06 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 7 Jun 2018 14:14:51 -0700 Subject: [PATCH 006/212] Initial support for lazily enabling zpages at runtime (#774) We still need to actually remove zpage.Handler before we can remove the eager initialization. We will do this in around a month according to the deprecation policy. See: #772 --- examples/grpc/helloworld_server/main.go | 7 +++-- examples/http/helloworld_server/main.go | 7 ++++- zpages/example_test.go | 6 ++--- zpages/rpcz.go | 2 +- zpages/tracez.go | 4 --- zpages/zpages.go | 36 ++++++++++++++++++++----- zpages/{z_test.go => zpages_test.go} | 24 +++++++++++++++++ 7 files changed, 69 insertions(+), 17 deletions(-) rename zpages/{z_test.go => zpages_test.go} (86%) diff --git a/examples/grpc/helloworld_server/main.go b/examples/grpc/helloworld_server/main.go index e9eb78257..c0215a921 100644 --- a/examples/grpc/helloworld_server/main.go +++ b/examples/grpc/helloworld_server/main.go @@ -47,10 +47,13 @@ func (s *server) SayHello(ctx context.Context, in *pb.HelloRequest) (*pb.HelloRe } func main() { + // Start z-Pages server. go func() { - http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler)) - log.Fatal(http.ListenAndServe(":8081", nil)) + mux := http.NewServeMux() + zpages.Handle(mux, "/debug") + log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux)) }() + // Register stats and trace exporters to export // the collected data. view.RegisterExporter(&exporter.PrintExporter{}) diff --git a/examples/http/helloworld_server/main.go b/examples/http/helloworld_server/main.go index b13d6a2c9..3f042c15a 100644 --- a/examples/http/helloworld_server/main.go +++ b/examples/http/helloworld_server/main.go @@ -29,7 +29,12 @@ import ( ) func main() { - go func() { log.Fatal(http.ListenAndServe(":8081", zpages.Handler)) }() + // Start z-Pages server. 
+ go func() { + mux := http.NewServeMux() + zpages.Handle(mux, "/debug") + log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux)) + }() // Register stats and trace exporters to export the collected data. exporter := &exporter.PrintExporter{} diff --git a/zpages/example_test.go b/zpages/example_test.go index 1af712b35..141663cc4 100644 --- a/zpages/example_test.go +++ b/zpages/example_test.go @@ -22,7 +22,7 @@ import ( ) func Example() { - // Both /debug/tracez and /debug/rpcz will be served. - http.Handle("/debug/", http.StripPrefix("/debug", zpages.Handler)) - log.Fatal(http.ListenAndServe(":9999", nil)) + // Both /debug/tracez and /debug/rpcz will be served on the default mux. + zpages.Handle(nil, "/debug") + log.Fatal(http.ListenAndServe("127.0.0.1:9999", nil)) } diff --git a/zpages/rpcz.go b/zpages/rpcz.go index 310cb2e4a..acdcacc25 100644 --- a/zpages/rpcz.go +++ b/zpages/rpcz.go @@ -56,7 +56,7 @@ var ( } ) -func init() { +func registerRPCViews() { views := make([]*view.View, 0, len(viewType)) for v := range viewType { views = append(views, v) diff --git a/zpages/tracez.go b/zpages/tracez.go index e3ff2c4ab..330022c23 100644 --- a/zpages/tracez.go +++ b/zpages/tracez.go @@ -75,10 +75,6 @@ var ( } ) -func init() { - internal.LocalSpanStoreEnabled = true -} - func canonicalCodeString(code int32) string { if code < 0 || int(code) >= len(canonicalCodes) { return "error code " + strconv.FormatInt(int64(code), 10) diff --git a/zpages/zpages.go b/zpages/zpages.go index fc47d6f9a..6d2305880 100644 --- a/zpages/zpages.go +++ b/zpages/zpages.go @@ -32,15 +32,39 @@ package zpages // import "go.opencensus.io/zpages" import ( "net/http" + "path" + "sync" + + "go.opencensus.io/internal" ) -// Handler is an http.Handler that serves the zpages. +// TODO(ramonza): Remove Handler to make initialization lazy. + +// Handler is deprecated: Use Handle. 
var Handler http.Handler func init() { - zpagesMux := http.NewServeMux() - zpagesMux.HandleFunc("/rpcz", rpczHandler) - zpagesMux.HandleFunc("/tracez", tracezHandler) - zpagesMux.Handle("/public/", http.FileServer(fs)) - Handler = zpagesMux + mux := http.NewServeMux() + Handle(mux, "") + Handler = mux +} + +// Handle adds the z-pages to the given ServeMux rooted at pathPrefix. +func Handle(mux *http.ServeMux, pathPrefix string) { + enable() + if mux == nil { + mux = http.DefaultServeMux + } + mux.HandleFunc(path.Join(pathPrefix, "rpcz"), rpczHandler) + mux.HandleFunc(path.Join(pathPrefix, "tracez"), tracezHandler) + mux.Handle(path.Join(pathPrefix, "public/"), http.FileServer(fs)) +} + +var enableOnce sync.Once + +func enable() { + enableOnce.Do(func() { + internal.LocalSpanStoreEnabled = true + registerRPCViews() + }) } diff --git a/zpages/z_test.go b/zpages/zpages_test.go similarity index 86% rename from zpages/z_test.go rename to zpages/zpages_test.go index 9552eb76e..dc09d893d 100644 --- a/zpages/z_test.go +++ b/zpages/zpages_test.go @@ -21,6 +21,10 @@ import ( "testing" "time" + "fmt" + "net/http" + "net/http/httptest" + "go.opencensus.io/trace" ) @@ -85,3 +89,23 @@ func TestTraceRows(t *testing.T) { t.Errorf("writeTextTraces: got %q want %q\n", buf.String(), want) } } + +func TestGetZPages(t *testing.T) { + mux := http.NewServeMux() + Handle(mux, "/debug") + server := httptest.NewServer(mux) + defer server.Close() + tests := []string{"/debug/rpcz", "/debug/tracez"} + for _, tt := range tests { + t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) { + res, err := http.Get(server.URL + tt) + if err != nil { + t.Error(err) + return + } + if got, want := res.StatusCode, http.StatusOK; got != want { + t.Errorf("res.StatusCode = %d; want %d", got, want) + } + }) + } +} From d5427b08642f550a80b6894032c0af6b2348bd83 Mon Sep 17 00:00:00 2001 From: Peiqin Zhao Date: Mon, 11 Jun 2018 21:33:17 -0700 Subject: [PATCH 007/212] Set the report period shorter that the exported 
data could be printed (#785) --- examples/helloworld/main.go | 1 + 1 file changed, 1 insertion(+) diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index 155b1fc2a..316202de1 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -54,6 +54,7 @@ func main() { log.Fatal(err) } videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes) + view.SetReportingPeriod(2 * time.Second) // Create view to see the processed video size // distribution broken down by frontend. From 47d3bc068646c83a1dace87944c9d234b975a604 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Fri, 15 Jun 2018 12:43:31 -0700 Subject: [PATCH 008/212] Version bump (#791) --- internal/check/version.go | 2 +- opencensus.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/check/version.go b/internal/check/version.go index d38a9d514..ab57ae73d 100644 --- a/internal/check/version.go +++ b/internal/check/version.go @@ -50,7 +50,7 @@ func main() { latest := versions[len(versions)-1] codeVersion := parseVersion("v" + opencensus.Version()) if !versionLess(latest, codeVersion) { - fmt.Printf("exporter.Version is out of date with Git tags. Got %s; want %s\n", latest, opencensus.Version()) + fmt.Printf("exporter.Version is out of date with Git tags. Got %s; want something greater than %s\n", opencensus.Version(), latest) os.Exit(1) } fmt.Printf("exporter.Version is up-to-date: %s\n", opencensus.Version()) diff --git a/opencensus.go b/opencensus.go index 8a98fce84..eb8e7213d 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.13.0" + return "0.14.0" } From ff7de98412e5c010eb978f11056f90c00561637f Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Fri, 15 Jun 2018 14:21:32 -0700 Subject: [PATCH 009/212] Deprecate public symbols individually in Stackdriver exporter (#792) Some tools may not prominently display the package-level deprecation. To improve the changes that users will notice the deprecation, deprecate the public types and functions as well. --- exporter/stackdriver/stackdriver.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/exporter/stackdriver/stackdriver.go b/exporter/stackdriver/stackdriver.go index 793d3ae29..b4f152149 100644 --- a/exporter/stackdriver/stackdriver.go +++ b/exporter/stackdriver/stackdriver.go @@ -33,6 +33,8 @@ import ( ) // Options contains options for configuring the exporter. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. type Options struct { // ProjectID is the identifier of the Stackdriver // project the user is uploading the stats data to. @@ -81,6 +83,8 @@ type Options struct { // Exporter is a stats.Exporter and trace.Exporter // implementation that uploads data to Stackdriver. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. type Exporter struct { traceExporter *traceExporter statsExporter *statsExporter @@ -88,6 +92,8 @@ type Exporter struct { // NewExporter creates a new Exporter that implements both stats.Exporter and // trace.Exporter. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. func NewExporter(o Options) (*Exporter, error) { if o.ProjectID == "" { creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...) 
From 23f7442163d94a579d3e132b09797f950dfaa495 Mon Sep 17 00:00:00 2001 From: JBD Date: Mon, 18 Jun 2018 14:12:14 -0700 Subject: [PATCH 010/212] Do not exit if request fails (#793) This allows us to report the unsuccessful outgoing calls rather than killing the process. Also, logs should be proper sentences, they are capitalized. --- examples/grpc/helloworld_client/main.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/grpc/helloworld_client/main.go b/examples/grpc/helloworld_client/main.go index 10429a5ef..12d845090 100644 --- a/examples/grpc/helloworld_client/main.go +++ b/examples/grpc/helloworld_client/main.go @@ -46,7 +46,7 @@ func main() { // stats handler to enable stats and tracing. conn, err := grpc.Dial(address, grpc.WithStatsHandler(&ocgrpc.ClientHandler{}), grpc.WithInsecure()) if err != nil { - log.Fatalf("did not connect: %v", err) + log.Fatalf("Cannot connect: %v", err) } defer conn.Close() c := pb.NewGreeterClient(conn) @@ -60,10 +60,10 @@ func main() { for { r, err := c.SayHello(context.Background(), &pb.HelloRequest{Name: name}) if err != nil { - log.Fatalf("could not greet: %v", err) + log.Printf("Could not greet: %v", err) + } else { + log.Printf("Greeting: %s", r.Message) } - log.Printf("Greeting: %s", r.Message) - - time.Sleep(2 * time.Second) // Wait for the data collection. 
+ time.Sleep(2 * time.Second) } } From f886faf58fcce7e69458b5c7bc1f228d0063033b Mon Sep 17 00:00:00 2001 From: JBD Date: Mon, 18 Jun 2018 22:32:38 -0700 Subject: [PATCH 011/212] Add Datadog to the exporters list (#794) --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 6bafca507..1a4a58b5e 100644 --- a/README.md +++ b/README.md @@ -32,8 +32,7 @@ Currently, OpenCensus supports: * Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver] * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces - - +* [Datadog][exporter-datadog] for stats and traces ## Overview ![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) @@ -188,3 +187,4 @@ release in which the functionality was marked *Deprecated*. [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin [exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger [exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws +[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog From 212f272cd24d9e931330f7e68a48357050fd3c18 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 20 Jun 2018 18:09:40 -0500 Subject: [PATCH 012/212] Documentation and example cleanup (#787) * Replace my.org domain with example.com * Remove vestigial references to "subscribing" to views --- README.md | 6 ++--- examples/helloworld/main.go | 10 ++++---- exporter/prometheus/example/main.go | 6 ++--- exporter/stackdriver/examples/stats/main.go | 6 ++--- internal/readme/stats.go | 6 ++--- internal/readme/tags.go | 4 +-- plugin/ocgrpc/client_metrics.go | 4 +-- plugin/ocgrpc/example_test.go | 4 +-- plugin/ocgrpc/server_metrics.go | 4 +-- plugin/ochttp/client.go | 7 ++++-- plugin/ochttp/server.go | 9 ++++--- plugin/ochttp/stats.go | 10 +++++++- plugin/ochttp/trace.go | 2 +- stats/example_test.go | 4 ++- stats/measure.go | 28 +++++++++++++++------ stats/measure_float64.go | 8 
+++--- stats/measure_int64.go | 8 +++--- stats/view/example_test.go | 6 ++--- stats/view/view.go | 4 +-- stats/view/view_test.go | 2 +- stats/view/worker.go | 8 +++--- stats/view/worker_commands.go | 6 ++--- stats/view/worker_test.go | 8 +++--- tag/example_test.go | 6 ++--- trace/doc.go | 2 +- 25 files changed, 100 insertions(+), 68 deletions(-) diff --git a/README.md b/README.md index 1a4a58b5e..a0f3c560b 100644 --- a/README.md +++ b/README.md @@ -115,16 +115,16 @@ Here we create a view with the DistributionAggregation over our measure. [embedmd]:# (internal/readme/stats.go view) ```go if err := view.Register(&view.View{ - Name: "my.org/video_size_distribution", + Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), }); err != nil { - log.Fatalf("Failed to subscribe to view: %v", err) + log.Fatalf("Failed to register view: %v", err) } ``` -Subscribe begins collecting data for the view. Subscribed views' data will be +Register begins collecting data for the view. Registered views' data will be exported via the registered exporters. ## Traces diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index 316202de1..c93edcf29 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -49,24 +49,24 @@ func main() { trace.RegisterExporter(e) var err error - frontendKey, err = tag.NewKey("my.org/keys/frontend") + frontendKey, err = tag.NewKey("example.com/keys/frontend") if err != nil { log.Fatal(err) } - videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes) + videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) view.SetReportingPeriod(2 * time.Second) // Create view to see the processed video size // distribution broken down by frontend. // Register will allow view data to be exported. 
if err := view.Register(&view.View{ - Name: "my.org/views/video_size", + Name: "example.com/views/video_size", Description: "processed video size over time", TagKeys: []tag.Key{frontendKey}, Measure: videoSize, Aggregation: view.Distribution(0, 1<<16, 1<<32), }); err != nil { - log.Fatalf("Cannot subscribe to the view: %v", err) + log.Fatalf("Cannot register view: %v", err) } // Process the video. @@ -87,7 +87,7 @@ func process(ctx context.Context) { if err != nil { log.Fatal(err) } - ctx, span := trace.StartSpan(ctx, "my.org/ProcessVideo") + ctx, span := trace.StartSpan(ctx, "example.com/ProcessVideo") defer span.End() // Process video. // Record the processed video size. diff --git a/exporter/prometheus/example/main.go b/exporter/prometheus/example/main.go index ab1ecbde2..838cf3603 100644 --- a/exporter/prometheus/example/main.go +++ b/exporter/prometheus/example/main.go @@ -31,8 +31,8 @@ import ( // Create measures. The program will record measures for the size of // processed videos and the number of videos marked as spam. var ( - videoCount = stats.Int64("my.org/measures/video_count", "number of processed videos", stats.UnitDimensionless) - videoSize = stats.Int64("my.org/measures/video_size", "size of processed video", stats.UnitBytes) + videoCount = stats.Int64("example.com/measures/video_count", "number of processed videos", stats.UnitDimensionless) + videoSize = stats.Int64("example.com/measures/video_size", "size of processed video", stats.UnitBytes) ) func main() { @@ -62,7 +62,7 @@ func main() { Aggregation: view.Distribution(0, 1<<16, 1<<32), }, ); err != nil { - log.Fatalf("Cannot subscribe to the view: %v", err) + log.Fatalf("Cannot register the view: %v", err) } // Set reporting period to report data at every second. 
diff --git a/exporter/stackdriver/examples/stats/main.go b/exporter/stackdriver/examples/stats/main.go index ec30d3e73..1b2fb6ba4 100644 --- a/exporter/stackdriver/examples/stats/main.go +++ b/exporter/stackdriver/examples/stats/main.go @@ -30,7 +30,7 @@ import ( // Create measures. The program will record measures for the size of // processed videos and the nubmer of videos marked as spam. -var videoSize = stats.Int64("my.org/measure/video_size", "size of processed videos", stats.UnitBytes) +var videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) func main() { ctx := context.Background() @@ -60,12 +60,12 @@ func main() { // Subscribe will allow view data to be exported. // Once no longer need, you can unsubscribe from the view. if err := view.Register(&view.View{ - Name: "my.org/views/video_size_cum", + Name: "example.com/views/video_size_cum", Description: "processed video size over time", Measure: videoSize, Aggregation: view.Distribution(0, 1<<16, 1<<32), }); err != nil { - log.Fatalf("Cannot subscribe to the view: %v", err) + log.Fatalf("Cannot register the view: %v", err) } processVideo(ctx) diff --git a/internal/readme/stats.go b/internal/readme/stats.go index dae506ba3..e8a27ff98 100644 --- a/internal/readme/stats.go +++ b/internal/readme/stats.go @@ -29,7 +29,7 @@ import ( func statsExamples() { ctx := context.Background() - videoSize := stats.Int64("my.org/video_size", "processed video size", "MB") + videoSize := stats.Int64("example.com/video_size", "processed video size", "MB") // START aggs distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) @@ -41,12 +41,12 @@ func statsExamples() { // START view if err := view.Register(&view.View{ - Name: "my.org/video_size_distribution", + Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), }); err != nil { - log.Fatalf("Failed to 
subscribe to view: %v", err) + log.Fatalf("Failed to register view: %v", err) } // END view diff --git a/internal/readme/tags.go b/internal/readme/tags.go index 579e3e7e2..09d9ac12f 100644 --- a/internal/readme/tags.go +++ b/internal/readme/tags.go @@ -24,11 +24,11 @@ import ( func tagsExamples() { ctx := context.Background() - osKey, err := tag.NewKey("my.org/keys/user-os") + osKey, err := tag.NewKey("example.com/keys/user-os") if err != nil { log.Fatal(err) } - userIDKey, err := tag.NewKey("my.org/keys/user-id") + userIDKey, err := tag.NewKey("example.com/keys/user-id") if err != nil { log.Fatal(err) } diff --git a/plugin/ocgrpc/client_metrics.go b/plugin/ocgrpc/client_metrics.go index b8efacfb3..7d0352062 100644 --- a/plugin/ocgrpc/client_metrics.go +++ b/plugin/ocgrpc/client_metrics.go @@ -31,9 +31,9 @@ var ( ClientServerLatency = stats.Float64("grpc.io/client/server_latency", `Propagated from the server and should have the same value as "grpc.io/server/latency".`, stats.UnitMilliseconds) ) -// Predefined views may be subscribed to collect data for the above measures. +// Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are subscribed by +// package. These are declared as a convenience only; none are registered by // default. var ( ClientSentBytesPerRPCView = &view.View{ diff --git a/plugin/ocgrpc/example_test.go b/plugin/ocgrpc/example_test.go index a8d5759d5..9c0ca7c9d 100644 --- a/plugin/ocgrpc/example_test.go +++ b/plugin/ocgrpc/example_test.go @@ -23,7 +23,7 @@ import ( ) func ExampleClientHandler() { - // Subscribe views to collect data. + // Register views to collect data. if err := view.Register(ocgrpc.DefaultClientViews...); err != nil { log.Fatal(err) } @@ -38,7 +38,7 @@ func ExampleClientHandler() { } func ExampleServerHandler() { - // Subscribe to views to collect data. 
+ // Register views to collect data. if err := view.Register(ocgrpc.DefaultServerViews...); err != nil { log.Fatal(err) } diff --git a/plugin/ocgrpc/server_metrics.go b/plugin/ocgrpc/server_metrics.go index 02323f871..609d9ed24 100644 --- a/plugin/ocgrpc/server_metrics.go +++ b/plugin/ocgrpc/server_metrics.go @@ -34,9 +34,9 @@ var ( // mechanism to load these defaults from a common repository/config shared by // all supported languages. Likely a serialized protobuf of these defaults. -// Predefined views may be subscribed to collect data for the above measures. +// Predefined views may be registered to collect data for the above measures. // As always, you may also define your own custom views over measures collected by this -// package. These are declared as a convenience only; none are subscribed by +// package. These are declared as a convenience only; none are registered by // default. var ( ServerReceivedBytesPerRPCView = &view.View{ diff --git a/plugin/ochttp/client.go b/plugin/ochttp/client.go index 37f42b3b1..ad0464ff3 100644 --- a/plugin/ochttp/client.go +++ b/plugin/ochttp/client.go @@ -22,8 +22,11 @@ import ( ) // Transport is an http.RoundTripper that instruments all outgoing requests with -// stats and tracing. The zero value is intended to be a useful default, but for -// now it's recommended that you explicitly set Propagation. +// OpenCensus stats and tracing. +// +// The zero value is intended to be a useful default, but for +// now it's recommended that you explicitly set Propagation, since the default +// for this may change. type Transport struct { // Base may be set to wrap another http.RoundTripper that does the actual // requests. By default http.DefaultTransport is used. 
diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 4b3c855e5..24801fe12 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -30,16 +30,19 @@ import ( "go.opencensus.io/trace/propagation" ) -// Handler is a http.Handler that is aware of the incoming request's span. +// Handler is an http.Handler wrapper to instrument your HTTP server with +// OpenCensus. It supports both stats and tracing. // +// Tracing +// +// This handler is aware of the incoming request's span, reading it from request +// headers as configured using the Propagation field. // The extracted span can be accessed from the incoming request's // context. // // span := trace.FromContext(r.Context()) // // The server span will be automatically ended at the end of ServeHTTP. -// -// Incoming propagation mechanism is determined by the given HTTP propagators. type Handler struct { // Propagation defines how traces are propagated. If unspecified, // B3 propagation will be used. diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index 2bd11f6dd..19a882500 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -41,6 +41,10 @@ var ( // ClientRequestCount or ServerRequestCount, since it is recorded before the status is known. var ( // Host is the value of the HTTP Host header. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. Host, _ = tag.NewKey("http.host") // StatusCode is the numeric HTTP response status code, @@ -48,6 +52,10 @@ var ( StatusCode, _ = tag.NewKey("http.status") // Path is the URL path (not including query string) in the request. + // + // The value of this tag can be controlled by the HTTP client, so you need + // to watch out for potentially generating high-cardinality labels in your + // metrics backend if you use this tag in views. 
Path, _ = tag.NewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). @@ -61,7 +69,7 @@ var ( ) // Package ochttp provides some convenience views. -// You need to subscribe to the views for data to actually be collected. +// You need to register the views for data to actually be collected. var ( ClientRequestCountView = &view.View{ Name: "opencensus.io/http/client/request_count", diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 79bbfd193..2542d2fcc 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -146,7 +146,7 @@ func responseAttrs(resp *http.Response) []trace.Attribute { } } -// TraceStatus converts the HTTP status code to a trace.Status that +// TraceStatus is a utility to convert the HTTP status code to a trace.Status that // represents the outcome as closely as possible. func TraceStatus(httpStatusCode int, statusLine string) trace.Status { var code int32 diff --git a/stats/example_test.go b/stats/example_test.go index cbb5012ca..5520eac8e 100644 --- a/stats/example_test.go +++ b/stats/example_test.go @@ -24,8 +24,10 @@ func ExampleRecord() { ctx := context.Background() // Measures are usually declared as package-private global variables. - openConns := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless) + openConns := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless) // Instrumented packages call stats.Record() to record measuremens. stats.Record(ctx, openConns.M(124)) // Record 124 open connections. + + // Without any views or exporters registered, this statement has no observable effects. } diff --git a/stats/measure.go b/stats/measure.go index aa555c209..7b4b49c67 100644 --- a/stats/measure.go +++ b/stats/measure.go @@ -20,19 +20,31 @@ import ( "sync/atomic" ) -// Measure represents a type of metric to be tracked and recorded. 
-// For example, latency, request Mb/s, and response Mb/s are measures +// Measure represents a single numeric value to be tracked and recorded. +// For example, latency, request bytes, and response bytes could be measures // to collect from a server. // -// Each measure needs to be registered before being used. -// Measure constructors such as Int64 and -// Float64 automatically registers the measure -// by the given name. -// Each registered measure needs to be unique by name. -// Measures also have a description and a unit. +// Measures by themselves have no outside effects. In order to be exported, +// the measure needs to be used in a View. If no Views are defined over a +// measure, there is very little cost in recording it. type Measure interface { + // Name returns the name of this measure. + // + // Measure names are globally unique (among all libraries linked into your program). + // We recommend prefixing the measure name with a domain name relevant to your + // project or application. + // + // Measure names are never sent over the wire or exported to backends. + // They are only used to create Views. Name() string + + // Description returns the human-readable description of this measure. Description() string + + // Unit returns the units for the values this measure takes on. + // + // Units are encoded according to the case-sensitive abbreviations from the + // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html Unit() string } diff --git a/stats/measure_float64.go b/stats/measure_float64.go index 8de6b5221..da4b5a83b 100644 --- a/stats/measure_float64.go +++ b/stats/measure_float64.go @@ -15,7 +15,7 @@ package stats -// Float64Measure is a measure of type float64. +// Float64Measure is a measure for float64 values. type Float64Measure struct { md *measureDescriptor } @@ -44,8 +44,10 @@ func (m *Float64Measure) M(v float64) Measurement { return Measurement{m: m, v: v} } -// Float64 creates a new measure of type Float64Measure. 
-// It never returns an error. +// Float64 creates a new measure for float64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} diff --git a/stats/measure_int64.go b/stats/measure_int64.go index b6fd25f0d..5fedaad05 100644 --- a/stats/measure_int64.go +++ b/stats/measure_int64.go @@ -15,7 +15,7 @@ package stats -// Int64Measure is a measure of type int64. +// Int64Measure is a measure for int64 values. type Int64Measure struct { md *measureDescriptor } @@ -44,8 +44,10 @@ func (m *Int64Measure) M(v int64) Measurement { return Measurement{m: m, v: float64(v)} } -// Int64 creates a new measure of type Int64Measure. -// It never returns an error. +// Int64 creates a new measure for int64 values. +// +// See the documentation for interface Measure for more guidance on the +// parameters of this function. func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} diff --git a/stats/view/example_test.go b/stats/view/example_test.go index 0abb054d8..78556a313 100644 --- a/stats/view/example_test.go +++ b/stats/view/example_test.go @@ -23,11 +23,11 @@ import ( func Example() { // Measures are usually declared and used by instrumented packages. - m := stats.Int64("my.org/measure/openconns", "open connections", stats.UnitDimensionless) + m := stats.Int64("example.com/measure/openconns", "open connections", stats.UnitDimensionless) - // Views are usually subscribed in your application main function. + // Views are usually registered in your application main function. 
if err := view.Register(&view.View{ - Name: "my.org/views/openconns", + Name: "example.com/views/openconns", Description: "open connections", Measure: m, Aggregation: view.Distribution(0, 1000, 2000), diff --git a/stats/view/view.go b/stats/view/view.go index 87bf5d466..82a5753b4 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -71,10 +71,10 @@ func (v *View) same(other *View) bool { // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { if v.Measure == nil { - return fmt.Errorf("cannot subscribe view %q: measure not set", v.Name) + return fmt.Errorf("cannot register view %q: measure not set", v.Name) } if v.Aggregation == nil { - return fmt.Errorf("cannot subscribe view %q: aggregation not set", v.Name) + return fmt.Errorf("cannot register view %q: aggregation not set", v.Name) } if v.Name == "" { v.Name = v.Measure.Name() diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 28abb4c1e..45a4bd341 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -346,7 +346,7 @@ func TestViewSortedKeys(t *testing.T) { Measure: m, Aggregation: Sum(), }) - // Subscribe normalizes the view by sorting the tag keys, retrieve the normalized view + // Register normalizes the view by sorting the tag keys, retrieve the normalized view v := Find("sort_keys") want := []string{"a", "b", "c"} diff --git a/stats/view/worker.go b/stats/view/worker.go index ba9d7fccb..1ea0a8cbc 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -49,8 +49,8 @@ var defaultWorker *worker var defaultReportingDuration = 10 * time.Second -// Find returns a subscribed view associated with this name. -// If no subscribed view is found, nil is returned. +// Find returns a registered view associated with this name. +// If no registered view is found, nil is returned. 
func Find(name string) (v *View) { req := &getViewByNameReq{ name: name, @@ -62,7 +62,7 @@ func Find(name string) (v *View) { } // Register begins collecting data for the given views. -// Once a view is subscribed, it reports data to the registered exporters. +// Once a view is registered, it reports data to the registered exporters. func Register(views ...*View) error { for _, v := range views { if err := v.canonicalize(); err != nil { @@ -181,7 +181,7 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { } if x, ok := w.views[vi.view.Name]; ok { if !x.view.same(vi.view) { - return nil, fmt.Errorf("cannot subscribe view %q; a different view with the same name is already subscribed", v.Name) + return nil, fmt.Errorf("cannot register view %q; a different view with the same name is already registered", v.Name) } // the view is already registered so there is nothing to do and the diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index ef79ec383..d0dd00ce7 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -73,7 +73,7 @@ func (cmd *registerViewReq) handleCommand(w *worker) { } } -// unregisterFromViewReq is the command to unsubscribe to a view. Has no +// unregisterFromViewReq is the command to unregister to a view. Has no // impact on the data collection for client that are pulling data from the // library. type unregisterFromViewReq struct { @@ -143,7 +143,7 @@ type recordReq struct { func (cmd *recordReq) handleCommand(w *worker) { for _, m := range cmd.ms { - if (m == stats.Measurement{}) { // not subscribed + if (m == stats.Measurement{}) { // not registered continue } ref := w.getMeasureRef(m.Measure().Name()) @@ -154,7 +154,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } // setReportingPeriodReq is the command to modify the duration between -// reporting the collected data to the subscribed clients. +// reporting the collected data to the registered clients. 
type setReportingPeriodReq struct { d time.Duration c chan bool diff --git a/stats/view/worker_test.go b/stats/view/worker_test.go index 78aaa4fd5..de9e6180b 100644 --- a/stats/view/worker_test.go +++ b/stats/view/worker_test.go @@ -42,7 +42,7 @@ func Test_Worker_ViewRegistration(t *testing.T) { } tcs := []testCase{ { - "register and subscribe to v1ID", + "register v1ID", []registration{ { sc1, @@ -52,7 +52,7 @@ func Test_Worker_ViewRegistration(t *testing.T) { }, }, { - "register v1ID+v2ID, susbsribe to v1ID", + "register v1ID+v2ID", []registration{ { sc1, @@ -62,7 +62,7 @@ func Test_Worker_ViewRegistration(t *testing.T) { }, }, { - "register to v1ID; subscribe to v1ID and view with same ID", + "register to v1ID; ??? to v1ID and view with same ID", []registration{ { sc1, @@ -263,7 +263,7 @@ func TestReportUsage(t *testing.T) { SetReportingPeriod(25 * time.Millisecond) if err := Register(tt.view); err != nil { - t.Fatalf("%v: cannot subscribe: %v", tt.name, err) + t.Fatalf("%v: cannot register: %v", tt.name, err) } e := &countExporter{} diff --git a/tag/example_test.go b/tag/example_test.go index f18f5db2d..fe0c5d9e9 100644 --- a/tag/example_test.go +++ b/tag/example_test.go @@ -30,7 +30,7 @@ var ( func ExampleNewKey() { // Get a key to represent user OS. - key, err := tag.NewKey("my.org/keys/user-os") + key, err := tag.NewKey("example.com/keys/user-os") if err != nil { log.Fatal(err) } @@ -38,11 +38,11 @@ func ExampleNewKey() { } func ExampleNew() { - osKey, err := tag.NewKey("my.org/keys/user-os") + osKey, err := tag.NewKey("example.com/keys/user-os") if err != nil { log.Fatal(err) } - userIDKey, err := tag.NewKey("my.org/keys/user-id") + userIDKey, err := tag.NewKey("example.com/keys/user-id") if err != nil { log.Fatal(err) } diff --git a/trace/doc.go b/trace/doc.go index a2b54e58c..db00044b1 100644 --- a/trace/doc.go +++ b/trace/doc.go @@ -42,7 +42,7 @@ It is common to want to capture all the activity of a function call in a span. 
For this to work, the function must take a context.Context as a parameter. Add these two lines to the top of the function: - ctx, span := trace.StartSpan(ctx, "my.org/Run") + ctx, span := trace.StartSpan(ctx, "example.com/Run") defer span.End() StartSpan will create a new top-level span if the context From 2eab5e685b189d1945a32ffc086e22f3641ab1ad Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 21 Jun 2018 20:59:52 +0800 Subject: [PATCH 013/212] Span naming flexibility (#796) This PR enables more flexibility in span naming for various use cases. span.SetName() allows one to update the span name after creation time. This allows routing middlewares to update the span name to the matched route providing lower cardinality than path names (think REST services). ochttp client and server handlers now allow for the span name to be generated from pluggable functions defaulting to the already existing spanNameFromURL. This can be used if span naming conventions in a brown field deployment are different from OpenCensus defaults or if using URL constructors for REST services that can also report route templates instead of expanded URL path. --- plugin/ochttp/client.go | 10 +++++ plugin/ochttp/server.go | 12 +++++- plugin/ochttp/server_test.go | 2 +- plugin/ochttp/trace.go | 14 +++---- plugin/ochttp/trace_test.go | 71 ++++++++++++++++++++++++++++++++++-- trace/trace.go | 10 +++++ trace/trace_test.go | 51 ++++++++++++++++++++++++++ 7 files changed, 157 insertions(+), 13 deletions(-) diff --git a/plugin/ochttp/client.go b/plugin/ochttp/client.go index ad0464ff3..180792106 100644 --- a/plugin/ochttp/client.go +++ b/plugin/ochttp/client.go @@ -46,6 +46,11 @@ type Transport struct { // for spans started by this transport. StartOptions trace.StartOptions + // NameFromRequest holds the function to use for generating the span name + // from the information found in the outgoing HTTP Request. By default the + // name equals the URL Path. 
+ FormatSpanName func(*http.Request) string + // TODO: Implement tag propagation for HTTP. } @@ -57,6 +62,10 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if format == nil { format = defaultFormat } + spanNameFormatter := t.FormatSpanName + if spanNameFormatter == nil { + spanNameFormatter = spanNameFromURL + } rt = &traceTransport{ base: rt, format: format, @@ -64,6 +73,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { Sampler: t.StartOptions.Sampler, SpanKind: trace.SpanKindClient, }, + formatSpanName: spanNameFormatter, } rt = statsTransport{base: rt} return rt.RoundTrip(req) diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 24801fe12..fe2a6eb58 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -63,6 +63,11 @@ type Handler struct { // be added as a linked trace instead of being added as a parent of the // current trace. IsPublicEndpoint bool + + // FormatSpanName holds the function to use for generating the span name + // from the information found in the incoming HTTP Request. By default the + // name equals the URL Path. 
+ FormatSpanName func(*http.Request) string } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -79,7 +84,12 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - name := spanNameFromURL(r.URL) + var name string + if h.FormatSpanName == nil { + name = spanNameFromURL(r) + } else { + name = h.FormatSpanName(r) + } ctx := r.Context() var span *trace.Span sc, ok := h.extractSpanContext(r) diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index c1e58b61d..01683ec7f 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -309,7 +309,7 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) { }() inRes := tt.res inRes.Body = prc - tr := &traceTransport{base: &testResponseTransport{res: inRes}} + tr := &traceTransport{base: &testResponseTransport{res: inRes}, formatSpanName: spanNameFromURL} req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing"))) if err != nil { t.Fatalf("NewRequest error: %v", err) diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 2542d2fcc..ea066a2c6 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -17,7 +17,6 @@ package ochttp import ( "io" "net/http" - "net/url" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" @@ -39,9 +38,10 @@ const ( ) type traceTransport struct { - base http.RoundTripper - startOptions trace.StartOptions - format propagation.HTTPFormat + base http.RoundTripper + startOptions trace.StartOptions + format propagation.HTTPFormat + formatSpanName func(*http.Request) string } // TODO(jbd): Add message events for request and response size. @@ -50,7 +50,7 @@ type traceTransport struct { // The created span can follow a parent span, if a parent is presented in // the request's context. 
func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { - name := spanNameFromURL(req.URL) + name := t.formatSpanName(req) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. _, span := trace.StartSpan(req.Context(), name, @@ -127,8 +127,8 @@ func (t *traceTransport) CancelRequest(req *http.Request) { } } -func spanNameFromURL(u *url.URL) string { - return u.Path +func spanNameFromURL(req *http.Request) string { + return req.URL.Path } func requestAttrs(r *http.Request) []trace.Attribute { diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index be46cc955..bc9ce16f3 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -25,7 +25,6 @@ import ( "log" "net/http" "net/http/httptest" - "net/url" "reflect" "strings" "testing" @@ -36,6 +35,14 @@ import ( "go.opencensus.io/trace" ) +type testExporter struct { + spans []*trace.SpanData +} + +func (t *testExporter) ExportSpan(s *trace.SpanData) { + t.spans = append(t.spans, s) +} + type testTransport struct { ch chan *http.Request } @@ -357,11 +364,67 @@ func TestSpanNameFromURL(t *testing.T) { } for _, tt := range tests { t.Run(tt.u, func(t *testing.T) { - u, err := url.Parse(tt.u) + req, err := http.NewRequest("GET", tt.u, nil) + if err != nil { + t.Errorf("url issue = %v", err) + } + if got := spanNameFromURL(req); got != tt.want { + t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) + } + }) + } +} + +func TestFormatSpanName(t *testing.T) { + formatSpanName := func(r *http.Request) string { + return r.Method + " " + r.URL.Path + } + + handler := &Handler{ + Handler: http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + resp.Write([]byte("Hello, world!")) + }), + FormatSpanName: formatSpanName, + } + + server := httptest.NewServer(handler) + defer server.Close() + + client := &http.Client{ + Transport: &Transport{FormatSpanName: formatSpanName}, + } + + tests := []struct { + u string + want string + }{ + 
{ + u: "/hello?q=a", + want: "GET /hello", + }, + { + u: "/a/b?q=c", + want: "GET /a/b", + }, + } + + for _, tt := range tests { + t.Run(tt.u, func(t *testing.T) { + var te testExporter + trace.RegisterExporter(&te) + res, err := client.Get(server.URL + tt.u) if err != nil { - t.Errorf("url.Parse() = %v", err) + t.Fatalf("error creating request: %v", err) + } + res.Body.Close() + trace.UnregisterExporter(&te) + if want, got := 2, len(te.spans); want != got { + t.Fatalf("got exported spans %#v, wanted two spans", te.spans) + } + if got := te.spans[0].Name; got != tt.want { + t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } - if got := spanNameFromURL(u); got != tt.want { + if got := te.spans[1].Name; got != tt.want { t.Errorf("spanNameFromURL() = %v, want %v", got, tt.want) } }) diff --git a/trace/trace.go b/trace/trace.go index 19c6930ef..fc75b72f0 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -310,6 +310,16 @@ func (s *Span) SpanContext() SpanContext { return s.spanContext } +// SetName sets the name of the span, if it is recording events. +func (s *Span) SetName(name string) { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.Name = name + s.mu.Unlock() +} + // SetStatus sets the status of the span, if it is recording events. 
func (s *Span) SetStatus(status Status) { if !s.IsRecordingEvents() { diff --git a/trace/trace_test.go b/trace/trace_test.go index b5994506d..10309bac8 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -446,6 +446,57 @@ func TestMessageEvents(t *testing.T) { } } +func TestSetSpanName(t *testing.T) { + want := "SpanName-1" + span := startSpan(StartOptions{}) + span.SetName(want) + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + if got.Name != want { + t.Errorf("span.Name=%q; want %q", got.Name, want) + } +} + +func TestSetSpanNameUnsampledSpan(t *testing.T) { + var nilSpanData *SpanData + span := startSpan(StartOptions{Sampler: NeverSample()}) + span.SetName("NoopName") + + if want, got := nilSpanData, span.data; want != got { + t.Errorf("span.data=%+v; want %+v", got, want) + } +} + +func TestSetSpanNameAfterSpanEnd(t *testing.T) { + want := "SpanName-2" + span := startSpan(StartOptions{}) + span.SetName(want) + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + // updating name after span.End + span.SetName("NoopName") + + // exported span should not be updated by previous call to SetName + if got.Name != want { + t.Errorf("span.Name=%q; want %q", got.Name, want) + } + + // span should not be exported again + var te testExporter + RegisterExporter(&te) + span.End() + UnregisterExporter(&te) + if len(te.spans) != 0 { + t.Errorf("got exported spans %#v, wanted no spans", te.spans) + } +} + func TestSetSpanStatus(t *testing.T) { span := startSpan(StartOptions{}) span.SetStatus(Status{Code: int32(1), Message: "request failed"}) From 7f3f80768f1e52e92264253acafd719d2d0d5791 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 21 Jun 2018 11:40:57 -0500 Subject: [PATCH 014/212] Fix zpage.Handler (#798) The deprecated zpage.Handler was broken by 6edeb78a. 
--- zpages/zpages.go | 2 +- zpages/zpages_test.go | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+), 1 deletion(-) diff --git a/zpages/zpages.go b/zpages/zpages.go index 6d2305880..5929d1fe7 100644 --- a/zpages/zpages.go +++ b/zpages/zpages.go @@ -45,7 +45,7 @@ var Handler http.Handler func init() { mux := http.NewServeMux() - Handle(mux, "") + Handle(mux, "/") Handler = mux } diff --git a/zpages/zpages_test.go b/zpages/zpages_test.go index dc09d893d..c5d8d44ec 100644 --- a/zpages/zpages_test.go +++ b/zpages/zpages_test.go @@ -109,3 +109,21 @@ func TestGetZPages(t *testing.T) { }) } } + +func TestGetZPages_default(t *testing.T) { + server := httptest.NewServer(Handler) + defer server.Close() + tests := []string{"/rpcz", "/tracez"} + for _, tt := range tests { + t.Run(fmt.Sprintf("GET %s", tt), func(t *testing.T) { + res, err := http.Get(server.URL + tt) + if err != nil { + t.Error(err) + return + } + if got, want := res.StatusCode, http.StatusOK; got != want { + t.Errorf("res.StatusCode = %d; want %d", got, want) + } + }) + } +} From 48905db4a42827c009d77f5745a6a9125266823f Mon Sep 17 00:00:00 2001 From: Jason Mavandi Date: Thu, 21 Jun 2018 14:53:25 -0600 Subject: [PATCH 015/212] Fix stats/view.View comment (#800) The comment said to use the Subscribe function when it should say Register --- stats/view/view.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stats/view/view.go b/stats/view/view.go index 82a5753b4..22323e2c5 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -29,7 +29,7 @@ import ( ) // View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Subscribe function to be before data will be +// Views need to be passed to the Register function to be before data will be // collected and sent to Exporters. type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. 
From 420188b6cba8de9e6a59625fe1cdd32463129ccd Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 25 Jun 2018 13:07:35 -0500 Subject: [PATCH 016/212] Fix race in RetrieveData (#799) RetrieveData previously would return pointers to internal aggregators that might subsequently be mutated based on future view updates. Instead, have collectedRows always return an immutable snapshot of the collected aggregators (by calling clone on each one). Since we do this, we can also avoid a defensive copy on the normal export path. Fixes: #795 --- stats/view/collector.go | 5 +++-- stats/view/worker.go | 14 -------------- 2 files changed, 3 insertions(+), 16 deletions(-) diff --git a/stats/view/collector.go b/stats/view/collector.go index 863a5b62a..250395db2 100644 --- a/stats/view/collector.go +++ b/stats/view/collector.go @@ -40,11 +40,12 @@ func (c *collector) addSample(s string, v float64) { aggregator.addSample(v) } +// collectRows returns a snapshot of the collected Row values. func (c *collector) collectedRows(keys []tag.Key) []*Row { - var rows []*Row + rows := make([]*Row, 0, len(c.signatures)) for sig, aggregator := range c.signatures { tags := decodeTags([]byte(sig), keys) - row := &Row{tags, aggregator} + row := &Row{Tags: tags, Data: aggregator.clone()} rows = append(rows, row) } return rows diff --git a/stats/view/worker.go b/stats/view/worker.go index 1ea0a8cbc..ce2f86ab6 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -204,9 +204,6 @@ func (w *worker) reportUsage(now time.Time) { if !ok { w.startTimes[v] = now } - // Make sure collector is never going - // to mutate the exported data. 
- rows = deepCopyRowData(rows) viewData := &Data{ View: v.view, Start: w.startTimes[v], @@ -220,14 +217,3 @@ func (w *worker) reportUsage(now time.Time) { exportersMu.Unlock() } } - -func deepCopyRowData(rows []*Row) []*Row { - newRows := make([]*Row, 0, len(rows)) - for _, r := range rows { - newRows = append(newRows, &Row{ - Data: r.Data.clone(), - Tags: r.Tags, - }) - } - return newRows -} From 264a2a48d94c062252389fffbc308ba555e35166 Mon Sep 17 00:00:00 2001 From: Justin Gracenin <34716715+jgracenin@users.noreply.github.com> Date: Tue, 26 Jun 2018 08:27:59 -0400 Subject: [PATCH 017/212] zpages/rpcz: fix constant error total accumulation (#803) When there was any error on any gRPC method, the errors total kept accumulating at each interval on the /debug/rpcz page even though no additional errors were actually occurring. The Prometheus metrics looked correct, so I figured it had to be a problem with the rpcz page itself. It seems a guard was put in for this already with the map, however, the map was always empty due to this bug. I verified this fix locally. --- zpages/rpcz.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/zpages/rpcz.go b/zpages/rpcz.go index acdcacc25..30193d1db 100644 --- a/zpages/rpcz.go +++ b/zpages/rpcz.go @@ -261,7 +261,7 @@ func (s snapExporter) ExportView(vd *view.Data) { // Update field of s corresponding to the view. 
switch vd.View { case ocgrpc.ClientCompletedRPCsView: - if _, ok := haveResetErrors[method]; ok { + if _, ok := haveResetErrors[method]; !ok { haveResetErrors[method] = struct{}{} s.ErrorsTotal = 0 } @@ -288,7 +288,7 @@ func (s snapExporter) ExportView(vd *view.Data) { // currently unused case ocgrpc.ServerCompletedRPCsView: - if _, ok := haveResetErrors[method]; ok { + if _, ok := haveResetErrors[method]; !ok { haveResetErrors[method] = struct{}{} s.ErrorsTotal = 0 } From 140879bf49cb17276aaf28b5f6a04e251ff98473 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 26 Jun 2018 07:28:15 -0500 Subject: [PATCH 018/212] Do not enforce singleton property of Prometheus exporter (#801) It is unnecessarily defensive to enforce that only a single instance of the Prometheus exporter is created. Doing this gets in the way of using the exporter in systems where exporters are created dynamically at runtime based on configuration, such as Istio. --- exporter/prometheus/example_test.go | 2 +- exporter/prometheus/prometheus.go | 21 ++------------------- exporter/prometheus/prometheus_test.go | 25 +++---------------------- 3 files changed, 6 insertions(+), 42 deletions(-) diff --git a/exporter/prometheus/example_test.go b/exporter/prometheus/example_test.go index b17d3cbc7..073a8bdd5 100644 --- a/exporter/prometheus/example_test.go +++ b/exporter/prometheus/example_test.go @@ -29,7 +29,7 @@ func Example() { } view.RegisterExporter(exporter) - // Serve the scrap endpoint at localhost:9999. + // Serve the scrape endpoint on port 9999. http.Handle("/metrics", exporter) log.Fatal(http.ListenAndServe(":9999", nil)) } diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 3d78b8317..50665dcb1 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -12,14 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// Package prometheus contains a Prometheus exporter. -// -// Please note that this exporter is currently work in progress and not complete. +// Package prometheus contains a Prometheus exporter that supports exporting +// OpenCensus views as Prometheus metrics. package prometheus // import "go.opencensus.io/exporter/prometheus" import ( "bytes" - "errors" "fmt" "log" "net/http" @@ -51,23 +49,8 @@ type Options struct { OnError func(err error) } -var ( - newExporterOnce sync.Once - errSingletonExporter = errors.New("expecting only one exporter per instance") -) - // NewExporter returns an exporter that exports stats to Prometheus. -// Only one exporter should exist per instance func NewExporter(o Options) (*Exporter, error) { - var err = errSingletonExporter - var exporter *Exporter - newExporterOnce.Do(func() { - exporter, err = newExporter(o) - }) - return exporter, err -} - -func newExporter(o Options) (*Exporter, error) { if o.Registry == nil { o.Registry = prometheus.NewRegistry() } diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index af3c5b3c0..371da56d6 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -91,29 +91,10 @@ func TestOnlyCumulativeWindowSupported(t *testing.T) { } } -func TestSingletonExporter(t *testing.T) { - exp, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("NewExporter() = %v", err) - } - if exp == nil { - t.Fatal("Nil exporter") - } - - // Should all now fail - exp, err = NewExporter(Options{}) - if err == nil { - t.Fatal("NewExporter() = nil") - } - if exp != nil { - t.Fatal("Non-nil exporter") - } -} - func TestCollectNonRacy(t *testing.T) { // Despite enforcing the singleton, for this case we // need an exporter hence won't be using NewExporter. 
- exp, err := newExporter(Options{}) + exp, err := NewExporter(Options{}) if err != nil { t.Fatalf("NewExporter: %v", err) } @@ -202,7 +183,7 @@ func (vc *vCreator) createAndAppend(name, description string, keys []tag.Key, me } func TestMetricsEndpointOutput(t *testing.T) { - exporter, err := newExporter(Options{}) + exporter, err := NewExporter(Options{}) if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } @@ -276,7 +257,7 @@ func TestMetricsEndpointOutput(t *testing.T) { } func TestCumulativenessFromHistograms(t *testing.T) { - exporter, err := newExporter(Options{}) + exporter, err := NewExporter(Options{}) if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } From 464202fb079b1dea35fe9c0ec2d4d1432c8dbe78 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 28 Jun 2018 18:56:39 -0400 Subject: [PATCH 019/212] Add tag.DecodeEach to decode a serialized tag map (#806) This can be used to parse the tags in headers into a data structure other than tag.Map: it's useful for applications that want to read the tags header but don't use the stats package. --- tag/map_codec.go | 31 ++++++++++++++++++++++--------- 1 file changed, 22 insertions(+), 9 deletions(-) diff --git a/tag/map_codec.go b/tag/map_codec.go index 33be1f0d4..3e998950c 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -177,45 +177,58 @@ func Encode(m *Map) []byte { // Decode decodes the given []byte into a tag map. func Decode(bytes []byte) (*Map, error) { ts := newMap() + err := DecodeEach(bytes, ts.upsert) + if err != nil { + // no partial failures + return nil, err + } + return ts, nil +} +// DecodeEach decodes the given serialized tag map, calling handler for each +// tag key and value decoded. 
+func DecodeEach(bytes []byte, fn func(key Key, val string)) error { eg := &encoderGRPC{ buf: bytes, } if len(eg.buf) == 0 { - return ts, nil + return nil } version := eg.readByte() if version > tagsVersionID { - return nil, fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) + return fmt.Errorf("cannot decode: unsupported version: %q; supports only up to: %q", version, tagsVersionID) } for !eg.readEnded() { typ := keyType(eg.readByte()) if typ != keyTypeString { - return nil, fmt.Errorf("cannot decode: invalid key type: %q", typ) + return fmt.Errorf("cannot decode: invalid key type: %q", typ) } k, err := eg.readBytesWithVarintLen() if err != nil { - return nil, err + return err } v, err := eg.readBytesWithVarintLen() if err != nil { - return nil, err + return err } key, err := NewKey(string(k)) if err != nil { - return nil, err // no partial failures + return err } val := string(v) if !checkValue(val) { - return nil, errInvalidValue // no partial failures + return errInvalidValue + } + fn(key, val) + if err != nil { + return err } - ts.upsert(key, val) } - return ts, nil + return nil } From 5898014add478ccd0aec9904e2ac1f7f3cd49d15 Mon Sep 17 00:00:00 2001 From: JBD Date: Thu, 28 Jun 2018 16:41:21 -0700 Subject: [PATCH 020/212] Add step-by-step instructions to CONTRIBUTING (#807) Fixes #805. --- CONTRIBUTING.md | 34 +++++++++++++++++++++++++++++++++- 1 file changed, 33 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 6ec97c9ef..3f3aed396 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -21,4 +21,36 @@ All submissions, including submissions by project members, require review. We use GitHub pull requests for this purpose. Consult [GitHub Help] for more information on using pull requests. 
-[GitHub Help]: https://help.github.com/articles/about-pull-requests/ \ No newline at end of file +[GitHub Help]: https://help.github.com/articles/about-pull-requests/ + +## Instructions + +Fork the repo, checkout the upstream repo to your GOPATH by: + +``` +$ go get -d go.opencensus.io +``` + +Add your fork as an origin: + +``` +cd $(go env GOPATH)/src/go.opencensus.io +git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git +``` + +Run tests: + +``` +$ go test ./... +``` + +Checkout a new branch, make modifications and push the branch to your fork: + +``` +$ git checkout -b feature +# edit files +$ git commit +$ git push fork feature +``` + +Open a pull request against the main opencensus-go repo. From e262766cd0d230a1bb7c37281e345e465f19b41b Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 29 Jun 2018 07:59:06 -0700 Subject: [PATCH 021/212] Fix links to zpages in http example. (#808) --- examples/http/README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/http/README.md b/examples/http/README.md index 86517f72c..577fd3160 100644 --- a/examples/http/README.md +++ b/examples/http/README.md @@ -27,5 +27,5 @@ You will see traces and stats exported on the stdout. You can use one of the to upload collected data to the backend of your choice. 
You can also see the z-pages provided from the server: -* Traces: http://localhost:8081/tracez -* RPCs: http://localhost:8081/rpcz +* Traces: http://localhost:8081/debug/tracez +* RPCs: http://localhost:8081/debug/rpcz From d2694f19d26a511651863404fc63fa480551fa48 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 2 Jul 2018 13:20:09 -0400 Subject: [PATCH 022/212] Version bump to 0.15.0 (#809) --- exporterutil/version.go | 3 ++- opencensus.go | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/exporterutil/version.go b/exporterutil/version.go index d20109085..8572061fe 100644 --- a/exporterutil/version.go +++ b/exporterutil/version.go @@ -17,10 +17,11 @@ // Deprecated: Don't use this package. package exporterutil -import opencensus "go.opencensus.io" +import "go.opencensus.io" // Version is the current release version of OpenCensus in use. It is made // available for exporters to include in User-Agent-like metadata. +// Deprecated: Use opencensus.Version(). var Version = opencensus.Version() // TODO(jbd): Remove this package at the next release. diff --git a/opencensus.go b/opencensus.go index eb8e7213d..8b67867d8 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.14.0" + return "0.15.0" } From e16226179d0f7cb82bbdfed8cd824f39bb53e887 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 9 Jul 2018 10:44:02 -0700 Subject: [PATCH 023/212] Remove deprecated methods in line with deprecation policy (#812) --- plugin/ocgrpc/client_metrics.go | 9 --------- plugin/ochttp/server_test.go | 11 ++++++++--- plugin/ochttp/trace.go | 4 ++-- plugin/ochttp/trace_test.go | 2 +- trace/trace.go | 27 --------------------------- trace/trace_test.go | 2 +- 6 files changed, 12 insertions(+), 43 deletions(-) diff --git a/plugin/ocgrpc/client_metrics.go b/plugin/ocgrpc/client_metrics.go index 7d0352062..abe978b67 100644 --- a/plugin/ocgrpc/client_metrics.go +++ b/plugin/ocgrpc/client_metrics.go @@ -91,15 +91,6 @@ var ( TagKeys: []tag.Key{KeyClientMethod}, Aggregation: DefaultMillisecondsDistribution, } - - // Deprecated: This view is going to be removed, if you need it please define it - // yourself. - ClientRequestCountView = &view.View{ - Name: "Count of request messages per client RPC", - TagKeys: []tag.Key{KeyClientMethod}, - Measure: ClientRoundtripLatency, - Aggregation: view.Count(), - } ) // DefaultClientViews are the default client views provided by this package. 
diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 01683ec7f..ba2a2ba55 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -300,8 +300,7 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) { for _, tt := range tests { t.Run(tt.want.Message, func(t *testing.T) { - span := trace.NewSpan("testing", nil, trace.StartOptions{Sampler: trace.AlwaysSample()}) - ctx := trace.WithSpan(context.Background(), span) + ctx := context.Background() prc, pwc := io.Pipe() go func() { pwc.Write([]byte("Foo")) @@ -309,7 +308,13 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) { }() inRes := tt.res inRes.Body = prc - tr := &traceTransport{base: &testResponseTransport{res: inRes}, formatSpanName: spanNameFromURL} + tr := &traceTransport{ + base: &testResponseTransport{res: inRes}, + formatSpanName: spanNameFromURL, + startOptions: trace.StartOptions{ + Sampler: trace.AlwaysSample(), + }, + } req, err := http.NewRequest("POST", "https://example.org", bytes.NewReader([]byte("testing"))) if err != nil { t.Fatalf("NewRequest error: %v", err) diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index ea066a2c6..abb1a04ce 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -53,11 +53,11 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { name := t.formatSpanName(req) // TODO(jbd): Discuss whether we want to prefix // outgoing requests with Sent. 
- _, span := trace.StartSpan(req.Context(), name, + ctx, span := trace.StartSpan(req.Context(), name, trace.WithSampler(t.startOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) - req = req.WithContext(trace.WithSpan(req.Context(), span)) + req = req.WithContext(ctx) if t.format != nil { t.format.SpanContextToRequest(span.SpanContext(), req) } diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index bc9ce16f3..eb1728f5c 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -107,7 +107,7 @@ func TestTransport_RoundTrip(t *testing.T) { req, _ := http.NewRequest("GET", "http://foo.com", nil) if tt.parent != nil { - req = req.WithContext(trace.WithSpan(req.Context(), tt.parent)) + req = req.WithContext(trace.NewContext(req.Context(), tt.parent)) } rt.RoundTrip(req) diff --git a/trace/trace.go b/trace/trace.go index fc75b72f0..c1332ed37 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -98,13 +98,6 @@ func FromContext(ctx context.Context) *Span { return s } -// WithSpan returns a new context with the given Span attached. -// -// Deprecated: Use NewContext. -func WithSpan(parent context.Context, s *Span) context.Context { - return NewContext(parent, s) -} - // NewContext returns a new context with the given Span attached. func NewContext(parent context.Context, s *Span) context.Context { return context.WithValue(parent, contextKey{}, s) @@ -185,26 +178,6 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont return NewContext(ctx, span), span } -// NewSpan returns a new span. -// -// If parent is not nil, created span will be a child of the parent. -// -// Deprecated: Use StartSpan. 
-func NewSpan(name string, parent *Span, o StartOptions) *Span { - var parentSpanContext SpanContext - if parent != nil { - parentSpanContext = parent.SpanContext() - } - return startSpanInternal(name, parent != nil, parentSpanContext, false, o) -} - -// NewSpanWithRemoteParent returns a new span with the given parent SpanContext. -// -// Deprecated: Use StartSpanWithRemoteParent. -func NewSpanWithRemoteParent(name string, parent SpanContext, o StartOptions) *Span { - return startSpanInternal(name, true, parent, true, o) -} - func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { span := &Span{} span.spanContext = parent diff --git a/trace/trace_test.go b/trace/trace_test.go index 10309bac8..234531b97 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -43,7 +43,7 @@ func TestStrings(t *testing.T) { func TestFromContext(t *testing.T) { want := &Span{} - ctx := WithSpan(context.Background(), want) + ctx := NewContext(context.Background(), want) got := FromContext(ctx) if got != want { t.Errorf("got Span pointer %p want %p", got, want) From 58843c5f6dbecd49cbb0dae1fb97c477b6e7a544 Mon Sep 17 00:00:00 2001 From: JBD Date: Tue, 10 Jul 2018 13:08:43 -0700 Subject: [PATCH 024/212] Improve godoc for StartSpanXXX (#819) Fixes #817. --- trace/trace.go | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/trace/trace.go b/trace/trace.go index c1332ed37..3e640b745 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -147,6 +147,9 @@ func WithSampler(sampler Sampler) StartOption { // StartSpan starts a new child span of the current span in the context. If // there is no span in the context, creates a new trace and span. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. 
func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext @@ -167,6 +170,9 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont // // If the incoming context contains a parent, it ignores. StartSpanWithRemoteParent is // preferred for cases where the parent is propagated via an incoming request. +// +// Returned context contains the newly created span. You can use it to +// propagate the returned span in process. func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanContext, o ...StartOption) (context.Context, *Span) { var opts StartOptions for _, op := range o { From aa5d0668a6b0a557de73a5916fc0c1932b59acfb Mon Sep 17 00:00:00 2001 From: JBD Date: Tue, 10 Jul 2018 13:08:54 -0700 Subject: [PATCH 025/212] Remove the Stackdriver example (#818) Vendor-specific exporters are not a part of the core repo anymore. The go.opencensus.io/exporter/stackdriver is deprecated. The example importing the deprecated package is confusing the users. Fixes #816. --- exporter/stackdriver/examples/stats/main.go | 82 --------------------- 1 file changed, 82 deletions(-) delete mode 100644 exporter/stackdriver/examples/stats/main.go diff --git a/exporter/stackdriver/examples/stats/main.go b/exporter/stackdriver/examples/stats/main.go deleted file mode 100644 index 1b2fb6ba4..000000000 --- a/exporter/stackdriver/examples/stats/main.go +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -// Command stackdriver is an example program that collects data for -// video size. Collected data is exported to -// Stackdriver Monitoring. -package main - -import ( - "context" - "fmt" - "log" - "time" - - "go.opencensus.io/exporter/stackdriver" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -// Create measures. The program will record measures for the size of -// processed videos and the nubmer of videos marked as spam. -var videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) - -func main() { - ctx := context.Background() - - // Collected view data will be reported to Stackdriver Monitoring API - // via the Stackdriver exporter. - // - // In order to use the Stackdriver exporter, enable Stackdriver Monitoring API - // at https://console.cloud.google.com/apis/dashboard. - // - // Once API is enabled, you can use Google Application Default Credentials - // to setup the authorization. - // See https://developers.google.com/identity/protocols/application-default-credentials - // for more details. - exporter, err := stackdriver.NewExporter(stackdriver.Options{ - ProjectID: "project-id", // Google Cloud Console project ID. - }) - if err != nil { - log.Fatal(err) - } - view.RegisterExporter(exporter) - - // Set reporting period to report data at every second. - view.SetReportingPeriod(1 * time.Second) - - // Create view to see the processed video size cumulatively. - // Subscribe will allow view data to be exported. - // Once no longer need, you can unsubscribe from the view. 
- if err := view.Register(&view.View{ - Name: "example.com/views/video_size_cum", - Description: "processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(0, 1<<16, 1<<32), - }); err != nil { - log.Fatalf("Cannot register the view: %v", err) - } - - processVideo(ctx) - - // Wait for a duration longer than reporting duration to ensure the stats - // library reports the collected data. - fmt.Println("Wait longer than the reporting duration...") - time.Sleep(1 * time.Minute) -} - -func processVideo(ctx context.Context) { - // Do some processing and record stats. - stats.Record(ctx, videoSize.M(25648)) -} From 995d8cdbaafe7052f34a9d65ac97b0c10af9b416 Mon Sep 17 00:00:00 2001 From: JBD Date: Tue, 10 Jul 2018 16:16:13 -0700 Subject: [PATCH 026/212] Improve the README (#820) Add a proper tracing section that explains the basic trace tree, spans and propagation. Add note about the execution tracer support. Make few editorial changes. Fixes #815. --- README.md | 51 ++++++++++++++++++++++++++++++++++++++-- internal/readme/trace.go | 2 +- 2 files changed, 50 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index a0f3c560b..79840c81b 100644 --- a/README.md +++ b/README.md @@ -29,10 +29,11 @@ Currently, OpenCensus supports: * [Prometheus][exporter-prom] for stats * [OpenZipkin][exporter-zipkin] for traces -* Stackdriver [Monitoring][exporter-stackdriver] and [Trace][exporter-stackdriver] +* [Stackdriver][exporter-stackdriver] Monitoring for stats and Trace for traces * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces * [Datadog][exporter-datadog] for stats and traces + ## Overview ![OpenCensus Overview](https://i.imgur.com/cf4ElHE.jpg) @@ -129,12 +130,58 @@ exported via the registered exporters. ## Traces +A distributed trace tracks the progression of a single user request as +it is handled by the services and processes that make up an application. +Each step is called a span in the trace. 
Spans include metadata about the step, +including especially the time spent in the step, called the span’s latency. + +Below you see a trace and several spans underneath it. + +![Traces and spans](https://i.imgur.com/7hZwRVj.png) + +### Spans + +Span is the unit step in a trace. Each span has name, latency, status and +additional metadata. + +[embedmd]:# (internal/readme/trace.go startend) +```go +ctx, span := trace.StartSpan(ctx, "cache.Get") +defer span.End() +``` + +### Propagation + +Spans can have parents or can be root spans if they don't have any parents. +The current span is propagated in-process and across the network to allow associating +new child spans with the parent. + +In the same process, context.Context is used to propagate spans. +trace.StartSpan creates a new span as a root if the current context +doesn't contain a span. Or, it creates a child of the span that is +already in current context. The returned context can be used to keep +propagating the newly created span in the current context. + [embedmd]:# (internal/readme/trace.go startend) ```go -ctx, span := trace.StartSpan(ctx, "your choice of name") +ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() ``` +Across the network, OpenCensus provides different propagation +methods for different protocols. + +* gRPC integrations uses the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations uses Zipkin's [B3](https://github.com/openzipkin/b3-propagation) + by default but can be configured to use a custom propagation method by setting another + [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). + +## Execution Tracer + +With Go 1.11, OpenCensus Go will support integration with the Go execution tracer. +See [Debugging Latency in Go](https://medium.com/observability/debugging-latency-in-go-1-11-9f97a7910d68) +for an example of their mutual use. 
+ ## Profiles OpenCensus tags can be applied as profiler labels diff --git a/internal/readme/trace.go b/internal/readme/trace.go index e2c8383fc..12a0293f7 100644 --- a/internal/readme/trace.go +++ b/internal/readme/trace.go @@ -24,7 +24,7 @@ func traceExamples() { ctx := context.Background() // START startend - ctx, span := trace.StartSpan(ctx, "your choice of name") + ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() // END startend } From cfb8fcdb21f431d1589591005f462b89be6f799d Mon Sep 17 00:00:00 2001 From: JBD Date: Tue, 10 Jul 2018 19:32:34 -0700 Subject: [PATCH 027/212] Explain what the trace.StartSpan snippet does (#821) --- README.md | 9 ++++++++- internal/readme/trace.go | 2 ++ 2 files changed, 10 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 79840c81b..557c71591 100644 --- a/README.md +++ b/README.md @@ -141,13 +141,18 @@ Below you see a trace and several spans underneath it. ### Spans -Span is the unit step in a trace. Each span has name, latency, status and +Span is the unit step in a trace. Each span has a name, latency, status and additional metadata. +Below we are starting a span for a cache read and ending it +when we are done: + [embedmd]:# (internal/readme/trace.go startend) ```go ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() + +// Do work to get from cache. ``` ### Propagation @@ -166,6 +171,8 @@ propagating the newly created span in the current context. ```go ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() + +// Do work to get from cache. ``` Across the network, OpenCensus provides different propagation diff --git a/internal/readme/trace.go b/internal/readme/trace.go index 12a0293f7..43e147ef7 100644 --- a/internal/readme/trace.go +++ b/internal/readme/trace.go @@ -26,5 +26,7 @@ func traceExamples() { // START startend ctx, span := trace.StartSpan(ctx, "cache.Get") defer span.End() + + // Do work to get from cache. 
// END startend } From a7b334561e9c2b75823b12d4d7bc8f1e2ab2dbc4 Mon Sep 17 00:00:00 2001 From: JBD Date: Wed, 11 Jul 2018 13:20:27 -0700 Subject: [PATCH 028/212] Fix the impression that users cannot contribute exporters (#822) --- README.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 557c71591..0a8ef4222 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,10 @@ OpenCensus Go libraries require Go 1.8 or later. ## Exporters OpenCensus can export instrumentation data to various backends. -Currently, OpenCensus supports: +OpenCensus has exporter implementations for the following, users +can implement their own exporters by implementing the exporter interfaces +([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), +[trace](https://godoc.org/go.opencensus.io/trace#Exporter)): * [Prometheus][exporter-prom] for stats * [OpenZipkin][exporter-zipkin] for traces From d07d8eb4b371b7d79b49339c1438d939550533a8 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 11 Jul 2018 16:08:09 -0700 Subject: [PATCH 029/212] Add a getting started section with links to integrations (#823) --- README.md | 21 +++++++++++++-------- 1 file changed, 13 insertions(+), 8 deletions(-) diff --git a/README.md b/README.md index 0a8ef4222..9e6d99ab4 100644 --- a/README.md +++ b/README.md @@ -22,6 +22,18 @@ The use of vendoring or a dependency management tool is recommended. OpenCensus Go libraries require Go 1.8 or later. 
+## Getting Started + +The easiest way to get started using OpenCensus in your application is to use an existing +integration with your RPC framework: + +* [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) +* [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) +* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) + +If you're a framework not listed here, you could either implement your own middleware for your +framework or use [custom stats](#stats) and [spans](#spans) directly in your application. + ## Exporters OpenCensus can export instrumentation data to various backends. @@ -46,13 +58,6 @@ multiple services until there is a response. OpenCensus allows you to instrument your services and collect diagnostics data all through your services end-to-end. -Start with instrumenting HTTP and gRPC clients and servers, -then add additional custom instrumentation if needed. - -* [HTTP guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/http) -* [gRPC guide](https://github.com/census-instrumentation/opencensus-go/tree/master/examples/grpc) - - ## Tags Tags represent propagated key-value pairs. They are propagated using `context.Context` @@ -223,7 +228,7 @@ Before version 1.0.0, the following deprecation policy will be observed: No backwards-incompatible changes will be made except for the removal of symbols that have been marked as *Deprecated* for at least one minor release (e.g. 0.9.0 to 0.10.0). A release -removing the *Deprecated* functionality will be made no sooner than 28 days after the first +removing the *Deprecated* functionality will be made no sooner than 28 days after the first release in which the functionality was marked *Deprecated*. 
[travis-image]: https://travis-ci.org/census-instrumentation/opencensus-go.svg?branch=master From e1698d7839a5fe8c0e23dd35ca940e6014617dbc Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Mon, 16 Jul 2018 09:28:13 +0800 Subject: [PATCH 030/212] Add go-kit to list of integrated RPC frameworks (#828) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9e6d99ab4..fc934faec 100644 --- a/README.md +++ b/README.md @@ -30,13 +30,14 @@ integration with your RPC framework: * [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) * [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) * [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) +* [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) If you're a framework not listed here, you could either implement your own middleware for your framework or use [custom stats](#stats) and [spans](#spans) directly in your application. ## Exporters -OpenCensus can export instrumentation data to various backends. +OpenCensus can export instrumentation data to various backends. OpenCensus has exporter implementations for the following, users can implement their own exporters by implementing the exporter interfaces ([stats](https://godoc.org/go.opencensus.io/stats/view#Exporter), From 824f6a62b825d737557c28c68ab9a05310341cd2 Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Tue, 17 Jul 2018 12:03:10 -0700 Subject: [PATCH 031/212] README: add links to integrations (#831) Updates #825. 
Adds links for instrumented: * Groupcache * Redis drivers * Memcache driver * Caddy webserver * MongoDB driver --- README.md | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/README.md b/README.md index fc934faec..bc50ee44c 100644 --- a/README.md +++ b/README.md @@ -31,6 +31,12 @@ integration with your RPC framework: * [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) * [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) * [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) +* [Groupcache](https://godoc.org/github.com/orijtech/groupcache) +* [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) +* [MongoDB](https://godoc.org/github.com/orijtech/mongo-go-driver) +* [Redis gomodule/redigo](https://godoc.org/github.com/orijtech/redigo) +* [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) +* [Memcache](https://godoc.org/github.com/orijtech/gomemcache) If you're a framework not listed here, you could either implement your own middleware for your framework or use [custom stats](#stats) and [spans](#spans) directly in your application. From e1f41b1f502f5ce2c47a213fcee0721947995372 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 19 Jul 2018 10:41:14 -0700 Subject: [PATCH 032/212] Update issue templates (#835) --- .github/ISSUE_TEMPLATE/feature_request.md | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/feature_request.md diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 000000000..fc4444384 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,20 @@ +--- +name: Feature request +about: Suggest an idea for this project + +--- + +**NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in other the OpenCensus libraries in other languages. 
If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. + + +**Is your feature request related to a problem? Please describe.** +A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] + +**Describe the solution you'd like** +A clear and concise description of what you want to happen. + +**Describe alternatives you've considered** +A clear and concise description of any alternative solutions or features you've considered. + +**Additional context** +Add any other context or screenshots about the feature request here. From 2b984bd18b2e315de639909f8a9f35140c0e118b Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 21 Jul 2018 03:20:20 +0800 Subject: [PATCH 033/212] optimizes Span.End() to return faster if possible (#832) Span.End() optimizations: * only create spanData if needed for spanStore or export (sampled span with at least one registered exporter) * lock free exporter iteration * made gRPC test traceExporter safe for concurrent use --- plugin/ocgrpc/grpc_test.go | 4 ++++ trace/export.go | 34 ++++++++++++++++++++++++---------- trace/trace.go | 24 ++++++++++++------------ 3 files changed, 40 insertions(+), 22 deletions(-) diff --git a/plugin/ocgrpc/grpc_test.go b/plugin/ocgrpc/grpc_test.go index f0cba59ad..73b0f5c13 100644 --- a/plugin/ocgrpc/grpc_test.go +++ b/plugin/ocgrpc/grpc_test.go @@ -15,6 +15,7 @@ package ocgrpc import ( + "sync" "testing" "time" @@ -127,9 +128,12 @@ func TestServerHandler(t *testing.T) { } type traceExporter struct { + mu sync.Mutex buffer []*trace.SpanData } func (e *traceExporter) ExportSpan(sd *trace.SpanData) { + e.mu.Lock() e.buffer = append(e.buffer, sd) + e.mu.Unlock() } diff --git a/trace/export.go b/trace/export.go index c522550fa..77a8c7357 100644 --- a/trace/export.go +++ b/trace/export.go @@ -16,6 +16,7 @@ package trace import ( "sync" + "sync/atomic" "time" ) @@ -30,9 +31,11 @@ type Exporter interface { 
ExportSpan(s *SpanData) } +type exportersMap map[Exporter]struct{} + var ( - exportersMu sync.Mutex - exporters map[Exporter]struct{} + exporterMu sync.Mutex + exporters atomic.Value ) // RegisterExporter adds to the list of Exporters that will receive sampled @@ -40,20 +43,31 @@ var ( // // Binaries can register exporters, libraries shouldn't register exporters. func RegisterExporter(e Exporter) { - exportersMu.Lock() - if exporters == nil { - exporters = make(map[Exporter]struct{}) + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } } - exporters[e] = struct{}{} - exportersMu.Unlock() + new[e] = struct{}{} + exporters.Store(new) + exporterMu.Unlock() } // UnregisterExporter removes from the list of Exporters the Exporter that was // registered with the given name. func UnregisterExporter(e Exporter) { - exportersMu.Lock() - delete(exporters, e) - exportersMu.Unlock() + exporterMu.Lock() + new := make(exportersMap) + if old, ok := exporters.Load().(exportersMap); ok { + for k, v := range old { + new[k] = v + } + } + delete(new, e) + exporters.Store(new) + exporterMu.Unlock() } // SpanData contains all the information collected by a Span. diff --git a/trace/trace.go b/trace/trace.go index 3e640b745..38d206de2 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -248,19 +248,19 @@ func (s *Span) End() { if s.executionTracerTaskEnd != nil { s.executionTracerTaskEnd() } - // TODO: optimize to avoid this call if sd won't be used. - sd := s.makeSpanData() - sd.EndTime = internal.MonotonicEndTime(sd.StartTime) - if s.spanStore != nil { - s.spanStore.finished(s, sd) - } - if s.spanContext.IsSampled() { - // TODO: consider holding exportersMu for less time. 
- exportersMu.Lock() - for e := range exporters { - e.ExportSpan(sd) + exp, _ := exporters.Load().(exportersMap) + mustExport := s.spanContext.IsSampled() && len(exp) > 0 + if s.spanStore != nil || mustExport { + sd := s.makeSpanData() + sd.EndTime = internal.MonotonicEndTime(sd.StartTime) + if s.spanStore != nil { + s.spanStore.finished(s, sd) + } + if mustExport { + for e := range exp { + e.ExportSpan(sd) + } } - exportersMu.Unlock() } }) } From 648d530567c3e0c55fca6dda7309eda3600f7ab5 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Sat, 21 Jul 2018 03:22:23 +0800 Subject: [PATCH 034/212] made trackingresponsewriter a properly transparent wrapper (#830) --- plugin/ochttp/server.go | 267 ++++++++++++++++++++++++++++++----- plugin/ochttp/server_test.go | 68 +++++---- 2 files changed, 261 insertions(+), 74 deletions(-) diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index fe2a6eb58..5aa932c8a 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -15,10 +15,8 @@ package ochttp import ( - "bufio" "context" - "errors" - "net" + "io" "net/http" "strconv" "sync" @@ -139,7 +137,7 @@ func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.Respo track.reqSize = r.ContentLength } stats.Record(ctx, ServerRequestCount.M(1)) - return track, track.end + return track.wrappedResponseWriter(), track.end } type trackingResponseWriter struct { @@ -153,39 +151,9 @@ type trackingResponseWriter struct { writer http.ResponseWriter } -// Compile time assertions for widely used net/http interfaces -var _ http.CloseNotifier = (*trackingResponseWriter)(nil) -var _ http.Flusher = (*trackingResponseWriter)(nil) -var _ http.Hijacker = (*trackingResponseWriter)(nil) -var _ http.Pusher = (*trackingResponseWriter)(nil) +// Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var errHijackerUnimplemented = errors.New("ResponseWriter does not implement http.Hijacker") - -func (t
*trackingResponseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { - hj, ok := t.writer.(http.Hijacker) - if !ok { - return nil, nil, errHijackerUnimplemented - } - return hj.Hijack() -} - -func (t *trackingResponseWriter) CloseNotify() <-chan bool { - cn, ok := t.writer.(http.CloseNotifier) - if !ok { - return nil - } - return cn.CloseNotify() -} - -func (t *trackingResponseWriter) Push(target string, opts *http.PushOptions) error { - pusher, ok := t.writer.(http.Pusher) - if !ok { - return http.ErrNotSupported - } - return pusher.Push(target, opts) -} - func (t *trackingResponseWriter) end() { t.endOnce.Do(func() { if t.statusCode == 0 { @@ -223,8 +191,231 @@ func (t *trackingResponseWriter) WriteHeader(statusCode int) { t.statusLine = http.StatusText(t.statusCode) } -func (t *trackingResponseWriter) Flush() { - if flusher, ok := t.writer.(http.Flusher); ok { - flusher.Flush() +// wrappedResponseWriter returns a wrapped version of the original +// ResponseWriter and only implements the same combination of additional +// interfaces as the original. +// This implementation is based on https://github.com/felixge/httpsnoop. 
+func (t *trackingResponseWriter) wrappedResponseWriter() http.ResponseWriter { + var ( + hj, i0 = t.writer.(http.Hijacker) + cn, i1 = t.writer.(http.CloseNotifier) + pu, i2 = t.writer.(http.Pusher) + fl, i3 = t.writer.(http.Flusher) + rf, i4 = t.writer.(io.ReaderFrom) + ) + + switch { + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + }{t} + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + io.ReaderFrom + }{t, rf} + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Flusher + }{t, fl} + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{t, fl, rf} + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + }{t, pu} + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + io.ReaderFrom + }{t, pu, rf} + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + }{t, pu, fl} + case !i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Pusher + http.Flusher + io.ReaderFrom + }{t, pu, fl, rf} + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + }{t, cn} + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{t, cn, rf} + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + }{t, cn, fl} + case !i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, cn, fl, rf} + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{t, cn, pu} + case !i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, cn, pu, rf} + case 
!i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + }{t, cn, pu, fl} + case !i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, cn, pu, fl, rf} + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + }{t, hj} + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{t, hj, rf} + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + }{t, hj, fl} + case i0 && !i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Flusher + io.ReaderFrom + }{t, hj, fl, rf} + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + }{t, hj, pu} + case i0 && !i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + io.ReaderFrom + }{t, hj, pu, rf} + case i0 && !i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + }{t, hj, pu, fl} + case i0 && !i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, pu, fl, rf} + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + }{t, hj, cn} + case i0 && i1 && !i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + io.ReaderFrom + }{t, hj, cn, rf} + case i0 && i1 && !i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + }{t, hj, cn, fl} + case i0 && i1 && !i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Flusher + io.ReaderFrom + }{t, hj, cn, fl, rf} + case i0 && i1 && i2 && !i3 && !i4: 
+ return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + }{t, hj, cn, pu} + case i0 && i1 && i2 && !i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + io.ReaderFrom + }{t, hj, cn, pu, rf} + case i0 && i1 && i2 && i3 && !i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + }{t, hj, cn, pu, fl} + case i0 && i1 && i2 && i3 && i4: + return struct { + http.ResponseWriter + http.Hijacker + http.CloseNotifier + http.Pusher + http.Flusher + io.ReaderFrom + }{t, hj, cn, pu, fl, rf} + default: + return struct { + http.ResponseWriter + }{t} } } diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index ba2a2ba55..fc6317169 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -139,32 +139,20 @@ func (trw *testResponseWriterHijacker) Hijack() (net.Conn, *bufio.ReadWriter, er func TestUnitTestHandlerProxiesHijack(t *testing.T) { tests := []struct { - w http.ResponseWriter - wantErr string + w http.ResponseWriter + hasHijack bool }{ - {httptest.NewRecorder(), "ResponseWriter does not implement http.Hijacker"}, - {nil, "ResponseWriter does not implement http.Hijacker"}, - {new(testResponseWriterHijacker), ""}, + {httptest.NewRecorder(), false}, + {nil, false}, + {new(testResponseWriterHijacker), true}, } for i, tt := range tests { tw := &trackingResponseWriter{writer: tt.w} - conn, buf, err := tw.Hijack() - if tt.wantErr != "" { - if err == nil || !strings.Contains(err.Error(), tt.wantErr) { - t.Errorf("#%d got error (%v) want error substring (%q)", i, err, tt.wantErr) - } - if conn != nil { - t.Errorf("#%d inconsistent state got non-nil conn (%v)", i, conn) - } - if buf != nil { - t.Errorf("#%d inconsistent state got non-nil buf (%v)", i, buf) - } - continue - } - - if err != nil { - t.Errorf("#%d got unexpected error %v", i, err) + w := tw.wrappedResponseWriter() + _, ttHijacker := 
w.(http.Hijacker) + if want, have := tt.hasHijack, ttHijacker; want != have { + t.Errorf("#%d Hijack got %t, want %t", i, have, want) } } } @@ -234,20 +222,28 @@ func TestHandlerProxiesHijack_HTTP1(t *testing.T) { func TestHandlerProxiesHijack_HTTP2(t *testing.T) { cst := httptest.NewUnstartedServer(&Handler{ Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - conn, _, err := w.(http.Hijacker).Hijack() - if conn != nil { - data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto) - fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data) - conn.Close() - return - } + if _, ok := w.(http.Hijacker); ok { + conn, _, err := w.(http.Hijacker).Hijack() + if conn != nil { + data := fmt.Sprintf("Surprisingly got the Hijacker() Proto: %s", r.Proto) + fmt.Fprintf(conn, "%s 200\nContent-Length:%d\r\n\r\n%s", r.Proto, len(data), data) + conn.Close() + return + } - switch { - case err == nil: - fmt.Fprintf(w, "Unexpectedly did not encounter an error!") - default: - fmt.Fprintf(w, "Unexpected error: %v", err) - case strings.Contains(err.(error).Error(), "Hijack"): + switch { + case err == nil: + fmt.Fprintf(w, "Unexpectedly did not encounter an error!") + default: + fmt.Fprintf(w, "Unexpected error: %v", err) + case strings.Contains(err.(error).Error(), "Hijack"): + // Confirmed HTTP/2.0, let's stream to it + for i := 0; i < 5; i++ { + fmt.Fprintf(w, "%d\n", i) + w.(http.Flusher).Flush() + } + } + } else { // Confirmed HTTP/2.0, let's stream to it for i := 0; i < 5; i++ { fmt.Fprintf(w, "%d\n", i) @@ -386,7 +382,7 @@ func TestHandlerImplementsHTTPPusher(t *testing.T) { }{ { rt: h1Transport(), - wantBody: "true", + wantBody: "false", }, { rt: h2Transport(), @@ -394,7 +390,7 @@ func TestHandlerImplementsHTTPPusher(t *testing.T) { }, { rt: &Transport{Base: h1Transport()}, - wantBody: "true", + wantBody: "false", }, { rt: &Transport{Base: h2Transport()}, From 2e6ddffdf9365d86cc29c9ab45ca14123b41543c Mon Sep 17 00:00:00 
2001 From: Ramon Nogueira Date: Mon, 23 Jul 2018 10:06:59 -0700 Subject: [PATCH 035/212] Add Bug report template (#839) --- .github/ISSUE_TEMPLATE/bug_report.md | 21 +++++++++++++++++++++ 1 file changed, 21 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 000000000..19947e34c --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,21 @@ +--- +name: Bug report +about: Create a report to help us improve + +--- + +**Describe the bug** +A clear and concise description of what the bug is. + +**To Reproduce** +Steps to reproduce the behavior: +1. Go to '...' +2. Click on '....' +3. Scroll down to '....' +4. See error + +**Expected behavior** +A clear and concise description of what you expected to happen. + +**Additional context** +Add any other context about the problem here. From 6892f2d0b466dd54b1a46b88b6b37e292ee355f0 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 23 Jul 2018 11:33:23 -0700 Subject: [PATCH 036/212] Fix race to update config in trace.ApplyConfig (#840) --- trace/config.go | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/trace/config.go b/trace/config.go index d5473a798..e464671c7 100644 --- a/trace/config.go +++ b/trace/config.go @@ -14,7 +14,10 @@ package trace -import "go.opencensus.io/trace/internal" +import ( + "go.opencensus.io/trace/internal" + "sync" +) // Config represents the global tracing configuration. type Config struct { @@ -25,10 +28,14 @@ type Config struct { IDGenerator internal.IDGenerator } +var configWriteMu sync.Mutex + // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. 
func ApplyConfig(cfg Config) { + configWriteMu.Lock() + defer configWriteMu.Unlock() c := *config.Load().(*Config) if cfg.DefaultSampler != nil { c.DefaultSampler = cfg.DefaultSampler From 9260bbf87e9331af7c9a6ceb7d43dde112b93c39 Mon Sep 17 00:00:00 2001 From: JBD Date: Tue, 24 Jul 2018 08:42:34 -0700 Subject: [PATCH 037/212] Don't trace /healthz (#844) Kubernetes users end up having tons of /healthz traces, which is costly and of no value. Disable known health endpoint tracing until we have a better solution. --- plugin/ochttp/client.go | 3 +++ plugin/ochttp/server.go | 3 +++ plugin/ochttp/server_test.go | 41 ++++++++++++++++++++++++++++++++++++ plugin/ochttp/trace.go | 12 +++++++++++ 4 files changed, 59 insertions(+) diff --git a/plugin/ochttp/client.go b/plugin/ochttp/client.go index 180792106..55c2567e7 100644 --- a/plugin/ochttp/client.go +++ b/plugin/ochttp/client.go @@ -57,6 +57,9 @@ type Transport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats and traces for the request. func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { rt := t.base() + if isHealthEndpoint(req.URL.Path) { + return rt.RoundTrip(req) + } // TODO: remove excessive nesting of http.RoundTrippers here.
format := t.Propagation if format == nil { diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 5aa932c8a..72aa8c2d7 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -82,6 +82,9 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { + if isHealthEndpoint(r.URL.Path) { + return r, func() {} + } var name string if h.FormatSpanName == nil { name = spanNameFromURL(r) diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index fc6317169..29ba795f4 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -551,3 +551,44 @@ func TestHandlerImplementsHTTPCloseNotify(t *testing.T) { t.Errorf("HTTP2Log got\n\t%q\nwant\n\t%q", g, w) } } + +func TestIgnoreHealthz(t *testing.T) { + var spans int + + ts := httptest.NewServer(&Handler{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := trace.FromContext(r.Context()) + if span != nil { + spans++ + } + fmt.Fprint(w, "ok") + }), + StartOptions: trace.StartOptions{ + Sampler: trace.AlwaysSample(), + }, + }) + defer ts.Close() + + client := &http.Client{} + + for _, path := range []string{"/healthz", "/_ah/health"} { + resp, err := client.Get(ts.URL + path) + if err != nil { + t.Fatalf("Cannot GET %q: %v", path, err) + } + + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("Cannot read body for %q: %v", path, err) + } + + if got, want := string(b), "ok"; got != want { + t.Fatalf("Body for %q = %q; want %q", path, got, want) + } + resp.Body.Close() + } + + if spans > 0 { + t.Errorf("Got %v spans; want no spans", spans) + } +} diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index abb1a04ce..81abc3c2c 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -197,3 +197,15 @@ var codeToStr = map[int32]string{ trace.StatusCodeDataLoss: `"DATA_LOSS"`, 
trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, } + +func isHealthEndpoint(path string) bool { + // Health checking is pretty frequent and + // traces collected for health endpoints + // can be extremely noisy and expensive. + // Disable canonical health checking endpoints + // like /healthz and /_ah/health for now. + if path == "/healthz" || path == "/_ah/health" { + return true + } + return false +} From 78fb78ae664f0f83af1fb8acf8817a77dcd8f4ef Mon Sep 17 00:00:00 2001 From: Anton Tolchanov <1687799+knyar@users.noreply.github.com> Date: Fri, 27 Jul 2018 18:17:14 +0100 Subject: [PATCH 038/212] Report data for a given view when it is unregistered. (#846) This might help with #773. --- stats/view/worker.go | 42 +++++++++++++++++++---------------- stats/view/worker_commands.go | 3 +++ stats/view/worker_test.go | 39 +++++++++++++++++++++++++++++++- 3 files changed, 64 insertions(+), 20 deletions(-) diff --git a/stats/view/worker.go b/stats/view/worker.go index ce2f86ab6..fef7bf513 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -194,26 +194,30 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } +func (w *worker) reportView(v *viewInternal, now time.Time) { + if !v.isSubscribed() { + return + } + rows := v.collectedRows() + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + viewData := &Data{ + View: v.view, + Start: w.startTimes[v], + End: time.Now(), + Rows: rows, + } + exportersMu.Lock() + for e := range exporters { + e.ExportView(viewData) + } + exportersMu.Unlock() +} + func (w *worker) reportUsage(now time.Time) { for _, v := range w.views { - if !v.isSubscribed() { - continue - } - rows := v.collectedRows() - _, ok := w.startTimes[v] - if !ok { - w.startTimes[v] = now - } - viewData := &Data{ - View: v.view, - Start: w.startTimes[v], - End: time.Now(), - Rows: rows, - } - exportersMu.Lock() - for e := range exporters { - e.ExportView(viewData) - } - exportersMu.Unlock() + w.reportView(v, 
now) } } diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index d0dd00ce7..06c3c5464 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -88,6 +88,9 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { continue } + // Report pending data for this view before removing it. + w.reportView(vi, time.Now()) + vi.unsubscribe() if !vi.isSubscribed() { // this was the last subscription and view is not collecting anymore. diff --git a/stats/view/worker_test.go b/stats/view/worker_test.go index de9e6180b..d43014648 100644 --- a/stats/view/worker_test.go +++ b/stats/view/worker_test.go @@ -362,9 +362,45 @@ func TestWorkerStarttime(t *testing.T) { e.Unlock() } +func TestUnregisterReportsUsage(t *testing.T) { + restart() + ctx := context.Background() + + m1 := stats.Int64("measure", "desc", "unit") + view1 := &View{Name: "count", Measure: m1, Aggregation: Count()} + m2 := stats.Int64("measure2", "desc", "unit") + view2 := &View{Name: "count2", Measure: m2, Aggregation: Count()} + + SetReportingPeriod(time.Hour) + + if err := Register(view1, view2); err != nil { + t.Fatalf("cannot register: %v", err) + } + + e := &countExporter{} + RegisterExporter(e) + + stats.Record(ctx, m1.M(1)) + stats.Record(ctx, m2.M(1)) + stats.Record(ctx, m2.M(1)) + + Unregister(view2) + + // Unregister should only flush view2, so expect the count of 2. 
+ want := int64(2) + + e.Lock() + got := e.totalCount + e.Unlock() + if got != want { + t.Errorf("got count data = %v; want %v", got, want) + } +} + type countExporter struct { sync.Mutex - count int64 + count int64 + totalCount int64 } func (e *countExporter) ExportView(vd *Data) { @@ -376,6 +412,7 @@ func (e *countExporter) ExportView(vd *Data) { e.Lock() defer e.Unlock() e.count = d.Value + e.totalCount += d.Value } type vdExporter struct { From 72a330c8c96dbe960ec88dbbdae20b52f8118510 Mon Sep 17 00:00:00 2001 From: Yuichi Saito Date: Mon, 30 Jul 2018 13:49:42 +0900 Subject: [PATCH 039/212] Follow modification to TraceContext HTTP propagation specs (#847) --- plugin/ochttp/propagation/tracecontext/propagation.go | 2 +- plugin/ochttp/propagation/tracecontext/propagation_test.go | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/plugin/ochttp/propagation/tracecontext/propagation.go b/plugin/ochttp/propagation/tracecontext/propagation.go index aea546c64..e4f44eeab 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation.go +++ b/plugin/ochttp/propagation/tracecontext/propagation.go @@ -29,7 +29,7 @@ import ( const ( supportedVersion = 0 maxVersion = 254 - header = "Trace-Parent" + header = "traceparent" ) var _ propagation.HTTPFormat = (*HTTPFormat)(nil) diff --git a/plugin/ochttp/propagation/tracecontext/propagation_test.go b/plugin/ochttp/propagation/tracecontext/propagation_test.go index c92e4e6dc..a9f02762f 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation_test.go +++ b/plugin/ochttp/propagation/tracecontext/propagation_test.go @@ -73,7 +73,7 @@ func TestHTTPFormat_FromRequest(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) - req.Header.Set("Trace-Parent", tt.header) + req.Header.Set("traceparent", tt.header) gotSc, gotOk := f.SpanContextFromRequest(req) if !reflect.DeepEqual(gotSc, tt.wantSc) { @@ -106,7 +106,7 @@ func 
TestHTTPFormat_ToRequest(t *testing.T) { req, _ := http.NewRequest("GET", "http://example.com", nil) f.SpanContextToRequest(tt.sc, req) - h := req.Header.Get("Trace-Parent") + h := req.Header.Get("traceparent") if got, want := h, tt.wantHeader; got != want { t.Errorf("HTTPFormat.ToRequest() header = %v, want %v", got, want) } From 5f0e92a92d90ebeaf2662ca76981a5a21243fb91 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Thu, 2 Aug 2018 15:58:41 +0800 Subject: [PATCH 040/212] lockless version of defaultIDGenerator.NewSpanID (#851) * lockless version of defaultIDGenerator.NewSpanID --- trace/trace.go | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/trace/trace.go b/trace/trace.go index 38d206de2..e0d87400b 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -477,15 +477,11 @@ type defaultIDGenerator struct { } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. -// mu should be held while this function is called. func (gen *defaultIDGenerator) NewSpanID() [8]byte { - gen.Lock() - id := gen.nextSpanID - gen.nextSpanID += gen.spanIDInc - if gen.nextSpanID == 0 { - gen.nextSpanID += gen.spanIDInc + var id uint64 + for id == 0 { + id = atomic.AddUint64(&gen.nextSpanID, gen.spanIDInc) } - gen.Unlock() var sid [8]byte binary.LittleEndian.PutUint64(sid[:], id) return sid From df6bd8075012c9e7088c96bc588eac92ab36d7d1 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 2 Aug 2018 10:10:50 -0700 Subject: [PATCH 041/212] Remove Stackdriver exporter (#790) The Stackdriver exporter has been moved to a different repo: https://github.com/census-ecosystem/opencensus-go-exporter-stackdriver New import path: contrib.go.opencensus.io/exporter/stackdriver --- exporter/stackdriver/propagation/http.go | 94 -- exporter/stackdriver/propagation/http_test.go | 70 -- exporter/stackdriver/stackdriver.go | 148 --- exporter/stackdriver/stackdriver_test.go | 125 --- exporter/stackdriver/stats.go | 439 --------- 
exporter/stackdriver/stats_test.go | 866 ------------------ exporter/stackdriver/trace.go | 172 ---- exporter/stackdriver/trace_proto.go | 255 ------ exporter/stackdriver/trace_proto_test.go | 389 -------- exporter/stackdriver/trace_test.go | 62 -- 10 files changed, 2620 deletions(-) delete mode 100644 exporter/stackdriver/propagation/http.go delete mode 100644 exporter/stackdriver/propagation/http_test.go delete mode 100644 exporter/stackdriver/stackdriver.go delete mode 100644 exporter/stackdriver/stackdriver_test.go delete mode 100644 exporter/stackdriver/stats.go delete mode 100644 exporter/stackdriver/stats_test.go delete mode 100644 exporter/stackdriver/trace.go delete mode 100644 exporter/stackdriver/trace_proto.go delete mode 100644 exporter/stackdriver/trace_proto_test.go delete mode 100644 exporter/stackdriver/trace_test.go diff --git a/exporter/stackdriver/propagation/http.go b/exporter/stackdriver/propagation/http.go deleted file mode 100644 index 7cc02a110..000000000 --- a/exporter/stackdriver/propagation/http.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package propagation implement X-Cloud-Trace-Context header propagation used -// by Google Cloud products. 
-package propagation // import "go.opencensus.io/exporter/stackdriver/propagation" - -import ( - "encoding/binary" - "encoding/hex" - "fmt" - "net/http" - "strconv" - "strings" - - "go.opencensus.io/trace" - "go.opencensus.io/trace/propagation" -) - -const ( - httpHeaderMaxSize = 200 - httpHeader = `X-Cloud-Trace-Context` -) - -var _ propagation.HTTPFormat = (*HTTPFormat)(nil) - -// HTTPFormat implements propagation.HTTPFormat to propagate -// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. -type HTTPFormat struct{} - -// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. -func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(httpHeader) - // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. - // Return if the header is empty or missing, or if the header is unreasonably - // large, to avoid making unnecessary copies of a large string. - if h == "" || len(h) > httpHeaderMaxSize { - return trace.SpanContext{}, false - } - - // Parse the trace id field. - slash := strings.Index(h, `/`) - if slash == -1 { - return trace.SpanContext{}, false - } - tid, h := h[:slash], h[slash+1:] - - buf, err := hex.DecodeString(tid) - if err != nil { - return trace.SpanContext{}, false - } - copy(sc.TraceID[:], buf) - - // Parse the span id field. - spanstr := h - semicolon := strings.Index(h, `;`) - if semicolon != -1 { - spanstr, h = h[:semicolon], h[semicolon+1:] - } - sid, err := strconv.ParseUint(spanstr, 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - binary.BigEndian.PutUint64(sc.SpanID[:], sid) - - // Parse the options field, options field is optional. 
- if !strings.HasPrefix(h, "o=") { - return sc, true - } - o, err := strconv.ParseUint(h[2:], 10, 64) - if err != nil { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(o) - return sc, true -} - -// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. -func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { - sid := binary.BigEndian.Uint64(sc.SpanID[:]) - header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) - req.Header.Set(httpHeader, header) -} diff --git a/exporter/stackdriver/propagation/http_test.go b/exporter/stackdriver/propagation/http_test.go deleted file mode 100644 index 9ad93b714..000000000 --- a/exporter/stackdriver/propagation/http_test.go +++ /dev/null @@ -1,70 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package propagation - -import ( - "net/http" - "reflect" - "testing" - - "go.opencensus.io/trace" -) - -func TestHTTPFormat(t *testing.T) { - format := &HTTPFormat{} - traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} - spanID1 := [8]byte{255, 0, 0, 0, 0, 0, 0, 123} - spanID2 := [8]byte{0, 0, 0, 0, 0, 0, 0, 123} - tests := []struct { - incoming string - wantSpanContext trace.SpanContext - }{ - { - incoming: "105445aa7843bc8bf206b12000100000/18374686479671623803;o=1", - wantSpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID1, - TraceOptions: 1, - }, - }, - { - incoming: "105445aa7843bc8bf206b12000100000/123;o=0", - wantSpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID2, - TraceOptions: 0, - }, - }, - } - for _, tt := range tests { - t.Run(tt.incoming, func(t *testing.T) { - req, _ := http.NewRequest("GET", "http://example.com", nil) - req.Header.Add(httpHeader, tt.incoming) - sc, ok := format.SpanContextFromRequest(req) - if !ok { - t.Errorf("exporter.SpanContextFromRequest() = false; want true") - } - if got, want := sc, tt.wantSpanContext; !reflect.DeepEqual(got, want) { - t.Errorf("exporter.SpanContextFromRequest() returned span context %v; want %v", got, want) - } - - req, _ = http.NewRequest("GET", "http://example.com", nil) - format.SpanContextToRequest(sc, req) - if got, want := req.Header.Get(httpHeader), tt.incoming; got != want { - t.Errorf("exporter.SpanContextToRequest() returned header %q; want %q", got, want) - } - }) - } -} diff --git a/exporter/stackdriver/stackdriver.go b/exporter/stackdriver/stackdriver.go deleted file mode 100644 index b4f152149..000000000 --- a/exporter/stackdriver/stackdriver.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stackdriver has moved. -// -// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead. -package stackdriver // import "go.opencensus.io/exporter/stackdriver" - -import ( - "context" - "errors" - "fmt" - "log" - "time" - - traceapi "cloud.google.com/go/trace/apiv2" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" -) - -// Options contains options for configuring the exporter. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -type Options struct { - // ProjectID is the identifier of the Stackdriver - // project the user is uploading the stats data to. - // If not set, this will default to your "Application Default Credentials". - // For details see: https://developers.google.com/accounts/docs/application-default-credentials - ProjectID string - - // OnError is the hook to be called when there is - // an error uploading the stats or tracing data. - // If no custom hook is set, errors are logged. - // Optional. - OnError func(err error) - - // MonitoringClientOptions are additional options to be passed - // to the underlying Stackdriver Monitoring API client. - // Optional. - MonitoringClientOptions []option.ClientOption - - // TraceClientOptions are additional options to be passed - // to the underlying Stackdriver Trace API client. - // Optional. 
- TraceClientOptions []option.ClientOption - - // BundleDelayThreshold determines the max amount of time - // the exporter can wait before uploading view data to - // the backend. - // Optional. - BundleDelayThreshold time.Duration - - // BundleCountThreshold determines how many view data events - // can be buffered before batch uploading them to the backend. - // Optional. - BundleCountThreshold int - - // Resource is an optional field that represents the Stackdriver - // MonitoredResource, a resource that can be used for monitoring. - // If no custom ResourceDescriptor is set, a default MonitoredResource - // with type global and no resource labels will be used. - // Optional. - Resource *monitoredrespb.MonitoredResource - - // MetricPrefix overrides the OpenCensus prefix of a stackdriver metric. - // Optional. - MetricPrefix string -} - -// Exporter is a stats.Exporter and trace.Exporter -// implementation that uploads data to Stackdriver. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -type Exporter struct { - traceExporter *traceExporter - statsExporter *statsExporter -} - -// NewExporter creates a new Exporter that implements both stats.Exporter and -// trace.Exporter. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -func NewExporter(o Options) (*Exporter, error) { - if o.ProjectID == "" { - creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...) 
- if err != nil { - return nil, fmt.Errorf("stackdriver: %v", err) - } - if creds.ProjectID == "" { - return nil, errors.New("stackdriver: no project found with application default credentials") - } - o.ProjectID = creds.ProjectID - } - se, err := newStatsExporter(o) - if err != nil { - return nil, err - } - te, err := newTraceExporter(o) - if err != nil { - return nil, err - } - return &Exporter{ - statsExporter: se, - traceExporter: te, - }, nil -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -func (e *Exporter) ExportView(vd *view.Data) { - e.statsExporter.ExportView(vd) -} - -// ExportSpan exports a SpanData to Stackdriver Trace. -func (e *Exporter) ExportSpan(sd *trace.SpanData) { - e.traceExporter.ExportSpan(sd) -} - -// Flush waits for exported data to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose recent stats or spans. -func (e *Exporter) Flush() { - e.statsExporter.Flush() - e.traceExporter.Flush() -} - -func (o Options) handleError(err error) { - if o.OnError != nil { - o.OnError(err) - return - } - log.Printf("Error exporting to Stackdriver: %v", err) -} diff --git a/exporter/stackdriver/stackdriver_test.go b/exporter/stackdriver/stackdriver_test.go deleted file mode 100644 index 55cc81e93..000000000 --- a/exporter/stackdriver/stackdriver_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "go.opencensus.io/internal/testpb" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - "golang.org/x/net/context/ctxhttp" -) - -func TestExport(t *testing.T) { - projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") - if !ok { - t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") - } - - var exportErrors []error - - exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) { - exportErrors = append(exportErrors, err) - }}) - if err != nil { - t.Fatal(err) - } - defer exporter.Flush() - - trace.RegisterExporter(exporter) - defer trace.UnregisterExporter(exporter) - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - _, span := trace.StartSpan(context.Background(), "custom-span") - time.Sleep(10 * time.Millisecond) - span.End() - - // Test HTTP spans - - handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - _, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork") - spanContext := backgroundSpan.SpanContext() - time.Sleep(10 * time.Millisecond) - backgroundSpan.End() - - _, span := trace.StartSpan(req.Context(), "Sleep") - span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID}) - time.Sleep(150 * time.Millisecond) // do work - span.End() - rw.Write([]byte("Hello, world!")) - }) - server := httptest.NewServer(&ochttp.Handler{Handler: handler}) - defer server.Close() - - ctx := context.Background() - client := &http.Client{ - Transport: &ochttp.Transport{}, - } - resp, err := ctxhttp.Get(ctx, client, server.URL+"/test/123?abc=xyz") - if err != nil { - t.Fatal(err) - } - 
body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if want, got := "Hello, world!", string(body); want != got { - t.Fatalf("resp.Body = %q; want %q", want, got) - } - - // Flush twice to expose issue of exporter creating traces internally (#557) - exporter.Flush() - exporter.Flush() - - for _, err := range exportErrors { - t.Error(err) - } -} - -func TestGRPC(t *testing.T) { - projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") - if !ok { - t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") - } - - exporter, err := NewExporter(Options{ProjectID: projectID}) - if err != nil { - t.Fatal(err) - } - defer exporter.Flush() - - trace.RegisterExporter(exporter) - defer trace.UnregisterExporter(exporter) - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - client, done := testpb.NewTestClient(t) - defer done() - - client.Single(context.Background(), &testpb.FooRequest{SleepNanos: int64(42 * time.Millisecond)}) -} diff --git a/exporter/stackdriver/stats.go b/exporter/stackdriver/stats.go deleted file mode 100644 index 93635dac8..000000000 --- a/exporter/stackdriver/stats.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stackdriver - -import ( - "context" - "errors" - "fmt" - "os" - "path" - "strconv" - "strings" - "sync" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/ptypes/timestamp" - "google.golang.org/api/option" - "google.golang.org/api/support/bundler" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - labelpb "google.golang.org/genproto/googleapis/api/label" - "google.golang.org/genproto/googleapis/api/metric" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" -) - -const maxTimeSeriesPerUpload = 200 -const opencensusTaskKey = "opencensus_task" -const opencensusTaskDescription = "Opencensus task identifier" -const defaultDisplayNamePrefix = "OpenCensus" - -// statsExporter exports stats to the Stackdriver Monitoring. -type statsExporter struct { - bundler *bundler.Bundler - o Options - - createdViewsMu sync.Mutex - createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely - - c *monitoring.MetricClient - taskValue string -} - -// Enforces the singleton on NewExporter per projectID per process -// lest there will be races with Stackdriver. -var ( - seenProjectsMu sync.Mutex - seenProjects = make(map[string]bool) -) - -var ( - errBlankProjectID = errors.New("expecting a non-blank ProjectID") - errSingletonExporter = errors.New("only one exporter can be created per unique ProjectID per process") -) - -// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring. -// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent -// invocations of NewExporter with the same ProjectID will return an error. 
-func newStatsExporter(o Options) (*statsExporter, error) { - if strings.TrimSpace(o.ProjectID) == "" { - return nil, errBlankProjectID - } - - seenProjectsMu.Lock() - defer seenProjectsMu.Unlock() - _, seen := seenProjects[o.ProjectID] - if seen { - return nil, errSingletonExporter - } - - seenProjects[o.ProjectID] = true - - opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent)) - client, err := monitoring.NewMetricClient(context.Background(), opts...) - if err != nil { - return nil, err - } - e := &statsExporter{ - c: client, - o: o, - createdViews: make(map[string]*metricpb.MetricDescriptor), - taskValue: getTaskValue(), - } - e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { - vds := bundle.([]*view.Data) - e.handleUpload(vds...) - }) - e.bundler.DelayThreshold = e.o.BundleDelayThreshold - e.bundler.BundleCountThreshold = e.o.BundleCountThreshold - return e, nil -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -func (e *statsExporter) ExportView(vd *view.Data) { - if len(vd.Rows) == 0 { - return - } - err := e.bundler.Add(vd, 1) - switch err { - case nil: - return - case bundler.ErrOversizedItem: - go e.handleUpload(vd) - case bundler.ErrOverflow: - e.o.handleError(errors.New("failed to upload: buffer full")) - default: - e.o.handleError(err) - } -} - -// getTaskValue returns a task label value in the format of -// "go-@". -func getTaskValue() string { - hostname, err := os.Hostname() - if err != nil { - hostname = "localhost" - } - return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname -} - -// handleUpload handles uploading a slice -// of Data, as well as error handling. -func (e *statsExporter) handleUpload(vds ...*view.Data) { - if err := e.uploadStats(vds); err != nil { - e.o.handleError(err) - } -} - -// Flush waits for exported view data to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose recent spans. 
-func (e *statsExporter) Flush() { - e.bundler.Flush() -} - -func (e *statsExporter) uploadStats(vds []*view.Data) error { - ctx, span := trace.StartSpan( - context.Background(), - "go.opencensus.io/exporter/stackdriver.uploadStats", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - - for _, vd := range vds { - if err := e.createMeasure(ctx, vd); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - return err - } - } - for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { - if err := e.c.CreateTimeSeries(ctx, req); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - // TODO(jbd): Don't fail fast here, batch errors? - return err - } - } - return nil -} - -func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { - var reqs []*monitoringpb.CreateTimeSeriesRequest - var timeSeries []*monitoringpb.TimeSeries - - resource := e.o.Resource - if resource == nil { - resource = &monitoredrespb.MonitoredResource{ - Type: "global", - } - } - - for _, vd := range vds { - for _, row := range vd.Rows { - ts := &monitoringpb.TimeSeries{ - Metric: &metricpb.Metric{ - Type: namespacedViewName(vd.View.Name), - Labels: newLabels(row.Tags, e.taskValue), - }, - Resource: resource, - Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, - } - timeSeries = append(timeSeries, ts) - if len(timeSeries) == limit { - reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(e.o.ProjectID), - TimeSeries: timeSeries, - }) - timeSeries = []*monitoringpb.TimeSeries{} - } - } - } - if len(timeSeries) > 0 { - reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(e.o.ProjectID), - TimeSeries: timeSeries, - }) - } - return reqs -} - -// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring. 
-// An error will be returned if there is already a metric descriptor created with the same name -// but it has a different aggregation or keys. -func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error { - e.createdViewsMu.Lock() - defer e.createdViewsMu.Unlock() - - m := vd.View.Measure - agg := vd.View.Aggregation - tagKeys := vd.View.TagKeys - viewName := vd.View.Name - - if md, ok := e.createdViews[viewName]; ok { - return equalMeasureAggTagKeys(md, m, agg, tagKeys) - } - - metricType := namespacedViewName(viewName) - var valueType metricpb.MetricDescriptor_ValueType - unit := m.Unit() - - switch agg.Type { - case view.AggTypeCount: - valueType = metricpb.MetricDescriptor_INT64 - // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", - // because this view does not apply to the recorded values. - unit = stats.UnitDimensionless - case view.AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - case view.AggTypeDistribution: - valueType = metricpb.MetricDescriptor_DISTRIBUTION - case view.AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - default: - return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) - } - - metricKind := metricpb.MetricDescriptor_CUMULATIVE - displayNamePrefix := defaultDisplayNamePrefix - if e.o.MetricPrefix != "" { - displayNamePrefix = e.o.MetricPrefix - } - - md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{ - Name: fmt.Sprintf("projects/%s", e.o.ProjectID), - MetricDescriptor: &metricpb.MetricDescriptor{ - Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), - DisplayName: path.Join(displayNamePrefix, 
viewName), - Description: vd.View.Description, - Unit: unit, - Type: metricType, - MetricKind: metricKind, - ValueType: valueType, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, - }) - if err != nil { - return err - } - - e.createdViews[viewName] = md - return nil -} - -func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { - return &monitoringpb.Point{ - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: newTypedValue(v, row), - } -} - -func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { - switch v := r.Data.(type) { - case *view.CountData: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: v.Value, - }} - case *view.SumData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - case *view.DistributionData: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ - DistributionValue: &distributionpb.Distribution{ - Count: v.Count, - Mean: v.Mean, - SumOfSquaredDeviation: v.SumOfSquaredDev, - // TODO(songya): uncomment this once Stackdriver supports min/max. 
- // Range: &distributionpb.Distribution_Range{ - // Min: v.Min, - // Max: v.Max, - // }, - BucketOptions: &distributionpb.Distribution_BucketOptions{ - Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ - ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ - Bounds: vd.Aggregation.Buckets, - }, - }, - }, - BucketCounts: v.CountPerBucket, - }, - }} - case *view.LastValueData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - } - return nil -} - -func namespacedViewName(v string) string { - return path.Join("custom.googleapis.com", "opencensus", v) -} - -func newLabels(tags []tag.Tag, taskValue string) map[string]string { - labels := make(map[string]string) - for _, tag := range tags { - labels[internal.Sanitize(tag.Key.Name())] = tag.Value - } - labels[opencensusTaskKey] = taskValue - return labels -} - -func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor { - labelDescriptors := make([]*labelpb.LabelDescriptor, len(keys)+1) - for i, key := range keys { - labelDescriptors[i] = &labelpb.LabelDescriptor{ - Key: internal.Sanitize(key.Name()), - ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags - } - } - // Add a specific open census task id label. 
- labelDescriptors[len(keys)] = &labelpb.LabelDescriptor{ - Key: opencensusTaskKey, - ValueType: labelpb.LabelDescriptor_STRING, - Description: opencensusTaskDescription, - } - return labelDescriptors -} - -func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error { - var aggTypeMatch bool - switch md.ValueType { - case metricpb.MetricDescriptor_INT64: - if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) { - return fmt.Errorf("stackdriver metric descriptor was not created as int64") - } - aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue - case metricpb.MetricDescriptor_DOUBLE: - if _, ok := m.(*stats.Float64Measure); !ok { - return fmt.Errorf("stackdriver metric descriptor was not created as double") - } - aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue - case metricpb.MetricDescriptor_DISTRIBUTION: - aggTypeMatch = agg.Type == view.AggTypeDistribution - } - - if !aggTypeMatch { - return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) - } - - if len(md.Labels) != len(keys)+1 { - return errors.New("stackdriver metric descriptor was not created with the view labels") - } - - labels := make(map[string]struct{}, len(keys)+1) - for _, k := range keys { - labels[internal.Sanitize(k.Name())] = struct{}{} - } - labels[opencensusTaskKey] = struct{}{} - - for _, k := range md.Labels { - if _, ok := labels[k.Key]; !ok { - return fmt.Errorf("stackdriver metric descriptor was not created with label %q", k) - } - } - - return nil -} - -var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return c.CreateMetricDescriptor(ctx, mdr) -} - -var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr 
*monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return c.GetMetricDescriptor(ctx, mdr) -} diff --git a/exporter/stackdriver/stats_test.go b/exporter/stackdriver/stats_test.go deleted file mode 100644 index d734dc52e..000000000 --- a/exporter/stackdriver/stats_test.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "reflect" - "testing" - "time" - - "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/ptypes/timestamp" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "google.golang.org/api/option" - "google.golang.org/genproto/googleapis/api/label" - "google.golang.org/genproto/googleapis/api/metric" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" -) - -var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})} - -func TestRejectBlankProjectID(t *testing.T) { - ids := []string{"", " ", " "} - for _, projectID := range ids { - opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} - exp, err := newStatsExporter(opts) - if err == nil || exp != nil { - t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, 
err) - } - } -} - -// Ensure only one exporter per projectID per process, any -// subsequent invocations of NewExporter should fail. -func TestNewExporterSingletonPerProcess(t *testing.T) { - ids := []string{"open-census.io", "x", "fakeProjectID"} - for _, projectID := range ids { - opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} - exp, err := newStatsExporter(opts) - if err != nil { - t.Errorf("NewExporter() projectID = %q err = %q", projectID, err) - continue - } - if exp == nil { - t.Errorf("NewExporter returned a nil Exporter") - continue - } - exp, err = newStatsExporter(opts) - if err == nil || exp != nil { - t.Errorf("NewExporter more than once should fail; exp (%v) err %v", exp, err) - } - } -} - -func TestExporter_makeReq(t *testing.T) { - m := stats.Float64("test-measure", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "testview", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - distView := &view.View{ - Name: "distview", - Description: "desc", - Measure: m, - Aggregation: view.Distribution(2, 4, 7), - } - - start := time.Now() - end := start.Add(time.Minute) - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - sum1 := &view.SumData{Value: 5.5} - sum2 := &view.SumData{Value: -11.1} - last1 := view.LastValueData{Value: 100} - last2 := view.LastValueData{Value: 200} - taskValue := getTaskValue() - - tests := []struct { - name string - projID string - vd *view.Data - want []*monitoringpb.CreateTimeSeriesRequest - }{ - { - name: "count agg + timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, count1, count2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ 
- "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 10, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 16, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "sum agg + timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, sum1, sum2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: 
&monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 5.5, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: -11.1, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "last value agg", - projID: "proj-id", - vd: newTestViewData(v, start, end, &last1, &last2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 100, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - 
Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 200, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "dist agg + time window", - projID: "proj-id", - vd: newTestDistViewData(distView, start, end), - want: nil, //TODO: add expectation for distribution - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &statsExporter{ - o: Options{ProjectID: tt.projID}, - taskValue: taskValue, - } - resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) - if tt.want == nil { - t.Skip("Missing expectation") - } - if got, want := len(resps), len(tt.want); got != want { - t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) - } - if len(tt.want) == 0 { - return - } - if !reflect.DeepEqual(resps, tt.want) { - t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) - } - }) - } -} - -func TestExporter_makeReq_batching(t *testing.T) { - m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "view", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - - tests := []struct { - name string - iter int - limit int - wantReqs int - wantTotal int - }{ - { - name: "4 vds; 3 limit", - iter: 2, - limit: 3, - wantReqs: 2, - wantTotal: 4, - }, - { - name: "4 vds; 4 limit", - iter: 2, - limit: 4, - wantReqs: 1, - wantTotal: 4, - }, - { - name: "4 vds; 5 limit", - iter: 2, - limit: 5, - wantReqs: 1, - wantTotal: 4, - }, - } - - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - - for _, tt := range tests { - var vds []*view.Data - for i := 0; i < tt.iter; i++ { - vds = append(vds, newTestViewData(v, time.Now(), 
time.Now(), count1, count2)) - } - - e := &statsExporter{} - resps := e.makeReq(vds, tt.limit) - if len(resps) != tt.wantReqs { - t.Errorf("%v: got %v; want %d requests", tt.name, resps, tt.wantReqs) - } - - var total int - for _, resp := range resps { - total += len(resp.TimeSeries) - } - if got, want := total, tt.wantTotal; got != want { - t.Errorf("%v: len(resps[...].TimeSeries) = %d; want %d", tt.name, got, want) - } - } -} - -func TestEqualAggWindowTagKeys(t *testing.T) { - key1, _ := tag.NewKey("test-key-one") - key2, _ := tag.NewKey("test-key-two") - tests := []struct { - name string - md *metricpb.MetricDescriptor - m stats.Measure - agg *view.Aggregation - keys []tag.Key - wantErr bool - }{ - { - name: "count agg with in64 measure", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - { - name: "count agg with double measure", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - { - name: "sum agg double", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.Sum(), - wantErr: false, - }, - { - name: "sum agg int64", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Sum(), - wantErr: false, - }, - { - name: "last value agg 
double", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.LastValue(), - wantErr: false, - }, - { - name: "last value agg int64", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.LastValue(), - wantErr: false, - }, - { - name: "distribution - mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: true, - }, - { - name: "last value - measure mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.LastValue(), - wantErr: true, - }, - { - name: "distribution agg with keys", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - Labels: []*label.LabelDescriptor{ - {Key: "test_key_one"}, - {Key: "test_key_two"}, - {Key: opencensusTaskKey}, - }, - }, - m: stats.Int64("name", "", ""), - agg: view.Distribution(), - keys: []tag.Key{key1, key2}, - wantErr: false, - }, - { - name: "distribution agg with keys -- mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - }, - m: stats.Int64("name", "", ""), - agg: view.Distribution(), - keys: []tag.Key{key1, key2}, - wantErr: true, - }, - { - 
name: "count agg with pointers", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys) - if err != nil && !tt.wantErr { - t.Errorf("equalAggTagKeys() = %q; want no error", err) - } - if err == nil && tt.wantErr { - t.Errorf("equalAggTagKeys() = %q; want error", err) - } - - }) - } -} - -func TestExporter_createMeasure(t *testing.T) { - oldCreateMetricDescriptor := createMetricDescriptor - - defer func() { - createMetricDescriptor = oldCreateMetricDescriptor - }() - - key, _ := tag.NewKey("test-key-one") - m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) - - v := &view.View{ - Name: "test_view_sum", - Description: "view_description", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Sum(), - } - - data := &view.CountData{Value: 0} - vd := newTestViewData(v, time.Now(), time.Now(), data, data) - - e := &statsExporter{ - createdViews: make(map[string]*metricpb.MetricDescriptor), - o: Options{ProjectID: "test_project"}, - } - - var createCalls int - createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - createCalls++ - if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.ValueType, 
metricpb.MetricDescriptor_DOUBLE; got != want { - t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { - t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { - t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want { - t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) - } - return &metric.MetricDescriptor{ - DisplayName: "OpenCensus/test_view_sum", - Description: "view_description", - Unit: stats.UnitMilliseconds, - Type: "custom.googleapis.com/opencensus/test_view_sum", - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, nil - } - - ctx := context.Background() - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } - if count := createCalls; count != 1 { - t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count) - } - if count := len(e.createdViews); count != 1 { - t.Errorf("len(e.createdViews) = %v; want 1", count) - } -} - -func TestExporter_createMeasure_CountAggregation(t *testing.T) { - oldCreateMetricDescriptor := createMetricDescriptor - - defer func() { - createMetricDescriptor = oldCreateMetricDescriptor - }() - - key, _ := tag.NewKey("test-key-one") - m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) - - v := &view.View{ - Name: 
"test_view_count", - Description: "view_description", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - - data := &view.CountData{Value: 0} - vd := newTestViewData(v, time.Now(), time.Now(), data, data) - - e := &statsExporter{ - createdViews: make(map[string]*metricpb.MetricDescriptor), - o: Options{ProjectID: "test_project"}, - } - - createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want { - t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { - t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { - t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want { - t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) - } - return &metric.MetricDescriptor{ - DisplayName: "OpenCensus/test_view_sum", - Description: "view_description", - Unit: stats.UnitDimensionless, - Type: "custom.googleapis.com/opencensus/test_view_count", - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - 
ValueType: metricpb.MetricDescriptor_INT64, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, nil - } - ctx := context.Background() - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } -} - -func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) { - m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "testview", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - if err := view.Register(v); err != nil { - t.Fatal(err) - } - defer view.Unregister(v) - - start := time.Now() - end := start.Add(time.Minute) - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - taskValue := getTaskValue() - - resource := &monitoredrespb.MonitoredResource{ - Type: "gce_instance", - Labels: map[string]string{"instance_id": "instance", "zone": "us-west-1a"}, - } - - tests := []struct { - name string - projID string - vd *view.Data - want []*monitoringpb.CreateTimeSeriesRequest - }{ - { - name: "count agg timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, count1, count2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: resource, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 10, 
- }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: resource, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 16, - }}, - }, - }, - }, - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &statsExporter{ - o: Options{ProjectID: tt.projID, Resource: resource}, - taskValue: taskValue, - } - resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) - if got, want := len(resps), len(tt.want); got != want { - t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) - } - if len(tt.want) == 0 { - return - } - if !reflect.DeepEqual(resps, tt.want) { - t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) - } - }) - } -} - -func newTestViewData(v *view.View, start, end time.Time, data1, data2 view.AggregationData) *view.Data { - key, _ := tag.NewKey("test-key") - tag1 := tag.Tag{Key: key, Value: "test-value-1"} - tag2 := tag.Tag{Key: key, Value: "test-value-2"} - return &view.Data{ - View: v, - Rows: []*view.Row{ - { - Tags: []tag.Tag{tag1}, - Data: data1, - }, - { - Tags: []tag.Tag{tag2}, - Data: data2, - }, - }, - Start: start, - End: end, - } -} - -func newTestDistViewData(v *view.View, start, end time.Time) *view.Data { - return &view.Data{ - View: v, - Rows: []*view.Row{ - {Data: &view.DistributionData{ - Count: 5, - Min: 1, - Max: 7, - Mean: 3, - SumOfSquaredDev: 1.5, - CountPerBucket: []int64{2, 2, 1}, - }}, - }, - Start: start, - End: end, - } -} diff --git a/exporter/stackdriver/trace.go 
b/exporter/stackdriver/trace.go deleted file mode 100644 index e3fd6bab7..000000000 --- a/exporter/stackdriver/trace.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - tracingclient "cloud.google.com/go/trace/apiv2" - "go.opencensus.io/trace" - "google.golang.org/api/support/bundler" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" -) - -// traceExporter is an implementation of trace.Exporter that uploads spans to -// Stackdriver. -// -type traceExporter struct { - o Options - projectID string - bundler *bundler.Bundler - // uploadFn defaults to uploadSpans; it can be replaced for tests. - uploadFn func(spans []*trace.SpanData) - overflowLogger - client *tracingclient.Client -} - -var _ trace.Exporter = (*traceExporter)(nil) - -func newTraceExporter(o Options) (*traceExporter, error) { - client, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...) 
- if err != nil { - return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) - } - return newTraceExporterWithClient(o, client), nil -} - -func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { - e := &traceExporter{ - projectID: o.ProjectID, - client: c, - o: o, - } - bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) { - e.uploadFn(bundle.([]*trace.SpanData)) - }) - if o.BundleDelayThreshold > 0 { - bundler.DelayThreshold = o.BundleDelayThreshold - } else { - bundler.DelayThreshold = 2 * time.Second - } - if o.BundleCountThreshold > 0 { - bundler.BundleCountThreshold = o.BundleCountThreshold - } else { - bundler.BundleCountThreshold = 50 - } - // The measured "bytes" are not really bytes, see exportReceiver. - bundler.BundleByteThreshold = bundler.BundleCountThreshold * 200 - bundler.BundleByteLimit = bundler.BundleCountThreshold * 1000 - bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000 - - e.bundler = bundler - e.uploadFn = e.uploadSpans - return e -} - -// ExportSpan exports a SpanData to Stackdriver Trace. -func (e *traceExporter) ExportSpan(s *trace.SpanData) { - // n is a length heuristic. - n := 1 - n += len(s.Attributes) - n += len(s.Annotations) - n += len(s.MessageEvents) - err := e.bundler.Add(s, n) - switch err { - case nil: - return - case bundler.ErrOversizedItem: - go e.uploadFn([]*trace.SpanData{s}) - case bundler.ErrOverflow: - e.overflowLogger.log() - default: - e.o.handleError(err) - } -} - -// Flush waits for exported trace spans to be uploaded. -// -// This is useful if your program is ending and you do not want to lose recent -// spans. -func (e *traceExporter) Flush() { - e.bundler.Flush() -} - -// uploadSpans uploads a set of spans to Stackdriver. 
-func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { - req := tracepb.BatchWriteSpansRequest{ - Name: "projects/" + e.projectID, - Spans: make([]*tracepb.Span, 0, len(spans)), - } - for _, span := range spans { - req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID)) - } - // Create a never-sampled span to prevent traces associated with exporter. - ctx, span := trace.StartSpan( // TODO: add timeouts - context.Background(), - "go.opencensus.io/exporter/stackdriver.uploadSpans", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) - - err := e.client.BatchWriteSpans(ctx, &req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - e.o.handleError(err) - } -} - -// overflowLogger ensures that at most one overflow error log message is -// written every 5 seconds. -type overflowLogger struct { - mu sync.Mutex - pause bool - accum int -} - -func (o *overflowLogger) delay() { - o.pause = true - time.AfterFunc(5*time.Second, func() { - o.mu.Lock() - defer o.mu.Unlock() - switch { - case o.accum == 0: - o.pause = false - case o.accum == 1: - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.accum = 0 - o.delay() - default: - log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) - o.accum = 0 - o.delay() - } - }) -} - -func (o *overflowLogger) log() { - o.mu.Lock() - defer o.mu.Unlock() - if !o.pause { - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.delay() - } else { - o.accum++ - } -} diff --git a/exporter/stackdriver/trace_proto.go b/exporter/stackdriver/trace_proto.go deleted file mode 100644 index 5c2dc2d46..000000000 --- a/exporter/stackdriver/trace_proto.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "math" - "time" - "unicode/utf8" - - "go.opencensus.io/internal" - "go.opencensus.io/plugin/ochttp" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "go.opencensus.io/trace" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 - maxAttributeStringValue = 256 - agentLabel = "g.co/agent" - - labelHTTPHost = `/http/host` - labelHTTPMethod = `/http/method` - labelHTTPStatusCode = `/http/status_code` - labelHTTPPath = `/http/path` - labelHTTPUserAgent = `/http/user_agent` -) - -// proto returns a protocol buffer representation of a SpanData. -func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span { - if s == nil { - return nil - } - - traceIDString := s.SpanContext.TraceID.String() - spanIDString := s.SpanContext.SpanID.String() - - name := s.Name - switch s.SpanKind { - case trace.SpanKindClient: - name = "Sent." + name - case trace.SpanKindServer: - name = "Recv." 
+ name - } - - sp := &tracepb.Span{ - Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, - SpanId: spanIDString, - DisplayName: trunc(name, 128), - StartTime: timestampProto(s.StartTime), - EndTime: timestampProto(s.EndTime), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, - } - if p := s.ParentSpanID; p != (trace.SpanID{}) { - sp.ParentSpanId = p.String() - } - if s.Status.Code != 0 || s.Status.Message != "" { - sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} - } - - var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int - copyAttributes(&sp.Attributes, s.Attributes) - - as := s.Annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - droppedAnnotationsCount = len(as) - i - break - } - annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} - copyAttributes(&annotation.Attributes, a.Attributes) - event := &tracepb.Span_TimeEvent{ - Time: timestampProto(a.Time), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, - } - annotations++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) - } - - if sp.Attributes == nil { - sp.Attributes = &tracepb.Span_Attributes{ - AttributeMap: make(map[string]*tracepb.AttributeValue), - } - } - sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: trunc(internal.UserAgent, maxAttributeStringValue), - }, - } - - es := s.MessageEvents - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ - Time: 
timestampProto(e.Time), - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), - Id: e.MessageID, - UncompressedSizeBytes: e.UncompressedByteSize, - CompressedSizeBytes: e.CompressedByteSize, - }, - }, - }) - } - - if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - } - - if len(s.Links) > 0 { - sp.Links = &tracepb.Span_Links{} - sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) - for _, l := range s.Links { - link := &tracepb.Span_Link{ - TraceId: l.TraceID.String(), - SpanId: l.SpanID.String(), - Type: tracepb.Span_Link_Type(l.Type), - } - copyAttributes(&link.Attributes, l.Attributes) - sp.Links.Link = append(sp.Links.Link, link) - } - } - return sp -} - -// timestampProto creates a timestamp proto for a time.Time. -func timestampProto(t time.Time) *timestamppb.Timestamp { - return ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } -} - -// copyAttributes copies a map of attributes to a proto map field. -// It creates the map if it is nil. 
-func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { - if len(in) == 0 { - return - } - if *out == nil { - *out = &tracepb.Span_Attributes{} - } - if (*out).AttributeMap == nil { - (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) - } - var dropped int32 - for key, value := range in { - av := attributeValue(value) - if av == nil { - continue - } - switch key { - case ochttp.PathAttribute: - (*out).AttributeMap[labelHTTPPath] = av - case ochttp.HostAttribute: - (*out).AttributeMap[labelHTTPHost] = av - case ochttp.MethodAttribute: - (*out).AttributeMap[labelHTTPMethod] = av - case ochttp.UserAgentAttribute: - (*out).AttributeMap[labelHTTPUserAgent] = av - case ochttp.StatusCodeAttribute: - (*out).AttributeMap[labelHTTPStatusCode] = av - default: - if len(key) > 128 { - dropped++ - continue - } - (*out).AttributeMap[key] = av - } - } - (*out).DroppedAttributesCount = dropped -} - -func attributeValue(v interface{}) *tracepb.AttributeValue { - switch value := v.(type) { - case bool: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, - } - case int64: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_IntValue{IntValue: value}, - } - case string: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, - } - } - return nil -} - -// trunc returns a TruncatableString truncated to the given limit. -func trunc(s string, limit int) *tracepb.TruncatableString { - if len(s) > limit { - b := []byte(s[:limit]) - for { - r, size := utf8.DecodeLastRune(b) - if r == utf8.RuneError && size == 1 { - b = b[:len(b)-1] - } else { - break - } - } - return &tracepb.TruncatableString{ - Value: string(b), - TruncatedByteCount: clip32(len(s) - len(b)), - } - } - return &tracepb.TruncatableString{ - Value: s, - TruncatedByteCount: 0, - } -} - -// clip32 clips an int to the range of an int32. 
-func clip32(x int) int32 { - if x < math.MinInt32 { - return math.MinInt32 - } - if x > math.MaxInt32 { - return math.MaxInt32 - } - return int32(x) -} diff --git a/exporter/stackdriver/trace_proto_test.go b/exporter/stackdriver/trace_proto_test.go deleted file mode 100644 index 2597b08b5..000000000 --- a/exporter/stackdriver/trace_proto_test.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "math/big" - "reflect" - "sort" - "strings" - "testing" - "time" - - "go.opencensus.io/internal" - - "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "go.opencensus.io/trace" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - codepb "google.golang.org/genproto/googleapis/rpc/code" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -const projectID = "testproject" - -var ( - traceID = trace.TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f} - spanID = trace.SpanID{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1} -) - -type spans []*tracepb.Span - -func (s spans) Len() int { return len(s) } -func (s spans) Less(x, y int) bool { return s[x].DisplayName.Value < s[y].DisplayName.Value } -func (s spans) Swap(x, y int) { s[x], s[y] = s[y], s[x] } - -type 
testExporter struct { - spans []*trace.SpanData -} - -func (t *testExporter) ExportSpan(s *trace.SpanData) { - t.spans = append(t.spans, s) -} - -func TestExportTrace(t *testing.T) { - ctx := context.Background() - - var te testExporter - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - ctx, span0 := trace.StartSpanWithRemoteParent( - ctx, - "span0", - trace.SpanContext{ - TraceID: traceID, - SpanID: spanID, - TraceOptions: 1, - }, - ) - { - ctx1, span1 := trace.StartSpan(ctx, "span1") - { - _, span2 := trace.StartSpan(ctx1, "span2") - span2.AddMessageSendEvent(0x123, 1024, 512) - span2.Annotatef(nil, "in span%d", 2) - span2.Annotate(nil, big.NewRat(2, 4).String()) - span2.AddAttributes( - trace.StringAttribute("key1", "value1"), - trace.StringAttribute("key2", "value2")) - span2.AddAttributes(trace.Int64Attribute("key1", 100)) - span2.End() - } - { - ctx3, span3 := trace.StartSpan(ctx1, "span3") - span3.Annotate(nil, "in span3") - span3.AddMessageReceiveEvent(0x456, 2048, 1536) - span3.SetStatus(trace.Status{Code: int32(codepb.Code_UNAVAILABLE)}) - span3.End() - { - _, span4 := trace.StartSpan(ctx3, "span4") - x := 42 - a1 := []trace.Attribute{trace.StringAttribute("k1", "v1")} - a2 := []trace.Attribute{trace.StringAttribute("k2", "v2")} - a3 := []trace.Attribute{trace.StringAttribute("k3", "v3")} - a4 := map[string]interface{}{"k4": "v4"} - r := big.NewRat(2, 4) - span4.Annotate(a1, r.String()) - span4.Annotatef(a2, "foo %d", x) - span4.Annotate(a3, "in span4") - span4.AddLink(trace.Link{TraceID: trace.TraceID{1, 2}, SpanID: trace.SpanID{3}, Type: trace.LinkTypeParent, Attributes: a4}) - span4.End() - } - } - span1.End() - } - span0.End() - if len(te.spans) != 5 { - t.Errorf("got %d exported spans, want 5", len(te.spans)) - } - - var spbs spans - for _, s := range te.spans { - spbs = append(spbs, protoFromSpanData(s, "testproject")) - } - sort.Sort(spbs) - - for i, want := range []string{ - spanID.String(), - spbs[0].SpanId, - spbs[1].SpanId, 
- spbs[1].SpanId, - spbs[3].SpanId, - } { - if got := spbs[i].ParentSpanId; got != want { - t.Errorf("span %d: got ParentSpanID %q want %q", i, got, want) - } - } - checkTime := func(ts **timestamppb.Timestamp) { - if *ts == nil { - t.Error("expected timestamp") - } - *ts = nil - } - for _, span := range spbs { - checkTime(&span.StartTime) - checkTime(&span.EndTime) - if span.TimeEvents != nil { - for _, te := range span.TimeEvents.TimeEvent { - checkTime(&te.Time) - } - } - if want := fmt.Sprintf("projects/testproject/traces/%s/spans/%s", traceID, span.SpanId); span.Name != want { - t.Errorf("got span name %q want %q", span.Name, want) - } - span.Name, span.SpanId, span.ParentSpanId = "", "", "" - } - - expectedSpans := spans{ - &tracepb.Span{ - DisplayName: trunc("span0", 128), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: false}, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - }, - &tracepb.Span{ - DisplayName: trunc("span1", 128), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - }, - &tracepb.Span{ - DisplayName: trunc("span2", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "key2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("value2", 256)}}, - "key1": {Value: &tracepb.AttributeValue_IntValue{IntValue: 100}}, - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: 
&tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span2", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("1/2", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_SENT, - Id: 0x123, - UncompressedSizeBytes: 1024, - CompressedSizeBytes: 512, - }, - }, - }, - }, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - &tracepb.Span{ - DisplayName: trunc("span3", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span3", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_RECEIVED, - Id: 0x456, - UncompressedSizeBytes: 2048, - CompressedSizeBytes: 1536, - }, - }, - }, - }, - }, - Status: &statuspb.Status{ - Code: 14, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - &tracepb.Span{ - DisplayName: trunc("span4", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("1/2", 256), - 
Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k1": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v1", 256)}}, - }, - }, - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("foo 42", 256), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v2", 256)}}, - }, - }, - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span4", 256), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k3": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v3", 256)}}, - }, - }, - }, - }, - }, - }, - }, - Links: &tracepb.Span_Links{ - Link: []*tracepb.Span_Link{ - { - TraceId: "01020000000000000000000000000000", - SpanId: "0300000000000000", - Type: tracepb.Span_Link_PARENT_LINKED_SPAN, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k4": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v4", 256)}}, - }, - }, - }, - }, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - } - - if !reflect.DeepEqual(spbs, expectedSpans) { - var got, want []string - for _, s := range spbs { - got = append(got, proto.MarshalTextString(s)) - } - for _, s := range expectedSpans { - want = append(want, proto.MarshalTextString(s)) - } - t.Errorf("got spans:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) - } -} - -func TestEnums(t *testing.T) { - for _, test := range []struct { - x trace.LinkType - y tracepb.Span_Link_Type - }{ - {trace.LinkTypeUnspecified, tracepb.Span_Link_TYPE_UNSPECIFIED}, - {trace.LinkTypeChild, tracepb.Span_Link_CHILD_LINKED_SPAN}, - {trace.LinkTypeParent, 
tracepb.Span_Link_PARENT_LINKED_SPAN}, - } { - if test.x != trace.LinkType(test.y) { - t.Errorf("got link type values %d and %d, want equal", test.x, test.y) - } - } - - for _, test := range []struct { - x trace.MessageEventType - y tracepb.Span_TimeEvent_MessageEvent_Type - }{ - {trace.MessageEventTypeUnspecified, tracepb.Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED}, - {trace.MessageEventTypeSent, tracepb.Span_TimeEvent_MessageEvent_SENT}, - {trace.MessageEventTypeRecv, tracepb.Span_TimeEvent_MessageEvent_RECEIVED}, - } { - if test.x != trace.MessageEventType(test.y) { - t.Errorf("got network event type values %d and %d, want equal", test.x, test.y) - } - } -} - -func BenchmarkProto(b *testing.B) { - sd := &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID, - }, - Name: "foo", - StartTime: time.Now().Add(-time.Second), - EndTime: time.Now(), - Attributes: map[string]interface{}{"foo": "bar"}, - Annotations: []trace.Annotation{ - { - Time: time.Now().Add(-time.Millisecond), - Message: "hello, world", - Attributes: map[string]interface{}{"foo": "bar"}, - }, - }, - MessageEvents: []trace.MessageEvent{ - { - Time: time.Now().Add(-time.Microsecond), - EventType: 1, - MessageID: 2, - UncompressedByteSize: 4, - CompressedByteSize: 3, - }, - }, - Status: trace.Status{ - Code: 42, - Message: "failed", - }, - HasRemoteParent: true, - } - var x int - for i := 0; i < b.N; i++ { - s := protoFromSpanData(sd, `testproject`) - x += len(s.Name) - } - if x == 0 { - fmt.Println(x) - } -} diff --git a/exporter/stackdriver/trace_test.go b/exporter/stackdriver/trace_test.go deleted file mode 100644 index 03a24700d..000000000 --- a/exporter/stackdriver/trace_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "testing" - "time" - - "go.opencensus.io/trace" -) - -func TestBundling(t *testing.T) { - exporter := newTraceExporterWithClient(Options{ - ProjectID: "fakeProjectID", - BundleDelayThreshold: time.Second / 10, - BundleCountThreshold: 10, - }, nil) - - ch := make(chan []*trace.SpanData) - exporter.uploadFn = func(spans []*trace.SpanData) { - ch <- spans - } - trace.RegisterExporter(exporter) - - for i := 0; i < 35; i++ { - _, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample())) - span.End() - } - - // Read the first three bundles. - <-ch - <-ch - <-ch - - // Test that the fourth bundle isn't sent early. - select { - case <-ch: - t.Errorf("bundle sent too early") - case <-time.After(time.Second / 20): - <-ch - } - - // Test that there aren't extra bundles. - select { - case <-ch: - t.Errorf("too many bundles sent") - case <-time.After(time.Second / 5): - } -} From 745e4f464ff2304a360c7c11459a93c264518df8 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 2 Aug 2018 11:00:13 -0700 Subject: [PATCH 042/212] Revert "Remove Stackdriver exporter (#790)" (#854) This reverts commit df6bd8075012c9e7088c96bc588eac92ab36d7d1. 
--- exporter/stackdriver/propagation/http.go | 94 ++ exporter/stackdriver/propagation/http_test.go | 70 ++ exporter/stackdriver/stackdriver.go | 148 +++ exporter/stackdriver/stackdriver_test.go | 125 +++ exporter/stackdriver/stats.go | 439 +++++++++ exporter/stackdriver/stats_test.go | 866 ++++++++++++++++++ exporter/stackdriver/trace.go | 172 ++++ exporter/stackdriver/trace_proto.go | 255 ++++++ exporter/stackdriver/trace_proto_test.go | 389 ++++++++ exporter/stackdriver/trace_test.go | 62 ++ 10 files changed, 2620 insertions(+) create mode 100644 exporter/stackdriver/propagation/http.go create mode 100644 exporter/stackdriver/propagation/http_test.go create mode 100644 exporter/stackdriver/stackdriver.go create mode 100644 exporter/stackdriver/stackdriver_test.go create mode 100644 exporter/stackdriver/stats.go create mode 100644 exporter/stackdriver/stats_test.go create mode 100644 exporter/stackdriver/trace.go create mode 100644 exporter/stackdriver/trace_proto.go create mode 100644 exporter/stackdriver/trace_proto_test.go create mode 100644 exporter/stackdriver/trace_test.go diff --git a/exporter/stackdriver/propagation/http.go b/exporter/stackdriver/propagation/http.go new file mode 100644 index 000000000..7cc02a110 --- /dev/null +++ b/exporter/stackdriver/propagation/http.go @@ -0,0 +1,94 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package propagation implement X-Cloud-Trace-Context header propagation used +// by Google Cloud products. +package propagation // import "go.opencensus.io/exporter/stackdriver/propagation" + +import ( + "encoding/binary" + "encoding/hex" + "fmt" + "net/http" + "strconv" + "strings" + + "go.opencensus.io/trace" + "go.opencensus.io/trace/propagation" +) + +const ( + httpHeaderMaxSize = 200 + httpHeader = `X-Cloud-Trace-Context` +) + +var _ propagation.HTTPFormat = (*HTTPFormat)(nil) + +// HTTPFormat implements propagation.HTTPFormat to propagate +// traces in HTTP headers for Google Cloud Platform and Stackdriver Trace. +type HTTPFormat struct{} + +// SpanContextFromRequest extracts a Stackdriver Trace span context from incoming requests. +func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { + h := req.Header.Get(httpHeader) + // See https://cloud.google.com/trace/docs/faq for the header HTTPFormat. + // Return if the header is empty or missing, or if the header is unreasonably + // large, to avoid making unnecessary copies of a large string. + if h == "" || len(h) > httpHeaderMaxSize { + return trace.SpanContext{}, false + } + + // Parse the trace id field. + slash := strings.Index(h, `/`) + if slash == -1 { + return trace.SpanContext{}, false + } + tid, h := h[:slash], h[slash+1:] + + buf, err := hex.DecodeString(tid) + if err != nil { + return trace.SpanContext{}, false + } + copy(sc.TraceID[:], buf) + + // Parse the span id field. + spanstr := h + semicolon := strings.Index(h, `;`) + if semicolon != -1 { + spanstr, h = h[:semicolon], h[semicolon+1:] + } + sid, err := strconv.ParseUint(spanstr, 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + binary.BigEndian.PutUint64(sc.SpanID[:], sid) + + // Parse the options field, options field is optional. 
+ if !strings.HasPrefix(h, "o=") { + return sc, true + } + o, err := strconv.ParseUint(h[2:], 10, 64) + if err != nil { + return trace.SpanContext{}, false + } + sc.TraceOptions = trace.TraceOptions(o) + return sc, true +} + +// SpanContextToRequest modifies the given request to include a Stackdriver Trace header. +func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { + sid := binary.BigEndian.Uint64(sc.SpanID[:]) + header := fmt.Sprintf("%s/%d;o=%d", hex.EncodeToString(sc.TraceID[:]), sid, int64(sc.TraceOptions)) + req.Header.Set(httpHeader, header) +} diff --git a/exporter/stackdriver/propagation/http_test.go b/exporter/stackdriver/propagation/http_test.go new file mode 100644 index 000000000..9ad93b714 --- /dev/null +++ b/exporter/stackdriver/propagation/http_test.go @@ -0,0 +1,70 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package propagation + +import ( + "net/http" + "reflect" + "testing" + + "go.opencensus.io/trace" +) + +func TestHTTPFormat(t *testing.T) { + format := &HTTPFormat{} + traceID := [16]byte{16, 84, 69, 170, 120, 67, 188, 139, 242, 6, 177, 32, 0, 16, 0, 0} + spanID1 := [8]byte{255, 0, 0, 0, 0, 0, 0, 123} + spanID2 := [8]byte{0, 0, 0, 0, 0, 0, 0, 123} + tests := []struct { + incoming string + wantSpanContext trace.SpanContext + }{ + { + incoming: "105445aa7843bc8bf206b12000100000/18374686479671623803;o=1", + wantSpanContext: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID1, + TraceOptions: 1, + }, + }, + { + incoming: "105445aa7843bc8bf206b12000100000/123;o=0", + wantSpanContext: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID2, + TraceOptions: 0, + }, + }, + } + for _, tt := range tests { + t.Run(tt.incoming, func(t *testing.T) { + req, _ := http.NewRequest("GET", "http://example.com", nil) + req.Header.Add(httpHeader, tt.incoming) + sc, ok := format.SpanContextFromRequest(req) + if !ok { + t.Errorf("exporter.SpanContextFromRequest() = false; want true") + } + if got, want := sc, tt.wantSpanContext; !reflect.DeepEqual(got, want) { + t.Errorf("exporter.SpanContextFromRequest() returned span context %v; want %v", got, want) + } + + req, _ = http.NewRequest("GET", "http://example.com", nil) + format.SpanContextToRequest(sc, req) + if got, want := req.Header.Get(httpHeader), tt.incoming; got != want { + t.Errorf("exporter.SpanContextToRequest() returned header %q; want %q", got, want) + } + }) + } +} diff --git a/exporter/stackdriver/stackdriver.go b/exporter/stackdriver/stackdriver.go new file mode 100644 index 000000000..b4f152149 --- /dev/null +++ b/exporter/stackdriver/stackdriver.go @@ -0,0 +1,148 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package stackdriver has moved. +// +// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead. +package stackdriver // import "go.opencensus.io/exporter/stackdriver" + +import ( + "context" + "errors" + "fmt" + "log" + "time" + + traceapi "cloud.google.com/go/trace/apiv2" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "golang.org/x/oauth2/google" + "google.golang.org/api/option" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" +) + +// Options contains options for configuring the exporter. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. +type Options struct { + // ProjectID is the identifier of the Stackdriver + // project the user is uploading the stats data to. + // If not set, this will default to your "Application Default Credentials". + // For details see: https://developers.google.com/accounts/docs/application-default-credentials + ProjectID string + + // OnError is the hook to be called when there is + // an error uploading the stats or tracing data. + // If no custom hook is set, errors are logged. + // Optional. + OnError func(err error) + + // MonitoringClientOptions are additional options to be passed + // to the underlying Stackdriver Monitoring API client. + // Optional. + MonitoringClientOptions []option.ClientOption + + // TraceClientOptions are additional options to be passed + // to the underlying Stackdriver Trace API client. + // Optional. 
+ TraceClientOptions []option.ClientOption + + // BundleDelayThreshold determines the max amount of time + // the exporter can wait before uploading view data to + // the backend. + // Optional. + BundleDelayThreshold time.Duration + + // BundleCountThreshold determines how many view data events + // can be buffered before batch uploading them to the backend. + // Optional. + BundleCountThreshold int + + // Resource is an optional field that represents the Stackdriver + // MonitoredResource, a resource that can be used for monitoring. + // If no custom ResourceDescriptor is set, a default MonitoredResource + // with type global and no resource labels will be used. + // Optional. + Resource *monitoredrespb.MonitoredResource + + // MetricPrefix overrides the OpenCensus prefix of a stackdriver metric. + // Optional. + MetricPrefix string +} + +// Exporter is a stats.Exporter and trace.Exporter +// implementation that uploads data to Stackdriver. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. +type Exporter struct { + traceExporter *traceExporter + statsExporter *statsExporter +} + +// NewExporter creates a new Exporter that implements both stats.Exporter and +// trace.Exporter. +// +// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. +func NewExporter(o Options) (*Exporter, error) { + if o.ProjectID == "" { + creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...) 
+ if err != nil { + return nil, fmt.Errorf("stackdriver: %v", err) + } + if creds.ProjectID == "" { + return nil, errors.New("stackdriver: no project found with application default credentials") + } + o.ProjectID = creds.ProjectID + } + se, err := newStatsExporter(o) + if err != nil { + return nil, err + } + te, err := newTraceExporter(o) + if err != nil { + return nil, err + } + return &Exporter{ + statsExporter: se, + traceExporter: te, + }, nil +} + +// ExportView exports to the Stackdriver Monitoring if view data +// has one or more rows. +func (e *Exporter) ExportView(vd *view.Data) { + e.statsExporter.ExportView(vd) +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *Exporter) ExportSpan(sd *trace.SpanData) { + e.traceExporter.ExportSpan(sd) +} + +// Flush waits for exported data to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose recent stats or spans. +func (e *Exporter) Flush() { + e.statsExporter.Flush() + e.traceExporter.Flush() +} + +func (o Options) handleError(err error) { + if o.OnError != nil { + o.OnError(err) + return + } + log.Printf("Error exporting to Stackdriver: %v", err) +} diff --git a/exporter/stackdriver/stackdriver_test.go b/exporter/stackdriver/stackdriver_test.go new file mode 100644 index 000000000..55cc81e93 --- /dev/null +++ b/exporter/stackdriver/stackdriver_test.go @@ -0,0 +1,125 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "io/ioutil" + "net/http" + "net/http/httptest" + "os" + "testing" + "time" + + "go.opencensus.io/internal/testpb" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" + "golang.org/x/net/context/ctxhttp" +) + +func TestExport(t *testing.T) { + projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") + if !ok { + t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") + } + + var exportErrors []error + + exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) { + exportErrors = append(exportErrors, err) + }}) + if err != nil { + t.Fatal(err) + } + defer exporter.Flush() + + trace.RegisterExporter(exporter) + defer trace.UnregisterExporter(exporter) + view.RegisterExporter(exporter) + defer view.UnregisterExporter(exporter) + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + _, span := trace.StartSpan(context.Background(), "custom-span") + time.Sleep(10 * time.Millisecond) + span.End() + + // Test HTTP spans + + handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { + _, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork") + spanContext := backgroundSpan.SpanContext() + time.Sleep(10 * time.Millisecond) + backgroundSpan.End() + + _, span := trace.StartSpan(req.Context(), "Sleep") + span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID}) + time.Sleep(150 * time.Millisecond) // do work + span.End() + rw.Write([]byte("Hello, world!")) + }) + server := httptest.NewServer(&ochttp.Handler{Handler: handler}) + defer server.Close() + + ctx := context.Background() + client := &http.Client{ + Transport: &ochttp.Transport{}, + } + resp, err := ctxhttp.Get(ctx, client, server.URL+"/test/123?abc=xyz") + if err != nil { + t.Fatal(err) + } + 
body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatal(err) + } + if want, got := "Hello, world!", string(body); want != got { + t.Fatalf("resp.Body = %q; want %q", want, got) + } + + // Flush twice to expose issue of exporter creating traces internally (#557) + exporter.Flush() + exporter.Flush() + + for _, err := range exportErrors { + t.Error(err) + } +} + +func TestGRPC(t *testing.T) { + projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") + if !ok { + t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") + } + + exporter, err := NewExporter(Options{ProjectID: projectID}) + if err != nil { + t.Fatal(err) + } + defer exporter.Flush() + + trace.RegisterExporter(exporter) + defer trace.UnregisterExporter(exporter) + view.RegisterExporter(exporter) + defer view.UnregisterExporter(exporter) + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + client, done := testpb.NewTestClient(t) + defer done() + + client.Single(context.Background(), &testpb.FooRequest{SleepNanos: int64(42 * time.Millisecond)}) +} diff --git a/exporter/stackdriver/stats.go b/exporter/stackdriver/stats.go new file mode 100644 index 000000000..93635dac8 --- /dev/null +++ b/exporter/stackdriver/stats.go @@ -0,0 +1,439 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package stackdriver + +import ( + "context" + "errors" + "fmt" + "os" + "path" + "strconv" + "strings" + "sync" + "time" + + "go.opencensus.io/internal" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" + + "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/ptypes/timestamp" + "google.golang.org/api/option" + "google.golang.org/api/support/bundler" + distributionpb "google.golang.org/genproto/googleapis/api/distribution" + labelpb "google.golang.org/genproto/googleapis/api/label" + "google.golang.org/genproto/googleapis/api/metric" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" +) + +const maxTimeSeriesPerUpload = 200 +const opencensusTaskKey = "opencensus_task" +const opencensusTaskDescription = "Opencensus task identifier" +const defaultDisplayNamePrefix = "OpenCensus" + +// statsExporter exports stats to the Stackdriver Monitoring. +type statsExporter struct { + bundler *bundler.Bundler + o Options + + createdViewsMu sync.Mutex + createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely + + c *monitoring.MetricClient + taskValue string +} + +// Enforces the singleton on NewExporter per projectID per process +// lest there will be races with Stackdriver. +var ( + seenProjectsMu sync.Mutex + seenProjects = make(map[string]bool) +) + +var ( + errBlankProjectID = errors.New("expecting a non-blank ProjectID") + errSingletonExporter = errors.New("only one exporter can be created per unique ProjectID per process") +) + +// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring. +// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent +// invocations of NewExporter with the same ProjectID will return an error. 
+func newStatsExporter(o Options) (*statsExporter, error) { + if strings.TrimSpace(o.ProjectID) == "" { + return nil, errBlankProjectID + } + + seenProjectsMu.Lock() + defer seenProjectsMu.Unlock() + _, seen := seenProjects[o.ProjectID] + if seen { + return nil, errSingletonExporter + } + + seenProjects[o.ProjectID] = true + + opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent)) + client, err := monitoring.NewMetricClient(context.Background(), opts...) + if err != nil { + return nil, err + } + e := &statsExporter{ + c: client, + o: o, + createdViews: make(map[string]*metricpb.MetricDescriptor), + taskValue: getTaskValue(), + } + e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { + vds := bundle.([]*view.Data) + e.handleUpload(vds...) + }) + e.bundler.DelayThreshold = e.o.BundleDelayThreshold + e.bundler.BundleCountThreshold = e.o.BundleCountThreshold + return e, nil +} + +// ExportView exports to the Stackdriver Monitoring if view data +// has one or more rows. +func (e *statsExporter) ExportView(vd *view.Data) { + if len(vd.Rows) == 0 { + return + } + err := e.bundler.Add(vd, 1) + switch err { + case nil: + return + case bundler.ErrOversizedItem: + go e.handleUpload(vd) + case bundler.ErrOverflow: + e.o.handleError(errors.New("failed to upload: buffer full")) + default: + e.o.handleError(err) + } +} + +// getTaskValue returns a task label value in the format of +// "go-@". +func getTaskValue() string { + hostname, err := os.Hostname() + if err != nil { + hostname = "localhost" + } + return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname +} + +// handleUpload handles uploading a slice +// of Data, as well as error handling. +func (e *statsExporter) handleUpload(vds ...*view.Data) { + if err := e.uploadStats(vds); err != nil { + e.o.handleError(err) + } +} + +// Flush waits for exported view data to be uploaded. +// +// This is useful if your program is ending and you do not +// want to lose recent spans. 
+func (e *statsExporter) Flush() { + e.bundler.Flush() +} + +func (e *statsExporter) uploadStats(vds []*view.Data) error { + ctx, span := trace.StartSpan( + context.Background(), + "go.opencensus.io/exporter/stackdriver.uploadStats", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + + for _, vd := range vds { + if err := e.createMeasure(ctx, vd); err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + return err + } + } + for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { + if err := e.c.CreateTimeSeries(ctx, req); err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + // TODO(jbd): Don't fail fast here, batch errors? + return err + } + } + return nil +} + +func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { + var reqs []*monitoringpb.CreateTimeSeriesRequest + var timeSeries []*monitoringpb.TimeSeries + + resource := e.o.Resource + if resource == nil { + resource = &monitoredrespb.MonitoredResource{ + Type: "global", + } + } + + for _, vd := range vds { + for _, row := range vd.Rows { + ts := &monitoringpb.TimeSeries{ + Metric: &metricpb.Metric{ + Type: namespacedViewName(vd.View.Name), + Labels: newLabels(row.Tags, e.taskValue), + }, + Resource: resource, + Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, + } + timeSeries = append(timeSeries, ts) + if len(timeSeries) == limit { + reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(e.o.ProjectID), + TimeSeries: timeSeries, + }) + timeSeries = []*monitoringpb.TimeSeries{} + } + } + } + if len(timeSeries) > 0 { + reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ + Name: monitoring.MetricProjectPath(e.o.ProjectID), + TimeSeries: timeSeries, + }) + } + return reqs +} + +// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring. 
+// An error will be returned if there is already a metric descriptor created with the same name +// but it has a different aggregation or keys. +func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error { + e.createdViewsMu.Lock() + defer e.createdViewsMu.Unlock() + + m := vd.View.Measure + agg := vd.View.Aggregation + tagKeys := vd.View.TagKeys + viewName := vd.View.Name + + if md, ok := e.createdViews[viewName]; ok { + return equalMeasureAggTagKeys(md, m, agg, tagKeys) + } + + metricType := namespacedViewName(viewName) + var valueType metricpb.MetricDescriptor_ValueType + unit := m.Unit() + + switch agg.Type { + case view.AggTypeCount: + valueType = metricpb.MetricDescriptor_INT64 + // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", + // because this view does not apply to the recorded values. + unit = stats.UnitDimensionless + case view.AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + case view.AggTypeDistribution: + valueType = metricpb.MetricDescriptor_DISTRIBUTION + case view.AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + valueType = metricpb.MetricDescriptor_INT64 + case *stats.Float64Measure: + valueType = metricpb.MetricDescriptor_DOUBLE + } + default: + return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) + } + + metricKind := metricpb.MetricDescriptor_CUMULATIVE + displayNamePrefix := defaultDisplayNamePrefix + if e.o.MetricPrefix != "" { + displayNamePrefix = e.o.MetricPrefix + } + + md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{ + Name: fmt.Sprintf("projects/%s", e.o.ProjectID), + MetricDescriptor: &metricpb.MetricDescriptor{ + Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), + DisplayName: path.Join(displayNamePrefix, 
viewName), + Description: vd.View.Description, + Unit: unit, + Type: metricType, + MetricKind: metricKind, + ValueType: valueType, + Labels: newLabelDescriptors(vd.View.TagKeys), + }, + }) + if err != nil { + return err + } + + e.createdViews[viewName] = md + return nil +} + +func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { + return &monitoringpb.Point{ + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: newTypedValue(v, row), + } +} + +func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { + switch v := r.Data.(type) { + case *view.CountData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: v.Value, + }} + case *view.SumData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + case *view.DistributionData: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ + DistributionValue: &distributionpb.Distribution{ + Count: v.Count, + Mean: v.Mean, + SumOfSquaredDeviation: v.SumOfSquaredDev, + // TODO(songya): uncomment this once Stackdriver supports min/max. 
+ // Range: &distributionpb.Distribution_Range{ + // Min: v.Min, + // Max: v.Max, + // }, + BucketOptions: &distributionpb.Distribution_BucketOptions{ + Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ + ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ + Bounds: vd.Aggregation.Buckets, + }, + }, + }, + BucketCounts: v.CountPerBucket, + }, + }} + case *view.LastValueData: + switch vd.Measure.(type) { + case *stats.Int64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: int64(v.Value), + }} + case *stats.Float64Measure: + return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: v.Value, + }} + } + } + return nil +} + +func namespacedViewName(v string) string { + return path.Join("custom.googleapis.com", "opencensus", v) +} + +func newLabels(tags []tag.Tag, taskValue string) map[string]string { + labels := make(map[string]string) + for _, tag := range tags { + labels[internal.Sanitize(tag.Key.Name())] = tag.Value + } + labels[opencensusTaskKey] = taskValue + return labels +} + +func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor { + labelDescriptors := make([]*labelpb.LabelDescriptor, len(keys)+1) + for i, key := range keys { + labelDescriptors[i] = &labelpb.LabelDescriptor{ + Key: internal.Sanitize(key.Name()), + ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags + } + } + // Add a specific open census task id label. 
+ labelDescriptors[len(keys)] = &labelpb.LabelDescriptor{ + Key: opencensusTaskKey, + ValueType: labelpb.LabelDescriptor_STRING, + Description: opencensusTaskDescription, + } + return labelDescriptors +} + +func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error { + var aggTypeMatch bool + switch md.ValueType { + case metricpb.MetricDescriptor_INT64: + if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) { + return fmt.Errorf("stackdriver metric descriptor was not created as int64") + } + aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DOUBLE: + if _, ok := m.(*stats.Float64Measure); !ok { + return fmt.Errorf("stackdriver metric descriptor was not created as double") + } + aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue + case metricpb.MetricDescriptor_DISTRIBUTION: + aggTypeMatch = agg.Type == view.AggTypeDistribution + } + + if !aggTypeMatch { + return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) + } + + if len(md.Labels) != len(keys)+1 { + return errors.New("stackdriver metric descriptor was not created with the view labels") + } + + labels := make(map[string]struct{}, len(keys)+1) + for _, k := range keys { + labels[internal.Sanitize(k.Name())] = struct{}{} + } + labels[opencensusTaskKey] = struct{}{} + + for _, k := range md.Labels { + if _, ok := labels[k.Key]; !ok { + return fmt.Errorf("stackdriver metric descriptor was not created with label %q", k) + } + } + + return nil +} + +var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.CreateMetricDescriptor(ctx, mdr) +} + +var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr 
*monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + return c.GetMetricDescriptor(ctx, mdr) +} diff --git a/exporter/stackdriver/stats_test.go b/exporter/stackdriver/stats_test.go new file mode 100644 index 000000000..d734dc52e --- /dev/null +++ b/exporter/stackdriver/stats_test.go @@ -0,0 +1,866 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "reflect" + "testing" + "time" + + "cloud.google.com/go/monitoring/apiv3" + "github.com/golang/protobuf/ptypes/timestamp" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "google.golang.org/api/option" + "google.golang.org/genproto/googleapis/api/label" + "google.golang.org/genproto/googleapis/api/metric" + metricpb "google.golang.org/genproto/googleapis/api/metric" + monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" + monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" + "google.golang.org/grpc" +) + +var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})} + +func TestRejectBlankProjectID(t *testing.T) { + ids := []string{"", " ", " "} + for _, projectID := range ids { + opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} + exp, err := newStatsExporter(opts) + if err == nil || exp != nil { + t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, 
err) + } + } +} + +// Ensure only one exporter per projectID per process, any +// subsequent invocations of NewExporter should fail. +func TestNewExporterSingletonPerProcess(t *testing.T) { + ids := []string{"open-census.io", "x", "fakeProjectID"} + for _, projectID := range ids { + opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} + exp, err := newStatsExporter(opts) + if err != nil { + t.Errorf("NewExporter() projectID = %q err = %q", projectID, err) + continue + } + if exp == nil { + t.Errorf("NewExporter returned a nil Exporter") + continue + } + exp, err = newStatsExporter(opts) + if err == nil || exp != nil { + t.Errorf("NewExporter more than once should fail; exp (%v) err %v", exp, err) + } + } +} + +func TestExporter_makeReq(t *testing.T) { + m := stats.Float64("test-measure", "measure desc", "unit") + + key, err := tag.NewKey("test_key") + if err != nil { + t.Fatal(err) + } + + v := &view.View{ + Name: "testview", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.Count(), + } + distView := &view.View{ + Name: "distview", + Description: "desc", + Measure: m, + Aggregation: view.Distribution(2, 4, 7), + } + + start := time.Now() + end := start.Add(time.Minute) + count1 := &view.CountData{Value: 10} + count2 := &view.CountData{Value: 16} + sum1 := &view.SumData{Value: 5.5} + sum2 := &view.SumData{Value: -11.1} + last1 := view.LastValueData{Value: 100} + last2 := view.LastValueData{Value: 200} + taskValue := getTaskValue() + + tests := []struct { + name string + projID string + vd *view.Data + want []*monitoringpb.CreateTimeSeriesRequest + }{ + { + name: "count agg + timeline", + projID: "proj-id", + vd: newTestViewData(v, start, end, count1, count2), + want: []*monitoringpb.CreateTimeSeriesRequest{{ + Name: monitoring.MetricProjectPath("proj-id"), + TimeSeries: []*monitoringpb.TimeSeries{ + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ 
+ "test_key": "test-value-1", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 10, + }}, + }, + }, + }, + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-2", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 16, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "sum agg + timeline", + projID: "proj-id", + vd: newTestViewData(v, start, end, sum1, sum2), + want: []*monitoringpb.CreateTimeSeriesRequest{{ + Name: monitoring.MetricProjectPath("proj-id"), + TimeSeries: []*monitoringpb.TimeSeries{ + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-1", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: 
&monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 5.5, + }}, + }, + }, + }, + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-2", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: -11.1, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "last value agg", + projID: "proj-id", + vd: newTestViewData(v, start, end, &last1, &last2), + want: []*monitoringpb.CreateTimeSeriesRequest{{ + Name: monitoring.MetricProjectPath("proj-id"), + TimeSeries: []*monitoringpb.TimeSeries{ + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-1", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 100, + }}, + }, + }, + }, + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-2", + opencensusTaskKey: taskValue, + }, + }, + Resource: &monitoredrespb.MonitoredResource{ + Type: "global", + }, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + 
Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ + DoubleValue: 200, + }}, + }, + }, + }, + }, + }}, + }, + { + name: "dist agg + time window", + projID: "proj-id", + vd: newTestDistViewData(distView, start, end), + want: nil, //TODO: add expectation for distribution + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &statsExporter{ + o: Options{ProjectID: tt.projID}, + taskValue: taskValue, + } + resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) + if tt.want == nil { + t.Skip("Missing expectation") + } + if got, want := len(resps), len(tt.want); got != want { + t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) + } + if len(tt.want) == 0 { + return + } + if !reflect.DeepEqual(resps, tt.want) { + t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) + } + }) + } +} + +func TestExporter_makeReq_batching(t *testing.T) { + m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit") + + key, err := tag.NewKey("test_key") + if err != nil { + t.Fatal(err) + } + + v := &view.View{ + Name: "view", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.Count(), + } + + tests := []struct { + name string + iter int + limit int + wantReqs int + wantTotal int + }{ + { + name: "4 vds; 3 limit", + iter: 2, + limit: 3, + wantReqs: 2, + wantTotal: 4, + }, + { + name: "4 vds; 4 limit", + iter: 2, + limit: 4, + wantReqs: 1, + wantTotal: 4, + }, + { + name: "4 vds; 5 limit", + iter: 2, + limit: 5, + wantReqs: 1, + wantTotal: 4, + }, + } + + count1 := &view.CountData{Value: 10} + count2 := &view.CountData{Value: 16} + + for _, tt := range tests { + var vds []*view.Data + for i := 0; i < tt.iter; i++ { + vds = append(vds, newTestViewData(v, time.Now(), 
time.Now(), count1, count2)) + } + + e := &statsExporter{} + resps := e.makeReq(vds, tt.limit) + if len(resps) != tt.wantReqs { + t.Errorf("%v: got %v; want %d requests", tt.name, resps, tt.wantReqs) + } + + var total int + for _, resp := range resps { + total += len(resp.TimeSeries) + } + if got, want := total, tt.wantTotal; got != want { + t.Errorf("%v: len(resps[...].TimeSeries) = %d; want %d", tt.name, got, want) + } + } +} + +func TestEqualAggWindowTagKeys(t *testing.T) { + key1, _ := tag.NewKey("test-key-one") + key2, _ := tag.NewKey("test-key-two") + tests := []struct { + name string + md *metricpb.MetricDescriptor + m stats.Measure + agg *view.Aggregation + keys []tag.Key + wantErr bool + }{ + { + name: "count agg with in64 measure", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Int64("name", "", ""), + agg: view.Count(), + wantErr: false, + }, + { + name: "count agg with double measure", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Float64("name", "", ""), + agg: view.Count(), + wantErr: false, + }, + { + name: "sum agg double", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DOUBLE, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Float64("name", "", ""), + agg: view.Sum(), + wantErr: false, + }, + { + name: "sum agg int64", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Int64("name", "", ""), + agg: view.Sum(), + wantErr: false, + }, + { + name: "last value agg 
double", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DOUBLE, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Float64("name", "", ""), + agg: view.LastValue(), + wantErr: false, + }, + { + name: "last value agg int64", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Int64("name", "", ""), + agg: view.LastValue(), + wantErr: false, + }, + { + name: "distribution - mismatch", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Int64("name", "", ""), + agg: view.Count(), + wantErr: true, + }, + { + name: "last value - measure mismatch", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Float64("name", "", ""), + agg: view.LastValue(), + wantErr: true, + }, + { + name: "distribution agg with keys", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + Labels: []*label.LabelDescriptor{ + {Key: "test_key_one"}, + {Key: "test_key_two"}, + {Key: opencensusTaskKey}, + }, + }, + m: stats.Int64("name", "", ""), + agg: view.Distribution(), + keys: []tag.Key{key1, key2}, + wantErr: false, + }, + { + name: "distribution agg with keys -- mismatch", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DISTRIBUTION, + }, + m: stats.Int64("name", "", ""), + agg: view.Distribution(), + keys: []tag.Key{key1, key2}, + wantErr: true, + }, + { + 
name: "count agg with pointers", + md: &metricpb.MetricDescriptor{ + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_INT64, + Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, + }, + m: stats.Int64("name", "", ""), + agg: view.Count(), + wantErr: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys) + if err != nil && !tt.wantErr { + t.Errorf("equalAggTagKeys() = %q; want no error", err) + } + if err == nil && tt.wantErr { + t.Errorf("equalAggTagKeys() = %q; want error", err) + } + + }) + } +} + +func TestExporter_createMeasure(t *testing.T) { + oldCreateMetricDescriptor := createMetricDescriptor + + defer func() { + createMetricDescriptor = oldCreateMetricDescriptor + }() + + key, _ := tag.NewKey("test-key-one") + m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) + + v := &view.View{ + Name: "test_view_sum", + Description: "view_description", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.Sum(), + } + + data := &view.CountData{Value: 0} + vd := newTestViewData(v, time.Now(), time.Now(), data, data) + + e := &statsExporter{ + createdViews: make(map[string]*metricpb.MetricDescriptor), + o: Options{ProjectID: "test_project"}, + } + + var createCalls int + createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + createCalls++ + if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want { + t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want { + t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.ValueType, 
metricpb.MetricDescriptor_DOUBLE; got != want { + t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { + t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { + t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want { + t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want { + t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) + } + return &metric.MetricDescriptor{ + DisplayName: "OpenCensus/test_view_sum", + Description: "view_description", + Unit: stats.UnitMilliseconds, + Type: "custom.googleapis.com/opencensus/test_view_sum", + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + ValueType: metricpb.MetricDescriptor_DOUBLE, + Labels: newLabelDescriptors(vd.View.TagKeys), + }, nil + } + + ctx := context.Background() + if err := e.createMeasure(ctx, vd); err != nil { + t.Errorf("Exporter.createMeasure() error = %v", err) + } + if err := e.createMeasure(ctx, vd); err != nil { + t.Errorf("Exporter.createMeasure() error = %v", err) + } + if count := createCalls; count != 1 { + t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count) + } + if count := len(e.createdViews); count != 1 { + t.Errorf("len(e.createdViews) = %v; want 1", count) + } +} + +func TestExporter_createMeasure_CountAggregation(t *testing.T) { + oldCreateMetricDescriptor := createMetricDescriptor + + defer func() { + createMetricDescriptor = oldCreateMetricDescriptor + }() + + key, _ := tag.NewKey("test-key-one") + m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) + + v := &view.View{ + Name: 
"test_view_count", + Description: "view_description", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.Count(), + } + + data := &view.CountData{Value: 0} + vd := newTestViewData(v, time.Now(), time.Now(), data, data) + + e := &statsExporter{ + createdViews: make(map[string]*metricpb.MetricDescriptor), + o: Options{ProjectID: "test_project"}, + } + + createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { + if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want { + t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want { + t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want { + t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { + t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { + t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want { + t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) + } + if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want { + t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) + } + return &metric.MetricDescriptor{ + DisplayName: "OpenCensus/test_view_sum", + Description: "view_description", + Unit: stats.UnitDimensionless, + Type: "custom.googleapis.com/opencensus/test_view_count", + MetricKind: metricpb.MetricDescriptor_CUMULATIVE, + 
ValueType: metricpb.MetricDescriptor_INT64, + Labels: newLabelDescriptors(vd.View.TagKeys), + }, nil + } + ctx := context.Background() + if err := e.createMeasure(ctx, vd); err != nil { + t.Errorf("Exporter.createMeasure() error = %v", err) + } +} + +func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) { + m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit") + + key, err := tag.NewKey("test_key") + if err != nil { + t.Fatal(err) + } + + v := &view.View{ + Name: "testview", + Description: "desc", + TagKeys: []tag.Key{key}, + Measure: m, + Aggregation: view.Count(), + } + if err := view.Register(v); err != nil { + t.Fatal(err) + } + defer view.Unregister(v) + + start := time.Now() + end := start.Add(time.Minute) + count1 := &view.CountData{Value: 10} + count2 := &view.CountData{Value: 16} + taskValue := getTaskValue() + + resource := &monitoredrespb.MonitoredResource{ + Type: "gce_instance", + Labels: map[string]string{"instance_id": "instance", "zone": "us-west-1a"}, + } + + tests := []struct { + name string + projID string + vd *view.Data + want []*monitoringpb.CreateTimeSeriesRequest + }{ + { + name: "count agg timeline", + projID: "proj-id", + vd: newTestViewData(v, start, end, count1, count2), + want: []*monitoringpb.CreateTimeSeriesRequest{{ + Name: monitoring.MetricProjectPath("proj-id"), + TimeSeries: []*monitoringpb.TimeSeries{ + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-1", + opencensusTaskKey: taskValue, + }, + }, + Resource: resource, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 10, 
+ }}, + }, + }, + }, + { + Metric: &metricpb.Metric{ + Type: "custom.googleapis.com/opencensus/testview", + Labels: map[string]string{ + "test_key": "test-value-2", + opencensusTaskKey: taskValue, + }, + }, + Resource: resource, + Points: []*monitoringpb.Point{ + { + Interval: &monitoringpb.TimeInterval{ + StartTime: ×tamp.Timestamp{ + Seconds: start.Unix(), + Nanos: int32(start.Nanosecond()), + }, + EndTime: ×tamp.Timestamp{ + Seconds: end.Unix(), + Nanos: int32(end.Nanosecond()), + }, + }, + Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ + Int64Value: 16, + }}, + }, + }, + }, + }, + }}, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + e := &statsExporter{ + o: Options{ProjectID: tt.projID, Resource: resource}, + taskValue: taskValue, + } + resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) + if got, want := len(resps), len(tt.want); got != want { + t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) + } + if len(tt.want) == 0 { + return + } + if !reflect.DeepEqual(resps, tt.want) { + t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) + } + }) + } +} + +func newTestViewData(v *view.View, start, end time.Time, data1, data2 view.AggregationData) *view.Data { + key, _ := tag.NewKey("test-key") + tag1 := tag.Tag{Key: key, Value: "test-value-1"} + tag2 := tag.Tag{Key: key, Value: "test-value-2"} + return &view.Data{ + View: v, + Rows: []*view.Row{ + { + Tags: []tag.Tag{tag1}, + Data: data1, + }, + { + Tags: []tag.Tag{tag2}, + Data: data2, + }, + }, + Start: start, + End: end, + } +} + +func newTestDistViewData(v *view.View, start, end time.Time) *view.Data { + return &view.Data{ + View: v, + Rows: []*view.Row{ + {Data: &view.DistributionData{ + Count: 5, + Min: 1, + Max: 7, + Mean: 3, + SumOfSquaredDev: 1.5, + CountPerBucket: []int64{2, 2, 1}, + }}, + }, + Start: start, + End: end, + } +} diff --git a/exporter/stackdriver/trace.go 
b/exporter/stackdriver/trace.go new file mode 100644 index 000000000..e3fd6bab7 --- /dev/null +++ b/exporter/stackdriver/trace.go @@ -0,0 +1,172 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "fmt" + "log" + "sync" + "time" + + tracingclient "cloud.google.com/go/trace/apiv2" + "go.opencensus.io/trace" + "google.golang.org/api/support/bundler" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" +) + +// traceExporter is an implementation of trace.Exporter that uploads spans to +// Stackdriver. +// +type traceExporter struct { + o Options + projectID string + bundler *bundler.Bundler + // uploadFn defaults to uploadSpans; it can be replaced for tests. + uploadFn func(spans []*trace.SpanData) + overflowLogger + client *tracingclient.Client +} + +var _ trace.Exporter = (*traceExporter)(nil) + +func newTraceExporter(o Options) (*traceExporter, error) { + client, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...) 
+ if err != nil { + return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) + } + return newTraceExporterWithClient(o, client), nil +} + +func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { + e := &traceExporter{ + projectID: o.ProjectID, + client: c, + o: o, + } + bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) { + e.uploadFn(bundle.([]*trace.SpanData)) + }) + if o.BundleDelayThreshold > 0 { + bundler.DelayThreshold = o.BundleDelayThreshold + } else { + bundler.DelayThreshold = 2 * time.Second + } + if o.BundleCountThreshold > 0 { + bundler.BundleCountThreshold = o.BundleCountThreshold + } else { + bundler.BundleCountThreshold = 50 + } + // The measured "bytes" are not really bytes, see exportReceiver. + bundler.BundleByteThreshold = bundler.BundleCountThreshold * 200 + bundler.BundleByteLimit = bundler.BundleCountThreshold * 1000 + bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000 + + e.bundler = bundler + e.uploadFn = e.uploadSpans + return e +} + +// ExportSpan exports a SpanData to Stackdriver Trace. +func (e *traceExporter) ExportSpan(s *trace.SpanData) { + // n is a length heuristic. + n := 1 + n += len(s.Attributes) + n += len(s.Annotations) + n += len(s.MessageEvents) + err := e.bundler.Add(s, n) + switch err { + case nil: + return + case bundler.ErrOversizedItem: + go e.uploadFn([]*trace.SpanData{s}) + case bundler.ErrOverflow: + e.overflowLogger.log() + default: + e.o.handleError(err) + } +} + +// Flush waits for exported trace spans to be uploaded. +// +// This is useful if your program is ending and you do not want to lose recent +// spans. +func (e *traceExporter) Flush() { + e.bundler.Flush() +} + +// uploadSpans uploads a set of spans to Stackdriver. 
+func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { + req := tracepb.BatchWriteSpansRequest{ + Name: "projects/" + e.projectID, + Spans: make([]*tracepb.Span, 0, len(spans)), + } + for _, span := range spans { + req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID)) + } + // Create a never-sampled span to prevent traces associated with exporter. + ctx, span := trace.StartSpan( // TODO: add timeouts + context.Background(), + "go.opencensus.io/exporter/stackdriver.uploadSpans", + trace.WithSampler(trace.NeverSample()), + ) + defer span.End() + span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) + + err := e.client.BatchWriteSpans(ctx, &req) + if err != nil { + span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) + e.o.handleError(err) + } +} + +// overflowLogger ensures that at most one overflow error log message is +// written every 5 seconds. +type overflowLogger struct { + mu sync.Mutex + pause bool + accum int +} + +func (o *overflowLogger) delay() { + o.pause = true + time.AfterFunc(5*time.Second, func() { + o.mu.Lock() + defer o.mu.Unlock() + switch { + case o.accum == 0: + o.pause = false + case o.accum == 1: + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.accum = 0 + o.delay() + default: + log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) + o.accum = 0 + o.delay() + } + }) +} + +func (o *overflowLogger) log() { + o.mu.Lock() + defer o.mu.Unlock() + if !o.pause { + log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") + o.delay() + } else { + o.accum++ + } +} diff --git a/exporter/stackdriver/trace_proto.go b/exporter/stackdriver/trace_proto.go new file mode 100644 index 000000000..5c2dc2d46 --- /dev/null +++ b/exporter/stackdriver/trace_proto.go @@ -0,0 +1,255 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); 
+// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "math" + "time" + "unicode/utf8" + + "go.opencensus.io/internal" + "go.opencensus.io/plugin/ochttp" + + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "go.opencensus.io/trace" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +const ( + maxAnnotationEventsPerSpan = 32 + maxMessageEventsPerSpan = 128 + maxAttributeStringValue = 256 + agentLabel = "g.co/agent" + + labelHTTPHost = `/http/host` + labelHTTPMethod = `/http/method` + labelHTTPStatusCode = `/http/status_code` + labelHTTPPath = `/http/path` + labelHTTPUserAgent = `/http/user_agent` +) + +// proto returns a protocol buffer representation of a SpanData. +func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span { + if s == nil { + return nil + } + + traceIDString := s.SpanContext.TraceID.String() + spanIDString := s.SpanContext.SpanID.String() + + name := s.Name + switch s.SpanKind { + case trace.SpanKindClient: + name = "Sent." + name + case trace.SpanKindServer: + name = "Recv." 
+ name + } + + sp := &tracepb.Span{ + Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, + SpanId: spanIDString, + DisplayName: trunc(name, 128), + StartTime: timestampProto(s.StartTime), + EndTime: timestampProto(s.EndTime), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, + } + if p := s.ParentSpanID; p != (trace.SpanID{}) { + sp.ParentSpanId = p.String() + } + if s.Status.Code != 0 || s.Status.Message != "" { + sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} + } + + var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int + copyAttributes(&sp.Attributes, s.Attributes) + + as := s.Annotations + for i, a := range as { + if annotations >= maxAnnotationEventsPerSpan { + droppedAnnotationsCount = len(as) - i + break + } + annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} + copyAttributes(&annotation.Attributes, a.Attributes) + event := &tracepb.Span_TimeEvent{ + Time: timestampProto(a.Time), + Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, + } + annotations++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) + } + + if sp.Attributes == nil { + sp.Attributes = &tracepb.Span_Attributes{ + AttributeMap: make(map[string]*tracepb.AttributeValue), + } + } + sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{ + StringValue: trunc(internal.UserAgent, maxAttributeStringValue), + }, + } + + es := s.MessageEvents + for i, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + droppedMessageEventsCount = len(es) - i + break + } + messageEvents++ + if sp.TimeEvents == nil { + sp.TimeEvents = &tracepb.Span_TimeEvents{} + } + sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ + Time: 
timestampProto(e.Time),
+ Value: &tracepb.Span_TimeEvent_MessageEvent_{
+ MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{
+ Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType),
+ Id: e.MessageID,
+ UncompressedSizeBytes: e.UncompressedByteSize,
+ CompressedSizeBytes: e.CompressedByteSize,
+ },
+ },
+ })
+ }
+
+ if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 {
+ if sp.TimeEvents == nil {
+ sp.TimeEvents = &tracepb.Span_TimeEvents{}
+ }
+ sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount)
+ sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount)
+ }
+
+ if len(s.Links) > 0 {
+ sp.Links = &tracepb.Span_Links{}
+ sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links))
+ for _, l := range s.Links {
+ link := &tracepb.Span_Link{
+ TraceId: l.TraceID.String(),
+ SpanId: l.SpanID.String(),
+ Type: tracepb.Span_Link_Type(l.Type),
+ }
+ copyAttributes(&link.Attributes, l.Attributes)
+ sp.Links.Link = append(sp.Links.Link, link)
+ }
+ }
+ return sp
+}
+
+// timestampProto creates a timestamp proto for a time.Time.
+func timestampProto(t time.Time) *timestamppb.Timestamp {
+ return &timestamppb.Timestamp{
+ Seconds: t.Unix(),
+ Nanos: int32(t.Nanosecond()),
+ }
+}
+
+// copyAttributes copies a map of attributes to a proto map field.
+// It creates the map if it is nil. 
+func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { + if len(in) == 0 { + return + } + if *out == nil { + *out = &tracepb.Span_Attributes{} + } + if (*out).AttributeMap == nil { + (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) + } + var dropped int32 + for key, value := range in { + av := attributeValue(value) + if av == nil { + continue + } + switch key { + case ochttp.PathAttribute: + (*out).AttributeMap[labelHTTPPath] = av + case ochttp.HostAttribute: + (*out).AttributeMap[labelHTTPHost] = av + case ochttp.MethodAttribute: + (*out).AttributeMap[labelHTTPMethod] = av + case ochttp.UserAgentAttribute: + (*out).AttributeMap[labelHTTPUserAgent] = av + case ochttp.StatusCodeAttribute: + (*out).AttributeMap[labelHTTPStatusCode] = av + default: + if len(key) > 128 { + dropped++ + continue + } + (*out).AttributeMap[key] = av + } + } + (*out).DroppedAttributesCount = dropped +} + +func attributeValue(v interface{}) *tracepb.AttributeValue { + switch value := v.(type) { + case bool: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, + } + case int64: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_IntValue{IntValue: value}, + } + case string: + return &tracepb.AttributeValue{ + Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, + } + } + return nil +} + +// trunc returns a TruncatableString truncated to the given limit. +func trunc(s string, limit int) *tracepb.TruncatableString { + if len(s) > limit { + b := []byte(s[:limit]) + for { + r, size := utf8.DecodeLastRune(b) + if r == utf8.RuneError && size == 1 { + b = b[:len(b)-1] + } else { + break + } + } + return &tracepb.TruncatableString{ + Value: string(b), + TruncatedByteCount: clip32(len(s) - len(b)), + } + } + return &tracepb.TruncatableString{ + Value: s, + TruncatedByteCount: 0, + } +} + +// clip32 clips an int to the range of an int32. 
+func clip32(x int) int32 { + if x < math.MinInt32 { + return math.MinInt32 + } + if x > math.MaxInt32 { + return math.MaxInt32 + } + return int32(x) +} diff --git a/exporter/stackdriver/trace_proto_test.go b/exporter/stackdriver/trace_proto_test.go new file mode 100644 index 000000000..2597b08b5 --- /dev/null +++ b/exporter/stackdriver/trace_proto_test.go @@ -0,0 +1,389 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "fmt" + "math/big" + "reflect" + "sort" + "strings" + "testing" + "time" + + "go.opencensus.io/internal" + + "github.com/golang/protobuf/proto" + timestamppb "github.com/golang/protobuf/ptypes/timestamp" + wrapperspb "github.com/golang/protobuf/ptypes/wrappers" + "go.opencensus.io/trace" + tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" + codepb "google.golang.org/genproto/googleapis/rpc/code" + statuspb "google.golang.org/genproto/googleapis/rpc/status" +) + +const projectID = "testproject" + +var ( + traceID = trace.TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f} + spanID = trace.SpanID{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1} +) + +type spans []*tracepb.Span + +func (s spans) Len() int { return len(s) } +func (s spans) Less(x, y int) bool { return s[x].DisplayName.Value < s[y].DisplayName.Value } +func (s spans) Swap(x, y int) { s[x], s[y] = s[y], s[x] } + +type testExporter 
struct { + spans []*trace.SpanData +} + +func (t *testExporter) ExportSpan(s *trace.SpanData) { + t.spans = append(t.spans, s) +} + +func TestExportTrace(t *testing.T) { + ctx := context.Background() + + var te testExporter + trace.RegisterExporter(&te) + defer trace.UnregisterExporter(&te) + + ctx, span0 := trace.StartSpanWithRemoteParent( + ctx, + "span0", + trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: 1, + }, + ) + { + ctx1, span1 := trace.StartSpan(ctx, "span1") + { + _, span2 := trace.StartSpan(ctx1, "span2") + span2.AddMessageSendEvent(0x123, 1024, 512) + span2.Annotatef(nil, "in span%d", 2) + span2.Annotate(nil, big.NewRat(2, 4).String()) + span2.AddAttributes( + trace.StringAttribute("key1", "value1"), + trace.StringAttribute("key2", "value2")) + span2.AddAttributes(trace.Int64Attribute("key1", 100)) + span2.End() + } + { + ctx3, span3 := trace.StartSpan(ctx1, "span3") + span3.Annotate(nil, "in span3") + span3.AddMessageReceiveEvent(0x456, 2048, 1536) + span3.SetStatus(trace.Status{Code: int32(codepb.Code_UNAVAILABLE)}) + span3.End() + { + _, span4 := trace.StartSpan(ctx3, "span4") + x := 42 + a1 := []trace.Attribute{trace.StringAttribute("k1", "v1")} + a2 := []trace.Attribute{trace.StringAttribute("k2", "v2")} + a3 := []trace.Attribute{trace.StringAttribute("k3", "v3")} + a4 := map[string]interface{}{"k4": "v4"} + r := big.NewRat(2, 4) + span4.Annotate(a1, r.String()) + span4.Annotatef(a2, "foo %d", x) + span4.Annotate(a3, "in span4") + span4.AddLink(trace.Link{TraceID: trace.TraceID{1, 2}, SpanID: trace.SpanID{3}, Type: trace.LinkTypeParent, Attributes: a4}) + span4.End() + } + } + span1.End() + } + span0.End() + if len(te.spans) != 5 { + t.Errorf("got %d exported spans, want 5", len(te.spans)) + } + + var spbs spans + for _, s := range te.spans { + spbs = append(spbs, protoFromSpanData(s, "testproject")) + } + sort.Sort(spbs) + + for i, want := range []string{ + spanID.String(), + spbs[0].SpanId, + spbs[1].SpanId, + 
spbs[1].SpanId, + spbs[3].SpanId, + } { + if got := spbs[i].ParentSpanId; got != want { + t.Errorf("span %d: got ParentSpanID %q want %q", i, got, want) + } + } + checkTime := func(ts **timestamppb.Timestamp) { + if *ts == nil { + t.Error("expected timestamp") + } + *ts = nil + } + for _, span := range spbs { + checkTime(&span.StartTime) + checkTime(&span.EndTime) + if span.TimeEvents != nil { + for _, te := range span.TimeEvents.TimeEvent { + checkTime(&te.Time) + } + } + if want := fmt.Sprintf("projects/testproject/traces/%s/spans/%s", traceID, span.SpanId); span.Name != want { + t.Errorf("got span name %q want %q", span.Name, want) + } + span.Name, span.SpanId, span.ParentSpanId = "", "", "" + } + + expectedSpans := spans{ + &tracepb.Span{ + DisplayName: trunc("span0", 128), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: false}, + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, + }, + }, + }, + &tracepb.Span{ + DisplayName: trunc("span1", 128), + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, + }, + }, + }, + &tracepb.Span{ + DisplayName: trunc("span2", 128), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "key2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("value2", 256)}}, + "key1": {Value: &tracepb.AttributeValue_IntValue{IntValue: 100}}, + agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, + }, + }, + TimeEvents: &tracepb.Span_TimeEvents{ + TimeEvent: []*tracepb.Span_TimeEvent{ + { + Value: 
&tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("in span2", 256), + }, + }, + }, + { + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("1/2", 256), + }, + }, + }, + { + Value: &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_SENT, + Id: 0x123, + UncompressedSizeBytes: 1024, + CompressedSizeBytes: 512, + }, + }, + }, + }, + }, + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, + }, + &tracepb.Span{ + DisplayName: trunc("span3", 128), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, + }, + }, + TimeEvents: &tracepb.Span_TimeEvents{ + TimeEvent: []*tracepb.Span_TimeEvent{ + { + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("in span3", 256), + }, + }, + }, + { + Value: &tracepb.Span_TimeEvent_MessageEvent_{ + MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ + Type: tracepb.Span_TimeEvent_MessageEvent_RECEIVED, + Id: 0x456, + UncompressedSizeBytes: 2048, + CompressedSizeBytes: 1536, + }, + }, + }, + }, + }, + Status: &statuspb.Status{ + Code: 14, + }, + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, + }, + &tracepb.Span{ + DisplayName: trunc("span4", 128), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, + }, + }, + TimeEvents: &tracepb.Span_TimeEvents{ + TimeEvent: []*tracepb.Span_TimeEvent{ + { + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("1/2", 256), + 
Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "k1": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v1", 256)}}, + }, + }, + }, + }, + }, + { + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("foo 42", 256), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "k2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v2", 256)}}, + }, + }, + }, + }, + }, + { + Value: &tracepb.Span_TimeEvent_Annotation_{ + Annotation: &tracepb.Span_TimeEvent_Annotation{ + Description: trunc("in span4", 256), + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "k3": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v3", 256)}}, + }, + }, + }, + }, + }, + }, + }, + Links: &tracepb.Span_Links{ + Link: []*tracepb.Span_Link{ + { + TraceId: "01020000000000000000000000000000", + SpanId: "0300000000000000", + Type: tracepb.Span_Link_PARENT_LINKED_SPAN, + Attributes: &tracepb.Span_Attributes{ + AttributeMap: map[string]*tracepb.AttributeValue{ + "k4": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v4", 256)}}, + }, + }, + }, + }, + }, + SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, + }, + } + + if !reflect.DeepEqual(spbs, expectedSpans) { + var got, want []string + for _, s := range spbs { + got = append(got, proto.MarshalTextString(s)) + } + for _, s := range expectedSpans { + want = append(want, proto.MarshalTextString(s)) + } + t.Errorf("got spans:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) + } +} + +func TestEnums(t *testing.T) { + for _, test := range []struct { + x trace.LinkType + y tracepb.Span_Link_Type + }{ + {trace.LinkTypeUnspecified, tracepb.Span_Link_TYPE_UNSPECIFIED}, + {trace.LinkTypeChild, tracepb.Span_Link_CHILD_LINKED_SPAN}, + {trace.LinkTypeParent, 
tracepb.Span_Link_PARENT_LINKED_SPAN}, + } { + if test.x != trace.LinkType(test.y) { + t.Errorf("got link type values %d and %d, want equal", test.x, test.y) + } + } + + for _, test := range []struct { + x trace.MessageEventType + y tracepb.Span_TimeEvent_MessageEvent_Type + }{ + {trace.MessageEventTypeUnspecified, tracepb.Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED}, + {trace.MessageEventTypeSent, tracepb.Span_TimeEvent_MessageEvent_SENT}, + {trace.MessageEventTypeRecv, tracepb.Span_TimeEvent_MessageEvent_RECEIVED}, + } { + if test.x != trace.MessageEventType(test.y) { + t.Errorf("got network event type values %d and %d, want equal", test.x, test.y) + } + } +} + +func BenchmarkProto(b *testing.B) { + sd := &trace.SpanData{ + SpanContext: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + }, + Name: "foo", + StartTime: time.Now().Add(-time.Second), + EndTime: time.Now(), + Attributes: map[string]interface{}{"foo": "bar"}, + Annotations: []trace.Annotation{ + { + Time: time.Now().Add(-time.Millisecond), + Message: "hello, world", + Attributes: map[string]interface{}{"foo": "bar"}, + }, + }, + MessageEvents: []trace.MessageEvent{ + { + Time: time.Now().Add(-time.Microsecond), + EventType: 1, + MessageID: 2, + UncompressedByteSize: 4, + CompressedByteSize: 3, + }, + }, + Status: trace.Status{ + Code: 42, + Message: "failed", + }, + HasRemoteParent: true, + } + var x int + for i := 0; i < b.N; i++ { + s := protoFromSpanData(sd, `testproject`) + x += len(s.Name) + } + if x == 0 { + fmt.Println(x) + } +} diff --git a/exporter/stackdriver/trace_test.go b/exporter/stackdriver/trace_test.go new file mode 100644 index 000000000..03a24700d --- /dev/null +++ b/exporter/stackdriver/trace_test.go @@ -0,0 +1,62 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stackdriver + +import ( + "context" + "testing" + "time" + + "go.opencensus.io/trace" +) + +func TestBundling(t *testing.T) { + exporter := newTraceExporterWithClient(Options{ + ProjectID: "fakeProjectID", + BundleDelayThreshold: time.Second / 10, + BundleCountThreshold: 10, + }, nil) + + ch := make(chan []*trace.SpanData) + exporter.uploadFn = func(spans []*trace.SpanData) { + ch <- spans + } + trace.RegisterExporter(exporter) + + for i := 0; i < 35; i++ { + _, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample())) + span.End() + } + + // Read the first three bundles. + <-ch + <-ch + <-ch + + // Test that the fourth bundle isn't sent early. + select { + case <-ch: + t.Errorf("bundle sent too early") + case <-time.After(time.Second / 20): + <-ch + } + + // Test that there aren't extra bundles. 
+ select { + case <-ch: + t.Errorf("too many bundles sent") + case <-time.After(time.Second / 5): + } +} From db4ae14dbe46ab8ee97944bd6114a7e4161ee771 Mon Sep 17 00:00:00 2001 From: Bas van Beek Date: Fri, 3 Aug 2018 23:39:21 +0800 Subject: [PATCH 043/212] Add NewClientTracer option to allow usage of httptrace package with ochttp (#848) --- plugin/ochttp/client.go | 7 ++ plugin/ochttp/span_annotator.go | 160 +++++++++++++++++++++++++++ plugin/ochttp/span_annotator_test.go | 104 +++++++++++++++++ plugin/ochttp/trace.go | 9 +- 4 files changed, 279 insertions(+), 1 deletion(-) create mode 100644 plugin/ochttp/span_annotator.go create mode 100644 plugin/ochttp/span_annotator_test.go diff --git a/plugin/ochttp/client.go b/plugin/ochttp/client.go index 55c2567e7..68faf24f5 100644 --- a/plugin/ochttp/client.go +++ b/plugin/ochttp/client.go @@ -16,6 +16,7 @@ package ochttp import ( "net/http" + "net/http/httptrace" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" @@ -51,6 +52,11 @@ type Transport struct { // name equals the URL Path. FormatSpanName func(*http.Request) string + // NewClientTrace may be set to a function allowing the current *trace.Span + // to be annotated with HTTP request event information emitted by the + // httptrace package. + NewClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace + // TODO: Implement tag propagation for HTTP. 
} @@ -77,6 +83,7 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { SpanKind: trace.SpanKindClient, }, formatSpanName: spanNameFormatter, + newClientTrace: t.NewClientTrace, } rt = statsTransport{base: rt} return rt.RoundTrip(req) diff --git a/plugin/ochttp/span_annotator.go b/plugin/ochttp/span_annotator.go new file mode 100644 index 000000000..128228729 --- /dev/null +++ b/plugin/ochttp/span_annotator.go @@ -0,0 +1,160 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "crypto/tls" + "net/http" + "net/http/httptrace" + "strings" + + "go.opencensus.io/trace" +) + +type spanAnnotator struct { + sp *trace.Span +} + +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates all emitted +// httptrace events on the provided Span. 
+func NewSpanAnnotator(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { + sa := spanAnnotator{sp: s} + + return &httptrace.ClientTrace{ + GetConn: sa.getConn, + GotConn: sa.gotConn, + PutIdleConn: sa.putIdleConn, + GotFirstResponseByte: sa.gotFirstResponseByte, + Got100Continue: sa.got100Continue, + DNSStart: sa.dnsStart, + DNSDone: sa.dnsDone, + ConnectStart: sa.connectStart, + ConnectDone: sa.connectDone, + TLSHandshakeStart: sa.tlsHandshakeStart, + TLSHandshakeDone: sa.tlsHandshakeDone, + WroteHeaders: sa.wroteHeaders, + Wait100Continue: sa.wait100Continue, + WroteRequest: sa.wroteRequest, + } +} + +func (s spanAnnotator) getConn(hostPort string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.get_connection.host_port", hostPort), + } + s.sp.Annotate(attrs, "GetConn") +} + +func (s spanAnnotator) gotConn(info httptrace.GotConnInfo) { + attrs := []trace.Attribute{ + trace.BoolAttribute("httptrace.got_connection.reused", info.Reused), + trace.BoolAttribute("httptrace.got_connection.was_idle", info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, + trace.StringAttribute("httptrace.got_connection.idle_time", info.IdleTime.String())) + } + s.sp.Annotate(attrs, "GotConn") +} + +// PutIdleConn implements a httptrace.ClientTrace hook +func (s spanAnnotator) putIdleConn(err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.put_idle_connection.error", err.Error())) + } + s.sp.Annotate(attrs, "PutIdleConn") +} + +func (s spanAnnotator) gotFirstResponseByte() { + s.sp.Annotate(nil, "GotFirstResponseByte") +} + +func (s spanAnnotator) got100Continue() { + s.sp.Annotate(nil, "Got100Continue") +} + +func (s spanAnnotator) dnsStart(info httptrace.DNSStartInfo) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_start.host", info.Host), + } + s.sp.Annotate(attrs, "DNSStart") +} + +func (s spanAnnotator) dnsDone(info httptrace.DNSDoneInfo) { + var addrs 
[]string + for _, addr := range info.Addrs { + addrs = append(addrs, addr.String()) + } + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.dns_done.addrs", strings.Join(addrs, " , ")), + } + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.dns_done.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "DNSDone") +} + +func (s spanAnnotator) connectStart(network, addr string) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_start.network", network), + trace.StringAttribute("httptrace.connect_start.addr", addr), + } + s.sp.Annotate(attrs, "ConnectStart") +} + +func (s spanAnnotator) connectDone(network, addr string, err error) { + attrs := []trace.Attribute{ + trace.StringAttribute("httptrace.connect_done.network", network), + trace.StringAttribute("httptrace.connect_done.addr", addr), + } + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.connect_done.error", err.Error())) + } + s.sp.Annotate(attrs, "ConnectDone") +} + +func (s spanAnnotator) tlsHandshakeStart() { + s.sp.Annotate(nil, "TLSHandshakeStart") +} + +func (s spanAnnotator) tlsHandshakeDone(_ tls.ConnectionState, err error) { + var attrs []trace.Attribute + if err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.tls_handshake_done.error", err.Error())) + } + s.sp.Annotate(attrs, "TLSHandshakeDone") +} + +func (s spanAnnotator) wroteHeaders() { + s.sp.Annotate(nil, "WroteHeaders") +} + +func (s spanAnnotator) wait100Continue() { + s.sp.Annotate(nil, "Wait100Continue") +} + +func (s spanAnnotator) wroteRequest(info httptrace.WroteRequestInfo) { + var attrs []trace.Attribute + if info.Err != nil { + attrs = append(attrs, + trace.StringAttribute("httptrace.wrote_request.error", info.Err.Error())) + } + s.sp.Annotate(attrs, "WroteRequest") +} diff --git a/plugin/ochttp/span_annotator_test.go b/plugin/ochttp/span_annotator_test.go new file mode 100644 index 000000000..2642d9488 --- /dev/null +++ 
b/plugin/ochttp/span_annotator_test.go @@ -0,0 +1,104 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp_test + +import ( + "errors" + "net/http" + "net/http/httptest" + "strings" + "sync" + "testing" + + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/trace" +) + +func TestSpanAnnotator(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + resp.Write([]byte("Hello, world!")) + })) + defer server.Close() + + recorder := &testExporter{} + + trace.RegisterExporter(recorder) + + tr := ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotator} + + req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) + if err != nil { + t.Errorf("error creating request: %v", err) + } + + resp, err := tr.RoundTrip(req) + if err != nil { + t.Errorf("response error: %v", err) + } + if err := resp.Body.Close(); err != nil { + t.Errorf("error closing response body: %v", err) + } + if got, want := resp.StatusCode, 200; got != want { + t.Errorf("resp.StatusCode=%d; want=%d", got, want) + } + + if got, want := len(recorder.spans), 1; got != want { + t.Errorf("span count=%d; want=%d", got, want) + } + + var annotations []string + for _, annotation := range recorder.spans[0].Annotations { + annotations = append(annotations, annotation.Message) + } + + required := []string{ + "GetConn", "GotConn", "GotFirstResponseByte", "ConnectStart", + 
"ConnectDone", "WroteHeaders", "WroteRequest", + } + + if errs := requiredAnnotations(required, annotations); len(errs) > 0 { + for _, err := range errs { + t.Error(err) + } + } + +} + +type testExporter struct { + mu sync.Mutex + spans []*trace.SpanData +} + +func (t *testExporter) ExportSpan(s *trace.SpanData) { + t.mu.Lock() + t.spans = append(t.spans, s) + t.mu.Unlock() +} + +func requiredAnnotations(required []string, list []string) []error { + var errs []error + for _, item := range required { + var found bool + for _, v := range list { + if v == item { + found = true + } + } + if !found { + errs = append(errs, errors.New("missing expected annotation: "+item)) + } + } + return errs +} diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 81abc3c2c..980b6390f 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -17,6 +17,7 @@ package ochttp import ( "io" "net/http" + "net/http/httptrace" "go.opencensus.io/plugin/ochttp/propagation/b3" "go.opencensus.io/trace" @@ -42,6 +43,7 @@ type traceTransport struct { startOptions trace.StartOptions format propagation.HTTPFormat formatSpanName func(*http.Request) string + newClientTrace func(*http.Request, *trace.Span) *httptrace.ClientTrace } // TODO(jbd): Add message events for request and response size. 
@@ -57,7 +59,12 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { trace.WithSampler(t.startOptions.Sampler), trace.WithSpanKind(trace.SpanKindClient)) - req = req.WithContext(ctx) + if t.newClientTrace != nil { + req = req.WithContext(httptrace.WithClientTrace(ctx, t.newClientTrace(req, span))) + } else { + req = req.WithContext(ctx) + } + if t.format != nil { t.format.SpanContextToRequest(span.SpanContext(), req) } From 7b558058b7cc960667590e5413ef55157b06652e Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Fri, 3 Aug 2018 08:39:44 -0700 Subject: [PATCH 044/212] Remove deprecated Stackdriver exporter (#856) It's moved to contrib.go.opencensus.io/exporter/stackdriver. --- exporter/stackdriver/stackdriver.go | 148 ---- exporter/stackdriver/stackdriver_test.go | 125 ---- exporter/stackdriver/stats.go | 439 ------------ exporter/stackdriver/stats_test.go | 866 ----------------------- exporter/stackdriver/trace.go | 172 ----- exporter/stackdriver/trace_proto.go | 255 ------- exporter/stackdriver/trace_proto_test.go | 389 ---------- exporter/stackdriver/trace_test.go | 62 -- 8 files changed, 2456 deletions(-) delete mode 100644 exporter/stackdriver/stackdriver.go delete mode 100644 exporter/stackdriver/stackdriver_test.go delete mode 100644 exporter/stackdriver/stats.go delete mode 100644 exporter/stackdriver/stats_test.go delete mode 100644 exporter/stackdriver/trace.go delete mode 100644 exporter/stackdriver/trace_proto.go delete mode 100644 exporter/stackdriver/trace_proto_test.go delete mode 100644 exporter/stackdriver/trace_test.go diff --git a/exporter/stackdriver/stackdriver.go b/exporter/stackdriver/stackdriver.go deleted file mode 100644 index b4f152149..000000000 --- a/exporter/stackdriver/stackdriver.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package stackdriver has moved. -// -// Deprecated: Use contrib.go.opencensus.io/exporter/stackdriver instead. -package stackdriver // import "go.opencensus.io/exporter/stackdriver" - -import ( - "context" - "errors" - "fmt" - "log" - "time" - - traceapi "cloud.google.com/go/trace/apiv2" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - "golang.org/x/oauth2/google" - "google.golang.org/api/option" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" -) - -// Options contains options for configuring the exporter. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -type Options struct { - // ProjectID is the identifier of the Stackdriver - // project the user is uploading the stats data to. - // If not set, this will default to your "Application Default Credentials". - // For details see: https://developers.google.com/accounts/docs/application-default-credentials - ProjectID string - - // OnError is the hook to be called when there is - // an error uploading the stats or tracing data. - // If no custom hook is set, errors are logged. - // Optional. - OnError func(err error) - - // MonitoringClientOptions are additional options to be passed - // to the underlying Stackdriver Monitoring API client. - // Optional. - MonitoringClientOptions []option.ClientOption - - // TraceClientOptions are additional options to be passed - // to the underlying Stackdriver Trace API client. - // Optional. 
- TraceClientOptions []option.ClientOption - - // BundleDelayThreshold determines the max amount of time - // the exporter can wait before uploading view data to - // the backend. - // Optional. - BundleDelayThreshold time.Duration - - // BundleCountThreshold determines how many view data events - // can be buffered before batch uploading them to the backend. - // Optional. - BundleCountThreshold int - - // Resource is an optional field that represents the Stackdriver - // MonitoredResource, a resource that can be used for monitoring. - // If no custom ResourceDescriptor is set, a default MonitoredResource - // with type global and no resource labels will be used. - // Optional. - Resource *monitoredrespb.MonitoredResource - - // MetricPrefix overrides the OpenCensus prefix of a stackdriver metric. - // Optional. - MetricPrefix string -} - -// Exporter is a stats.Exporter and trace.Exporter -// implementation that uploads data to Stackdriver. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -type Exporter struct { - traceExporter *traceExporter - statsExporter *statsExporter -} - -// NewExporter creates a new Exporter that implements both stats.Exporter and -// trace.Exporter. -// -// Deprecated: This package has been moved to: contrib.go.opencensus.io/exporter/stackdriver. -func NewExporter(o Options) (*Exporter, error) { - if o.ProjectID == "" { - creds, err := google.FindDefaultCredentials(context.Background(), traceapi.DefaultAuthScopes()...) 
- if err != nil { - return nil, fmt.Errorf("stackdriver: %v", err) - } - if creds.ProjectID == "" { - return nil, errors.New("stackdriver: no project found with application default credentials") - } - o.ProjectID = creds.ProjectID - } - se, err := newStatsExporter(o) - if err != nil { - return nil, err - } - te, err := newTraceExporter(o) - if err != nil { - return nil, err - } - return &Exporter{ - statsExporter: se, - traceExporter: te, - }, nil -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -func (e *Exporter) ExportView(vd *view.Data) { - e.statsExporter.ExportView(vd) -} - -// ExportSpan exports a SpanData to Stackdriver Trace. -func (e *Exporter) ExportSpan(sd *trace.SpanData) { - e.traceExporter.ExportSpan(sd) -} - -// Flush waits for exported data to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose recent stats or spans. -func (e *Exporter) Flush() { - e.statsExporter.Flush() - e.traceExporter.Flush() -} - -func (o Options) handleError(err error) { - if o.OnError != nil { - o.OnError(err) - return - } - log.Printf("Error exporting to Stackdriver: %v", err) -} diff --git a/exporter/stackdriver/stackdriver_test.go b/exporter/stackdriver/stackdriver_test.go deleted file mode 100644 index 55cc81e93..000000000 --- a/exporter/stackdriver/stackdriver_test.go +++ /dev/null @@ -1,125 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "io/ioutil" - "net/http" - "net/http/httptest" - "os" - "testing" - "time" - - "go.opencensus.io/internal/testpb" - "go.opencensus.io/plugin/ochttp" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" - "golang.org/x/net/context/ctxhttp" -) - -func TestExport(t *testing.T) { - projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") - if !ok { - t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") - } - - var exportErrors []error - - exporter, err := NewExporter(Options{ProjectID: projectID, OnError: func(err error) { - exportErrors = append(exportErrors, err) - }}) - if err != nil { - t.Fatal(err) - } - defer exporter.Flush() - - trace.RegisterExporter(exporter) - defer trace.UnregisterExporter(exporter) - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - _, span := trace.StartSpan(context.Background(), "custom-span") - time.Sleep(10 * time.Millisecond) - span.End() - - // Test HTTP spans - - handler := http.HandlerFunc(func(rw http.ResponseWriter, req *http.Request) { - _, backgroundSpan := trace.StartSpan(context.Background(), "BackgroundWork") - spanContext := backgroundSpan.SpanContext() - time.Sleep(10 * time.Millisecond) - backgroundSpan.End() - - _, span := trace.StartSpan(req.Context(), "Sleep") - span.AddLink(trace.Link{Type: trace.LinkTypeChild, TraceID: spanContext.TraceID, SpanID: spanContext.SpanID}) - time.Sleep(150 * time.Millisecond) // do work - span.End() - rw.Write([]byte("Hello, world!")) - }) - server := httptest.NewServer(&ochttp.Handler{Handler: handler}) - defer server.Close() - - ctx := context.Background() - client := &http.Client{ - Transport: &ochttp.Transport{}, - } - resp, err := ctxhttp.Get(ctx, client, server.URL+"/test/123?abc=xyz") - if err != nil { - t.Fatal(err) - } - 
body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatal(err) - } - if want, got := "Hello, world!", string(body); want != got { - t.Fatalf("resp.Body = %q; want %q", want, got) - } - - // Flush twice to expose issue of exporter creating traces internally (#557) - exporter.Flush() - exporter.Flush() - - for _, err := range exportErrors { - t.Error(err) - } -} - -func TestGRPC(t *testing.T) { - projectID, ok := os.LookupEnv("STACKDRIVER_TEST_PROJECT_ID") - if !ok { - t.Skip("STACKDRIVER_TEST_PROJECT_ID not set") - } - - exporter, err := NewExporter(Options{ProjectID: projectID}) - if err != nil { - t.Fatal(err) - } - defer exporter.Flush() - - trace.RegisterExporter(exporter) - defer trace.UnregisterExporter(exporter) - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) - - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - client, done := testpb.NewTestClient(t) - defer done() - - client.Single(context.Background(), &testpb.FooRequest{SleepNanos: int64(42 * time.Millisecond)}) -} diff --git a/exporter/stackdriver/stats.go b/exporter/stackdriver/stats.go deleted file mode 100644 index 93635dac8..000000000 --- a/exporter/stackdriver/stats.go +++ /dev/null @@ -1,439 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package stackdriver - -import ( - "context" - "errors" - "fmt" - "os" - "path" - "strconv" - "strings" - "sync" - "time" - - "go.opencensus.io/internal" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "go.opencensus.io/trace" - - "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/ptypes/timestamp" - "google.golang.org/api/option" - "google.golang.org/api/support/bundler" - distributionpb "google.golang.org/genproto/googleapis/api/distribution" - labelpb "google.golang.org/genproto/googleapis/api/label" - "google.golang.org/genproto/googleapis/api/metric" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" -) - -const maxTimeSeriesPerUpload = 200 -const opencensusTaskKey = "opencensus_task" -const opencensusTaskDescription = "Opencensus task identifier" -const defaultDisplayNamePrefix = "OpenCensus" - -// statsExporter exports stats to the Stackdriver Monitoring. -type statsExporter struct { - bundler *bundler.Bundler - o Options - - createdViewsMu sync.Mutex - createdViews map[string]*metricpb.MetricDescriptor // Views already created remotely - - c *monitoring.MetricClient - taskValue string -} - -// Enforces the singleton on NewExporter per projectID per process -// lest there will be races with Stackdriver. -var ( - seenProjectsMu sync.Mutex - seenProjects = make(map[string]bool) -) - -var ( - errBlankProjectID = errors.New("expecting a non-blank ProjectID") - errSingletonExporter = errors.New("only one exporter can be created per unique ProjectID per process") -) - -// newStatsExporter returns an exporter that uploads stats data to Stackdriver Monitoring. -// Only one Stackdriver exporter should be created per ProjectID per process, any subsequent -// invocations of NewExporter with the same ProjectID will return an error. 
-func newStatsExporter(o Options) (*statsExporter, error) { - if strings.TrimSpace(o.ProjectID) == "" { - return nil, errBlankProjectID - } - - seenProjectsMu.Lock() - defer seenProjectsMu.Unlock() - _, seen := seenProjects[o.ProjectID] - if seen { - return nil, errSingletonExporter - } - - seenProjects[o.ProjectID] = true - - opts := append(o.MonitoringClientOptions, option.WithUserAgent(internal.UserAgent)) - client, err := monitoring.NewMetricClient(context.Background(), opts...) - if err != nil { - return nil, err - } - e := &statsExporter{ - c: client, - o: o, - createdViews: make(map[string]*metricpb.MetricDescriptor), - taskValue: getTaskValue(), - } - e.bundler = bundler.NewBundler((*view.Data)(nil), func(bundle interface{}) { - vds := bundle.([]*view.Data) - e.handleUpload(vds...) - }) - e.bundler.DelayThreshold = e.o.BundleDelayThreshold - e.bundler.BundleCountThreshold = e.o.BundleCountThreshold - return e, nil -} - -// ExportView exports to the Stackdriver Monitoring if view data -// has one or more rows. -func (e *statsExporter) ExportView(vd *view.Data) { - if len(vd.Rows) == 0 { - return - } - err := e.bundler.Add(vd, 1) - switch err { - case nil: - return - case bundler.ErrOversizedItem: - go e.handleUpload(vd) - case bundler.ErrOverflow: - e.o.handleError(errors.New("failed to upload: buffer full")) - default: - e.o.handleError(err) - } -} - -// getTaskValue returns a task label value in the format of -// "go-@". -func getTaskValue() string { - hostname, err := os.Hostname() - if err != nil { - hostname = "localhost" - } - return "go-" + strconv.Itoa(os.Getpid()) + "@" + hostname -} - -// handleUpload handles uploading a slice -// of Data, as well as error handling. -func (e *statsExporter) handleUpload(vds ...*view.Data) { - if err := e.uploadStats(vds); err != nil { - e.o.handleError(err) - } -} - -// Flush waits for exported view data to be uploaded. -// -// This is useful if your program is ending and you do not -// want to lose recent spans. 
-func (e *statsExporter) Flush() { - e.bundler.Flush() -} - -func (e *statsExporter) uploadStats(vds []*view.Data) error { - ctx, span := trace.StartSpan( - context.Background(), - "go.opencensus.io/exporter/stackdriver.uploadStats", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - - for _, vd := range vds { - if err := e.createMeasure(ctx, vd); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - return err - } - } - for _, req := range e.makeReq(vds, maxTimeSeriesPerUpload) { - if err := e.c.CreateTimeSeries(ctx, req); err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - // TODO(jbd): Don't fail fast here, batch errors? - return err - } - } - return nil -} - -func (e *statsExporter) makeReq(vds []*view.Data, limit int) []*monitoringpb.CreateTimeSeriesRequest { - var reqs []*monitoringpb.CreateTimeSeriesRequest - var timeSeries []*monitoringpb.TimeSeries - - resource := e.o.Resource - if resource == nil { - resource = &monitoredrespb.MonitoredResource{ - Type: "global", - } - } - - for _, vd := range vds { - for _, row := range vd.Rows { - ts := &monitoringpb.TimeSeries{ - Metric: &metricpb.Metric{ - Type: namespacedViewName(vd.View.Name), - Labels: newLabels(row.Tags, e.taskValue), - }, - Resource: resource, - Points: []*monitoringpb.Point{newPoint(vd.View, row, vd.Start, vd.End)}, - } - timeSeries = append(timeSeries, ts) - if len(timeSeries) == limit { - reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(e.o.ProjectID), - TimeSeries: timeSeries, - }) - timeSeries = []*monitoringpb.TimeSeries{} - } - } - } - if len(timeSeries) > 0 { - reqs = append(reqs, &monitoringpb.CreateTimeSeriesRequest{ - Name: monitoring.MetricProjectPath(e.o.ProjectID), - TimeSeries: timeSeries, - }) - } - return reqs -} - -// createMeasure creates a MetricDescriptor for the given view data in Stackdriver Monitoring. 
-// An error will be returned if there is already a metric descriptor created with the same name -// but it has a different aggregation or keys. -func (e *statsExporter) createMeasure(ctx context.Context, vd *view.Data) error { - e.createdViewsMu.Lock() - defer e.createdViewsMu.Unlock() - - m := vd.View.Measure - agg := vd.View.Aggregation - tagKeys := vd.View.TagKeys - viewName := vd.View.Name - - if md, ok := e.createdViews[viewName]; ok { - return equalMeasureAggTagKeys(md, m, agg, tagKeys) - } - - metricType := namespacedViewName(viewName) - var valueType metricpb.MetricDescriptor_ValueType - unit := m.Unit() - - switch agg.Type { - case view.AggTypeCount: - valueType = metricpb.MetricDescriptor_INT64 - // If the aggregation type is count, which counts the number of recorded measurements, the unit must be "1", - // because this view does not apply to the recorded values. - unit = stats.UnitDimensionless - case view.AggTypeSum: - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - case view.AggTypeDistribution: - valueType = metricpb.MetricDescriptor_DISTRIBUTION - case view.AggTypeLastValue: - switch m.(type) { - case *stats.Int64Measure: - valueType = metricpb.MetricDescriptor_INT64 - case *stats.Float64Measure: - valueType = metricpb.MetricDescriptor_DOUBLE - } - default: - return fmt.Errorf("unsupported aggregation type: %s", agg.Type.String()) - } - - metricKind := metricpb.MetricDescriptor_CUMULATIVE - displayNamePrefix := defaultDisplayNamePrefix - if e.o.MetricPrefix != "" { - displayNamePrefix = e.o.MetricPrefix - } - - md, err := createMetricDescriptor(ctx, e.c, &monitoringpb.CreateMetricDescriptorRequest{ - Name: fmt.Sprintf("projects/%s", e.o.ProjectID), - MetricDescriptor: &metricpb.MetricDescriptor{ - Name: fmt.Sprintf("projects/%s/metricDescriptors/%s", e.o.ProjectID, metricType), - DisplayName: path.Join(displayNamePrefix, 
viewName), - Description: vd.View.Description, - Unit: unit, - Type: metricType, - MetricKind: metricKind, - ValueType: valueType, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, - }) - if err != nil { - return err - } - - e.createdViews[viewName] = md - return nil -} - -func newPoint(v *view.View, row *view.Row, start, end time.Time) *monitoringpb.Point { - return &monitoringpb.Point{ - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: newTypedValue(v, row), - } -} - -func newTypedValue(vd *view.View, r *view.Row) *monitoringpb.TypedValue { - switch v := r.Data.(type) { - case *view.CountData: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: v.Value, - }} - case *view.SumData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - case *view.DistributionData: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DistributionValue{ - DistributionValue: &distributionpb.Distribution{ - Count: v.Count, - Mean: v.Mean, - SumOfSquaredDeviation: v.SumOfSquaredDev, - // TODO(songya): uncomment this once Stackdriver supports min/max. 
- // Range: &distributionpb.Distribution_Range{ - // Min: v.Min, - // Max: v.Max, - // }, - BucketOptions: &distributionpb.Distribution_BucketOptions{ - Options: &distributionpb.Distribution_BucketOptions_ExplicitBuckets{ - ExplicitBuckets: &distributionpb.Distribution_BucketOptions_Explicit{ - Bounds: vd.Aggregation.Buckets, - }, - }, - }, - BucketCounts: v.CountPerBucket, - }, - }} - case *view.LastValueData: - switch vd.Measure.(type) { - case *stats.Int64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: int64(v.Value), - }} - case *stats.Float64Measure: - return &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: v.Value, - }} - } - } - return nil -} - -func namespacedViewName(v string) string { - return path.Join("custom.googleapis.com", "opencensus", v) -} - -func newLabels(tags []tag.Tag, taskValue string) map[string]string { - labels := make(map[string]string) - for _, tag := range tags { - labels[internal.Sanitize(tag.Key.Name())] = tag.Value - } - labels[opencensusTaskKey] = taskValue - return labels -} - -func newLabelDescriptors(keys []tag.Key) []*labelpb.LabelDescriptor { - labelDescriptors := make([]*labelpb.LabelDescriptor, len(keys)+1) - for i, key := range keys { - labelDescriptors[i] = &labelpb.LabelDescriptor{ - Key: internal.Sanitize(key.Name()), - ValueType: labelpb.LabelDescriptor_STRING, // We only use string tags - } - } - // Add a specific open census task id label. 
- labelDescriptors[len(keys)] = &labelpb.LabelDescriptor{ - Key: opencensusTaskKey, - ValueType: labelpb.LabelDescriptor_STRING, - Description: opencensusTaskDescription, - } - return labelDescriptors -} - -func equalMeasureAggTagKeys(md *metricpb.MetricDescriptor, m stats.Measure, agg *view.Aggregation, keys []tag.Key) error { - var aggTypeMatch bool - switch md.ValueType { - case metricpb.MetricDescriptor_INT64: - if _, ok := m.(*stats.Int64Measure); !(ok || agg.Type == view.AggTypeCount) { - return fmt.Errorf("stackdriver metric descriptor was not created as int64") - } - aggTypeMatch = agg.Type == view.AggTypeCount || agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue - case metricpb.MetricDescriptor_DOUBLE: - if _, ok := m.(*stats.Float64Measure); !ok { - return fmt.Errorf("stackdriver metric descriptor was not created as double") - } - aggTypeMatch = agg.Type == view.AggTypeSum || agg.Type == view.AggTypeLastValue - case metricpb.MetricDescriptor_DISTRIBUTION: - aggTypeMatch = agg.Type == view.AggTypeDistribution - } - - if !aggTypeMatch { - return fmt.Errorf("stackdriver metric descriptor was not created with aggregation type %T", agg.Type) - } - - if len(md.Labels) != len(keys)+1 { - return errors.New("stackdriver metric descriptor was not created with the view labels") - } - - labels := make(map[string]struct{}, len(keys)+1) - for _, k := range keys { - labels[internal.Sanitize(k.Name())] = struct{}{} - } - labels[opencensusTaskKey] = struct{}{} - - for _, k := range md.Labels { - if _, ok := labels[k.Key]; !ok { - return fmt.Errorf("stackdriver metric descriptor was not created with label %q", k) - } - } - - return nil -} - -var createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return c.CreateMetricDescriptor(ctx, mdr) -} - -var getMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr 
*monitoringpb.GetMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - return c.GetMetricDescriptor(ctx, mdr) -} diff --git a/exporter/stackdriver/stats_test.go b/exporter/stackdriver/stats_test.go deleted file mode 100644 index d734dc52e..000000000 --- a/exporter/stackdriver/stats_test.go +++ /dev/null @@ -1,866 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "reflect" - "testing" - "time" - - "cloud.google.com/go/monitoring/apiv3" - "github.com/golang/protobuf/ptypes/timestamp" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - "google.golang.org/api/option" - "google.golang.org/genproto/googleapis/api/label" - "google.golang.org/genproto/googleapis/api/metric" - metricpb "google.golang.org/genproto/googleapis/api/metric" - monitoredrespb "google.golang.org/genproto/googleapis/api/monitoredres" - monitoringpb "google.golang.org/genproto/googleapis/monitoring/v3" - "google.golang.org/grpc" -) - -var authOptions = []option.ClientOption{option.WithGRPCConn(&grpc.ClientConn{})} - -func TestRejectBlankProjectID(t *testing.T) { - ids := []string{"", " ", " "} - for _, projectID := range ids { - opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} - exp, err := newStatsExporter(opts) - if err == nil || exp != nil { - t.Errorf("%q ProjectID must be rejected: NewExporter() = %v err = %q", projectID, exp, 
err) - } - } -} - -// Ensure only one exporter per projectID per process, any -// subsequent invocations of NewExporter should fail. -func TestNewExporterSingletonPerProcess(t *testing.T) { - ids := []string{"open-census.io", "x", "fakeProjectID"} - for _, projectID := range ids { - opts := Options{ProjectID: projectID, MonitoringClientOptions: authOptions} - exp, err := newStatsExporter(opts) - if err != nil { - t.Errorf("NewExporter() projectID = %q err = %q", projectID, err) - continue - } - if exp == nil { - t.Errorf("NewExporter returned a nil Exporter") - continue - } - exp, err = newStatsExporter(opts) - if err == nil || exp != nil { - t.Errorf("NewExporter more than once should fail; exp (%v) err %v", exp, err) - } - } -} - -func TestExporter_makeReq(t *testing.T) { - m := stats.Float64("test-measure", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "testview", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - distView := &view.View{ - Name: "distview", - Description: "desc", - Measure: m, - Aggregation: view.Distribution(2, 4, 7), - } - - start := time.Now() - end := start.Add(time.Minute) - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - sum1 := &view.SumData{Value: 5.5} - sum2 := &view.SumData{Value: -11.1} - last1 := view.LastValueData{Value: 100} - last2 := view.LastValueData{Value: 200} - taskValue := getTaskValue() - - tests := []struct { - name string - projID string - vd *view.Data - want []*monitoringpb.CreateTimeSeriesRequest - }{ - { - name: "count agg + timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, count1, count2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ 
- "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 10, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 16, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "sum agg + timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, sum1, sum2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: 
&monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 5.5, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: -11.1, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "last value agg", - projID: "proj-id", - vd: newTestViewData(v, start, end, &last1, &last2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 100, - }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: &monitoredrespb.MonitoredResource{ - Type: "global", - }, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - 
Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_DoubleValue{ - DoubleValue: 200, - }}, - }, - }, - }, - }, - }}, - }, - { - name: "dist agg + time window", - projID: "proj-id", - vd: newTestDistViewData(distView, start, end), - want: nil, //TODO: add expectation for distribution - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &statsExporter{ - o: Options{ProjectID: tt.projID}, - taskValue: taskValue, - } - resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) - if tt.want == nil { - t.Skip("Missing expectation") - } - if got, want := len(resps), len(tt.want); got != want { - t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) - } - if len(tt.want) == 0 { - return - } - if !reflect.DeepEqual(resps, tt.want) { - t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) - } - }) - } -} - -func TestExporter_makeReq_batching(t *testing.T) { - m := stats.Float64("test-measure/makeReq_batching", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "view", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - - tests := []struct { - name string - iter int - limit int - wantReqs int - wantTotal int - }{ - { - name: "4 vds; 3 limit", - iter: 2, - limit: 3, - wantReqs: 2, - wantTotal: 4, - }, - { - name: "4 vds; 4 limit", - iter: 2, - limit: 4, - wantReqs: 1, - wantTotal: 4, - }, - { - name: "4 vds; 5 limit", - iter: 2, - limit: 5, - wantReqs: 1, - wantTotal: 4, - }, - } - - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - - for _, tt := range tests { - var vds []*view.Data - for i := 0; i < tt.iter; i++ { - vds = append(vds, newTestViewData(v, time.Now(), 
time.Now(), count1, count2)) - } - - e := &statsExporter{} - resps := e.makeReq(vds, tt.limit) - if len(resps) != tt.wantReqs { - t.Errorf("%v: got %v; want %d requests", tt.name, resps, tt.wantReqs) - } - - var total int - for _, resp := range resps { - total += len(resp.TimeSeries) - } - if got, want := total, tt.wantTotal; got != want { - t.Errorf("%v: len(resps[...].TimeSeries) = %d; want %d", tt.name, got, want) - } - } -} - -func TestEqualAggWindowTagKeys(t *testing.T) { - key1, _ := tag.NewKey("test-key-one") - key2, _ := tag.NewKey("test-key-two") - tests := []struct { - name string - md *metricpb.MetricDescriptor - m stats.Measure - agg *view.Aggregation - keys []tag.Key - wantErr bool - }{ - { - name: "count agg with in64 measure", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - { - name: "count agg with double measure", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - { - name: "sum agg double", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.Sum(), - wantErr: false, - }, - { - name: "sum agg int64", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Sum(), - wantErr: false, - }, - { - name: "last value agg 
double", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.LastValue(), - wantErr: false, - }, - { - name: "last value agg int64", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.LastValue(), - wantErr: false, - }, - { - name: "distribution - mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: true, - }, - { - name: "last value - measure mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Float64("name", "", ""), - agg: view.LastValue(), - wantErr: true, - }, - { - name: "distribution agg with keys", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - Labels: []*label.LabelDescriptor{ - {Key: "test_key_one"}, - {Key: "test_key_two"}, - {Key: opencensusTaskKey}, - }, - }, - m: stats.Int64("name", "", ""), - agg: view.Distribution(), - keys: []tag.Key{key1, key2}, - wantErr: false, - }, - { - name: "distribution agg with keys -- mismatch", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DISTRIBUTION, - }, - m: stats.Int64("name", "", ""), - agg: view.Distribution(), - keys: []tag.Key{key1, key2}, - wantErr: true, - }, - { - 
name: "count agg with pointers", - md: &metricpb.MetricDescriptor{ - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_INT64, - Labels: []*label.LabelDescriptor{{Key: opencensusTaskKey}}, - }, - m: stats.Int64("name", "", ""), - agg: view.Count(), - wantErr: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - err := equalMeasureAggTagKeys(tt.md, tt.m, tt.agg, tt.keys) - if err != nil && !tt.wantErr { - t.Errorf("equalAggTagKeys() = %q; want no error", err) - } - if err == nil && tt.wantErr { - t.Errorf("equalAggTagKeys() = %q; want error", err) - } - - }) - } -} - -func TestExporter_createMeasure(t *testing.T) { - oldCreateMetricDescriptor := createMetricDescriptor - - defer func() { - createMetricDescriptor = oldCreateMetricDescriptor - }() - - key, _ := tag.NewKey("test-key-one") - m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) - - v := &view.View{ - Name: "test_view_sum", - Description: "view_description", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Sum(), - } - - data := &view.CountData{Value: 0} - vd := newTestViewData(v, time.Now(), time.Now(), data, data) - - e := &statsExporter{ - createdViews: make(map[string]*metricpb.MetricDescriptor), - o: Options{ProjectID: "test_project"}, - } - - var createCalls int - createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - createCalls++ - if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.ValueType, 
metricpb.MetricDescriptor_DOUBLE; got != want { - t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { - t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { - t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_sum"; got != want { - t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Unit, stats.UnitMilliseconds; got != want { - t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) - } - return &metric.MetricDescriptor{ - DisplayName: "OpenCensus/test_view_sum", - Description: "view_description", - Unit: stats.UnitMilliseconds, - Type: "custom.googleapis.com/opencensus/test_view_sum", - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - ValueType: metricpb.MetricDescriptor_DOUBLE, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, nil - } - - ctx := context.Background() - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } - if count := createCalls; count != 1 { - t.Errorf("createMetricDescriptor needs to be called for once; called %v times", count) - } - if count := len(e.createdViews); count != 1 { - t.Errorf("len(e.createdViews) = %v; want 1", count) - } -} - -func TestExporter_createMeasure_CountAggregation(t *testing.T) { - oldCreateMetricDescriptor := createMetricDescriptor - - defer func() { - createMetricDescriptor = oldCreateMetricDescriptor - }() - - key, _ := tag.NewKey("test-key-one") - m := stats.Float64("test-measure/TestExporter_createMeasure", "measure desc", stats.UnitMilliseconds) - - v := &view.View{ - Name: 
"test_view_count", - Description: "view_description", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - - data := &view.CountData{Value: 0} - vd := newTestViewData(v, time.Now(), time.Now(), data, data) - - e := &statsExporter{ - createdViews: make(map[string]*metricpb.MetricDescriptor), - o: Options{ProjectID: "test_project"}, - } - - createMetricDescriptor = func(ctx context.Context, c *monitoring.MetricClient, mdr *monitoringpb.CreateMetricDescriptorRequest) (*metric.MetricDescriptor, error) { - if got, want := mdr.MetricDescriptor.Name, "projects/test_project/metricDescriptors/custom.googleapis.com/opencensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.Name = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Type, "custom.googleapis.com/opencensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.Type = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.ValueType, metricpb.MetricDescriptor_INT64; got != want { - t.Errorf("MetricDescriptor.ValueType = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.MetricKind, metricpb.MetricDescriptor_CUMULATIVE; got != want { - t.Errorf("MetricDescriptor.MetricKind = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Description, "view_description"; got != want { - t.Errorf("MetricDescriptor.Description = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.DisplayName, "OpenCensus/test_view_count"; got != want { - t.Errorf("MetricDescriptor.DisplayName = %q; want %q", got, want) - } - if got, want := mdr.MetricDescriptor.Unit, stats.UnitDimensionless; got != want { - t.Errorf("MetricDescriptor.Unit = %q; want %q", got, want) - } - return &metric.MetricDescriptor{ - DisplayName: "OpenCensus/test_view_sum", - Description: "view_description", - Unit: stats.UnitDimensionless, - Type: "custom.googleapis.com/opencensus/test_view_count", - MetricKind: metricpb.MetricDescriptor_CUMULATIVE, - 
ValueType: metricpb.MetricDescriptor_INT64, - Labels: newLabelDescriptors(vd.View.TagKeys), - }, nil - } - ctx := context.Background() - if err := e.createMeasure(ctx, vd); err != nil { - t.Errorf("Exporter.createMeasure() error = %v", err) - } -} - -func TestExporter_makeReq_withCustomMonitoredResource(t *testing.T) { - m := stats.Float64("test-measure/TestExporter_makeReq_withCustomMonitoredResource", "measure desc", "unit") - - key, err := tag.NewKey("test_key") - if err != nil { - t.Fatal(err) - } - - v := &view.View{ - Name: "testview", - Description: "desc", - TagKeys: []tag.Key{key}, - Measure: m, - Aggregation: view.Count(), - } - if err := view.Register(v); err != nil { - t.Fatal(err) - } - defer view.Unregister(v) - - start := time.Now() - end := start.Add(time.Minute) - count1 := &view.CountData{Value: 10} - count2 := &view.CountData{Value: 16} - taskValue := getTaskValue() - - resource := &monitoredrespb.MonitoredResource{ - Type: "gce_instance", - Labels: map[string]string{"instance_id": "instance", "zone": "us-west-1a"}, - } - - tests := []struct { - name string - projID string - vd *view.Data - want []*monitoringpb.CreateTimeSeriesRequest - }{ - { - name: "count agg timeline", - projID: "proj-id", - vd: newTestViewData(v, start, end, count1, count2), - want: []*monitoringpb.CreateTimeSeriesRequest{{ - Name: monitoring.MetricProjectPath("proj-id"), - TimeSeries: []*monitoringpb.TimeSeries{ - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-1", - opencensusTaskKey: taskValue, - }, - }, - Resource: resource, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 10, 
- }}, - }, - }, - }, - { - Metric: &metricpb.Metric{ - Type: "custom.googleapis.com/opencensus/testview", - Labels: map[string]string{ - "test_key": "test-value-2", - opencensusTaskKey: taskValue, - }, - }, - Resource: resource, - Points: []*monitoringpb.Point{ - { - Interval: &monitoringpb.TimeInterval{ - StartTime: ×tamp.Timestamp{ - Seconds: start.Unix(), - Nanos: int32(start.Nanosecond()), - }, - EndTime: ×tamp.Timestamp{ - Seconds: end.Unix(), - Nanos: int32(end.Nanosecond()), - }, - }, - Value: &monitoringpb.TypedValue{Value: &monitoringpb.TypedValue_Int64Value{ - Int64Value: 16, - }}, - }, - }, - }, - }, - }}, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - e := &statsExporter{ - o: Options{ProjectID: tt.projID, Resource: resource}, - taskValue: taskValue, - } - resps := e.makeReq([]*view.Data{tt.vd}, maxTimeSeriesPerUpload) - if got, want := len(resps), len(tt.want); got != want { - t.Fatalf("%v: Exporter.makeReq() returned %d responses; want %d", tt.name, got, want) - } - if len(tt.want) == 0 { - return - } - if !reflect.DeepEqual(resps, tt.want) { - t.Errorf("%v: Exporter.makeReq() = %v, want %v", tt.name, resps, tt.want) - } - }) - } -} - -func newTestViewData(v *view.View, start, end time.Time, data1, data2 view.AggregationData) *view.Data { - key, _ := tag.NewKey("test-key") - tag1 := tag.Tag{Key: key, Value: "test-value-1"} - tag2 := tag.Tag{Key: key, Value: "test-value-2"} - return &view.Data{ - View: v, - Rows: []*view.Row{ - { - Tags: []tag.Tag{tag1}, - Data: data1, - }, - { - Tags: []tag.Tag{tag2}, - Data: data2, - }, - }, - Start: start, - End: end, - } -} - -func newTestDistViewData(v *view.View, start, end time.Time) *view.Data { - return &view.Data{ - View: v, - Rows: []*view.Row{ - {Data: &view.DistributionData{ - Count: 5, - Min: 1, - Max: 7, - Mean: 3, - SumOfSquaredDev: 1.5, - CountPerBucket: []int64{2, 2, 1}, - }}, - }, - Start: start, - End: end, - } -} diff --git a/exporter/stackdriver/trace.go 
b/exporter/stackdriver/trace.go deleted file mode 100644 index e3fd6bab7..000000000 --- a/exporter/stackdriver/trace.go +++ /dev/null @@ -1,172 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "log" - "sync" - "time" - - tracingclient "cloud.google.com/go/trace/apiv2" - "go.opencensus.io/trace" - "google.golang.org/api/support/bundler" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" -) - -// traceExporter is an implementation of trace.Exporter that uploads spans to -// Stackdriver. -// -type traceExporter struct { - o Options - projectID string - bundler *bundler.Bundler - // uploadFn defaults to uploadSpans; it can be replaced for tests. - uploadFn func(spans []*trace.SpanData) - overflowLogger - client *tracingclient.Client -} - -var _ trace.Exporter = (*traceExporter)(nil) - -func newTraceExporter(o Options) (*traceExporter, error) { - client, err := tracingclient.NewClient(context.Background(), o.TraceClientOptions...) 
- if err != nil { - return nil, fmt.Errorf("stackdriver: couldn't initialize trace client: %v", err) - } - return newTraceExporterWithClient(o, client), nil -} - -func newTraceExporterWithClient(o Options, c *tracingclient.Client) *traceExporter { - e := &traceExporter{ - projectID: o.ProjectID, - client: c, - o: o, - } - bundler := bundler.NewBundler((*trace.SpanData)(nil), func(bundle interface{}) { - e.uploadFn(bundle.([]*trace.SpanData)) - }) - if o.BundleDelayThreshold > 0 { - bundler.DelayThreshold = o.BundleDelayThreshold - } else { - bundler.DelayThreshold = 2 * time.Second - } - if o.BundleCountThreshold > 0 { - bundler.BundleCountThreshold = o.BundleCountThreshold - } else { - bundler.BundleCountThreshold = 50 - } - // The measured "bytes" are not really bytes, see exportReceiver. - bundler.BundleByteThreshold = bundler.BundleCountThreshold * 200 - bundler.BundleByteLimit = bundler.BundleCountThreshold * 1000 - bundler.BufferedByteLimit = bundler.BundleCountThreshold * 2000 - - e.bundler = bundler - e.uploadFn = e.uploadSpans - return e -} - -// ExportSpan exports a SpanData to Stackdriver Trace. -func (e *traceExporter) ExportSpan(s *trace.SpanData) { - // n is a length heuristic. - n := 1 - n += len(s.Attributes) - n += len(s.Annotations) - n += len(s.MessageEvents) - err := e.bundler.Add(s, n) - switch err { - case nil: - return - case bundler.ErrOversizedItem: - go e.uploadFn([]*trace.SpanData{s}) - case bundler.ErrOverflow: - e.overflowLogger.log() - default: - e.o.handleError(err) - } -} - -// Flush waits for exported trace spans to be uploaded. -// -// This is useful if your program is ending and you do not want to lose recent -// spans. -func (e *traceExporter) Flush() { - e.bundler.Flush() -} - -// uploadSpans uploads a set of spans to Stackdriver. 
-func (e *traceExporter) uploadSpans(spans []*trace.SpanData) { - req := tracepb.BatchWriteSpansRequest{ - Name: "projects/" + e.projectID, - Spans: make([]*tracepb.Span, 0, len(spans)), - } - for _, span := range spans { - req.Spans = append(req.Spans, protoFromSpanData(span, e.projectID)) - } - // Create a never-sampled span to prevent traces associated with exporter. - ctx, span := trace.StartSpan( // TODO: add timeouts - context.Background(), - "go.opencensus.io/exporter/stackdriver.uploadSpans", - trace.WithSampler(trace.NeverSample()), - ) - defer span.End() - span.AddAttributes(trace.Int64Attribute("num_spans", int64(len(spans)))) - - err := e.client.BatchWriteSpans(ctx, &req) - if err != nil { - span.SetStatus(trace.Status{Code: trace.StatusCodeUnknown, Message: err.Error()}) - e.o.handleError(err) - } -} - -// overflowLogger ensures that at most one overflow error log message is -// written every 5 seconds. -type overflowLogger struct { - mu sync.Mutex - pause bool - accum int -} - -func (o *overflowLogger) delay() { - o.pause = true - time.AfterFunc(5*time.Second, func() { - o.mu.Lock() - defer o.mu.Unlock() - switch { - case o.accum == 0: - o.pause = false - case o.accum == 1: - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.accum = 0 - o.delay() - default: - log.Printf("OpenCensus Stackdriver exporter: failed to upload %d spans: buffer full", o.accum) - o.accum = 0 - o.delay() - } - }) -} - -func (o *overflowLogger) log() { - o.mu.Lock() - defer o.mu.Unlock() - if !o.pause { - log.Println("OpenCensus Stackdriver exporter: failed to upload span: buffer full") - o.delay() - } else { - o.accum++ - } -} diff --git a/exporter/stackdriver/trace_proto.go b/exporter/stackdriver/trace_proto.go deleted file mode 100644 index 5c2dc2d46..000000000 --- a/exporter/stackdriver/trace_proto.go +++ /dev/null @@ -1,255 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the 
"License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "math" - "time" - "unicode/utf8" - - "go.opencensus.io/internal" - "go.opencensus.io/plugin/ochttp" - - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "go.opencensus.io/trace" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -const ( - maxAnnotationEventsPerSpan = 32 - maxMessageEventsPerSpan = 128 - maxAttributeStringValue = 256 - agentLabel = "g.co/agent" - - labelHTTPHost = `/http/host` - labelHTTPMethod = `/http/method` - labelHTTPStatusCode = `/http/status_code` - labelHTTPPath = `/http/path` - labelHTTPUserAgent = `/http/user_agent` -) - -// proto returns a protocol buffer representation of a SpanData. -func protoFromSpanData(s *trace.SpanData, projectID string) *tracepb.Span { - if s == nil { - return nil - } - - traceIDString := s.SpanContext.TraceID.String() - spanIDString := s.SpanContext.SpanID.String() - - name := s.Name - switch s.SpanKind { - case trace.SpanKindClient: - name = "Sent." + name - case trace.SpanKindServer: - name = "Recv." 
+ name - } - - sp := &tracepb.Span{ - Name: "projects/" + projectID + "/traces/" + traceIDString + "/spans/" + spanIDString, - SpanId: spanIDString, - DisplayName: trunc(name, 128), - StartTime: timestampProto(s.StartTime), - EndTime: timestampProto(s.EndTime), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: !s.HasRemoteParent}, - } - if p := s.ParentSpanID; p != (trace.SpanID{}) { - sp.ParentSpanId = p.String() - } - if s.Status.Code != 0 || s.Status.Message != "" { - sp.Status = &statuspb.Status{Code: s.Status.Code, Message: s.Status.Message} - } - - var annotations, droppedAnnotationsCount, messageEvents, droppedMessageEventsCount int - copyAttributes(&sp.Attributes, s.Attributes) - - as := s.Annotations - for i, a := range as { - if annotations >= maxAnnotationEventsPerSpan { - droppedAnnotationsCount = len(as) - i - break - } - annotation := &tracepb.Span_TimeEvent_Annotation{Description: trunc(a.Message, maxAttributeStringValue)} - copyAttributes(&annotation.Attributes, a.Attributes) - event := &tracepb.Span_TimeEvent{ - Time: timestampProto(a.Time), - Value: &tracepb.Span_TimeEvent_Annotation_{Annotation: annotation}, - } - annotations++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, event) - } - - if sp.Attributes == nil { - sp.Attributes = &tracepb.Span_Attributes{ - AttributeMap: make(map[string]*tracepb.AttributeValue), - } - } - sp.Attributes.AttributeMap[agentLabel] = &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{ - StringValue: trunc(internal.UserAgent, maxAttributeStringValue), - }, - } - - es := s.MessageEvents - for i, e := range es { - if messageEvents >= maxMessageEventsPerSpan { - droppedMessageEventsCount = len(es) - i - break - } - messageEvents++ - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.TimeEvent = append(sp.TimeEvents.TimeEvent, &tracepb.Span_TimeEvent{ - Time: 
timestampProto(e.Time), - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_Type(e.EventType), - Id: e.MessageID, - UncompressedSizeBytes: e.UncompressedByteSize, - CompressedSizeBytes: e.CompressedByteSize, - }, - }, - }) - } - - if droppedAnnotationsCount != 0 || droppedMessageEventsCount != 0 { - if sp.TimeEvents == nil { - sp.TimeEvents = &tracepb.Span_TimeEvents{} - } - sp.TimeEvents.DroppedAnnotationsCount = clip32(droppedAnnotationsCount) - sp.TimeEvents.DroppedMessageEventsCount = clip32(droppedMessageEventsCount) - } - - if len(s.Links) > 0 { - sp.Links = &tracepb.Span_Links{} - sp.Links.Link = make([]*tracepb.Span_Link, 0, len(s.Links)) - for _, l := range s.Links { - link := &tracepb.Span_Link{ - TraceId: l.TraceID.String(), - SpanId: l.SpanID.String(), - Type: tracepb.Span_Link_Type(l.Type), - } - copyAttributes(&link.Attributes, l.Attributes) - sp.Links.Link = append(sp.Links.Link, link) - } - } - return sp -} - -// timestampProto creates a timestamp proto for a time.Time. -func timestampProto(t time.Time) *timestamppb.Timestamp { - return ×tamppb.Timestamp{ - Seconds: t.Unix(), - Nanos: int32(t.Nanosecond()), - } -} - -// copyAttributes copies a map of attributes to a proto map field. -// It creates the map if it is nil. 
-func copyAttributes(out **tracepb.Span_Attributes, in map[string]interface{}) { - if len(in) == 0 { - return - } - if *out == nil { - *out = &tracepb.Span_Attributes{} - } - if (*out).AttributeMap == nil { - (*out).AttributeMap = make(map[string]*tracepb.AttributeValue) - } - var dropped int32 - for key, value := range in { - av := attributeValue(value) - if av == nil { - continue - } - switch key { - case ochttp.PathAttribute: - (*out).AttributeMap[labelHTTPPath] = av - case ochttp.HostAttribute: - (*out).AttributeMap[labelHTTPHost] = av - case ochttp.MethodAttribute: - (*out).AttributeMap[labelHTTPMethod] = av - case ochttp.UserAgentAttribute: - (*out).AttributeMap[labelHTTPUserAgent] = av - case ochttp.StatusCodeAttribute: - (*out).AttributeMap[labelHTTPStatusCode] = av - default: - if len(key) > 128 { - dropped++ - continue - } - (*out).AttributeMap[key] = av - } - } - (*out).DroppedAttributesCount = dropped -} - -func attributeValue(v interface{}) *tracepb.AttributeValue { - switch value := v.(type) { - case bool: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_BoolValue{BoolValue: value}, - } - case int64: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_IntValue{IntValue: value}, - } - case string: - return &tracepb.AttributeValue{ - Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(value, maxAttributeStringValue)}, - } - } - return nil -} - -// trunc returns a TruncatableString truncated to the given limit. -func trunc(s string, limit int) *tracepb.TruncatableString { - if len(s) > limit { - b := []byte(s[:limit]) - for { - r, size := utf8.DecodeLastRune(b) - if r == utf8.RuneError && size == 1 { - b = b[:len(b)-1] - } else { - break - } - } - return &tracepb.TruncatableString{ - Value: string(b), - TruncatedByteCount: clip32(len(s) - len(b)), - } - } - return &tracepb.TruncatableString{ - Value: s, - TruncatedByteCount: 0, - } -} - -// clip32 clips an int to the range of an int32. 
-func clip32(x int) int32 { - if x < math.MinInt32 { - return math.MinInt32 - } - if x > math.MaxInt32 { - return math.MaxInt32 - } - return int32(x) -} diff --git a/exporter/stackdriver/trace_proto_test.go b/exporter/stackdriver/trace_proto_test.go deleted file mode 100644 index 2597b08b5..000000000 --- a/exporter/stackdriver/trace_proto_test.go +++ /dev/null @@ -1,389 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "fmt" - "math/big" - "reflect" - "sort" - "strings" - "testing" - "time" - - "go.opencensus.io/internal" - - "github.com/golang/protobuf/proto" - timestamppb "github.com/golang/protobuf/ptypes/timestamp" - wrapperspb "github.com/golang/protobuf/ptypes/wrappers" - "go.opencensus.io/trace" - tracepb "google.golang.org/genproto/googleapis/devtools/cloudtrace/v2" - codepb "google.golang.org/genproto/googleapis/rpc/code" - statuspb "google.golang.org/genproto/googleapis/rpc/status" -) - -const projectID = "testproject" - -var ( - traceID = trace.TraceID{0x40, 0x41, 0x42, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49, 0x4a, 0x4b, 0x4c, 0x4d, 0x4e, 0x4f} - spanID = trace.SpanID{0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1, 0x1} -) - -type spans []*tracepb.Span - -func (s spans) Len() int { return len(s) } -func (s spans) Less(x, y int) bool { return s[x].DisplayName.Value < s[y].DisplayName.Value } -func (s spans) Swap(x, y int) { s[x], s[y] = s[y], s[x] } - -type 
testExporter struct { - spans []*trace.SpanData -} - -func (t *testExporter) ExportSpan(s *trace.SpanData) { - t.spans = append(t.spans, s) -} - -func TestExportTrace(t *testing.T) { - ctx := context.Background() - - var te testExporter - trace.RegisterExporter(&te) - defer trace.UnregisterExporter(&te) - - ctx, span0 := trace.StartSpanWithRemoteParent( - ctx, - "span0", - trace.SpanContext{ - TraceID: traceID, - SpanID: spanID, - TraceOptions: 1, - }, - ) - { - ctx1, span1 := trace.StartSpan(ctx, "span1") - { - _, span2 := trace.StartSpan(ctx1, "span2") - span2.AddMessageSendEvent(0x123, 1024, 512) - span2.Annotatef(nil, "in span%d", 2) - span2.Annotate(nil, big.NewRat(2, 4).String()) - span2.AddAttributes( - trace.StringAttribute("key1", "value1"), - trace.StringAttribute("key2", "value2")) - span2.AddAttributes(trace.Int64Attribute("key1", 100)) - span2.End() - } - { - ctx3, span3 := trace.StartSpan(ctx1, "span3") - span3.Annotate(nil, "in span3") - span3.AddMessageReceiveEvent(0x456, 2048, 1536) - span3.SetStatus(trace.Status{Code: int32(codepb.Code_UNAVAILABLE)}) - span3.End() - { - _, span4 := trace.StartSpan(ctx3, "span4") - x := 42 - a1 := []trace.Attribute{trace.StringAttribute("k1", "v1")} - a2 := []trace.Attribute{trace.StringAttribute("k2", "v2")} - a3 := []trace.Attribute{trace.StringAttribute("k3", "v3")} - a4 := map[string]interface{}{"k4": "v4"} - r := big.NewRat(2, 4) - span4.Annotate(a1, r.String()) - span4.Annotatef(a2, "foo %d", x) - span4.Annotate(a3, "in span4") - span4.AddLink(trace.Link{TraceID: trace.TraceID{1, 2}, SpanID: trace.SpanID{3}, Type: trace.LinkTypeParent, Attributes: a4}) - span4.End() - } - } - span1.End() - } - span0.End() - if len(te.spans) != 5 { - t.Errorf("got %d exported spans, want 5", len(te.spans)) - } - - var spbs spans - for _, s := range te.spans { - spbs = append(spbs, protoFromSpanData(s, "testproject")) - } - sort.Sort(spbs) - - for i, want := range []string{ - spanID.String(), - spbs[0].SpanId, - spbs[1].SpanId, 
- spbs[1].SpanId, - spbs[3].SpanId, - } { - if got := spbs[i].ParentSpanId; got != want { - t.Errorf("span %d: got ParentSpanID %q want %q", i, got, want) - } - } - checkTime := func(ts **timestamppb.Timestamp) { - if *ts == nil { - t.Error("expected timestamp") - } - *ts = nil - } - for _, span := range spbs { - checkTime(&span.StartTime) - checkTime(&span.EndTime) - if span.TimeEvents != nil { - for _, te := range span.TimeEvents.TimeEvent { - checkTime(&te.Time) - } - } - if want := fmt.Sprintf("projects/testproject/traces/%s/spans/%s", traceID, span.SpanId); span.Name != want { - t.Errorf("got span name %q want %q", span.Name, want) - } - span.Name, span.SpanId, span.ParentSpanId = "", "", "" - } - - expectedSpans := spans{ - &tracepb.Span{ - DisplayName: trunc("span0", 128), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: false}, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - }, - &tracepb.Span{ - DisplayName: trunc("span1", 128), - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - }, - &tracepb.Span{ - DisplayName: trunc("span2", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "key2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("value2", 256)}}, - "key1": {Value: &tracepb.AttributeValue_IntValue{IntValue: 100}}, - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: 
&tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span2", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("1/2", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_SENT, - Id: 0x123, - UncompressedSizeBytes: 1024, - CompressedSizeBytes: 512, - }, - }, - }, - }, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - &tracepb.Span{ - DisplayName: trunc("span3", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span3", 256), - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_MessageEvent_{ - MessageEvent: &tracepb.Span_TimeEvent_MessageEvent{ - Type: tracepb.Span_TimeEvent_MessageEvent_RECEIVED, - Id: 0x456, - UncompressedSizeBytes: 2048, - CompressedSizeBytes: 1536, - }, - }, - }, - }, - }, - Status: &statuspb.Status{ - Code: 14, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - &tracepb.Span{ - DisplayName: trunc("span4", 128), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - agentLabel: {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc(internal.UserAgent, len(internal.UserAgent))}}, - }, - }, - TimeEvents: &tracepb.Span_TimeEvents{ - TimeEvent: []*tracepb.Span_TimeEvent{ - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("1/2", 256), - 
Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k1": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v1", 256)}}, - }, - }, - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("foo 42", 256), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k2": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v2", 256)}}, - }, - }, - }, - }, - }, - { - Value: &tracepb.Span_TimeEvent_Annotation_{ - Annotation: &tracepb.Span_TimeEvent_Annotation{ - Description: trunc("in span4", 256), - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k3": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v3", 256)}}, - }, - }, - }, - }, - }, - }, - }, - Links: &tracepb.Span_Links{ - Link: []*tracepb.Span_Link{ - { - TraceId: "01020000000000000000000000000000", - SpanId: "0300000000000000", - Type: tracepb.Span_Link_PARENT_LINKED_SPAN, - Attributes: &tracepb.Span_Attributes{ - AttributeMap: map[string]*tracepb.AttributeValue{ - "k4": {Value: &tracepb.AttributeValue_StringValue{StringValue: trunc("v4", 256)}}, - }, - }, - }, - }, - }, - SameProcessAsParentSpan: &wrapperspb.BoolValue{Value: true}, - }, - } - - if !reflect.DeepEqual(spbs, expectedSpans) { - var got, want []string - for _, s := range spbs { - got = append(got, proto.MarshalTextString(s)) - } - for _, s := range expectedSpans { - want = append(want, proto.MarshalTextString(s)) - } - t.Errorf("got spans:\n%s\nwant:\n%s", strings.Join(got, "\n"), strings.Join(want, "\n")) - } -} - -func TestEnums(t *testing.T) { - for _, test := range []struct { - x trace.LinkType - y tracepb.Span_Link_Type - }{ - {trace.LinkTypeUnspecified, tracepb.Span_Link_TYPE_UNSPECIFIED}, - {trace.LinkTypeChild, tracepb.Span_Link_CHILD_LINKED_SPAN}, - {trace.LinkTypeParent, 
tracepb.Span_Link_PARENT_LINKED_SPAN}, - } { - if test.x != trace.LinkType(test.y) { - t.Errorf("got link type values %d and %d, want equal", test.x, test.y) - } - } - - for _, test := range []struct { - x trace.MessageEventType - y tracepb.Span_TimeEvent_MessageEvent_Type - }{ - {trace.MessageEventTypeUnspecified, tracepb.Span_TimeEvent_MessageEvent_TYPE_UNSPECIFIED}, - {trace.MessageEventTypeSent, tracepb.Span_TimeEvent_MessageEvent_SENT}, - {trace.MessageEventTypeRecv, tracepb.Span_TimeEvent_MessageEvent_RECEIVED}, - } { - if test.x != trace.MessageEventType(test.y) { - t.Errorf("got network event type values %d and %d, want equal", test.x, test.y) - } - } -} - -func BenchmarkProto(b *testing.B) { - sd := &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: traceID, - SpanID: spanID, - }, - Name: "foo", - StartTime: time.Now().Add(-time.Second), - EndTime: time.Now(), - Attributes: map[string]interface{}{"foo": "bar"}, - Annotations: []trace.Annotation{ - { - Time: time.Now().Add(-time.Millisecond), - Message: "hello, world", - Attributes: map[string]interface{}{"foo": "bar"}, - }, - }, - MessageEvents: []trace.MessageEvent{ - { - Time: time.Now().Add(-time.Microsecond), - EventType: 1, - MessageID: 2, - UncompressedByteSize: 4, - CompressedByteSize: 3, - }, - }, - Status: trace.Status{ - Code: 42, - Message: "failed", - }, - HasRemoteParent: true, - } - var x int - for i := 0; i < b.N; i++ { - s := protoFromSpanData(sd, `testproject`) - x += len(s.Name) - } - if x == 0 { - fmt.Println(x) - } -} diff --git a/exporter/stackdriver/trace_test.go b/exporter/stackdriver/trace_test.go deleted file mode 100644 index 03a24700d..000000000 --- a/exporter/stackdriver/trace_test.go +++ /dev/null @@ -1,62 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package stackdriver - -import ( - "context" - "testing" - "time" - - "go.opencensus.io/trace" -) - -func TestBundling(t *testing.T) { - exporter := newTraceExporterWithClient(Options{ - ProjectID: "fakeProjectID", - BundleDelayThreshold: time.Second / 10, - BundleCountThreshold: 10, - }, nil) - - ch := make(chan []*trace.SpanData) - exporter.uploadFn = func(spans []*trace.SpanData) { - ch <- spans - } - trace.RegisterExporter(exporter) - - for i := 0; i < 35; i++ { - _, span := trace.StartSpan(context.Background(), "span", trace.WithSampler(trace.AlwaysSample())) - span.End() - } - - // Read the first three bundles. - <-ch - <-ch - <-ch - - // Test that the fourth bundle isn't sent early. - select { - case <-ch: - t.Errorf("bundle sent too early") - case <-time.After(time.Second / 20): - <-ch - } - - // Test that there aren't extra bundles. - select { - case <-ch: - t.Errorf("too many bundles sent") - case <-time.After(time.Second / 5): - } -} From 457d67e3f41e6e1861cf86f5bd9390e46272ecb0 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 13 Aug 2018 14:57:08 -0700 Subject: [PATCH 045/212] Bump version string to 0.16.0 (#863) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index 8b67867d8..3b4e0c65a 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.15.0" + return "0.16.0" } From 1789eaf7638d6f16d055afd35654928c04663cfc Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Thu, 16 Aug 2018 15:37:06 -0700 Subject: [PATCH 046/212] trace: reorder defaultIDGenerator fields for 8byte alignment (#866) Fixes #865 This bug manifested as a consequence of https://golang.org/pkg/sync/atomic/#pkg-note-BUG and was exposed by PR #851 which switched to atomically incrementing defaultIDGenerator.nextSpanID The organization of the fields was misaligned on 32-bit machines because the field `traceIDRand *rand.Rand`, a pointer was included as the second field of the struct. This is because the size of a pointer on 32-bit machines is 4 bytes, hence after the second field, we'll have offset from 12 bytes and for atomic access of *int64 fields, which are accessed in 4 byte increments by atomic operations, on 32-bit machines, their addresses are on non-8-byte divisible alignments i.e. * nextSpanID -- [28, 36] * spanIDInc -- [36, 44] but on 64-bit machines, sizeof(pointer) = 8 bytes hence their addresses are on 8-byte divisible alignments i.e. * nextSpanID -- [32, 40] * spanIDInc -- [40, 48] Thus the required reorganization but making the pointer the last field fixes the problem for both 32-bit and 64-bit. This fix can be verified by prefixing `GOARCH=386` before running code or tests. --- trace/trace.go | 16 +++++++++++++--- 1 file changed, 13 insertions(+), 3 deletions(-) diff --git a/trace/trace.go b/trace/trace.go index e0d87400b..887e90be1 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -470,10 +470,20 @@ func init() { type defaultIDGenerator struct { sync.Mutex - traceIDRand *rand.Rand + + // Please keep these as the first fields + // so that these 8 byte fields will be aligned on addresses + // divisible by 8, on both 32-bit and 64-bit machines when + // performing atomic increments and accesses. 
+ // See: + // * https://github.com/census-instrumentation/opencensus-go/issues/587 + // * https://github.com/census-instrumentation/opencensus-go/issues/865 + // * https://golang.org/pkg/sync/atomic/#pkg-note-BUG + nextSpanID uint64 + spanIDInc uint64 + traceIDAdd [2]uint64 - nextSpanID uint64 - spanIDInc uint64 + traceIDRand *rand.Rand } // NewSpanID returns a non-zero span ID from a randomly-chosen sequence. From e9e93c613ef59c28dc29941ebd3b5a680f90286b Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 20 Aug 2018 17:29:24 -0700 Subject: [PATCH 047/212] Document that ExportView is not called concurrently (#864) This allows Exporter implementations to avoid unnecessary internal synchronization. --- stats/view/export.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/stats/view/export.go b/stats/view/export.go index ffd0d1ac7..7cb59718f 100644 --- a/stats/view/export.go +++ b/stats/view/export.go @@ -27,6 +27,9 @@ var ( // Exporter takes a significant amount of time to // process a Data, that work should be done on another goroutine. // +// It is safe to assume that ExportView will not be called concurrently from +// multiple goroutines. +// // The Data should not be modified. type Exporter interface { ExportView(viewData *Data) From 92c730d476fbbdaa3003474c3ef85ecbb64a9993 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 21 Aug 2018 15:27:13 -0700 Subject: [PATCH 048/212] Align dependency metadata with imports. Gopkg.lock was out of date. --- Gopkg.lock | 185 ++++++++++++++++++++++++----------------------------- Gopkg.toml | 12 ---- 2 files changed, 85 insertions(+), 112 deletions(-) diff --git a/Gopkg.lock b/Gopkg.lock index 026f01334..3be12ac8f 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -1,58 +1,47 @@ # This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
-[[projects]] - name = "cloud.google.com/go" - packages = [ - "compute/metadata", - "internal/version", - "monitoring/apiv3", - "trace/apiv2" - ] - revision = "0fd7230b2a7505833d5f69b75cbd6c9582401479" - version = "v0.23.0" - [[projects]] branch = "master" + digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" name = "git.apache.org/thrift.git" packages = ["lib/go/thrift"] - revision = "88591e32e710a0524327153c8b629d5b461e35e0" + pruneopts = "UT" + revision = "6e67faa92827ece022380b211c2caaadd6145bf5" source = "github.com/apache/thrift" [[projects]] branch = "master" + digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" name = "github.com/beorn7/perks" packages = ["quantile"] + pruneopts = "UT" revision = "3a771d992973f24aa725d07868b467d1ddfceafb" [[projects]] + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" name = "github.com/golang/protobuf" packages = [ "proto", - "protoc-gen-go/descriptor", "ptypes", "ptypes/any", "ptypes/duration", - "ptypes/empty", "ptypes/timestamp", - "ptypes/wrappers" ] - revision = "b4deda0973fb4c70b50d226b1af49f3da59f5265" - version = "v1.1.0" - -[[projects]] - name = "github.com/googleapis/gax-go" - packages = ["."] - revision = "317e0006254c44a0ac427cc52a0e083ff0b9622f" - version = "v2.0.0" + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" [[projects]] + digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" name = "github.com/matttproud/golang_protobuf_extensions" packages = ["pbutil"] - revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" - version = "v1.0.0" + pruneopts = "UT" + revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" + version = "v1.0.1" [[projects]] + digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" name = "github.com/openzipkin/zipkin-go" packages = [ ".", @@ -60,81 +49,90 @@ "model", "propagation", "reporter", - "reporter/http" + 
"reporter/http", ] - revision = "f197ec29e729f226d23370ea60f0e49b8f44ccf4" - version = "v0.1.0" + pruneopts = "UT" + revision = "d455a5674050831c1e187644faa4046d653433c2" + version = "v0.1.1" [[projects]] + digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" name = "github.com/prometheus/client_golang" packages = [ "prometheus", - "prometheus/promhttp" + "prometheus/promhttp", ] + pruneopts = "UT" revision = "c5b7fccd204277076155f10851dad72b76a49317" version = "v0.8.0" [[projects]] branch = "master" + digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" name = "github.com/prometheus/client_model" packages = ["go"] - revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + pruneopts = "UT" + revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" [[projects]] branch = "master" + digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" name = "github.com/prometheus/common" packages = [ "expfmt", "internal/bitbucket.org/ww/goautoneg", - "model" + "model", ] - revision = "7600349dcfe1abd18d72d3a1770870d9800a7801" + pruneopts = "UT" + revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" [[projects]] branch = "master" + digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" name = "github.com/prometheus/procfs" packages = [ ".", "internal/util", "nfs", - "xfs" + "xfs", ] - revision = "8b1c2da0d56deffdbb9e48d4414b4e674bd8083e" + pruneopts = "UT" + revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" [[projects]] branch = "master" + digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" name = "golang.org/x/net" packages = [ "context", - "context/ctxhttp", "http/httpguts", "http2", "http2/hpack", "idna", "internal/timeseries", - "trace" + "trace", ] - revision = "9ef9f5bb98a1fdc41f8cf6c250a4404b4085e389" - -[[projects]] - branch = "master" - name = "golang.org/x/oauth2" - packages = [ - ".", - "google", - "internal", - "jws", - "jwt" - ] - revision = 
"dd5f5d8e78ce062a4aa881dff95a94f2a0fd405a" + pruneopts = "UT" + revision = "922f4815f713f213882e8ef45e0d315b164d705c" [[projects]] branch = "master" + digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" name = "golang.org/x/sync" packages = ["semaphore"] + pruneopts = "UT" revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" [[projects]] + branch = "master" + digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" + +[[projects]] + digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" name = "golang.org/x/text" packages = [ "collate", @@ -150,79 +148,48 @@ "unicode/bidi", "unicode/cldr", "unicode/norm", - "unicode/rangetable" + "unicode/rangetable", ] + pruneopts = "UT" revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" version = "v0.3.0" [[projects]] branch = "master" + digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" name = "google.golang.org/api" - packages = [ - "googleapi/transport", - "internal", - "iterator", - "option", - "support/bundler", - "transport", - "transport/grpc", - "transport/http" - ] - revision = "4f7dd2b006a4ffd9fd683c1c734d2fe91ca0ea1c" - -[[projects]] - name = "google.golang.org/appengine" - packages = [ - ".", - "internal", - "internal/app_identity", - "internal/base", - "internal/datastore", - "internal/log", - "internal/modules", - "internal/remote_api", - "internal/socket", - "internal/urlfetch", - "socket", - "urlfetch" - ] - revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" - version = "v1.0.0" + packages = ["support/bundler"] + pruneopts = "UT" + revision = "e21acd801f91da814261b938941d193bb036441a" [[projects]] branch = "master" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" name = "google.golang.org/genproto" - packages = [ - "googleapis/api/annotations", - 
"googleapis/api/distribution", - "googleapis/api/label", - "googleapis/api/metric", - "googleapis/api/monitoredres", - "googleapis/devtools/cloudtrace/v2", - "googleapis/monitoring/v3", - "googleapis/rpc/code", - "googleapis/rpc/status", - "protobuf/field_mask" - ] - revision = "11a468237815f3a3ddf9f7c6e8b6b3b382a24d15" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" [[projects]] + digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" name = "google.golang.org/grpc" packages = [ ".", "balancer", "balancer/base", "balancer/roundrobin", - "channelz", "codes", "connectivity", "credentials", - "credentials/oauth", "encoding", "encoding/proto", - "grpclb/grpc_lb_v1/messages", "grpclog", "internal", + "internal/backoff", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/transport", "keepalive", "metadata", "naming", @@ -233,14 +200,32 @@ "stats", "status", "tap", - "transport" ] - revision = "41344da2231b913fa3d983840a57a6b1b7b631a1" - version = "v1.12.0" + pruneopts = "UT" + revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" + version = "v1.14.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "3fd3b357ae771c152cbc6b6d7b731c00c91c871cf2dbccb2f155ecc84ec80c4f" + input-imports = [ + "git.apache.org/thrift.git/lib/go/thrift", + "github.com/golang/protobuf/proto", + "github.com/openzipkin/zipkin-go", + "github.com/openzipkin/zipkin-go/model", + "github.com/openzipkin/zipkin-go/reporter", + "github.com/openzipkin/zipkin-go/reporter/http", + "github.com/prometheus/client_golang/prometheus", + "github.com/prometheus/client_golang/prometheus/promhttp", + "golang.org/x/net/context", + "golang.org/x/net/http2", + "google.golang.org/api/support/bundler", + "google.golang.org/grpc", + "google.golang.org/grpc/codes", + "google.golang.org/grpc/grpclog", + "google.golang.org/grpc/metadata", + "google.golang.org/grpc/stats", + 
"google.golang.org/grpc/status", + ] solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index c3f8292f7..a9f3cd68e 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -2,10 +2,6 @@ # to avoid locking to a particular minor version which can cause dep to not be # able to find a satisfying dependency graph. -[[constraint]] - name = "cloud.google.com/go" - version = ">=0.21.0" - [[constraint]] branch = "master" name = "git.apache.org/thrift.git" @@ -27,18 +23,10 @@ branch = "master" name = "golang.org/x/net" -[[constraint]] - branch = "master" - name = "golang.org/x/oauth2" - [[constraint]] branch = "master" name = "google.golang.org/api" -[[constraint]] - branch = "master" - name = "google.golang.org/genproto" - [[constraint]] name = "google.golang.org/grpc" version = "1.11.3" From bfa2d767dc63397a5d358f7b2999bf50030298fa Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 21 Aug 2018 17:15:33 -0700 Subject: [PATCH 049/212] Vendor Thrift, since upstream repo is unavailable --- .gitignore | 4 +- vendor/git.apache.org/thrift.git/LICENSE | 239 +++ vendor/git.apache.org/thrift.git/NOTICE | 5 + .../thrift.git/contrib/fb303/LICENSE | 16 + .../thrift.git/debian/copyright | 129 ++ .../thrift.git/lib/dart/LICENSE_HEADER | 16 + .../lib/go/thrift/application_exception.go | 164 ++ .../lib/go/thrift/binary_protocol.go | 515 +++++++ .../lib/go/thrift/buffered_transport.go | 92 ++ .../thrift.git/lib/go/thrift/client.go | 85 ++ .../lib/go/thrift/compact_protocol.go | 816 ++++++++++ .../thrift.git/lib/go/thrift/context.go | 24 + .../lib/go/thrift/debug_protocol.go | 270 ++++ .../thrift.git/lib/go/thrift/deserializer.go | 58 + .../thrift.git/lib/go/thrift/exception.go | 44 + .../thrift.git/lib/go/thrift/field.go | 79 + .../lib/go/thrift/framed_transport.go | 173 +++ .../thrift.git/lib/go/thrift/http_client.go | 242 +++ .../lib/go/thrift/http_transport.go | 63 + .../lib/go/thrift/iostream_transport.go | 214 +++ .../thrift.git/lib/go/thrift/json_protocol.go | 
584 +++++++ .../thrift.git/lib/go/thrift/memory_buffer.go | 80 + .../thrift.git/lib/go/thrift/messagetype.go | 31 + .../lib/go/thrift/multiplexed_protocol.go | 170 +++ .../thrift.git/lib/go/thrift/numeric.go | 164 ++ .../thrift.git/lib/go/thrift/pointerize.go | 50 + .../lib/go/thrift/processor_factory.go | 70 + .../thrift.git/lib/go/thrift/protocol.go | 179 +++ .../lib/go/thrift/protocol_exception.go | 77 + .../lib/go/thrift/protocol_factory.go | 25 + .../lib/go/thrift/rich_transport.go | 68 + .../thrift.git/lib/go/thrift/serializer.go | 79 + .../thrift.git/lib/go/thrift/server.go | 35 + .../thrift.git/lib/go/thrift/server_socket.go | 134 ++ .../lib/go/thrift/server_transport.go | 34 + .../lib/go/thrift/simple_json_protocol.go | 1338 +++++++++++++++++ .../thrift.git/lib/go/thrift/simple_server.go | 227 +++ .../thrift.git/lib/go/thrift/socket.go | 166 ++ .../lib/go/thrift/ssl_server_socket.go | 112 ++ .../thrift.git/lib/go/thrift/ssl_socket.go | 176 +++ .../thrift.git/lib/go/thrift/transport.go | 70 + .../lib/go/thrift/transport_exception.go | 90 ++ .../lib/go/thrift/transport_factory.go | 39 + .../thrift.git/lib/go/thrift/type.go | 69 + .../lib/go/thrift/zlib_transport.go | 132 ++ .../git.apache.org/thrift.git/lib/hs/LICENSE | 202 +++ .../thrift.git/tutorial/erl/server.sh | 1 + .../thrift.git/tutorial/hs/LICENSE | 239 +++ 48 files changed, 7888 insertions(+), 1 deletion(-) create mode 100644 vendor/git.apache.org/thrift.git/LICENSE create mode 100644 vendor/git.apache.org/thrift.git/NOTICE create mode 100644 vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE create mode 100644 vendor/git.apache.org/thrift.git/debian/copyright create mode 100644 vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go 
create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/client.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/context.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/field.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/server.go create mode 100644 
vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport_exception.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/type.go create mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go create mode 100644 vendor/git.apache.org/thrift.git/lib/hs/LICENSE create mode 120000 vendor/git.apache.org/thrift.git/tutorial/erl/server.sh create mode 100644 vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE diff --git a/.gitignore b/.gitignore index 806230244..74a6db472 100644 --- a/.gitignore +++ b/.gitignore @@ -4,4 +4,6 @@ /exporter/aws/ # Exclude vendor, use dep ensure after checkout: -/vendor/ +/vendor/github.com/ +/vendor/golang.org/ +/vendor/google.golang.org/ diff --git a/vendor/git.apache.org/thrift.git/LICENSE b/vendor/git.apache.org/thrift.git/LICENSE new file mode 100644 index 000000000..3b6d7d74c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/LICENSE @@ -0,0 +1,239 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. 
+ +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) +-------------------------------------------------- diff --git a/vendor/git.apache.org/thrift.git/NOTICE b/vendor/git.apache.org/thrift.git/NOTICE new file mode 100644 index 000000000..902dc8d31 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/NOTICE @@ -0,0 +1,5 @@ +Apache Thrift +Copyright 2006-2017 The Apache Software Foundation. + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE b/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE new file mode 100644 index 000000000..4eacb6431 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE @@ -0,0 +1,16 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. 
See the License for the +specific language governing permissions and limitations +under the License. diff --git a/vendor/git.apache.org/thrift.git/debian/copyright b/vendor/git.apache.org/thrift.git/debian/copyright new file mode 100644 index 000000000..850643c9a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/debian/copyright @@ -0,0 +1,129 @@ +This package was debianized by Thrift Developer's . + + +This package and the Debian packaging is licensed under the Apache License, +see `/usr/share/common-licenses/Apache-2.0'. + +The following information was copied from Apache Thrift LICENSE file. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. 
+ + +-------------------------------------------------- +The following files contain some portions of code contributed under +the Thrift Software License (see doc/old-thrift-license.txt), and relicensed +under the Apache 2.0 License: + + compiler/cpp/Makefile.am + compiler/cpp/src/generate/t_cocoa_generator.cc + compiler/cpp/src/generate/t_cpp_generator.cc + compiler/cpp/src/generate/t_csharp_generator.cc + compiler/cpp/src/generate/t_erl_generator.cc + compiler/cpp/src/generate/t_hs_generator.cc + compiler/cpp/src/generate/t_java_generator.cc + compiler/cpp/src/generate/t_ocaml_generator.cc + compiler/cpp/src/generate/t_perl_generator.cc + compiler/cpp/src/generate/t_php_generator.cc + compiler/cpp/src/generate/t_py_generator.cc + compiler/cpp/src/generate/t_rb_generator.cc + compiler/cpp/src/generate/t_st_generator.cc + compiler/cpp/src/generate/t_xsd_generator.cc + compiler/cpp/src/main.cc + compiler/cpp/src/parse/t_field.h + compiler/cpp/src/parse/t_program.h + compiler/cpp/src/platform.h + compiler/cpp/src/thriftl.ll + compiler/cpp/src/thrifty.yy + lib/csharp/src/Protocol/TBinaryProtocol.cs + lib/csharp/src/Protocol/TField.cs + lib/csharp/src/Protocol/TList.cs + lib/csharp/src/Protocol/TMap.cs + lib/csharp/src/Protocol/TMessage.cs + lib/csharp/src/Protocol/TMessageType.cs + lib/csharp/src/Protocol/TProtocol.cs + lib/csharp/src/Protocol/TProtocolException.cs + lib/csharp/src/Protocol/TProtocolFactory.cs + lib/csharp/src/Protocol/TProtocolUtil.cs + lib/csharp/src/Protocol/TSet.cs + lib/csharp/src/Protocol/TStruct.cs + lib/csharp/src/Protocol/TType.cs + lib/csharp/src/Server/TServer.cs + lib/csharp/src/Server/TSimpleServer.cs + lib/csharp/src/Server/TThreadPoolServer.cs + lib/csharp/src/TApplicationException.cs + lib/csharp/src/Thrift.csproj + lib/csharp/src/Thrift.sln + lib/csharp/src/TProcessor.cs + lib/csharp/src/Transport/TServerSocket.cs + lib/csharp/src/Transport/TServerTransport.cs + lib/csharp/src/Transport/TSocket.cs + 
lib/csharp/src/Transport/TStreamTransport.cs + lib/csharp/src/Transport/TTransport.cs + lib/csharp/src/Transport/TTransportException.cs + lib/csharp/src/Transport/TTransportFactory.cs + lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs + lib/csharp/ThriftMSBuildTask/ThriftBuild.cs + lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj + lib/rb/lib/thrift.rb + lib/st/README + lib/st/thrift.st + test/OptionalRequiredTest.cpp + test/OptionalRequiredTest.thrift + test/ThriftTest.thrift + +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the compiler/cpp/src/md5.[ch] components: + +/* + Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. + + This software is provided 'as-is', without any express or implied + warranty. In no event will the authors be held liable for any damages + arising from the use of this software. + + Permission is granted to anyone to use this software for any purpose, + including commercial applications, and to alter it and redistribute it + freely, subject to the following restrictions: + + 1. The origin of this software must not be misrepresented; you must not + claim that you wrote the original software. If you use this software + in a product, an acknowledgment in the product documentation would be + appreciated but is not required. + 2. Altered source versions must be plainly marked as such, and must not be + misrepresented as being the original software. + 3. This notice may not be removed or altered from any source distribution. + + L. 
Peter Deutsch + ghost@aladdin.com + + */ + +--------------------------------------------------- +For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki, +lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components: + Copyright (C) 1999 - 2007 Markus Mottl + +Licensed under the terms of the GNU Lesser General Public License 2.1 +(see doc/lgpl-2.1.txt for the full terms of this license) diff --git a/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER b/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER new file mode 100644 index 000000000..4eacb6431 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER @@ -0,0 +1,16 @@ +Licensed to the Apache Software Foundation (ASF) under one +or more contributor license agreements. See the NOTICE file +distributed with this work for additional information +regarding copyright ownership. The ASF licenses this file +to you under the Apache License, Version 2.0 (the +"License"); you may not use this file except in compliance +with the License. You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, +software distributed under the License is distributed on an +"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +KIND, either express or implied. See the License for the +specific language governing permissions and limitations +under the License. diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go new file mode 100644 index 000000000..b9d7eedcd --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +const ( + UNKNOWN_APPLICATION_EXCEPTION = 0 + UNKNOWN_METHOD = 1 + INVALID_MESSAGE_TYPE_EXCEPTION = 2 + WRONG_METHOD_NAME = 3 + BAD_SEQUENCE_ID = 4 + MISSING_RESULT = 5 + INTERNAL_ERROR = 6 + PROTOCOL_ERROR = 7 +) + +var defaultApplicationExceptionMessage = map[int32]string{ + UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", + UNKNOWN_METHOD: "unknown method", + INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", + WRONG_METHOD_NAME: "wrong method name", + BAD_SEQUENCE_ID: "bad sequence ID", + MISSING_RESULT: "missing result", + INTERNAL_ERROR: "unknown internal error", + PROTOCOL_ERROR: "unknown protocol error", +} + +// Application level Thrift exception +type TApplicationException interface { + TException + TypeId() int32 + Read(iprot TProtocol) error + Write(oprot TProtocol) error +} + +type tApplicationException struct { + message string + type_ int32 +} + +func (e tApplicationException) Error() string { + if e.message != "" { + return e.message + } + return defaultApplicationExceptionMessage[e.type_] +} + +func NewTApplicationException(type_ int32, message string) TApplicationException { + return &tApplicationException{message, type_} +} + +func (p *tApplicationException) TypeId() int32 { + return p.type_ +} + +func (p *tApplicationException) Read(iprot TProtocol) error { + // TODO: this should really be generated by the compiler + _, 
err := iprot.ReadStructBegin() + if err != nil { + return err + } + + message := "" + type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) + + for { + _, ttype, id, err := iprot.ReadFieldBegin() + if err != nil { + return err + } + if ttype == STOP { + break + } + switch id { + case 1: + if ttype == STRING { + if message, err = iprot.ReadString(); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return err + } + } + case 2: + if ttype == I32 { + if type_, err = iprot.ReadI32(); err != nil { + return err + } + } else { + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return err + } + } + default: + if err = SkipDefaultDepth(iprot, ttype); err != nil { + return err + } + } + if err = iprot.ReadFieldEnd(); err != nil { + return err + } + } + if err := iprot.ReadStructEnd(); err != nil { + return err + } + + p.message = message + p.type_ = type_ + + return nil +} + +func (p *tApplicationException) Write(oprot TProtocol) (err error) { + err = oprot.WriteStructBegin("TApplicationException") + if len(p.Error()) > 0 { + err = oprot.WriteFieldBegin("message", STRING, 1) + if err != nil { + return + } + err = oprot.WriteString(p.Error()) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + } + err = oprot.WriteFieldBegin("type", I32, 2) + if err != nil { + return + } + err = oprot.WriteI32(p.type_) + if err != nil { + return + } + err = oprot.WriteFieldEnd() + if err != nil { + return + } + err = oprot.WriteFieldStop() + if err != nil { + return + } + err = oprot.WriteStructEnd() + return +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go new file mode 100644 index 000000000..de0f6a7a5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go @@ -0,0 +1,515 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license 
agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" + "encoding/binary" + "errors" + "fmt" + "io" + "math" +) + +type TBinaryProtocol struct { + trans TRichTransport + origTransport TTransport + reader io.Reader + writer io.Writer + strictRead bool + strictWrite bool + buffer [64]byte +} + +type TBinaryProtocolFactory struct { + strictRead bool + strictWrite bool +} + +func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { + return NewTBinaryProtocol(t, false, true) +} + +func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { + p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} + if et, ok := t.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(t) + } + p.reader = p.trans + p.writer = p.trans + return p +} + +func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { + return NewTBinaryProtocolFactory(false, true) +} + +func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { + return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} +} + +func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { + return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) +} + +/** + * Writing 
Methods + */ + +func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + if p.strictWrite { + version := uint32(VERSION_1) | uint32(typeId) + e := p.WriteI32(int32(version)) + if e != nil { + return e + } + e = p.WriteString(name) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } else { + e := p.WriteString(name) + if e != nil { + return e + } + e = p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = p.WriteI32(seqId) + return e + } + return nil +} + +func (p *TBinaryProtocol) WriteMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteStructBegin(name string) error { + return nil +} + +func (p *TBinaryProtocol) WriteStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + e := p.WriteByte(int8(typeId)) + if e != nil { + return e + } + e = p.WriteI16(id) + return e +} + +func (p *TBinaryProtocol) WriteFieldEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteFieldStop() error { + e := p.WriteByte(STOP) + return e +} + +func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + e := p.WriteByte(int8(keyType)) + if e != nil { + return e + } + e = p.WriteByte(int8(valueType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteListEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { + e := p.WriteByte(int8(elemType)) + if e != nil { + return e + } + e = p.WriteI32(int32(size)) + return e +} + +func (p *TBinaryProtocol) WriteSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) WriteBool(value bool) 
error { + if value { + return p.WriteByte(1) + } + return p.WriteByte(0) +} + +func (p *TBinaryProtocol) WriteByte(value int8) error { + e := p.trans.WriteByte(byte(value)) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI16(value int16) error { + v := p.buffer[0:2] + binary.BigEndian.PutUint16(v, uint16(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI32(value int32) error { + v := p.buffer[0:4] + binary.BigEndian.PutUint32(v, uint32(value)) + _, e := p.writer.Write(v) + return NewTProtocolException(e) +} + +func (p *TBinaryProtocol) WriteI64(value int64) error { + v := p.buffer[0:8] + binary.BigEndian.PutUint64(v, uint64(value)) + _, err := p.writer.Write(v) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteDouble(value float64) error { + return p.WriteI64(int64(math.Float64bits(value))) +} + +func (p *TBinaryProtocol) WriteString(value string) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.trans.WriteString(value) + return NewTProtocolException(err) +} + +func (p *TBinaryProtocol) WriteBinary(value []byte) error { + e := p.WriteI32(int32(len(value))) + if e != nil { + return e + } + _, err := p.writer.Write(value) + return NewTProtocolException(err) +} + +/** + * Reading methods + */ + +func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + size, e := p.ReadI32() + if e != nil { + return "", typeId, 0, NewTProtocolException(e) + } + if size < 0 { + typeId = TMessageType(size & 0x0ff) + version := int64(int64(size) & VERSION_MASK) + if version != VERSION_1 { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) + } + name, e = p.ReadString() + if e != nil { + return name, typeId, seqId, NewTProtocolException(e) + } + seqId, e = p.ReadI32() + if e != nil { + return name, typeId, seqId, 
NewTProtocolException(e) + } + return name, typeId, seqId, nil + } + if p.strictRead { + return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) + } + name, e2 := p.readStringBody(size) + if e2 != nil { + return name, typeId, seqId, e2 + } + b, e3 := p.ReadByte() + if e3 != nil { + return name, typeId, seqId, e3 + } + typeId = TMessageType(b) + seqId, e4 := p.ReadI32() + if e4 != nil { + return name, typeId, seqId, e4 + } + return name, typeId, seqId, nil +} + +func (p *TBinaryProtocol) ReadMessageEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { + return +} + +func (p *TBinaryProtocol) ReadStructEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { + t, err := p.ReadByte() + typeId = TType(t) + if err != nil { + return name, typeId, seqId, err + } + if t != STOP { + seqId, err = p.ReadI16() + } + return name, typeId, seqId, err +} + +func (p *TBinaryProtocol) ReadFieldEnd() error { + return nil +} + +var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) + +func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { + k, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + kType = TType(k) + v, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + vType = TType(v) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return kType, vType, size, nil +} + +func (p *TBinaryProtocol) ReadMapEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + 
if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + return +} + +func (p *TBinaryProtocol) ReadListEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { + b, e := p.ReadByte() + if e != nil { + err = NewTProtocolException(e) + return + } + elemType = TType(b) + size32, e := p.ReadI32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + return elemType, size, nil +} + +func (p *TBinaryProtocol) ReadSetEnd() error { + return nil +} + +func (p *TBinaryProtocol) ReadBool() (bool, error) { + b, e := p.ReadByte() + v := true + if b != 1 { + v = false + } + return v, e +} + +func (p *TBinaryProtocol) ReadByte() (int8, error) { + v, err := p.trans.ReadByte() + return int8(v), err +} + +func (p *TBinaryProtocol) ReadI16() (value int16, err error) { + buf := p.buffer[0:2] + err = p.readAll(buf) + value = int16(binary.BigEndian.Uint16(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI32() (value int32, err error) { + buf := p.buffer[0:4] + err = p.readAll(buf) + value = int32(binary.BigEndian.Uint32(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadI64() (value int64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = int64(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { + buf := p.buffer[0:8] + err = p.readAll(buf) + value = math.Float64frombits(binary.BigEndian.Uint64(buf)) + return value, err +} + +func (p *TBinaryProtocol) ReadString() (value string, err error) { + size, e := p.ReadI32() + if e != nil { + return "", e + } + if size < 0 { + err = invalidDataLength + return + } + + return p.readStringBody(size) +} + +func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { + size, e := p.ReadI32() + if e != nil { + 
return nil, e + } + if size < 0 { + return nil, invalidDataLength + } + if uint64(size) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + isize := int(size) + buf := make([]byte, isize) + _, err := io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(err) +} + +func (p *TBinaryProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TBinaryProtocol) Transport() TTransport { + return p.origTransport +} + +func (p *TBinaryProtocol) readAll(buf []byte) error { + _, err := io.ReadFull(p.reader, buf) + return NewTProtocolException(err) +} + +const readLimit = 32768 + +func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { + if size < 0 { + return "", nil + } + if uint64(size) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + var ( + buf bytes.Buffer + e error + b []byte + ) + + switch { + case int(size) <= len(p.buffer): + b = p.buffer[:size] // avoids allocation for small reads + case int(size) < readLimit: + b = make([]byte, size) + default: + b = make([]byte, readLimit) + } + + for size > 0 { + _, e = io.ReadFull(p.trans, b) + buf.Write(b) + if e != nil { + break + } + size -= readLimit + if size < readLimit && size > 0 { + b = b[:size] + } + } + return buf.String(), NewTProtocolException(e) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go new file mode 100644 index 000000000..96702061b --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "context" +) + +type TBufferedTransportFactory struct { + size int +} + +type TBufferedTransport struct { + bufio.ReadWriter + tp TTransport +} + +func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return NewTBufferedTransport(trans, p.size), nil +} + +func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { + return &TBufferedTransportFactory{size: bufferSize} +} + +func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { + return &TBufferedTransport{ + ReadWriter: bufio.ReadWriter{ + Reader: bufio.NewReaderSize(trans, bufferSize), + Writer: bufio.NewWriterSize(trans, bufferSize), + }, + tp: trans, + } +} + +func (p *TBufferedTransport) IsOpen() bool { + return p.tp.IsOpen() +} + +func (p *TBufferedTransport) Open() (err error) { + return p.tp.Open() +} + +func (p *TBufferedTransport) Close() (err error) { + return p.tp.Close() +} + +func (p *TBufferedTransport) Read(b []byte) (int, error) { + n, err := p.ReadWriter.Read(b) + if err != nil { + p.ReadWriter.Reader.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Write(b []byte) (int, error) { + n, err := p.ReadWriter.Write(b) + if err != nil { + p.ReadWriter.Writer.Reset(p.tp) + } + return n, err +} + +func (p *TBufferedTransport) Flush(ctx context.Context) error { + if err := 
p.ReadWriter.Flush(); err != nil { + p.ReadWriter.Writer.Reset(p.tp) + return err + } + return p.tp.Flush(ctx) +} + +func (p *TBufferedTransport) RemainingBytes() (num_bytes uint64) { + return p.tp.RemainingBytes() +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go new file mode 100644 index 000000000..28791ccd0 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go @@ -0,0 +1,85 @@ +package thrift + +import ( + "context" + "fmt" +) + +type TClient interface { + Call(ctx context.Context, method string, args, result TStruct) error +} + +type TStandardClient struct { + seqId int32 + iprot, oprot TProtocol +} + +// TStandardClient implements TClient, and uses the standard message format for Thrift. +// It is not safe for concurrent use. +func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { + return &TStandardClient{ + iprot: inputProtocol, + oprot: outputProtocol, + } +} + +func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { + if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil { + return err + } + if err := args.Write(oprot); err != nil { + return err + } + if err := oprot.WriteMessageEnd(); err != nil { + return err + } + return oprot.Flush(ctx) +} + +func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error { + rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin() + if err != nil { + return err + } + + if method != rMethod { + return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) + } else if seqId != rSeqId { + return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) + } else if rTypeId == EXCEPTION { + var exception tApplicationException + if err := exception.Read(iprot); err != nil { + return err + } + + if err := 
iprot.ReadMessageEnd(); err != nil { + return err + } + + return &exception + } else if rTypeId != REPLY { + return NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) + } + + if err := result.Read(iprot); err != nil { + return err + } + + return iprot.ReadMessageEnd() +} + +func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { + p.seqId++ + seqId := p.seqId + + if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { + return err + } + + // method is oneway + if result == nil { + return nil + } + + return p.Recv(p.iprot, seqId, method, result) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go new file mode 100644 index 000000000..66fbf5c33 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go @@ -0,0 +1,816 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "encoding/binary" + "fmt" + "io" + "math" +) + +const ( + COMPACT_PROTOCOL_ID = 0x082 + COMPACT_VERSION = 1 + COMPACT_VERSION_MASK = 0x1f + COMPACT_TYPE_MASK = 0x0E0 + COMPACT_TYPE_BITS = 0x07 + COMPACT_TYPE_SHIFT_AMOUNT = 5 +) + +type tCompactType byte + +const ( + COMPACT_BOOLEAN_TRUE = 0x01 + COMPACT_BOOLEAN_FALSE = 0x02 + COMPACT_BYTE = 0x03 + COMPACT_I16 = 0x04 + COMPACT_I32 = 0x05 + COMPACT_I64 = 0x06 + COMPACT_DOUBLE = 0x07 + COMPACT_BINARY = 0x08 + COMPACT_LIST = 0x09 + COMPACT_SET = 0x0A + COMPACT_MAP = 0x0B + COMPACT_STRUCT = 0x0C +) + +var ( + ttypeToCompactType map[TType]tCompactType +) + +func init() { + ttypeToCompactType = map[TType]tCompactType{ + STOP: STOP, + BOOL: COMPACT_BOOLEAN_TRUE, + BYTE: COMPACT_BYTE, + I16: COMPACT_I16, + I32: COMPACT_I32, + I64: COMPACT_I64, + DOUBLE: COMPACT_DOUBLE, + STRING: COMPACT_BINARY, + LIST: COMPACT_LIST, + SET: COMPACT_SET, + MAP: COMPACT_MAP, + STRUCT: COMPACT_STRUCT, + } +} + +type TCompactProtocolFactory struct{} + +func NewTCompactProtocolFactory() *TCompactProtocolFactory { + return &TCompactProtocolFactory{} +} + +func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTCompactProtocol(trans) +} + +type TCompactProtocol struct { + trans TRichTransport + origTransport TTransport + + // Used to keep track of the last field for the current and previous structs, + // so we can do the delta stuff. + lastField []int + lastFieldId int + + // If we encounter a boolean field begin, save the TField here so it can + // have the value incorporated. + booleanFieldName string + booleanFieldId int16 + booleanFieldPending bool + + // If we read a field header, and it's a boolean field, save the boolean + // value here so that readBool can use it. 
+ boolValue bool + boolValueIsNotNull bool + buffer [64]byte +} + +// Create a TCompactProtocol given a TTransport +func NewTCompactProtocol(trans TTransport) *TCompactProtocol { + p := &TCompactProtocol{origTransport: trans, lastField: []int{}} + if et, ok := trans.(TRichTransport); ok { + p.trans = et + } else { + p.trans = NewTRichTransport(trans) + } + + return p + +} + +// +// Public Writing methods. +// + +// Write a message header to the wire. Compact Protocol messages contain the +// protocol version so we can migrate forwards in the future if need be. +func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + err := p.writeByteDirect(COMPACT_PROTOCOL_ID) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) + if err != nil { + return NewTProtocolException(err) + } + _, err = p.writeVarint32(seqid) + if err != nil { + return NewTProtocolException(err) + } + e := p.WriteString(name) + return e + +} + +func (p *TCompactProtocol) WriteMessageEnd() error { return nil } + +// Write a struct begin. This doesn't actually put anything on the wire. We +// use it as an opportunity to put special placeholder markers on the field +// stack so we can get the field id deltas correct. +func (p *TCompactProtocol) WriteStructBegin(name string) error { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return nil +} + +// Write a struct end. This doesn't actually put anything on the wire. We use +// this as an opportunity to pop the last field from the current struct off +// of the field stack. 
+func (p *TCompactProtocol) WriteStructEnd() error { + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if typeId == BOOL { + // we want to possibly include the value, so we'll wait. + p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true + return nil + } + _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) + return NewTProtocolException(err) +} + +// The workhorse of writeFieldBegin. It has the option of doing a +// 'type override' of the type header. This is used specifically in the +// boolean field case. +func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { + // short lastField = lastField_.pop(); + + // if there's a type override, use that. + var typeToWrite byte + if typeOverride == 0xFF { + typeToWrite = byte(p.getCompactType(typeId)) + } else { + typeToWrite = typeOverride + } + // check if we can use delta encoding for the field id + fieldId := int(id) + written := 0 + if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { + // write them together + err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) + if err != nil { + return 0, err + } + } else { + // write them separate + err := p.writeByteDirect(typeToWrite) + if err != nil { + return 0, err + } + err = p.WriteI16(id) + written = 1 + 2 + if err != nil { + return 0, err + } + } + + p.lastFieldId = fieldId + // p.lastField.Push(field.id); + return written, nil +} + +func (p *TCompactProtocol) WriteFieldEnd() error { return nil } + +func (p *TCompactProtocol) WriteFieldStop() error { + err := p.writeByteDirect(STOP) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if size == 0 { + err := p.writeByteDirect(0) + return 
NewTProtocolException(err) + } + _, err := p.writeVarint32(int32(size)) + if err != nil { + return NewTProtocolException(err) + } + err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteMapEnd() error { return nil } + +// Write a list header. +func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteListEnd() error { return nil } + +// Write a set header. +func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { + _, err := p.writeCollectionBegin(elemType, size) + return NewTProtocolException(err) +} + +func (p *TCompactProtocol) WriteSetEnd() error { return nil } + +func (p *TCompactProtocol) WriteBool(value bool) error { + v := byte(COMPACT_BOOLEAN_FALSE) + if value { + v = byte(COMPACT_BOOLEAN_TRUE) + } + if p.booleanFieldPending { + // we haven't written the field header yet + _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) + p.booleanFieldPending = false + return NewTProtocolException(err) + } + // we're not part of a field, so just write the value. + err := p.writeByteDirect(v) + return NewTProtocolException(err) +} + +// Write a byte. Nothing to see here! +func (p *TCompactProtocol) WriteByte(value int8) error { + err := p.writeByteDirect(byte(value)) + return NewTProtocolException(err) +} + +// Write an I16 as a zigzag varint. +func (p *TCompactProtocol) WriteI16(value int16) error { + _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) + return NewTProtocolException(err) +} + +// Write an i32 as a zigzag varint. +func (p *TCompactProtocol) WriteI32(value int32) error { + _, err := p.writeVarint32(p.int32ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write an i64 as a zigzag varint. 
+func (p *TCompactProtocol) WriteI64(value int64) error { + _, err := p.writeVarint64(p.int64ToZigzag(value)) + return NewTProtocolException(err) +} + +// Write a double to the wire as 8 bytes. +func (p *TCompactProtocol) WriteDouble(value float64) error { + buf := p.buffer[0:8] + binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) + _, err := p.trans.Write(buf) + return NewTProtocolException(err) +} + +// Write a string to the wire with a varint size preceding. +func (p *TCompactProtocol) WriteString(value string) error { + _, e := p.writeVarint32(int32(len(value))) + if e != nil { + return NewTProtocolException(e) + } + if len(value) > 0 { + } + _, e = p.trans.WriteString(value) + return e +} + +// Write a byte array, using a varint for the size. +func (p *TCompactProtocol) WriteBinary(bin []byte) error { + _, e := p.writeVarint32(int32(len(bin))) + if e != nil { + return NewTProtocolException(e) + } + if len(bin) > 0 { + _, e = p.trans.Write(bin) + return NewTProtocolException(e) + } + return nil +} + +// +// Reading methods. +// + +// Read a message header. 
+func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + + protocolId, err := p.readByteDirect() + if err != nil { + return + } + + if protocolId != COMPACT_PROTOCOL_ID { + e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) + return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) + } + + versionAndType, err := p.readByteDirect() + if err != nil { + return + } + + version := versionAndType & COMPACT_VERSION_MASK + typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) + if version != COMPACT_VERSION { + e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) + err = NewTProtocolExceptionWithType(BAD_VERSION, e) + return + } + seqId, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + name, err = p.ReadString() + return +} + +func (p *TCompactProtocol) ReadMessageEnd() error { return nil } + +// Read a struct begin. There's nothing on the wire for this, but it is our +// opportunity to push a new struct begin marker onto the field stack. +func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { + p.lastField = append(p.lastField, p.lastFieldId) + p.lastFieldId = 0 + return +} + +// Doesn't actually consume any wire data, just removes the last field for +// this struct from the field stack. +func (p *TCompactProtocol) ReadStructEnd() error { + // consume the last field we read off the wire. + p.lastFieldId = p.lastField[len(p.lastField)-1] + p.lastField = p.lastField[:len(p.lastField)-1] + return nil +} + +// Read a field header off the wire. +func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { + t, err := p.readByteDirect() + if err != nil { + return + } + + // if it's a stop, then we can return immediately, as the struct is over. 
+ if (t & 0x0f) == STOP { + return "", STOP, 0, nil + } + + // mask off the 4 MSB of the type header. it could contain a field id delta. + modifier := int16((t & 0xf0) >> 4) + if modifier == 0 { + // not a delta. look ahead for the zigzag varint field id. + id, err = p.ReadI16() + if err != nil { + return + } + } else { + // has a delta. add the delta to the last read field id. + id = int16(p.lastFieldId) + modifier + } + typeId, e := p.getTType(tCompactType(t & 0x0f)) + if e != nil { + err = NewTProtocolException(e) + return + } + + // if this happens to be a boolean field, the value is encoded in the type + if p.isBoolType(t) { + // save the boolean value in a special instance variable. + p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) + p.boolValueIsNotNull = true + } + + // push the new field onto the field stack so we can keep the deltas going. + p.lastFieldId = int(id) + return +} + +func (p *TCompactProtocol) ReadFieldEnd() error { return nil } + +// Read a map header off the wire. If the size is zero, skip reading the key +// and value type. This means that 0-length maps will yield TMaps without the +// "correct" types. +func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { + size32, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size32 < 0 { + err = invalidDataLength + return + } + size = int(size32) + + keyAndValueType := byte(STOP) + if size != 0 { + keyAndValueType, err = p.readByteDirect() + if err != nil { + return + } + } + keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) + valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) + return +} + +func (p *TCompactProtocol) ReadMapEnd() error { return nil } + +// Read a list header off the wire. If the list size is 0-14, the size will +// be packed into the element type header. 
If it's a longer list, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { + size_and_type, err := p.readByteDirect() + if err != nil { + return + } + size = int((size_and_type >> 4) & 0x0f) + if size == 15 { + size2, e := p.readVarint32() + if e != nil { + err = NewTProtocolException(e) + return + } + if size2 < 0 { + err = invalidDataLength + return + } + size = int(size2) + } + elemType, e := p.getTType(tCompactType(size_and_type)) + if e != nil { + err = NewTProtocolException(e) + return + } + return +} + +func (p *TCompactProtocol) ReadListEnd() error { return nil } + +// Read a set header off the wire. If the set size is 0-14, the size will +// be packed into the element type header. If it's a longer set, the 4 MSB +// of the element type header will be 0xF, and a varint will follow with the +// true size. +func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { + return p.ReadListBegin() +} + +func (p *TCompactProtocol) ReadSetEnd() error { return nil } + +// Read a boolean off the wire. If this is a boolean field, the value should +// already have been read during readFieldBegin, so we'll just consume the +// pre-stored value. Otherwise, read a byte. +func (p *TCompactProtocol) ReadBool() (value bool, err error) { + if p.boolValueIsNotNull { + p.boolValueIsNotNull = false + return p.boolValue, nil + } + v, err := p.readByteDirect() + return v == COMPACT_BOOLEAN_TRUE, err +} + +// Read a single byte off the wire. Nothing interesting here. +func (p *TCompactProtocol) ReadByte() (int8, error) { + v, err := p.readByteDirect() + if err != nil { + return 0, NewTProtocolException(err) + } + return int8(v), err +} + +// Read an i16 from the wire as a zigzag varint. 
+func (p *TCompactProtocol) ReadI16() (value int16, err error) { + v, err := p.ReadI32() + return int16(v), err +} + +// Read an i32 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI32() (value int32, err error) { + v, e := p.readVarint32() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt32(v) + return value, nil +} + +// Read an i64 from the wire as a zigzag varint. +func (p *TCompactProtocol) ReadI64() (value int64, err error) { + v, e := p.readVarint64() + if e != nil { + return 0, NewTProtocolException(e) + } + value = p.zigzagToInt64(v) + return value, nil +} + +// No magic here - just read a double off the wire. +func (p *TCompactProtocol) ReadDouble() (value float64, err error) { + longBits := p.buffer[0:8] + _, e := io.ReadFull(p.trans, longBits) + if e != nil { + return 0.0, NewTProtocolException(e) + } + return math.Float64frombits(p.bytesToUint64(longBits)), nil +} + +// Reads a []byte (via readBinary), and then UTF-8 decodes it. +func (p *TCompactProtocol) ReadString() (value string, err error) { + length, e := p.readVarint32() + if e != nil { + return "", NewTProtocolException(e) + } + if length < 0 { + return "", invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return "", invalidDataLength + } + + if length == 0 { + return "", nil + } + var buf []byte + if length <= int32(len(p.buffer)) { + buf = p.buffer[0:length] + } else { + buf = make([]byte, length) + } + _, e = io.ReadFull(p.trans, buf) + return string(buf), NewTProtocolException(e) +} + +// Read a []byte from the wire. 
+func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { + length, e := p.readVarint32() + if e != nil { + return nil, NewTProtocolException(e) + } + if length == 0 { + return []byte{}, nil + } + if length < 0 { + return nil, invalidDataLength + } + if uint64(length) > p.trans.RemainingBytes() { + return nil, invalidDataLength + } + + buf := make([]byte, length) + _, e = io.ReadFull(p.trans, buf) + return buf, NewTProtocolException(e) +} + +func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.trans.Flush(ctx)) +} + +func (p *TCompactProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TCompactProtocol) Transport() TTransport { + return p.origTransport +} + +// +// Internal writing methods +// + +// Abstract method for writing the start of lists and sets. List and sets on +// the wire differ only by the type indicator. +func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { + if size <= 14 { + return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) + } + err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) + if err != nil { + return 0, err + } + m, err := p.writeVarint32(int32(size)) + return 1 + m, err +} + +// Write an i32 as a varint. Results in 1-5 bytes on the wire. +// TODO(pomack): make a permanent buffer like writeVarint64? +func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { + i32buf := p.buffer[0:5] + idx := 0 + for { + if (n & ^0x7F) == 0 { + i32buf[idx] = byte(n) + idx++ + // p.writeByteDirect(byte(n)); + break + // return; + } else { + i32buf[idx] = byte((n & 0x7F) | 0x80) + idx++ + // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); + u := uint32(n) + n = int32(u >> 7) + } + } + return p.trans.Write(i32buf[0:idx]) +} + +// Write an i64 as a varint. Results in 1-10 bytes on the wire. 
+func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { + varint64out := p.buffer[0:10] + idx := 0 + for { + if (n & ^0x7F) == 0 { + varint64out[idx] = byte(n) + idx++ + break + } else { + varint64out[idx] = byte((n & 0x7F) | 0x80) + idx++ + u := uint64(n) + n = int64(u >> 7) + } + } + return p.trans.Write(varint64out[0:idx]) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { + return (l << 1) ^ (l >> 63) +} + +// Convert l into a zigzag long. This allows negative numbers to be +// represented compactly as a varint. +func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { + return (n << 1) ^ (n >> 31) +} + +func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { + binary.LittleEndian.PutUint64(buf, n) +} + +func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { + binary.LittleEndian.PutUint64(buf, uint64(n)) +} + +// Writes a byte without any possibility of all that field header nonsense. +// Used internally by other writing methods that know they need to write a byte. +func (p *TCompactProtocol) writeByteDirect(b byte) error { + return p.trans.WriteByte(b) +} + +// Writes a byte without any possibility of all that field header nonsense. +func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { + return 1, p.writeByteDirect(byte(n)) +} + +// +// Internal reading methods +// + +// Read an i32 from the wire as a varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 5 bytes. +func (p *TCompactProtocol) readVarint32() (int32, error) { + // if the wire contains the right stuff, this will just truncate the i64 we + // read and get us the right sign. + v, err := p.readVarint64() + return int32(v), err +} + +// Read an i64 from the wire as a proper varint. The MSB of each byte is set +// if there is another byte to follow. This can read up to 10 bytes. 
+func (p *TCompactProtocol) readVarint64() (int64, error) { + shift := uint(0) + result := int64(0) + for { + b, err := p.readByteDirect() + if err != nil { + return 0, err + } + result |= int64(b&0x7f) << shift + if (b & 0x80) != 0x80 { + break + } + shift += 7 + } + return result, nil +} + +// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. +func (p *TCompactProtocol) readByteDirect() (byte, error) { + return p.trans.ReadByte() +} + +// +// encoding helpers +// + +// Convert from zigzag int to int. +func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { + u := uint32(n) + return int32(u>>1) ^ -(n & 1) +} + +// Convert from zigzag long to long. +func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { + u := uint64(n) + return int64(u>>1) ^ -(n & 1) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { + return int64(binary.LittleEndian.Uint64(b)) +} + +// Note that it's important that the mask bytes are long literals, +// otherwise they'll default to ints, and when you shift an int left 56 bits, +// you just get a messed up int. +func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { + return binary.LittleEndian.Uint64(b) +} + +// +// type testing and converting +// + +func (p *TCompactProtocol) isBoolType(b byte) bool { + return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE +} + +// Given a tCompactType constant, convert it to its corresponding +// TType value. 
+func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { + switch byte(t) & 0x0f { + case STOP: + return STOP, nil + case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: + return BOOL, nil + case COMPACT_BYTE: + return BYTE, nil + case COMPACT_I16: + return I16, nil + case COMPACT_I32: + return I32, nil + case COMPACT_I64: + return I64, nil + case COMPACT_DOUBLE: + return DOUBLE, nil + case COMPACT_BINARY: + return STRING, nil + case COMPACT_LIST: + return LIST, nil + case COMPACT_SET: + return SET, nil + case COMPACT_MAP: + return MAP, nil + case COMPACT_STRUCT: + return STRUCT, nil + } + return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f)) +} + +// Given a TType value, find the appropriate TCompactProtocol.Types constant. +func (p *TCompactProtocol) getCompactType(t TType) tCompactType { + return ttypeToCompactType[t] +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go new file mode 100644 index 000000000..d15c1bcf8 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go @@ -0,0 +1,24 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import "context" + +var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go new file mode 100644 index 000000000..57943e0f3 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go @@ -0,0 +1,270 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "context" + "log" +) + +type TDebugProtocol struct { + Delegate TProtocol + LogPrefix string +} + +type TDebugProtocolFactory struct { + Underlying TProtocolFactory + LogPrefix string +} + +func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { + return &TDebugProtocolFactory{ + Underlying: underlying, + LogPrefix: logPrefix, + } +} + +func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return &TDebugProtocol{ + Delegate: t.Underlying.GetProtocol(trans), + LogPrefix: t.LogPrefix, + } +} + +func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid) + log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) + return err +} +func (tdp *TDebugProtocol) WriteMessageEnd() error { + err := tdp.Delegate.WriteMessageEnd() + log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteStructBegin(name string) error { + err := tdp.Delegate.WriteStructBegin(name) + log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) + return err +} +func (tdp *TDebugProtocol) WriteStructEnd() error { + err := tdp.Delegate.WriteStructEnd() + log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + err := tdp.Delegate.WriteFieldBegin(name, typeId, id) + log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) + return err +} +func (tdp *TDebugProtocol) WriteFieldEnd() error { + err := tdp.Delegate.WriteFieldEnd() + log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteFieldStop() error { + err := tdp.Delegate.WriteFieldStop() + 
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + err := tdp.Delegate.WriteMapBegin(keyType, valueType, size) + log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) + return err +} +func (tdp *TDebugProtocol) WriteMapEnd() error { + err := tdp.Delegate.WriteMapEnd() + log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error { + err := tdp.Delegate.WriteListBegin(elemType, size) + log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + return err +} +func (tdp *TDebugProtocol) WriteListEnd() error { + err := tdp.Delegate.WriteListEnd() + log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error { + err := tdp.Delegate.WriteSetBegin(elemType, size) + log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) + return err +} +func (tdp *TDebugProtocol) WriteSetEnd() error { + err := tdp.Delegate.WriteSetEnd() + log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) + return err +} +func (tdp *TDebugProtocol) WriteBool(value bool) error { + err := tdp.Delegate.WriteBool(value) + log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteByte(value int8) error { + err := tdp.Delegate.WriteByte(value) + log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteI16(value int16) error { + err := tdp.Delegate.WriteI16(value) + log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteI32(value int32) error { + err := tdp.Delegate.WriteI32(value) + 
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteI64(value int64) error { + err := tdp.Delegate.WriteI64(value) + log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteDouble(value float64) error { + err := tdp.Delegate.WriteDouble(value) + log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteString(value string) error { + err := tdp.Delegate.WriteString(value) + log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} +func (tdp *TDebugProtocol) WriteBinary(value []byte) error { + err := tdp.Delegate.WriteBinary(value) + log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) + return err +} + +func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { + name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin() + log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) + return +} +func (tdp *TDebugProtocol) ReadMessageEnd() (err error) { + err = tdp.Delegate.ReadMessageEnd() + log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) { + name, err = tdp.Delegate.ReadStructBegin() + log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) + return +} +func (tdp *TDebugProtocol) ReadStructEnd() (err error) { + err = tdp.Delegate.ReadStructEnd() + log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { + name, typeId, id, err = tdp.Delegate.ReadFieldBegin() + log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) + return +} +func (tdp 
*TDebugProtocol) ReadFieldEnd() (err error) { + err = tdp.Delegate.ReadFieldEnd() + log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { + keyType, valueType, size, err = tdp.Delegate.ReadMapBegin() + log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) + return +} +func (tdp *TDebugProtocol) ReadMapEnd() (err error) { + err = tdp.Delegate.ReadMapEnd() + log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadListBegin() + log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + return +} +func (tdp *TDebugProtocol) ReadListEnd() (err error) { + err = tdp.Delegate.ReadListEnd() + log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) { + elemType, size, err = tdp.Delegate.ReadSetBegin() + log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) + return +} +func (tdp *TDebugProtocol) ReadSetEnd() (err error) { + err = tdp.Delegate.ReadSetEnd() + log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) + return +} +func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { + value, err = tdp.Delegate.ReadBool() + log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { + value, err = tdp.Delegate.ReadByte() + log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { + value, err = tdp.Delegate.ReadI16() + log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, 
value, err) + return +} +func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { + value, err = tdp.Delegate.ReadI32() + log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { + value, err = tdp.Delegate.ReadI64() + log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { + value, err = tdp.Delegate.ReadDouble() + log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadString() (value string, err error) { + value, err = tdp.Delegate.ReadString() + log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { + value, err = tdp.Delegate.ReadBinary() + log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) + return +} +func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { + err = tdp.Delegate.Skip(fieldType) + log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) + return +} +func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { + err = tdp.Delegate.Flush(ctx) + log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) + return +} + +func (tdp *TDebugProtocol) Transport() TTransport { + return tdp.Delegate.Transport() +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go new file mode 100644 index 000000000..91a0983a4 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go @@ -0,0 +1,58 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TDeserializer struct { + Transport TTransport + Protocol TProtocol +} + +func NewTDeserializer() *TDeserializer { + var transport TTransport + transport = NewTMemoryBufferLen(1024) + + protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) + + return &TDeserializer{ + transport, + protocol} +} + +func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { + err = nil + if _, err = t.Transport.Write([]byte(s)); err != nil { + return + } + if err = msg.Read(t.Protocol); err != nil { + return + } + return +} + +func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { + err = nil + if _, err = t.Transport.Write(b); err != nil { + return + } + if err = msg.Read(t.Protocol); err != nil { + return + } + return +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go new file mode 100644 index 000000000..ea8d6f661 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" +) + +// Generic Thrift exception +type TException interface { + error +} + +// Prepends additional information to an error without losing the Thrift exception interface +func PrependError(prepend string, err error) error { + if t, ok := err.(TTransportException); ok { + return NewTTransportException(t.TypeId(), prepend+t.Error()) + } + if t, ok := err.(TProtocolException); ok { + return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) + } + if t, ok := err.(TApplicationException); ok { + return NewTApplicationException(t.TypeId(), prepend+t.Error()) + } + + return errors.New(prepend + err.Error()) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go new file mode 100644 index 000000000..9d6652550 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Helper class that encapsulates field metadata. +type field struct { + name string + typeId TType + id int +} + +func newField(n string, t TType, i int) *field { + return &field{name: n, typeId: t, id: i} +} + +func (p *field) Name() string { + if p == nil { + return "" + } + return p.name +} + +func (p *field) TypeId() TType { + if p == nil { + return TType(VOID) + } + return p.typeId +} + +func (p *field) Id() int { + if p == nil { + return -1 + } + return p.id +} + +func (p *field) String() string { + if p == nil { + return "" + } + return "" +} + +var ANONYMOUS_FIELD *field + +type fieldSlice []field + +func (p fieldSlice) Len() int { + return len(p) +} + +func (p fieldSlice) Less(i, j int) bool { + return p[i].Id() < p[j].Id() +} + +func (p fieldSlice) Swap(i, j int) { + p[i], p[j] = p[j], p[i] +} + +func init() { + ANONYMOUS_FIELD = newField("", STOP, 0) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go new file mode 100644 index 000000000..81fa65aaa --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go @@ -0,0 +1,173 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bufio" + "bytes" + "context" + "encoding/binary" + "fmt" + "io" +) + +const DEFAULT_MAX_LENGTH = 16384000 + +type TFramedTransport struct { + transport TTransport + buf bytes.Buffer + reader *bufio.Reader + frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header + buffer [4]byte + maxLength uint32 +} + +type tFramedTransportFactory struct { + factory TTransportFactory + maxLength uint32 +} + +func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { + return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} +} + +func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { + return &tFramedTransportFactory{factory: factory, maxLength: maxLength} +} + +func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { + tt, err := p.factory.GetTransport(base) + if err != nil { + return nil, err + } + return NewTFramedTransportMaxLength(tt, p.maxLength), nil +} + +func NewTFramedTransport(transport TTransport) *TFramedTransport { + return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} +} + +func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { + return &TFramedTransport{transport: transport, reader: 
bufio.NewReader(transport), maxLength: maxLength} +} + +func (p *TFramedTransport) Open() error { + return p.transport.Open() +} + +func (p *TFramedTransport) IsOpen() bool { + return p.transport.IsOpen() +} + +func (p *TFramedTransport) Close() error { + return p.transport.Close() +} + +func (p *TFramedTransport) Read(buf []byte) (l int, err error) { + if p.frameSize == 0 { + p.frameSize, err = p.readFrameHeader() + if err != nil { + return + } + } + if p.frameSize < uint32(len(buf)) { + frameSize := p.frameSize + tmp := make([]byte, p.frameSize) + l, err = p.Read(tmp) + copy(buf, tmp) + if err == nil { + err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) + return + } + } + got, err := p.reader.Read(buf) + p.frameSize = p.frameSize - uint32(got) + //sanity check + if p.frameSize < 0 { + return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") + } + return got, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) ReadByte() (c byte, err error) { + if p.frameSize == 0 { + p.frameSize, err = p.readFrameHeader() + if err != nil { + return + } + } + if p.frameSize < 1 { + return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) + } + c, err = p.reader.ReadByte() + if err == nil { + p.frameSize-- + } + return +} + +func (p *TFramedTransport) Write(buf []byte) (int, error) { + n, err := p.buf.Write(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) WriteByte(c byte) error { + return p.buf.WriteByte(c) +} + +func (p *TFramedTransport) WriteString(s string) (n int, err error) { + return p.buf.WriteString(s) +} + +func (p *TFramedTransport) Flush(ctx context.Context) error { + size := p.buf.Len() + buf := p.buffer[:4] + binary.BigEndian.PutUint32(buf, uint32(size)) + _, err := p.transport.Write(buf) + if err != nil { + p.buf.Truncate(0) + return 
NewTTransportExceptionFromError(err) + } + if size > 0 { + if n, err := p.buf.WriteTo(p.transport); err != nil { + print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") + p.buf.Truncate(0) + return NewTTransportExceptionFromError(err) + } + } + err = p.transport.Flush(ctx) + return NewTTransportExceptionFromError(err) +} + +func (p *TFramedTransport) readFrameHeader() (uint32, error) { + buf := p.buffer[:4] + if _, err := io.ReadFull(p.reader, buf); err != nil { + return 0, err + } + size := binary.BigEndian.Uint32(buf) + if size < 0 || size > p.maxLength { + return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) + } + return size, nil +} + +func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { + return uint64(p.frameSize) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go new file mode 100644 index 000000000..5c82bf538 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go @@ -0,0 +1,242 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bytes" + "context" + "io" + "io/ioutil" + "net/http" + "net/url" + "strconv" +) + +// Default to using the shared http client. Library users are +// free to change this global client or specify one through +// THttpClientOptions. +var DefaultHttpClient *http.Client = http.DefaultClient + +type THttpClient struct { + client *http.Client + response *http.Response + url *url.URL + requestBuffer *bytes.Buffer + header http.Header + nsecConnectTimeout int64 + nsecReadTimeout int64 +} + +type THttpClientTransportFactory struct { + options THttpClientOptions + url string +} + +func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*THttpClient) + if ok && t.url != nil { + return NewTHttpClientWithOptions(t.url.String(), p.options) + } + } + return NewTHttpClientWithOptions(p.url, p.options) +} + +type THttpClientOptions struct { + // If nil, DefaultHttpClient is used + Client *http.Client +} + +func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return &THttpClientTransportFactory{url: url, options: options} +} + +func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + parsedURL, err := url.Parse(urlstr) + if err != nil { + return nil, err + } + buf := make([]byte, 0, 1024) + client := options.Client + if client == nil { + client = DefaultHttpClient + } + httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} + return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil +} + +func NewTHttpClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} + +// Set the HTTP 
Header for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") +func (p *THttpClient) SetHeader(key string, value string) { + p.header.Add(key, value) +} + +// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// hdrValue := httpTrans.GetHeader("User-Agent") +func (p *THttpClient) GetHeader(key string) string { + return p.header.Get(key) +} + +// Deletes the HTTP Header given a Header Key for this specific Thrift Transport +// It is important that you first assert the TTransport as a THttpClient type +// like so: +// +// httpTrans := trans.(THttpClient) +// httpTrans.DelHeader("User-Agent") +func (p *THttpClient) DelHeader(key string) { + p.header.Del(key) +} + +func (p *THttpClient) Open() error { + // do nothing + return nil +} + +func (p *THttpClient) IsOpen() bool { + return p.response != nil || p.requestBuffer != nil +} + +func (p *THttpClient) closeResponse() error { + var err error + if p.response != nil && p.response.Body != nil { + // The docs specify that if keepalive is enabled and the response body is not + // read to completion the connection will never be returned to the pool and + // reused. Errors are being ignored here because if the connection is invalid + // and this fails for some reason, the Close() method will do any remaining + // cleanup. 
+ io.Copy(ioutil.Discard, p.response.Body) + + err = p.response.Body.Close() + } + + p.response = nil + return err +} + +func (p *THttpClient) Close() error { + if p.requestBuffer != nil { + p.requestBuffer.Reset() + p.requestBuffer = nil + } + return p.closeResponse() +} + +func (p *THttpClient) Read(buf []byte) (int, error) { + if p.response == nil { + return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") + } + n, err := p.response.Body.Read(buf) + if n > 0 && (err == nil || err == io.EOF) { + return n, nil + } + return n, NewTTransportExceptionFromError(err) +} + +func (p *THttpClient) ReadByte() (c byte, err error) { + return readByte(p.response.Body) +} + +func (p *THttpClient) Write(buf []byte) (int, error) { + n, err := p.requestBuffer.Write(buf) + return n, err +} + +func (p *THttpClient) WriteByte(c byte) error { + return p.requestBuffer.WriteByte(c) +} + +func (p *THttpClient) WriteString(s string) (n int, err error) { + return p.requestBuffer.WriteString(s) +} + +func (p *THttpClient) Flush(ctx context.Context) error { + // Close any previous response body to avoid leaking connections. + p.closeResponse() + + req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer) + if err != nil { + return NewTTransportExceptionFromError(err) + } + req.Header = p.header + if ctx != nil { + req = req.WithContext(ctx) + } + response, err := p.client.Do(req) + if err != nil { + return NewTTransportExceptionFromError(err) + } + if response.StatusCode != http.StatusOK { + // Close the response to avoid leaking file descriptors. closeResponse does + // more than just call Close(), so temporarily assign it and reuse the logic. 
+ p.response = response + p.closeResponse() + + // TODO(pomack) log bad response + return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) + } + p.response = response + return nil +} + +func (p *THttpClient) RemainingBytes() (num_bytes uint64) { + len := p.response.ContentLength + if len >= 0 { + return uint64(len) + } + + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} + +// Deprecated: Use NewTHttpClientTransportFactory instead. +func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) +} + +// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. +func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { + return NewTHttpClientTransportFactoryWithOptions(url, options) +} + +// Deprecated: Use NewTHttpClientWithOptions instead. +func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, options) +} + +// Deprecated: Use NewTHttpClient instead. +func NewTHttpPostClient(urlstr string) (TTransport, error) { + return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go new file mode 100644 index 000000000..66f0f388a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go @@ -0,0 +1,63 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "compress/gzip" + "io" + "net/http" + "strings" +) + +// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function +func NewThriftHandlerFunc(processor TProcessor, + inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { + + return gz(func(w http.ResponseWriter, r *http.Request) { + w.Header().Add("Content-Type", "application/x-thrift") + + transport := NewStreamTransport(r.Body, w) + processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) + }) +} + +// gz transparently compresses the HTTP response if the client supports it. 
+func gz(handler http.HandlerFunc) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { + handler(w, r) + return + } + w.Header().Set("Content-Encoding", "gzip") + gz := gzip.NewWriter(w) + defer gz.Close() + gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} + handler(gzw, r) + } +} + +type gzipResponseWriter struct { + io.Writer + http.ResponseWriter +} + +func (w gzipResponseWriter) Write(b []byte) (int, error) { + return w.Writer.Write(b) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go new file mode 100644 index 000000000..fea93bcef --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go @@ -0,0 +1,214 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "bufio" + "context" + "io" +) + +// StreamTransport is a Transport made of an io.Reader and/or an io.Writer +type StreamTransport struct { + io.Reader + io.Writer + isReadWriter bool + closed bool +} + +type StreamTransportFactory struct { + Reader io.Reader + Writer io.Writer + isReadWriter bool +} + +func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*StreamTransport) + if ok { + if t.isReadWriter { + return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil + } + if t.Reader != nil && t.Writer != nil { + return NewStreamTransport(t.Reader, t.Writer), nil + } + if t.Reader != nil && t.Writer == nil { + return NewStreamTransportR(t.Reader), nil + } + if t.Reader == nil && t.Writer != nil { + return NewStreamTransportW(t.Writer), nil + } + return &StreamTransport{}, nil + } + } + if p.isReadWriter { + return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil + } + if p.Reader != nil && p.Writer != nil { + return NewStreamTransport(p.Reader, p.Writer), nil + } + if p.Reader != nil && p.Writer == nil { + return NewStreamTransportR(p.Reader), nil + } + if p.Reader == nil && p.Writer != nil { + return NewStreamTransportW(p.Writer), nil + } + return &StreamTransport{}, nil +} + +func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { + return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} +} + +func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportR(r io.Reader) *StreamTransport { + return &StreamTransport{Reader: bufio.NewReader(r)} +} + +func NewStreamTransportW(w io.Writer) *StreamTransport { + return &StreamTransport{Writer: bufio.NewWriter(w)} +} + +func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { + bufrw := 
bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) + return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} +} + +func (p *StreamTransport) IsOpen() bool { + return !p.closed +} + +// implicitly opened on creation, can't be reopened once closed +func (p *StreamTransport) Open() error { + if !p.closed { + return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") + } else { + return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") + } +} + +// Closes both the input and output streams. +func (p *StreamTransport) Close() error { + if p.closed { + return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") + } + p.closed = true + closedReader := false + if p.Reader != nil { + c, ok := p.Reader.(io.Closer) + if ok { + e := c.Close() + closedReader = true + if e != nil { + return e + } + } + p.Reader = nil + } + if p.Writer != nil && (!closedReader || !p.isReadWriter) { + c, ok := p.Writer.(io.Closer) + if ok { + e := c.Close() + if e != nil { + return e + } + } + p.Writer = nil + } + return nil +} + +// Flushes the underlying output stream if not null. 
+func (p *StreamTransport) Flush(ctx context.Context) error { + if p.Writer == nil { + return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") + } + f, ok := p.Writer.(Flusher) + if ok { + err := f.Flush() + if err != nil { + return NewTTransportExceptionFromError(err) + } + } + return nil +} + +func (p *StreamTransport) Read(c []byte) (n int, err error) { + n, err = p.Reader.Read(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) ReadByte() (c byte, err error) { + f, ok := p.Reader.(io.ByteReader) + if ok { + c, err = f.ReadByte() + } else { + c, err = readByte(p.Reader) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) Write(c []byte) (n int, err error) { + n, err = p.Writer.Write(c) + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteByte(c byte) (err error) { + f, ok := p.Writer.(io.ByteWriter) + if ok { + err = f.WriteByte(c) + } else { + err = writeByte(p.Writer, c) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) WriteString(s string) (n int, err error) { + f, ok := p.Writer.(stringWriter) + if ok { + n, err = f.WriteString(s) + } else { + n, err = p.Writer.Write([]byte(s)) + } + if err != nil { + err = NewTTransportExceptionFromError(err) + } + return +} + +func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go new file mode 100644 index 000000000..7be685d43 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go @@ -0,0 +1,584 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more 
contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "encoding/base64" + "fmt" +) + +const ( + THRIFT_JSON_PROTOCOL_VERSION = 1 +) + +// for references to _ParseContext see tsimplejson_protocol.go + +// JSON protocol implementation for thrift. +// +// This protocol produces/consumes a simple output format +// suitable for parsing by scripting languages. It should not be +// confused with the full-featured TJSONProtocol. 
+// +type TJSONProtocol struct { + *TSimpleJSONProtocol +} + +// Constructor +func NewTJSONProtocol(t TTransport) *TJSONProtocol { + v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} + v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) + v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) + return v +} + +// Factory +type TJSONProtocolFactory struct{} + +func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { + return NewTJSONProtocol(trans) +} + +func NewTJSONProtocolFactory() *TJSONProtocolFactory { + return &TJSONProtocolFactory{} +} + +func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { + p.resetContextStack() // THRIFT-3735 + if e := p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { + return e + } + if e := p.WriteString(name); e != nil { + return e + } + if e := p.WriteByte(int8(typeId)); e != nil { + return e + } + if e := p.WriteI32(seqId); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteMessageEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteStructBegin(name string) error { + if e := p.OutputObjectBegin(); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteStructEnd() error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { + if e := p.WriteI16(id); e != nil { + return e + } + if e := p.OutputObjectBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(typeId) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) WriteFieldEnd() error { + return p.OutputObjectEnd() +} + +func (p *TJSONProtocol) WriteFieldStop() error { return nil } + +func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { + if e := p.OutputListBegin(); e 
!= nil { + return e + } + s, e1 := p.TypeIdToString(keyType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + s, e1 = p.TypeIdToString(valueType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return p.OutputObjectBegin() +} + +func (p *TJSONProtocol) WriteMapEnd() error { + if e := p.OutputObjectEnd(); e != nil { + return e + } + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteListEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { + return p.OutputElemListBegin(elemType, size) +} + +func (p *TJSONProtocol) WriteSetEnd() error { + return p.OutputListEnd() +} + +func (p *TJSONProtocol) WriteBool(b bool) error { + if b { + return p.WriteI32(1) + } + return p.WriteI32(0) +} + +func (p *TJSONProtocol) WriteByte(b int8) error { + return p.WriteI32(int32(b)) +} + +func (p *TJSONProtocol) WriteI16(v int16) error { + return p.WriteI32(int32(v)) +} + +func (p *TJSONProtocol) WriteI32(v int32) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteI64(v int64) error { + return p.OutputI64(int64(v)) +} + +func (p *TJSONProtocol) WriteDouble(v float64) error { + return p.OutputF64(v) +} + +func (p *TJSONProtocol) WriteString(v string) error { + return p.OutputString(v) +} + +func (p *TJSONProtocol) WriteBinary(v []byte) error { + // JSON library only takes in a string, + // not an arbitrary byte array, to ensure bytes are transmitted + // efficiently we must convert this into a valid JSON string + // therefore we use base64 encoding to avoid excessive escaping/quoting + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + 
writer := base64.NewEncoder(base64.StdEncoding, p.writer) + if _, e := writer.Write(v); e != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + return NewTProtocolException(e) + } + if e := writer.Close(); e != nil { + return NewTProtocolException(e) + } + if _, e := p.write(JSON_QUOTE_BYTES); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +// Reading methods. +func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + version, err := p.ReadI32() + if err != nil { + return name, typeId, seqId, err + } + if version != THRIFT_JSON_PROTOCOL_VERSION { + e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) + return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) + + } + if name, err = p.ReadString(); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte() + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TJSONProtocol) ReadMessageEnd() error { + err := p.ParseListEnd() + return err +} + +func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TJSONProtocol) ReadStructEnd() error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { + b, _ := p.reader.Peek(1) + if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { + return "", STOP, -1, nil + } + fieldId, err := p.ReadI16() + if err != nil { + return "", STOP, fieldId, err + } + if _, err = p.ParseObjectStart(); err != nil { + return "", STOP, fieldId, err + } + sType, err := p.ReadString() + 
if err != nil { + return "", STOP, fieldId, err + } + fType, err := p.StringToTypeId(sType) + return "", fType, fieldId, err +} + +func (p *TJSONProtocol) ReadFieldEnd() error { + return p.ParseObjectEnd() +} + +func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + sKeyType, e := p.ReadString() + if e != nil { + return keyType, valueType, size, e + } + keyType, e = p.StringToTypeId(sKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + sValueType, e := p.ReadString() + if e != nil { + return keyType, valueType, size, e + } + valueType, e = p.StringToTypeId(sValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, e := p.ReadI64() + if e != nil { + return keyType, valueType, size, e + } + size = int(iSize) + + _, e = p.ParseObjectStart() + return keyType, valueType, size, e +} + +func (p *TJSONProtocol) ReadMapEnd() error { + e := p.ParseObjectEnd() + if e != nil { + return e + } + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadListEnd() error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TJSONProtocol) ReadSetEnd() error { + return p.ParseListEnd() +} + +func (p *TJSONProtocol) ReadBool() (bool, error) { + value, err := p.ReadI32() + return (value != 0), err +} + +func (p *TJSONProtocol) ReadByte() (int8, error) { + v, err := p.ReadI64() + return int8(v), err +} + +func (p *TJSONProtocol) ReadI16() (int16, error) { + v, err := p.ReadI64() + return int16(v), err +} + +func (p *TJSONProtocol) ReadI32() (int32, error) { + v, err := p.ReadI64() + return int32(v), err +} + +func (p *TJSONProtocol) ReadI64() (int64, 
error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TJSONProtocol) ReadDouble() (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TJSONProtocol) ReadString() (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) ReadBinary() ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TJSONProtocol) Flush(ctx context.Context) (err error) 
{ + err = p.writer.Flush() + if err == nil { + err = p.trans.Flush(ctx) + } + return NewTProtocolException(err) +} + +func (p *TJSONProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.WriteString(s); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + sElemType, err := p.ReadString() + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + sElemType, err := p.ReadString() + if err != nil { + return VOID, size, err + } + elemType, err = p.StringToTypeId(sElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { + if e := p.OutputListBegin(); e != nil { + return e + } + s, e1 := p.TypeIdToString(elemType) + if e1 != nil { + return e1 + } + if e := p.OutputString(s); e != nil { + return e + } + if e := p.OutputI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { + switch byte(fieldType) { + case BOOL: + return "tf", nil + case BYTE: + return "i8", 
nil + case I16: + return "i16", nil + case I32: + return "i32", nil + case I64: + return "i64", nil + case DOUBLE: + return "dbl", nil + case STRING: + return "str", nil + case STRUCT: + return "rec", nil + case MAP: + return "map", nil + case SET: + return "set", nil + case LIST: + return "lst", nil + } + + e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { + switch fieldType { + case "tf": + return TType(BOOL), nil + case "i8": + return TType(BYTE), nil + case "i16": + return TType(I16), nil + case "i32": + return TType(I32), nil + case "i64": + return TType(I64), nil + case "dbl": + return TType(DOUBLE), nil + case "str": + return TType(STRING), nil + case "rec": + return TType(STRUCT), nil + case "map": + return TType(MAP), nil + case "set": + return TType(SET), nil + case "lst": + return TType(LIST), nil + } + + e := fmt.Errorf("Unknown type identifier: %s", fieldType) + return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go new file mode 100644 index 000000000..5936d2730 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go @@ -0,0 +1,80 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "bytes" + "context" +) + +// Memory buffer-based implementation of the TTransport interface. +type TMemoryBuffer struct { + *bytes.Buffer + size int +} + +type TMemoryBufferTransportFactory struct { + size int +} + +func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if trans != nil { + t, ok := trans.(*TMemoryBuffer) + if ok && t.size > 0 { + return NewTMemoryBufferLen(t.size), nil + } + } + return NewTMemoryBufferLen(p.size), nil +} + +func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { + return &TMemoryBufferTransportFactory{size: size} +} + +func NewTMemoryBuffer() *TMemoryBuffer { + return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} +} + +func NewTMemoryBufferLen(size int) *TMemoryBuffer { + buf := make([]byte, 0, size) + return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} +} + +func (p *TMemoryBuffer) IsOpen() bool { + return true +} + +func (p *TMemoryBuffer) Open() error { + return nil +} + +func (p *TMemoryBuffer) Close() error { + p.Buffer.Reset() + return nil +} + +// Flushing a memory buffer is a no-op +func (p *TMemoryBuffer) Flush(ctx context.Context) error { + return nil +} + +func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { + return uint64(p.Buffer.Len()) +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go new file mode 100644 index 000000000..25ab2e98a --- /dev/null +++ 
b/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go @@ -0,0 +1,31 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Message type constants in the Thrift protocol. +type TMessageType int32 + +const ( + INVALID_TMESSAGE_TYPE TMessageType = 0 + CALL TMessageType = 1 + REPLY TMessageType = 2 + EXCEPTION TMessageType = 3 + ONEWAY TMessageType = 4 +) diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go new file mode 100644 index 000000000..d028a30b3 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go @@ -0,0 +1,170 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "fmt" + "strings" +) + +/* +TMultiplexedProtocol is a protocol-independent concrete decorator +that allows a Thrift client to communicate with a multiplexing Thrift server, +by prepending the service name to the function name during function calls. + +NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request +from a multiplexing client. + +This example uses a single socket transport to invoke two services: + +socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) +transport := thrift.NewTFramedTransport(socket) +protocol := thrift.NewTBinaryProtocolTransport(transport) + +mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") +service := Calculator.NewCalculatorClient(mp) + +mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") +service2 := WeatherReport.NewWeatherReportClient(mp2) + +err := transport.Open() +if err != nil { + t.Fatal("Unable to open client socket", err) +} + +fmt.Println(service.Add(2,2)) +fmt.Println(service2.GetTemperature()) +*/ + +type TMultiplexedProtocol struct { + TProtocol + serviceName string +} + +const MULTIPLEXED_SEPARATOR = ":" + +func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { + return &TMultiplexedProtocol{ + TProtocol: protocol, + serviceName: serviceName, + } +} + +func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { + if typeId == CALL || typeId == ONEWAY { + return 
t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) + } else { + return t.TProtocol.WriteMessageBegin(name, typeId, seqid) + } +} + +/* +TMultiplexedProcessor is a TProcessor allowing +a single TServer to provide multiple services. + +To do so, you instantiate the processor and then register additional +processors with it, as shown in the following example: + +var processor = thrift.NewTMultiplexedProcessor() + +firstProcessor := +processor.RegisterProcessor("FirstService", firstProcessor) + +processor.registerProcessor( + "Calculator", + Calculator.NewCalculatorProcessor(&CalculatorHandler{}), +) + +processor.registerProcessor( + "WeatherReport", + WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), +) + +serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) +if err != nil { + t.Fatal("Unable to create server socket", err) +} +server := thrift.NewTSimpleServer2(processor, serverTransport) +server.Serve(); +*/ + +type TMultiplexedProcessor struct { + serviceProcessorMap map[string]TProcessor + DefaultProcessor TProcessor +} + +func NewTMultiplexedProcessor() *TMultiplexedProcessor { + return &TMultiplexedProcessor{ + serviceProcessorMap: make(map[string]TProcessor), + } +} + +func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { + t.DefaultProcessor = processor +} + +func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { + if t.serviceProcessorMap == nil { + t.serviceProcessorMap = make(map[string]TProcessor) + } + t.serviceProcessorMap[name] = processor +} + +func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { + name, typeId, seqid, err := in.ReadMessageBegin() + if err != nil { + return false, err + } + if typeId != CALL && typeId != ONEWAY { + return false, fmt.Errorf("Unexpected message type %v", typeId) + } + //extract the service name + v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) + if 
len(v) != 2 { + if t.DefaultProcessor != nil { + smb := NewStoredMessageProtocol(in, name, typeId, seqid) + return t.DefaultProcessor.Process(ctx, smb, out) + } + return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) + } + actualProcessor, ok := t.serviceProcessorMap[v[0]] + if !ok { + return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) + } + smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) + return actualProcessor.Process(ctx, smb, out) +} + +//Protocol that use stored message for ReadMessageBegin +type storedMessageProtocol struct { + TProtocol + name string + typeId TMessageType + seqid int32 +} + +func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { + return &storedMessageProtocol{protocol, name, typeId, seqid} +} + +func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { + return s.name, s.typeId, s.seqid, nil +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go new file mode 100644 index 000000000..aa8daa9b5 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go @@ -0,0 +1,164 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "math" + "strconv" +) + +type Numeric interface { + Int64() int64 + Int32() int32 + Int16() int16 + Byte() byte + Int() int + Float64() float64 + Float32() float32 + String() string + isNull() bool +} + +type numeric struct { + iValue int64 + dValue float64 + sValue string + isNil bool +} + +var ( + INFINITY Numeric + NEGATIVE_INFINITY Numeric + NAN Numeric + ZERO Numeric + NUMERIC_NULL Numeric +) + +func NewNumericFromDouble(dValue float64) Numeric { + if math.IsInf(dValue, 1) { + return INFINITY + } + if math.IsInf(dValue, -1) { + return NEGATIVE_INFINITY + } + if math.IsNaN(dValue) { + return NAN + } + iValue := int64(dValue) + sValue := strconv.FormatFloat(dValue, 'g', 10, 64) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI64(iValue int64) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromI32(iValue int32) Numeric { + dValue := float64(iValue) + sValue := string(iValue) + isNil := false + return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromString(sValue string) Numeric { + if sValue == INFINITY.String() { + return INFINITY + } + if sValue == NEGATIVE_INFINITY.String() { + return NEGATIVE_INFINITY + } + if sValue == NAN.String() { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + 
isNil := len(sValue) == 0 + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} +} + +func NewNumericFromJSONString(sValue string, isNull bool) Numeric { + if isNull { + return NewNullNumeric() + } + if sValue == JSON_INFINITY { + return INFINITY + } + if sValue == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY + } + if sValue == JSON_NAN { + return NAN + } + iValue, _ := strconv.ParseInt(sValue, 10, 64) + dValue, _ := strconv.ParseFloat(sValue, 64) + return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} +} + +func NewNullNumeric() Numeric { + return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} +} + +func (p *numeric) Int64() int64 { + return p.iValue +} + +func (p *numeric) Int32() int32 { + return int32(p.iValue) +} + +func (p *numeric) Int16() int16 { + return int16(p.iValue) +} + +func (p *numeric) Byte() byte { + return byte(p.iValue) +} + +func (p *numeric) Int() int { + return int(p.iValue) +} + +func (p *numeric) Float64() float64 { + return p.dValue +} + +func (p *numeric) Float32() float32 { + return float32(p.dValue) +} + +func (p *numeric) String() string { + return p.sValue +} + +func (p *numeric) isNull() bool { + return p.isNil +} + +func init() { + INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} + NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} + NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} + ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} + NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go new file mode 100644 index 000000000..8d6b2c215 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go @@ -0,0 +1,50 @@ +/* + * Licensed to the Apache Software Foundation (ASF) 
under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +/////////////////////////////////////////////////////////////////////////////// +// This file is home to helpers that convert from various base types to +// respective pointer types. This is necessary because Go does not permit +// references to constants, nor can a pointer type to base type be allocated +// and initialized in a single expression. +// +// E.g., this is not allowed: +// +// var ip *int = &5 +// +// But this *is* allowed: +// +// func IntPtr(i int) *int { return &i } +// var ip *int = IntPtr(5) +// +// Since pointers to base types are commonplace as [optional] fields in +// exported thrift structs, we factor such helpers here. 
+/////////////////////////////////////////////////////////////////////////////// + +func Float32Ptr(v float32) *float32 { return &v } +func Float64Ptr(v float64) *float64 { return &v } +func IntPtr(v int) *int { return &v } +func Int32Ptr(v int32) *int32 { return &v } +func Int64Ptr(v int64) *int64 { return &v } +func StringPtr(v string) *string { return &v } +func Uint32Ptr(v uint32) *uint32 { return &v } +func Uint64Ptr(v uint64) *uint64 { return &v } +func BoolPtr(v bool) *bool { return &v } +func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go new file mode 100644 index 000000000..e4b132b30 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import "context" + +// A processor is a generic object which operates upon an input stream and +// writes to some output stream. 
+type TProcessor interface { + Process(ctx context.Context, in, out TProtocol) (bool, TException) +} + +type TProcessorFunction interface { + Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) +} + +// The default processor factory just returns a singleton +// instance. +type TProcessorFactory interface { + GetProcessor(trans TTransport) TProcessor +} + +type tProcessorFactory struct { + processor TProcessor +} + +func NewTProcessorFactory(p TProcessor) TProcessorFactory { + return &tProcessorFactory{processor: p} +} + +func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { + return p.processor +} + +/** + * The default processor factory just returns a singleton + * instance. + */ +type TProcessorFunctionFactory interface { + GetProcessorFunction(trans TTransport) TProcessorFunction +} + +type tProcessorFunctionFactory struct { + processor TProcessorFunction +} + +func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { + return &tProcessorFunctionFactory{processor: p} +} + +func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { + return p.processor +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go new file mode 100644 index 000000000..615b7a4a8 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "errors" + "fmt" +) + +const ( + VERSION_MASK = 0xffff0000 + VERSION_1 = 0x80010000 +) + +type TProtocol interface { + WriteMessageBegin(name string, typeId TMessageType, seqid int32) error + WriteMessageEnd() error + WriteStructBegin(name string) error + WriteStructEnd() error + WriteFieldBegin(name string, typeId TType, id int16) error + WriteFieldEnd() error + WriteFieldStop() error + WriteMapBegin(keyType TType, valueType TType, size int) error + WriteMapEnd() error + WriteListBegin(elemType TType, size int) error + WriteListEnd() error + WriteSetBegin(elemType TType, size int) error + WriteSetEnd() error + WriteBool(value bool) error + WriteByte(value int8) error + WriteI16(value int16) error + WriteI32(value int32) error + WriteI64(value int64) error + WriteDouble(value float64) error + WriteString(value string) error + WriteBinary(value []byte) error + + ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) + ReadMessageEnd() error + ReadStructBegin() (name string, err error) + ReadStructEnd() error + ReadFieldBegin() (name string, typeId TType, id int16, err error) + ReadFieldEnd() error + ReadMapBegin() (keyType TType, valueType TType, size int, err error) + ReadMapEnd() error + ReadListBegin() (elemType TType, size int, err error) + ReadListEnd() error + ReadSetBegin() (elemType TType, size int, err error) + ReadSetEnd() error + ReadBool() (value bool, err error) + ReadByte() (value int8, err error) + ReadI16() (value int16, err error) + ReadI32() (value int32, err 
error) + ReadI64() (value int64, err error) + ReadDouble() (value float64, err error) + ReadString() (value string, err error) + ReadBinary() (value []byte, err error) + + Skip(fieldType TType) (err error) + Flush(ctx context.Context) (err error) + + Transport() TTransport +} + +// The maximum recursive depth the skip() function will traverse +const DEFAULT_RECURSION_DEPTH = 64 + +// Skips over the next data element from the provided input TProtocol object. +func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { + return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) +} + +// Skips over the next data element from the provided input TProtocol object. +func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { + + if maxDepth <= 0 { + return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) + } + + switch fieldType { + case STOP: + return + case BOOL: + _, err = self.ReadBool() + return + case BYTE: + _, err = self.ReadByte() + return + case I16: + _, err = self.ReadI16() + return + case I32: + _, err = self.ReadI32() + return + case I64: + _, err = self.ReadI64() + return + case DOUBLE: + _, err = self.ReadDouble() + return + case STRING: + _, err = self.ReadString() + return + case STRUCT: + if _, err = self.ReadStructBegin(); err != nil { + return err + } + for { + _, typeId, _, _ := self.ReadFieldBegin() + if typeId == STOP { + break + } + err := Skip(self, typeId, maxDepth-1) + if err != nil { + return err + } + self.ReadFieldEnd() + } + return self.ReadStructEnd() + case MAP: + keyType, valueType, size, err := self.ReadMapBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, keyType, maxDepth-1) + if err != nil { + return err + } + self.Skip(valueType) + } + return self.ReadMapEnd() + case SET: + elemType, size, err := self.ReadSetBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, elemType, maxDepth-1) + if err != nil { + return err 
+ } + } + return self.ReadSetEnd() + case LIST: + elemType, size, err := self.ReadListBegin() + if err != nil { + return err + } + for i := 0; i < size; i++ { + err := Skip(self, elemType, maxDepth-1) + if err != nil { + return err + } + } + return self.ReadListEnd() + default: + return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) + } + return nil +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go new file mode 100644 index 000000000..29ab75d92 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go @@ -0,0 +1,77 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "encoding/base64" +) + +// Thrift Protocol exception +type TProtocolException interface { + TException + TypeId() int +} + +const ( + UNKNOWN_PROTOCOL_EXCEPTION = 0 + INVALID_DATA = 1 + NEGATIVE_SIZE = 2 + SIZE_LIMIT = 3 + BAD_VERSION = 4 + NOT_IMPLEMENTED = 5 + DEPTH_LIMIT = 6 +) + +type tProtocolException struct { + typeId int + message string +} + +func (p *tProtocolException) TypeId() int { + return p.typeId +} + +func (p *tProtocolException) String() string { + return p.message +} + +func (p *tProtocolException) Error() string { + return p.message +} + +func NewTProtocolException(err error) TProtocolException { + if err == nil { + return nil + } + if e, ok := err.(TProtocolException); ok { + return e + } + if _, ok := err.(base64.CorruptInputError); ok { + return &tProtocolException{INVALID_DATA, err.Error()} + } + return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} +} + +func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { + if err == nil { + return nil + } + return &tProtocolException{errType, err.Error()} +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go new file mode 100644 index 000000000..c40f796d8 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go @@ -0,0 +1,25 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory interface for constructing protocol instances. +type TProtocolFactory interface { + GetProtocol(trans TTransport) TProtocol +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go new file mode 100644 index 000000000..4025bebea --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go @@ -0,0 +1,68 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import "io" + +type RichTransport struct { + TTransport +} + +// Wraps Transport to provide TRichTransport interface +func NewTRichTransport(trans TTransport) *RichTransport { + return &RichTransport{trans} +} + +func (r *RichTransport) ReadByte() (c byte, err error) { + return readByte(r.TTransport) +} + +func (r *RichTransport) WriteByte(c byte) error { + return writeByte(r.TTransport, c) +} + +func (r *RichTransport) WriteString(s string) (n int, err error) { + return r.Write([]byte(s)) +} + +func (r *RichTransport) RemainingBytes() (num_bytes uint64) { + return r.TTransport.RemainingBytes() +} + +func readByte(r io.Reader) (c byte, err error) { + v := [1]byte{0} + n, err := r.Read(v[0:1]) + if n > 0 && (err == nil || err == io.EOF) { + return v[0], nil + } + if n > 0 && err != nil { + return v[0], err + } + if err != nil { + return 0, err + } + return v[0], nil +} + +func writeByte(w io.Writer, c byte) error { + v := [1]byte{c} + _, err := w.Write(v[0:1]) + return err +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go new file mode 100644 index 000000000..1ff4d3754 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go @@ -0,0 +1,79 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" +) + +type TSerializer struct { + Transport *TMemoryBuffer + Protocol TProtocol +} + +type TStruct interface { + Write(p TProtocol) error + Read(p TProtocol) error +} + +func NewTSerializer() *TSerializer { + transport := NewTMemoryBufferLen(1024) + protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) + + return &TSerializer{ + transport, + protocol} +} + +func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + if err = t.Transport.Flush(ctx); err != nil { + return + } + + return t.Transport.String(), nil +} + +func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { + t.Transport.Reset() + + if err = msg.Write(t.Protocol); err != nil { + return + } + + if err = t.Protocol.Flush(ctx); err != nil { + return + } + + if err = t.Transport.Flush(ctx); err != nil { + return + } + + b = append(b, t.Transport.Bytes()...) + return +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go new file mode 100644 index 000000000..f813fa353 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go @@ -0,0 +1,35 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +type TServer interface { + ProcessorFactory() TProcessorFactory + ServerTransport() TServerTransport + InputTransportFactory() TTransportFactory + OutputTransportFactory() TTransportFactory + InputProtocolFactory() TProtocolFactory + OutputProtocolFactory() TProtocolFactory + + // Starts the server + Serve() error + // Stops the server. This is optional on a per-implementation basis. Not + // all servers are required to be cleanly stoppable. + Stop() error +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go new file mode 100644 index 000000000..80313c4be --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go @@ -0,0 +1,134 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "net" + "sync" + "time" +) + +type TServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + + // Protects the interrupted value to make it thread safe. + mu sync.RWMutex + interrupted bool +} + +func NewTServerSocket(listenAddr string) (*TServerSocket, error) { + return NewTServerSocketTimeout(listenAddr, 0) +} + +func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil +} + +// Creates a TServerSocket from a net.Addr +func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { + return &TServerSocket{addr: addr, clientTimeout: clientTimeout} +} + +func (p *TServerSocket) Listen() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return nil + } + l, err := net.Listen(p.addr.Network(), p.addr.String()) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TServerSocket) Accept() (TTransport, error) { + p.mu.RLock() + interrupted := p.interrupted + p.mu.RUnlock() + + if interrupted { + return nil, errTransportInterrupted + } + + listener := p.listener + if listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + + conn, err := listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return 
NewTSocketFromConnTimeout(conn, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TServerSocket) Open() error { + p.mu.Lock() + defer p.mu.Unlock() + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TServerSocket) Addr() net.Addr { + if p.listener != nil { + return p.listener.Addr() + } + return p.addr +} + +func (p *TServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TServerSocket) Interrupt() error { + p.mu.Lock() + defer p.mu.Unlock() + p.interrupted = true + p.Close() + + return nil +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go new file mode 100644 index 000000000..51c40b64a --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go @@ -0,0 +1,34 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Server transport. Object which provides client transports. +type TServerTransport interface { + Listen() error + Accept() (TTransport, error) + Close() error + + // Optional method implementation. This signals to the server transport + // that it should break out of any accept() or listen() that it is currently + // blocked on. This method, if implemented, MUST be thread safe, as it may + // be called from a different thread context than the other TServerTransport + // methods. + Interrupt() error +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go new file mode 100644 index 000000000..2e8a71112 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go @@ -0,0 +1,1338 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
// _ParseContext tracks where the simple-JSON reader/writer currently is in
// the document: at top level, inside a list, or inside an object expecting
// a key or a value next.
type _ParseContext int

const (
	_CONTEXT_IN_TOPLEVEL          _ParseContext = iota + 1 // 1
	_CONTEXT_IN_LIST_FIRST                                 // 2
	_CONTEXT_IN_LIST                                       // 3
	_CONTEXT_IN_OBJECT_FIRST                               // 4
	_CONTEXT_IN_OBJECT_NEXT_KEY                            // 5
	_CONTEXT_IN_OBJECT_NEXT_VALUE                          // 6
)

// String renders the context name for diagnostics; unknown values map to
// "UNKNOWN-PARSE-CONTEXT".
func (p _ParseContext) String() string {
	names := [...]string{
		"TOPLEVEL",
		"LIST-FIRST",
		"LIST",
		"OBJECT-FIRST",
		"OBJECT-NEXT-KEY",
		"OBJECT-NEXT-VALUE",
	}
	if p < _CONTEXT_IN_TOPLEVEL || int(p) > len(names) {
		return "UNKNOWN-PARSE-CONTEXT"
	}
	return names[p-1]
}
// TSimpleJSONProtocol is a JSON protocol implementation for thrift.
//
// This protocol produces/consumes a simple output format
// suitable for parsing by scripting languages. It should not be
// confused with the full-featured TJSONProtocol.
type TSimpleJSONProtocol struct {
	trans TTransport

	// Nesting-context stacks (int-encoded _ParseContext values) for the
	// read side and the write side respectively.
	parseContextStack []int
	dumpContext       []int

	writer *bufio.Writer
	reader *bufio.Reader
}

// NewTSimpleJSONProtocol constructs a TSimpleJSONProtocol that reads from
// and writes to t through buffered wrappers. Both context stacks start at
// top level.
func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol {
	v := &TSimpleJSONProtocol{trans: t,
		writer: bufio.NewWriter(t),
		reader: bufio.NewReader(t),
	}
	v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL))
	v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL))
	return v
}

// TSimpleJSONProtocolFactory is the factory for TSimpleJSONProtocol.
type TSimpleJSONProtocolFactory struct{}

// GetProtocol returns a new TSimpleJSONProtocol wrapping trans.
func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol {
	return NewTSimpleJSONProtocol(trans)
}

// NewTSimpleJSONProtocolFactory constructs a TSimpleJSONProtocolFactory.
func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory {
	return &TSimpleJSONProtocolFactory{}
}

// JSON punctuation and literal tokens shared by the writer and parser;
// initialized once in init below.
var (
	JSON_COMMA                   []byte
	JSON_COLON                   []byte
	JSON_LBRACE                  []byte
	JSON_RBRACE                  []byte
	JSON_LBRACKET                []byte
	JSON_RBRACKET                []byte
	JSON_QUOTE                   byte
	JSON_QUOTE_BYTES             []byte
	JSON_NULL                    []byte
	JSON_TRUE                    []byte
	JSON_FALSE                   []byte
	JSON_INFINITY                string
	JSON_NEGATIVE_INFINITY       string
	JSON_NAN                     string
	JSON_INFINITY_BYTES          []byte
	JSON_NEGATIVE_INFINITY_BYTES []byte
	JSON_NAN_BYTES               []byte
	json_nonbase_map_elem_bytes  []byte
)

func init() {
	JSON_COMMA = []byte{','}
	JSON_COLON = []byte{':'}
	JSON_LBRACE = []byte{'{'}
	JSON_RBRACE = []byte{'}'}
	JSON_LBRACKET = []byte{'['}
	JSON_RBRACKET = []byte{']'}
	JSON_QUOTE = '"'
	JSON_QUOTE_BYTES = []byte{'"'}
	JSON_NULL = []byte{'n', 'u', 'l', 'l'}
	JSON_TRUE = []byte{'t', 'r', 'u', 'e'}
	JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'}
	JSON_INFINITY = "Infinity"
	JSON_NEGATIVE_INFINITY = "-Infinity"
	JSON_NAN = "NaN"
	JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
	JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'}
	JSON_NAN_BYTES = []byte{'N', 'a', 'N'}
	json_nonbase_map_elem_bytes = []byte{']', ',', '['}
}

// jsonQuote returns s as a quoted, escaped JSON string literal.
func jsonQuote(s string) string {
	b, _ := json.Marshal(s)
	s1 := string(b)
	return s1
}

// jsonUnquote parses a JSON string literal; the bool reports success.
func jsonUnquote(s string) (string, bool) {
	s1 := new(string)
	err := json.Unmarshal([]byte(s), s1)
	return *s1, err == nil
}

// mismatch builds a parse error describing an unexpected token.
func mismatch(expected, actual string) error {
	return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual)
}

// WriteMessageBegin emits a message header as a JSON list prefix:
// [name, typeId, seqId, ...
func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error {
	p.resetContextStack() // THRIFT-3735
	if e := p.OutputListBegin(); e != nil {
		return e
	}
	if e := p.WriteString(name); e != nil {
		return e
	}
	if e := p.WriteByte(int8(typeId)); e != nil {
		return e
	}
	if e := p.WriteI32(seqId); e != nil {
		return e
	}
	return nil
}

// WriteMessageEnd closes the message list opened by WriteMessageBegin.
func (p *TSimpleJSONProtocol) WriteMessageEnd() error {
	return p.OutputListEnd()
}

// WriteStructBegin opens a JSON object for the struct; the name is unused.
func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error {
	if e := p.OutputObjectBegin(); e != nil {
		return e
	}
	return nil
}

// WriteStructEnd closes the struct's JSON object.
func (p *TSimpleJSONProtocol) WriteStructEnd() error {
	return p.OutputObjectEnd()
}

// WriteFieldBegin emits only the field name as an object key; typeId and id
// are not written in this simple format.
func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error {
	if e := p.WriteString(name); e != nil {
		return e
	}
	return nil
}

// WriteFieldEnd is a no-op in this simple format.
func (p *TSimpleJSONProtocol) WriteFieldEnd() error {
	//return p.OutputListEnd()
	return nil
}

// WriteFieldStop is a no-op; the closing brace terminates the fields.
func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil }

// WriteMapBegin encodes a map as a JSON list: [keyType, valueType, size, ...
func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error {
	if e := p.OutputListBegin(); e != nil {
		return e
	}
	if e := p.WriteByte(int8(keyType)); e != nil {
		return e
	}
	if e := p.WriteByte(int8(valueType)); e != nil {
		return e
	}
	return p.WriteI32(int32(size))
}

// WriteMapEnd closes the map's JSON list.
func (p *TSimpleJSONProtocol) WriteMapEnd() error {
	return p.OutputListEnd()
}

// WriteListBegin encodes a list header as [elemType, size, ...
func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error {
	return p.OutputElemListBegin(elemType, size)
}

// WriteListEnd closes the list.
func (p *TSimpleJSONProtocol) WriteListEnd() error {
	return p.OutputListEnd()
}

// WriteSetBegin encodes a set exactly like a list.
func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error {
	return p.OutputElemListBegin(elemType, size)
}

// WriteSetEnd closes the set.
func (p *TSimpleJSONProtocol) WriteSetEnd() error {
	return p.OutputListEnd()
}

// WriteBool emits a JSON boolean.
func (p *TSimpleJSONProtocol) WriteBool(b bool) error {
	return p.OutputBool(b)
}

// WriteByte emits the byte as a JSON number.
func (p *TSimpleJSONProtocol) WriteByte(b int8) error {
	return p.WriteI32(int32(b))
}

// WriteI16 emits the int16 as a JSON number.
func (p *TSimpleJSONProtocol) WriteI16(v int16) error {
	return p.WriteI32(int32(v))
}

// WriteI32 emits the int32 as a JSON number.
func (p *TSimpleJSONProtocol) WriteI32(v int32) error {
	return p.OutputI64(int64(v))
}

// WriteI64 emits the int64 as a JSON number.
func (p *TSimpleJSONProtocol) WriteI64(v int64) error {
	return p.OutputI64(int64(v))
}

// WriteDouble emits the float64 as a JSON number (Infinity/NaN become the
// quoted sentinels handled by OutputF64).
func (p *TSimpleJSONProtocol) WriteDouble(v float64) error {
	return p.OutputF64(v)
}

// WriteString emits a quoted, escaped JSON string.
func (p *TSimpleJSONProtocol) WriteString(v string) error {
	return p.OutputString(v)
}

// WriteBinary emits v as a quoted base64 string.
func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error {
	// JSON library only takes in a string,
	// not an arbitrary byte array, to ensure bytes are transmitted
	// efficiently we must convert this into a valid JSON string
	// therefore we use base64 encoding to avoid excessive escaping/quoting
	if e := p.OutputPreValue(); e != nil {
		return e
	}
	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
		return NewTProtocolException(e)
	}
	writer := base64.NewEncoder(base64.StdEncoding, p.writer)
	if _, e := writer.Write(v); e != nil {
		p.writer.Reset(p.trans) // THRIFT-3735
		return NewTProtocolException(e)
	}
	if e := writer.Close(); e != nil {
		return NewTProtocolException(e)
	}
	if _, e := p.write(JSON_QUOTE_BYTES); e != nil {
		return NewTProtocolException(e)
	}
	return p.OutputPostValue()
}

// Reading methods.
+func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { + p.resetContextStack() // THRIFT-3735 + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, typeId, seqId, err + } + if name, err = p.ReadString(); err != nil { + return name, typeId, seqId, err + } + bTypeId, err := p.ReadByte() + typeId = TMessageType(bTypeId) + if err != nil { + return name, typeId, seqId, err + } + if seqId, err = p.ReadI32(); err != nil { + return name, typeId, seqId, err + } + return name, typeId, seqId, nil +} + +func (p *TSimpleJSONProtocol) ReadMessageEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { + _, err = p.ParseObjectStart() + return "", err +} + +func (p *TSimpleJSONProtocol) ReadStructEnd() error { + return p.ParseObjectEnd() +} + +func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { + if err := p.ParsePreValue(); err != nil { + return "", STOP, 0, err + } + b, _ := p.reader.Peek(1) + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return "", STOP, 0, nil + case JSON_QUOTE: + p.reader.ReadByte() + name, err := p.ParseStringBody() + // simplejson is not meant to be read back into thrift + // - see http://wiki.apache.org/thrift/ThriftUsageJava + // - use JSON instead + if err != nil { + return name, STOP, 0, err + } + return name, STOP, -1, p.ParsePostValue() + /* + if err = p.ParsePostValue(); err != nil { + return name, STOP, 0, err + } + if isNull, err := p.ParseListBegin(); isNull || err != nil { + return name, STOP, 0, err + } + bType, err := p.ReadByte() + thetype := TType(bType) + if err != nil { + return name, thetype, 0, err + } + id, err := p.ReadI16() + return name, thetype, id, err + */ + } + e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) + return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return "", STOP, 0, NewTProtocolException(io.EOF) 
+} + +func (p *TSimpleJSONProtocol) ReadFieldEnd() error { + return nil + //return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, VOID, 0, e + } + + // read keyType + bKeyType, e := p.ReadByte() + keyType = TType(bKeyType) + if e != nil { + return keyType, valueType, size, e + } + + // read valueType + bValueType, e := p.ReadByte() + valueType = TType(bValueType) + if e != nil { + return keyType, valueType, size, e + } + + // read size + iSize, err := p.ReadI64() + size = int(iSize) + return keyType, valueType, size, err +} + +func (p *TSimpleJSONProtocol) ReadMapEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadListEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { + return p.ParseElemListBegin() +} + +func (p *TSimpleJSONProtocol) ReadSetEnd() error { + return p.ParseListEnd() +} + +func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { + var value bool + + if err := p.ParsePreValue(); err != nil { + return value, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 { + switch f[0] { + case JSON_TRUE[0]: + b := make([]byte, len(JSON_TRUE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_TRUE) { + value = true + } else { + e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_FALSE[0]: + b := make([]byte, len(JSON_FALSE)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_FALSE) { + value = false + } else { + e := fmt.Errorf("Expected \"false\" but 
found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + break + case JSON_NULL[0]: + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return false, NewTProtocolException(err) + } + if string(b) == string(JSON_NULL) { + value = false + } else { + e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + default: + e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) + return value, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + return value, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { + v, err := p.ReadI64() + return int8(v), err +} + +func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { + v, err := p.ReadI64() + return int16(v), err +} + +func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { + v, err := p.ReadI64() + return int32(v), err +} + +func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { + v, _, err := p.ParseI64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { + v, _, err := p.ParseF64() + return v, err +} + +func (p *TSimpleJSONProtocol) ReadString() (string, error) { + var v string + if err := p.ParsePreValue(); err != nil { + return v, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseStringBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return 
v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { + var v []byte + if err := p.ParsePreValue(); err != nil { + return nil, err + } + f, _ := p.reader.Peek(1) + if len(f) > 0 && f[0] == JSON_QUOTE { + p.reader.ReadByte() + value, err := p.ParseBase64EncodedBody() + v = value + if err != nil { + return v, err + } + } else if len(f) > 0 && f[0] == JSON_NULL[0] { + b := make([]byte, len(JSON_NULL)) + _, err := p.reader.Read(b) + if err != nil { + return v, NewTProtocolException(err) + } + if string(b) != string(JSON_NULL) { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } else { + e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) + return v, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + + return v, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { + return NewTProtocolException(p.writer.Flush()) +} + +func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { + return SkipDefaultDepth(p, fieldType) +} + +func (p *TSimpleJSONProtocol) Transport() TTransport { + return p.trans +} + +func (p *TSimpleJSONProtocol) OutputPreValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: + if _, e := p.write(JSON_COMMA); e != nil { + return NewTProtocolException(e) + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if _, e := p.write(JSON_COLON); e != nil { + return NewTProtocolException(e) + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputPostValue() error { + cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, 
int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) + break + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputBool(value bool) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if value { + v = string(JSON_TRUE) + } else { + v = string(JSON_FALSE) + } + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputNull() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_NULL); e != nil { + return NewTProtocolException(e) + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputF64(value float64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + var v string + if math.IsNaN(value) { + v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) + } else if math.IsInf(value, 1) { + v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) + } else if math.IsInf(value, -1) { + v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) + } else { + v = strconv.FormatFloat(value, 'g', -1, 64) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = string(JSON_QUOTE) + v + string(JSON_QUOTE) + default: + } + } + if e := p.OutputStringData(v); e != nil { + return e + } + return 
p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputI64(value int64) error { + if e := p.OutputPreValue(); e != nil { + return e + } + v := strconv.FormatInt(value, 10) + switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + v = jsonQuote(v) + default: + } + if e := p.OutputStringData(v); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputString(s string) error { + if e := p.OutputPreValue(); e != nil { + return e + } + if e := p.OutputStringData(jsonQuote(s)); e != nil { + return e + } + return p.OutputPostValue() +} + +func (p *TSimpleJSONProtocol) OutputStringData(s string) error { + _, e := p.write([]byte(s)) + return NewTProtocolException(e) +} + +func (p *TSimpleJSONProtocol) OutputObjectBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputObjectEnd() error { + if _, e := p.write(JSON_RBRACE); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputListBegin() error { + if e := p.OutputPreValue(); e != nil { + return e + } + if _, e := p.write(JSON_LBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) + return nil +} + +func (p *TSimpleJSONProtocol) OutputListEnd() error { + if _, e := p.write(JSON_RBRACKET); e != nil { + return NewTProtocolException(e) + } + p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] + if e := p.OutputPostValue(); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { + if e := 
p.OutputListBegin(); e != nil { + return e + } + if e := p.WriteByte(int8(elemType)); e != nil { + return e + } + if e := p.WriteI64(int64(size)); e != nil { + return e + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePreValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + b, _ := p.reader.Peek(1) + switch cxt { + case _CONTEXT_IN_LIST: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACKET[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_KEY: + if len(b) > 0 { + switch b[0] { + case JSON_RBRACE[0]: + return nil + case JSON_COMMA[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + if len(b) > 0 { + switch b[0] { + case JSON_COLON[0]: + p.reader.ReadByte() + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + return nil + default: + e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParsePostValue() error { + if e := p.readNonSignificantWhitespace(); e != nil { + return NewTProtocolException(e) + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + switch cxt { + case _CONTEXT_IN_LIST_FIRST: + 
p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) + break + case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) + break + case _CONTEXT_IN_OBJECT_NEXT_VALUE: + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) + break + } + return nil +} + +func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { + for { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return nil + } + switch b[0] { + case ' ', '\r', '\n', '\t': + p.reader.ReadByte() + continue + default: + break + } + break + } + return nil +} + +func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + break + } + } + if i&0x01 == 1 { + v, ok := jsonUnquote(string(JSON_QUOTE) + line) + if !ok { + return "", NewTProtocolException(err) + } + return v, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + str := string(JSON_QUOTE) + line + s + v, ok := jsonUnquote(str) + if !ok { + e := fmt.Errorf("Unable to parse as JSON string %s", str) + return "", NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { + line, err := p.reader.ReadString(JSON_QUOTE) + if err != nil { + return "", NewTProtocolException(err) + } + l := len(line) + // count number of escapes to see if we need to keep going + i := 1 + for ; i < l; i++ { + if line[l-i-1] != '\\' { + 
break + } + } + if i&0x01 == 1 { + return line, nil + } + s, err := p.ParseQuotedStringBody() + if err != nil { + return "", NewTProtocolException(err) + } + v := line + s + return v, nil +} + +func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { + line, err := p.reader.ReadBytes(JSON_QUOTE) + if err != nil { + return line, NewTProtocolException(err) + } + line2 := line[0 : len(line)-1] + l := len(line2) + if (l % 4) != 0 { + pad := 4 - (l % 4) + fill := [...]byte{'=', '=', '='} + line2 = append(line2, fill[:pad]...) + l = len(line2) + } + output := make([]byte, base64.StdEncoding.DecodedLen(l)) + n, err := base64.StdEncoding.Decode(output, line2) + return output[0:n], NewTProtocolException(err) +} + +func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value int64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Int64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { + if err := p.ParsePreValue(); err != nil { + return 0, false, err + } + var value float64 + var isnull bool + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + isnull = true + } else { + num, err := p.readNumeric() + isnull = (num == nil) + if !isnull { + value = num.Float64() + } + if err != nil { + return value, isnull, err + } + } + return value, isnull, p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { + if err := p.ParsePreValue(); err != nil { + return false, err + } + var b []byte + b, err := p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) > 0 && b[0] == JSON_LBRACE[0] { + p.reader.ReadByte() + 
p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) + return false, nil + } else if p.safePeekContains(JSON_NULL) { + return true, nil + } + e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) + return false, NewTProtocolExceptionWithType(INVALID_DATA, e) +} + +func (p *TSimpleJSONProtocol) ParseObjectEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { + e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACE[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', '}': + break + } + } + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { + if e := p.ParsePreValue(); e != nil { + return false, e + } + var b []byte + b, err = p.reader.Peek(1) + if err != nil { + return false, err + } + if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { + p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) + p.reader.ReadByte() + isNull = false + } else if p.safePeekContains(JSON_NULL) { + isNull = true + } else { + err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) + } + return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) +} + +func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { + if isNull, e := p.ParseListBegin(); isNull || e != nil { + return VOID, 0, e + } + bElemType, err := 
p.ReadByte() + elemType = TType(bElemType) + if err != nil { + return elemType, size, err + } + nSize, err2 := p.ReadI64() + size = int(nSize) + return elemType, size, err2 +} + +func (p *TSimpleJSONProtocol) ParseListEnd() error { + if isNull, err := p.readIfNull(); isNull || err != nil { + return err + } + cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) + if cxt != _CONTEXT_IN_LIST { + e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + } + line, err := p.reader.ReadString(JSON_RBRACKET[0]) + if err != nil { + return NewTProtocolException(err) + } + for _, char := range line { + switch char { + default: + e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) + return NewTProtocolExceptionWithType(INVALID_DATA, e) + case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): + break + } + } + p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] + if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { + return nil + } + return p.ParsePostValue() +} + +func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { + e := p.readNonSignificantWhitespace() + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + b, e := p.reader.Peek(1) + if len(b) > 0 { + c := b[0] + switch c { + case JSON_NULL[0]: + buf := make([]byte, len(JSON_NULL)) + _, e := p.reader.Read(buf) + if e != nil { + return nil, VOID, NewTProtocolException(e) + } + if string(JSON_NULL) != string(buf) { + e = mismatch(string(JSON_NULL), string(buf)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return nil, VOID, nil + case JSON_QUOTE: + p.reader.ReadByte() + v, e := p.ParseStringBody() + if e != nil { + return v, UTF8, NewTProtocolException(e) + } + if v == JSON_INFINITY { + return INFINITY, DOUBLE, nil + } else if v == JSON_NEGATIVE_INFINITY { + return NEGATIVE_INFINITY, 
DOUBLE, nil + } else if v == JSON_NAN { + return NAN, DOUBLE, nil + } + return v, UTF8, nil + case JSON_TRUE[0]: + buf := make([]byte, len(JSON_TRUE)) + _, e := p.reader.Read(buf) + if e != nil { + return true, BOOL, NewTProtocolException(e) + } + if string(JSON_TRUE) != string(buf) { + e := mismatch(string(JSON_TRUE), string(buf)) + return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return true, BOOL, nil + case JSON_FALSE[0]: + buf := make([]byte, len(JSON_FALSE)) + _, e := p.reader.Read(buf) + if e != nil { + return false, BOOL, NewTProtocolException(e) + } + if string(JSON_FALSE) != string(buf) { + e := mismatch(string(JSON_FALSE), string(buf)) + return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + return false, BOOL, nil + case JSON_LBRACKET[0]: + _, e := p.reader.ReadByte() + return make([]interface{}, 0), LIST, NewTProtocolException(e) + case JSON_LBRACE[0]: + _, e := p.reader.ReadByte() + return make(map[string]interface{}), STRUCT, NewTProtocolException(e) + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: + // assume numeric + v, e := p.readNumeric() + return v, DOUBLE, e + default: + e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + e = fmt.Errorf("Cannot read a single element while parsing JSON.") + return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) + +} + +func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { + cont := true + for cont { + b, _ := p.reader.Peek(1) + if len(b) < 1 { + return false, nil + } + switch b[0] { + default: + return false, nil + case JSON_NULL[0]: + cont = false + break + case ' ', '\n', '\r', '\t': + p.reader.ReadByte() + break + } + } + if p.safePeekContains(JSON_NULL) { + p.reader.Read(make([]byte, len(JSON_NULL))) + return true, nil + } + return false, nil +} + +func (p 
*TSimpleJSONProtocol) readQuoteIfNext() { + b, _ := p.reader.Peek(1) + if len(b) > 0 && b[0] == JSON_QUOTE { + p.reader.ReadByte() + } +} + +func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { + isNull, err := p.readIfNull() + if isNull || err != nil { + return NUMERIC_NULL, err + } + hasDecimalPoint := false + nextCanBeSign := true + hasE := false + MAX_LEN := 40 + buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) + continueFor := true + inQuotes := false + for continueFor { + c, err := p.reader.ReadByte() + if err != nil { + if err == io.EOF { + break + } + return NUMERIC_NULL, NewTProtocolException(err) + } + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + buf.WriteByte(c) + nextCanBeSign = false + case '.': + if hasDecimalPoint { + e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if hasE { + e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasDecimalPoint, nextCanBeSign = true, false + case 'e', 'E': + if hasE { + e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + hasE, nextCanBeSign = true, true + case '-', '+': + if !nextCanBeSign { + e := fmt.Errorf("Negative sign within number") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + buf.WriteByte(c) + nextCanBeSign = false + case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: + p.reader.UnreadByte() + continueFor = false + case JSON_NAN[0]: + if buf.Len() == 0 { + buffer := make([]byte, len(JSON_NAN)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if 
JSON_NAN != string(buffer) { + e := mismatch(JSON_NAN, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NAN, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_INFINITY[0]: + if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { + buffer := make([]byte, len(JSON_INFINITY)) + buffer[0] = c + _, e := p.reader.Read(buffer[1:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_INFINITY != string(buffer) { + e := mismatch(JSON_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return INFINITY, nil + } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { + buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) + buffer[0] = JSON_NEGATIVE_INFINITY[0] + buffer[1] = c + _, e := p.reader.Read(buffer[2:]) + if e != nil { + return NUMERIC_NULL, NewTProtocolException(e) + } + if JSON_NEGATIVE_INFINITY != string(buffer) { + e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + if inQuotes { + p.readQuoteIfNext() + } + return NEGATIVE_INFINITY, nil + } else { + e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + case JSON_QUOTE: + if !inQuotes { + inQuotes = true + } else { + break + } + default: + e := fmt.Errorf("Unable to parse number starting with character '%c'", c) + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + } + if buf.Len() == 0 { + e := fmt.Errorf("Unable to parse number from empty string ''") + return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) + } + 
return NewNumericFromJSONString(buf.String(), false), nil +} + +// Safely peeks into the buffer, reading only what is necessary +func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { + for i := 0; i < len(b); i++ { + a, _ := p.reader.Peek(i + 1) + if len(a) == 0 || a[i] != b[i] { + return false + } + } + return true +} + +// Reset the context stack to its initial state. +func (p *TSimpleJSONProtocol) resetContextStack() { + p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} + p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} +} + +func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { + n, err := p.writer.Write(b) + if err != nil { + p.writer.Reset(p.trans) // THRIFT-3735 + } + return n, err +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go new file mode 100644 index 000000000..603580251 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go @@ -0,0 +1,227 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "log" + "runtime/debug" + "sync" + "sync/atomic" +) + +/* + * This is not a typical TSimpleServer as it is not blocked after accept a socket. + * It is more like a TThreadedServer that can handle different connections in different goroutines. + * This will work if golang user implements a conn-pool like thing in client side. + */ +type TSimpleServer struct { + closed int32 + wg sync.WaitGroup + mu sync.Mutex + + processorFactory TProcessorFactory + serverTransport TServerTransport + inputTransportFactory TTransportFactory + outputTransportFactory TTransportFactory + inputProtocolFactory TProtocolFactory + outputProtocolFactory TProtocolFactory +} + +func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) +} + +func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory4(NewTProcessorFactory(processor), + serverTransport, + transportFactory, + protocolFactory, + ) +} + +func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(NewTProcessorFactory(processor), + serverTransport, + inputTransportFactory, + outputTransportFactory, + inputProtocolFactory, + outputProtocolFactory, + ) +} + +func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + NewTTransportFactory(), + NewTTransportFactory(), + NewTBinaryProtocolFactoryDefault(), + NewTBinaryProtocolFactoryDefault(), + ) +} + +func 
NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { + return NewTSimpleServerFactory6(processorFactory, + serverTransport, + transportFactory, + transportFactory, + protocolFactory, + protocolFactory, + ) +} + +func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { + return &TSimpleServer{ + processorFactory: processorFactory, + serverTransport: serverTransport, + inputTransportFactory: inputTransportFactory, + outputTransportFactory: outputTransportFactory, + inputProtocolFactory: inputProtocolFactory, + outputProtocolFactory: outputProtocolFactory, + } +} + +func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { + return p.processorFactory +} + +func (p *TSimpleServer) ServerTransport() TServerTransport { + return p.serverTransport +} + +func (p *TSimpleServer) InputTransportFactory() TTransportFactory { + return p.inputTransportFactory +} + +func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { + return p.outputTransportFactory +} + +func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { + return p.inputProtocolFactory +} + +func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { + return p.outputProtocolFactory +} + +func (p *TSimpleServer) Listen() error { + return p.serverTransport.Listen() +} + +func (p *TSimpleServer) innerAccept() (int32, error) { + client, err := p.serverTransport.Accept() + p.mu.Lock() + defer p.mu.Unlock() + closed := atomic.LoadInt32(&p.closed) + if closed != 0 { + return closed, nil + } + if err != nil { + return 0, err + } + if client != nil { + p.wg.Add(1) + go func() { + defer p.wg.Done() + if err := p.processRequests(client); err != nil { 
+ log.Println("error processing request:", err) + } + }() + } + return 0, nil +} + +func (p *TSimpleServer) AcceptLoop() error { + for { + closed, err := p.innerAccept() + if err != nil { + return err + } + if closed != 0 { + return nil + } + } +} + +func (p *TSimpleServer) Serve() error { + err := p.Listen() + if err != nil { + return err + } + p.AcceptLoop() + return nil +} + +func (p *TSimpleServer) Stop() error { + p.mu.Lock() + defer p.mu.Unlock() + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + atomic.StoreInt32(&p.closed, 1) + p.serverTransport.Interrupt() + p.wg.Wait() + return nil +} + +func (p *TSimpleServer) processRequests(client TTransport) error { + processor := p.processorFactory.GetProcessor(client) + inputTransport, err := p.inputTransportFactory.GetTransport(client) + if err != nil { + return err + } + outputTransport, err := p.outputTransportFactory.GetTransport(client) + if err != nil { + return err + } + inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) + outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport) + defer func() { + if e := recover(); e != nil { + log.Printf("panic in processor: %s: %s", e, debug.Stack()) + } + }() + + if inputTransport != nil { + defer inputTransport.Close() + } + if outputTransport != nil { + defer outputTransport.Close() + } + for { + if atomic.LoadInt32(&p.closed) != 0 { + return nil + } + + ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol) + if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { + return nil + } else if err != nil { + return err + } + if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { + continue + } + if !ok { + break + } + } + return nil +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go new file mode 100644 index 000000000..885427965 --- /dev/null +++ 
b/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go @@ -0,0 +1,166 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "net" + "time" +) + +type TSocket struct { + conn net.Conn + addr net.Addr + timeout time.Duration +} + +// NewTSocket creates a net.Conn-backed TTransport, given a host and port +// +// Example: +// trans, err := thrift.NewTSocket("localhost:9090") +func NewTSocket(hostPort string) (*TSocket, error) { + return NewTSocketTimeout(hostPort, 0) +} + +// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port +// it also accepts a timeout as a time.Duration +func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { + //conn, err := net.DialTimeout(network, address, timeout) + addr, err := net.ResolveTCPAddr("tcp", hostPort) + if err != nil { + return nil, err + } + return NewTSocketFromAddrTimeout(addr, timeout), nil +} + +// Creates a TSocket from a net.Addr +func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { + return &TSocket{addr: addr, timeout: timeout} +} + +// Creates a TSocket from an existing net.Conn +func NewTSocketFromConnTimeout(conn net.Conn, timeout 
time.Duration) *TSocket { + return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} +} + +// Sets the socket timeout +func (p *TSocket) SetTimeout(timeout time.Duration) error { + p.timeout = timeout + return nil +} + +func (p *TSocket) pushDeadline(read, write bool) { + var t time.Time + if p.timeout > 0 { + t = time.Now().Add(time.Duration(p.timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. +func (p *TSocket) Open() error { + if p.IsOpen() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + var err error + if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSocket) IsOpen() bool { + if p.conn == nil { + return false + } + return true +} + +// Closes the socket. +func (p *TSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +//Returns the remote address of the socket. 
+func (p *TSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSocket) Read(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSocket) Write(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSocket) Interrupt() error { + if !p.IsOpen() { + return nil + } + return p.conn.Close() +} + +func (p *TSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go new file mode 100644 index 000000000..907afca32 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go @@ -0,0 +1,112 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package thrift + +import ( + "crypto/tls" + "net" + "time" +) + +type TSSLServerSocket struct { + listener net.Listener + addr net.Addr + clientTimeout time.Duration + interrupted bool + cfg *tls.Config +} + +func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { + return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) +} + +func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + addr, err := net.ResolveTCPAddr("tcp", listenAddr) + if err != nil { + return nil, err + } + return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil +} + +func (p *TSSLServerSocket) Listen() error { + if p.IsListening() { + return nil + } + l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) + if err != nil { + return err + } + p.listener = l + return nil +} + +func (p *TSSLServerSocket) Accept() (TTransport, error) { + if p.interrupted { + return nil, errTransportInterrupted + } + if p.listener == nil { + return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") + } + conn, err := p.listener.Accept() + if err != nil { + return nil, NewTTransportExceptionFromError(err) + } + return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil +} + +// Checks whether the socket is listening. +func (p *TSSLServerSocket) IsListening() bool { + return p.listener != nil +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSSLServerSocket) Open() error { + if p.IsListening() { + return NewTTransportException(ALREADY_OPEN, "Server socket already open") + } + if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return err + } else { + p.listener = l + } + return nil +} + +func (p *TSSLServerSocket) Addr() net.Addr { + return p.addr +} + +func (p *TSSLServerSocket) Close() error { + defer func() { + p.listener = nil + }() + if p.IsListening() { + return p.listener.Close() + } + return nil +} + +func (p *TSSLServerSocket) Interrupt() error { + p.interrupted = true + return nil +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go new file mode 100644 index 000000000..ba6337726 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go @@ -0,0 +1,176 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "context" + "crypto/tls" + "net" + "time" +) + +type TSSLSocket struct { + conn net.Conn + // hostPort contains host:port (e.g. "asdf.com:12345"). The field is + // only valid if addr is nil. 
+ hostPort string + // addr is nil when hostPort is not "", and is only used when the + // TSSLSocket is constructed from a net.Addr. + addr net.Addr + timeout time.Duration + cfg *tls.Config +} + +// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration +// +// Example: +// trans, err := thrift.NewTSSLSocket("localhost:9090", nil) +func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { + return NewTSSLSocketTimeout(hostPort, cfg, 0) +} + +// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port +// it also accepts a tls Configuration and a timeout as a time.Duration +func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { + if cfg.MinVersion == 0 { + cfg.MinVersion = tls.VersionTLS10 + } + return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil +} + +// Creates a TSSLSocket from a net.Addr +func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { + return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} +} + +// Creates a TSSLSocket from an existing net.Conn +func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { + return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} +} + +// Sets the socket timeout +func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { + p.timeout = timeout + return nil +} + +func (p *TSSLSocket) pushDeadline(read, write bool) { + var t time.Time + if p.timeout > 0 { + t = time.Now().Add(time.Duration(p.timeout)) + } + if read && write { + p.conn.SetDeadline(t) + } else if read { + p.conn.SetReadDeadline(t) + } else if write { + p.conn.SetWriteDeadline(t) + } +} + +// Connects the socket, creating a new socket object if necessary. 
+func (p *TSSLSocket) Open() error { + var err error + // If we have a hostname, we need to pass the hostname to tls.Dial for + // certificate hostname checks. + if p.hostPort != "" { + if p.conn, err = tls.DialWithDialer(&net.Dialer{ + Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } else { + if p.IsOpen() { + return NewTTransportException(ALREADY_OPEN, "Socket already connected.") + } + if p.addr == nil { + return NewTTransportException(NOT_OPEN, "Cannot open nil address.") + } + if len(p.addr.Network()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") + } + if len(p.addr.String()) == 0 { + return NewTTransportException(NOT_OPEN, "Cannot open bad address.") + } + if p.conn, err = tls.DialWithDialer(&net.Dialer{ + Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil { + return NewTTransportException(NOT_OPEN, err.Error()) + } + } + return nil +} + +// Retrieve the underlying net.Conn +func (p *TSSLSocket) Conn() net.Conn { + return p.conn +} + +// Returns true if the connection is open +func (p *TSSLSocket) IsOpen() bool { + if p.conn == nil { + return false + } + return true +} + +// Closes the socket. 
+func (p *TSSLSocket) Close() error { + // Close the socket + if p.conn != nil { + err := p.conn.Close() + if err != nil { + return err + } + p.conn = nil + } + return nil +} + +func (p *TSSLSocket) Read(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(true, false) + n, err := p.conn.Read(buf) + return n, NewTTransportExceptionFromError(err) +} + +func (p *TSSLSocket) Write(buf []byte) (int, error) { + if !p.IsOpen() { + return 0, NewTTransportException(NOT_OPEN, "Connection not open") + } + p.pushDeadline(false, true) + return p.conn.Write(buf) +} + +func (p *TSSLSocket) Flush(ctx context.Context) error { + return nil +} + +func (p *TSSLSocket) Interrupt() error { + if !p.IsOpen() { + return nil + } + return p.conn.Close() +} + +func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { + const maxSize = ^uint64(0) + return maxSize // the thruth is, we just don't know unless framed is used +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go new file mode 100644 index 000000000..ba2738a8d --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go @@ -0,0 +1,70 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
// errTransportInterrupted is returned by server-socket Accept after the
// socket has been interrupted via Interrupt.
var errTransportInterrupted = errors.New("Transport Interrupted")

// Flusher is the legacy, context-free flush interface.
type Flusher interface {
	Flush() (err error)
}

// ContextFlusher flushes any buffered data, honoring the given context.
type ContextFlusher interface {
	Flush(ctx context.Context) (err error)
}

// ReadSizeProvider reports how many bytes remain to be read, when known.
type ReadSizeProvider interface {
	RemainingBytes() (numBytes uint64)
}

// TTransport encapsulates the I/O layer.
type TTransport interface {
	io.ReadWriteCloser
	ContextFlusher
	ReadSizeProvider

	// Opens the transport for communication
	Open() error

	// Returns true if the transport is open
	IsOpen() bool
}

// stringWriter is satisfied by writers that can take a string directly.
type stringWriter interface {
	WriteString(s string) (n int, err error)
}

// TRichTransport is an "enhanced" transport with extra capabilities. You
// need to use one of these to construct a protocol. Notably, TSocket does
// not implement this interface, and it is always a mistake to use TSocket
// directly in a protocol.
type TRichTransport interface {
	io.ReadWriter
	io.ByteReader
	io.ByteWriter
	stringWriter
	ContextFlusher
	ReadSizeProvider
}
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +import ( + "errors" + "io" +) + +type timeoutable interface { + Timeout() bool +} + +// Thrift Transport exception +type TTransportException interface { + TException + TypeId() int + Err() error +} + +const ( + UNKNOWN_TRANSPORT_EXCEPTION = 0 + NOT_OPEN = 1 + ALREADY_OPEN = 2 + TIMED_OUT = 3 + END_OF_FILE = 4 +) + +type tTransportException struct { + typeId int + err error +} + +func (p *tTransportException) TypeId() int { + return p.typeId +} + +func (p *tTransportException) Error() string { + return p.err.Error() +} + +func (p *tTransportException) Err() error { + return p.err +} + +func NewTTransportException(t int, e string) TTransportException { + return &tTransportException{typeId: t, err: errors.New(e)} +} + +func NewTTransportExceptionFromError(e error) TTransportException { + if e == nil { + return nil + } + + if t, ok := e.(TTransportException); ok { + return t + } + + switch v := e.(type) { + case TTransportException: + return v + case timeoutable: + if v.Timeout() { + return &tTransportException{typeId: TIMED_OUT, err: e} + } + } + + if e == io.EOF { + return &tTransportException{typeId: END_OF_FILE, err: e} + } + + return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go new file mode 100644 index 000000000..c80580794 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go @@ -0,0 +1,39 @@ +/* + * Licensed to 
the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package thrift + +// Factory class used to create wrapped instance of Transports. +// This is used primarily in servers, which get Transports from +// a ServerTransport and then may want to mutate them (i.e. create +// a BufferedTransport from the underlying base transport) +type TTransportFactory interface { + GetTransport(trans TTransport) (TTransport, error) +} + +type tTransportFactory struct{} + +// Return a wrapped instance of the base Transport. +func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + return trans, nil +} + +func NewTTransportFactory() TTransportFactory { + return &tTransportFactory{} +} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go new file mode 100644 index 000000000..4292ffcad --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go @@ -0,0 +1,69 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
// TType represents a type constant in the Thrift wire protocol.
type TType byte

// Thrift wire-protocol type ids. I08 and UTF7 are aliases of BYTE and
// STRING respectively.
const (
	STOP   = 0
	VOID   = 1
	BOOL   = 2
	BYTE   = 3
	I08    = 3
	DOUBLE = 4
	I16    = 6
	I32    = 8
	I64    = 10
	STRING = 11
	UTF7   = 11
	STRUCT = 12
	MAP    = 13
	SET    = 14
	LIST   = 15
	UTF8   = 16
	UTF16  = 17
	//BINARY = 18 wrong and unusued
)

// typeNames maps each type id to its canonical name; aliases resolve to
// the primary name (I08 -> "BYTE", UTF7 -> "STRING").
var typeNames = map[int]string{
	STOP:   "STOP",
	VOID:   "VOID",
	BOOL:   "BOOL",
	BYTE:   "BYTE",
	DOUBLE: "DOUBLE",
	I16:    "I16",
	I32:    "I32",
	I64:    "I64",
	STRING: "STRING",
	STRUCT: "STRUCT",
	MAP:    "MAP",
	SET:    "SET",
	LIST:   "LIST",
	UTF8:   "UTF8",
	UTF16:  "UTF16",
}

// String returns the canonical name of the type, or "Unknown" for ids that
// are not part of the protocol.
func (p TType) String() string {
	name, ok := typeNames[int(p)]
	if !ok {
		return "Unknown"
	}
	return name
}
You may obtain a copy of the License at +* +* http://www.apache.org/licenses/LICENSE-2.0 +* +* Unless required by applicable law or agreed to in writing, +* software distributed under the License is distributed on an +* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +* KIND, either express or implied. See the License for the +* specific language governing permissions and limitations +* under the License. + */ + +package thrift + +import ( + "compress/zlib" + "context" + "io" + "log" +) + +// TZlibTransportFactory is a factory for TZlibTransport instances +type TZlibTransportFactory struct { + level int + factory TTransportFactory +} + +// TZlibTransport is a TTransport implementation that makes use of zlib compression. +type TZlibTransport struct { + reader io.ReadCloser + transport TTransport + writer *zlib.Writer +} + +// GetTransport constructs a new instance of NewTZlibTransport +func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { + if p.factory != nil { + // wrap other factory + var err error + trans, err = p.factory.GetTransport(trans) + if err != nil { + return nil, err + } + } + return NewTZlibTransport(trans, p.level) +} + +// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory +func NewTZlibTransportFactory(level int) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: nil} +} + +// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory +// as a wrapper over existing transport factory +func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { + return &TZlibTransportFactory{level: level, factory: factory} +} + +// NewTZlibTransport constructs a new instance of TZlibTransport +func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { + w, err := zlib.NewWriterLevel(trans, level) + if err != nil { + log.Println(err) + return nil, err + } + + return &TZlibTransport{ + writer: 
w, + transport: trans, + }, nil +} + +// Close closes the reader and writer (flushing any unwritten data) and closes +// the underlying transport. +func (z *TZlibTransport) Close() error { + if z.reader != nil { + if err := z.reader.Close(); err != nil { + return err + } + } + if err := z.writer.Close(); err != nil { + return err + } + return z.transport.Close() +} + +// Flush flushes the writer and its underlying transport. +func (z *TZlibTransport) Flush(ctx context.Context) error { + if err := z.writer.Flush(); err != nil { + return err + } + return z.transport.Flush(ctx) +} + +// IsOpen returns true if the transport is open +func (z *TZlibTransport) IsOpen() bool { + return z.transport.IsOpen() +} + +// Open opens the transport for communication +func (z *TZlibTransport) Open() error { + return z.transport.Open() +} + +func (z *TZlibTransport) Read(p []byte) (int, error) { + if z.reader == nil { + r, err := zlib.NewReader(z.transport) + if err != nil { + return 0, NewTTransportExceptionFromError(err) + } + z.reader = r + } + + return z.reader.Read(p) +} + +// RemainingBytes returns the size in bytes of the data that is still to be +// read. +func (z *TZlibTransport) RemainingBytes() uint64 { + return z.transport.RemainingBytes() +} + +func (z *TZlibTransport) Write(p []byte) (int, error) { + return z.writer.Write(p) +} diff --git a/vendor/git.apache.org/thrift.git/lib/hs/LICENSE b/vendor/git.apache.org/thrift.git/lib/hs/LICENSE new file mode 100644 index 000000000..d64569567 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/lib/hs/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh b/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh new file mode 120000 index 000000000..26b3c58e4 --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh @@ -0,0 +1 @@ +client.sh \ No newline at end of file diff --git a/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE b/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE new file mode 100644 index 000000000..3b6d7d74c --- /dev/null +++ b/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE @@ -0,0 +1,239 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + +-------------------------------------------------- +SOFTWARE DISTRIBUTED WITH THRIFT: + +The Apache Thrift software includes a number of subcomponents with +separate copyright notices and license terms. Your use of the source +code for the these subcomponents is subject to the terms and +conditions of the following licenses. + +-------------------------------------------------- +Portions of the following files are licensed under the MIT License: + + lib/erl/src/Makefile.am + +Please see doc/otp-base-license.txt for the full terms of this license. 
+ +-------------------------------------------------- +For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: + +# Copyright (c) 2007 Thomas Porschberg +# +# Copying and distribution of this file, with or without +# modification, are permitted in any medium without royalty provided +# the copyright notice and this notice are preserved. + +-------------------------------------------------- +For the lib/nodejs/lib/thrift/json_parse.js: + +/* + json_parse.js + 2015-05-02 + Public Domain. + NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. + +*/ +(By Douglas Crockford ) +-------------------------------------------------- From 7e6c39beca2921a62fe5f9e53773d750822a6d5c Mon Sep 17 00:00:00 2001 From: JBD Date: Wed, 22 Aug 2018 16:44:29 -0700 Subject: [PATCH 050/212] Add go.mod file (#877) So, users can test if there are any problems during 1.11's experimental cycle. --- go.mod | 20 ++++++++++++++++++++ 1 file changed, 20 insertions(+) create mode 100644 go.mod diff --git a/go.mod b/go.mod new file mode 100644 index 000000000..7a1ccc661 --- /dev/null +++ b/go.mod @@ -0,0 +1,20 @@ +module go.opencensus.io + +require ( + git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827 + github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/golang/protobuf v1.2.0 + github.com/matttproud/golang_protobuf_extensions v1.0.1 + github.com/openzipkin/zipkin-go v0.1.1 + github.com/prometheus/client_golang v0.8.0 + github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 + github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e + github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 + golang.org/x/net v0.0.0-20180821023952-922f4815f713 + golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f + golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339 + golang.org/x/text v0.3.0 + google.golang.org/api v0.0.0-20180818000503-e21acd801f91 + google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 + 
google.golang.org/grpc v1.14.0 +) From 71e2e3e3082a525f158c187a70a4dbb59aa522b9 Mon Sep 17 00:00:00 2001 From: Bastian Ike Date: Thu, 23 Aug 2018 21:16:57 +0200 Subject: [PATCH 051/212] Add possibility to Jaeger to add process tags (#871) Jaeger allows to attach tags to it's process, which this patch allows to specify. Currently this is limited to bool, string, int64 and int32, as attributeToTag is used for converting it to *jaeger.Tag. For reference see the official Jaeger client go: https://github.com/jaegertracing/jaeger-client-go/blob/252d853b2a4f3cfc98bb89c3a8bb3d3aa50ed4d6/tracer.go#L64 This change helps to specify tags, such as 'ip', which has a special handling in Jaeger (for example jaeger-query skips clock skew adjustments for processes from the same IP). This change makes Process part of the Jaeger Options. The Options ServiceName is deprecated and used as a fallback. All examples are updated accordingly. Int32Tag is removed (as there is Int64Tag already). --- exporter/jaeger/example/main.go | 6 ++-- exporter/jaeger/example_test.go | 32 ++++++++++++++++-- exporter/jaeger/jaeger.go | 60 ++++++++++++++++++++++++++++----- 3 files changed, 85 insertions(+), 13 deletions(-) diff --git a/exporter/jaeger/example/main.go b/exporter/jaeger/example/main.go index 1caec456e..3cab8785d 100644 --- a/exporter/jaeger/example/main.go +++ b/exporter/jaeger/example/main.go @@ -30,8 +30,10 @@ func main() { // Register the Jaeger exporter to be able to retrieve // the collected spans. 
exporter, err := jaeger.NewExporter(jaeger.Options{ - Endpoint: "http://localhost:14268", - ServiceName: "trace-demo", + Endpoint: "http://localhost:14268", + Process: jaeger.Process{ + ServiceName: "trace-demo", + }, }) if err != nil { log.Fatal(err) diff --git a/exporter/jaeger/example_test.go b/exporter/jaeger/example_test.go index 7865d6ef5..bb21207e4 100644 --- a/exporter/jaeger/example_test.go +++ b/exporter/jaeger/example_test.go @@ -25,8 +25,10 @@ func ExampleNewExporter_collector() { // Register the Jaeger exporter to be able to retrieve // the collected spans. exporter, err := jaeger.NewExporter(jaeger.Options{ - Endpoint: "http://localhost:14268", - ServiceName: "trace-demo", + Endpoint: "http://localhost:14268", + Process: jaeger.Process{ + ServiceName: "trace-demo", + }, }) if err != nil { log.Fatal(err) @@ -39,7 +41,31 @@ func ExampleNewExporter_agent() { // the collected spans. exporter, err := jaeger.NewExporter(jaeger.Options{ AgentEndpoint: "localhost:6831", - ServiceName: "trace-demo", + Process: jaeger.Process{ + ServiceName: "trace-demo", + }, + }) + if err != nil { + log.Fatal(err) + } + trace.RegisterExporter(exporter) +} + +// ExampleNewExporter_processTags shows how to set ProcessTags +// on a Jaeger exporter. These tags will be added to the exported +// Jaeger process. +func ExampleNewExporter_processTags() { + // Register the Jaeger exporter to be able to retrieve + // the collected spans. + exporter, err := jaeger.NewExporter(jaeger.Options{ + AgentEndpoint: "localhost:6831", + Process: jaeger.Process{ + ServiceName: "trace-demo", + Tags: []jaeger.Tag{ + jaeger.StringTag("ip", "127.0.0.1"), + jaeger.BoolTag("demo", true), + }, + }, }) if err != nil { log.Fatal(err) diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index 8adafbb24..34f69e455 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -58,7 +58,11 @@ type Options struct { Password string // ServiceName is the Jaeger service name. 
+ // Deprecated: Specify Process instead. ServiceName string + + // Process contains the information about the exporting process. + Process Process } // NewExporter returns a trace.Exporter implementation that exports @@ -86,17 +90,27 @@ func NewExporter(o Options) (*Exporter, error) { } log.Printf("Error when uploading spans to Jaeger: %v", err) } - service := o.ServiceName - if service == "" { + service := o.Process.ServiceName + if service == "" && o.ServiceName != "" { + // fallback to old service name if specified + service = o.ServiceName + } else if service == "" { service = defaultServiceName } + tags := make([]*gen.Tag, len(o.Process.Tags)) + for i, tag := range o.Process.Tags { + tags[i] = attributeToTag(tag.key, tag.value) + } e := &Exporter{ endpoint: endpoint, agentEndpoint: o.AgentEndpoint, client: client, username: o.Username, password: o.Password, - service: service, + process: &gen.Process{ + ServiceName: service, + Tags: tags, + }, } bundler := bundler.NewBundler((*gen.Span)(nil), func(bundle interface{}) { if err := e.upload(bundle.([]*gen.Span)); err != nil { @@ -107,11 +121,43 @@ func NewExporter(o Options) (*Exporter, error) { return e, nil } +// Process contains the information exported to jaeger about the source +// of the trace data. +type Process struct { + // ServiceName is the Jaeger service name. 
+ ServiceName string + + // Tags are added to Jaeger Process exports + Tags []Tag +} + +// Tag defines a key-value pair +// It is limited to the possible conversions to *jaeger.Tag by attributeToTag +type Tag struct { + key string + value interface{} +} + +// BoolTag creates a new tag of type bool, exported as jaeger.TagType_BOOL +func BoolTag(key string, value bool) Tag { + return Tag{key, value} +} + +// StringTag creates a new tag of type string, exported as jaeger.TagType_STRING +func StringTag(key string, value string) Tag { + return Tag{key, value} +} + +// Int64Tag creates a new tag of type int64, exported as jaeger.TagType_LONG +func Int64Tag(key string, value int64) Tag { + return Tag{key, value} +} + // Exporter is an implementation of trace.Exporter that uploads spans to Jaeger. type Exporter struct { endpoint string agentEndpoint string - service string + process *gen.Process bundler *bundler.Bundler client *agentClientUDP @@ -230,10 +276,8 @@ func (e *Exporter) Flush() { func (e *Exporter) upload(spans []*gen.Span) error { batch := &gen.Batch{ - Spans: spans, - Process: &gen.Process{ - ServiceName: e.service, - }, + Spans: spans, + Process: e.process, } if e.endpoint != "" { return e.uploadCollector(batch) From 91168ff30a428d6a67d5f5ad2526514ae93c1615 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Di=C3=B3genes=20Falc=C3=A3o?= Date: Fri, 24 Aug 2018 20:03:06 -0300 Subject: [PATCH 052/212] [refactor] Change `SpanAnnotator` to `SpanAnnotatingClientTrace` (#870) Currently the `SpanAnnotator` type is in fact a httptrace's `ClientTrace` (a collection of hooks called during the roundtrip flow). Trying to pass the idea that it's still a ClientTrace that annotates the given Span with information from http events, this change renames it to `SpanAnnotatingClientTrace`. 
Fixes: #860 --- ...annotator.go => span_annotating_client_trace.go} | 13 ++++++++++--- ...test.go => span_annotating_client_trace_test.go} | 4 ++-- 2 files changed, 12 insertions(+), 5 deletions(-) rename plugin/ochttp/{span_annotator.go => span_annotating_client_trace.go} (91%) rename plugin/ochttp/{span_annotator_test.go => span_annotating_client_trace_test.go} (95%) diff --git a/plugin/ochttp/span_annotator.go b/plugin/ochttp/span_annotating_client_trace.go similarity index 91% rename from plugin/ochttp/span_annotator.go rename to plugin/ochttp/span_annotating_client_trace.go index 128228729..7aa03cd5d 100644 --- a/plugin/ochttp/span_annotator.go +++ b/plugin/ochttp/span_annotating_client_trace.go @@ -27,9 +27,16 @@ type spanAnnotator struct { sp *trace.Span } -// NewSpanAnnotator returns a httptrace.ClientTrace which annotates all emitted -// httptrace events on the provided Span. -func NewSpanAnnotator(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { +// TODO: Remove NewSpanAnnotator at the next release. + +// Deprecated: Use NewSpanAnnotatingClientTrace instead +func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { + return NewSpanAnnotatingClientTrace(r, s) +} + +// NewSpanAnnotatingClientTrace returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
+func NewSpanAnnotatingClientTrace(_ *http.Request, s *trace.Span) *httptrace.ClientTrace { sa := spanAnnotator{sp: s} return &httptrace.ClientTrace{ diff --git a/plugin/ochttp/span_annotator_test.go b/plugin/ochttp/span_annotating_client_trace_test.go similarity index 95% rename from plugin/ochttp/span_annotator_test.go rename to plugin/ochttp/span_annotating_client_trace_test.go index 2642d9488..9f0da0c51 100644 --- a/plugin/ochttp/span_annotator_test.go +++ b/plugin/ochttp/span_annotating_client_trace_test.go @@ -26,7 +26,7 @@ import ( "go.opencensus.io/trace" ) -func TestSpanAnnotator(t *testing.T) { +func TestSpanAnnotatingClientTrace(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) })) @@ -36,7 +36,7 @@ func TestSpanAnnotator(t *testing.T) { trace.RegisterExporter(recorder) - tr := ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotator} + tr := ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotatingClientTrace} req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) if err != nil { From 194d97fd2f24cc82144b437839f5e010da2def7a Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 21 Aug 2018 15:05:07 -0700 Subject: [PATCH 053/212] Add stats quickstart from opencensus.io --- examples/quickstart/stats.go | 164 +++++++++++++++++++++++++++++++++++ 1 file changed, 164 insertions(+) create mode 100644 examples/quickstart/stats.go diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go new file mode 100644 index 000000000..9be111357 --- /dev/null +++ b/examples/quickstart/stats.go @@ -0,0 +1,164 @@ +// Copyright 2017, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Command stats implements the stats Quick Start example from: +// https://opencensus.io/quickstart/go/metrics/ +package main + +import ( + "bufio" + "bytes" + "context" + "fmt" + "io" + "log" + "os" + "time" + + "net/http" + + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/zpages" +) + +var ( + // The latency in milliseconds + MLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", "ms") + + // Counts the number of lines read in from standard input + MLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", "1") + + // Encounters the number of non EOF(end-of-file) errors. + MErrors = stats.Int64("repl/errors", "The number of errors encountered", "1") + + // Counts/groups the lengths of lines read in. 
+ MLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", "By") +) + +var ( + KeyMethod, _ = tag.NewKey("method") +) + +var ( + LatencyView = &view.View{ + Name: "demo/latency", + Measure: MLatencyMs, + Description: "The distribution of the latencies", + + // Latency in buckets: + // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] + Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), + TagKeys: []tag.Key{KeyMethod}} + + LineCountView = &view.View{ + Name: "demo/lines_in", + Measure: MLinesIn, + Description: "The number of lines from standard input", + Aggregation: view.Count(), + } + + ErrorCountView = &view.View{ + Name: "demo/errors", + Measure: MErrors, + Description: "The number of errors encountered", + Aggregation: view.Count(), + } + + LineLengthView = &view.View{ + Name: "demo/line_lengths", + Description: "Groups the lengths of keys in buckets", + Measure: MLineLengths, + // Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] + Aggregation: view.Distribution(0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), + } +) + +func main() { + zpages.Handle(nil, "/debug") + go http.ListenAndServe("localhost:8080", nil) + + // Create that Stackdriver stats exporter + exporter, err := prometheus.NewExporter(prometheus.Options{}) + if err != nil { + log.Fatalf("Failed to create the Stackdriver stats exporter: %v", err) + } + http.Handle("/metrics", exporter) + + // Register the stats exporter + view.RegisterExporter(exporter) + + // Register the views + if err := view.Register(LatencyView, LineCountView, ErrorCountView, LineLengthView); err != nil { + log.Fatalf("Failed to register views: %v", err) + } + + // But also we can change the metrics reporting period to 2 seconds + //view.SetReportingPeriod(2 * time.Second) + + // In a REPL: + // 1. Read input + // 2. 
process input + br := bufio.NewReader(os.Stdin) + + // repl is the read, evaluate, print, loop + for { + if err := readEvaluateProcess(br); err != nil { + if err == io.EOF { + return + } + log.Fatal(err) + } + } +} + +// readEvaluateProcess reads a line from the input reader and +// then processes it. It returns an error if any was encountered. +func readEvaluateProcess(br *bufio.Reader) error { + ctx, err := tag.New(context.Background(), tag.Insert(KeyMethod, "repl")) + if err != nil { + return err + } + + fmt.Printf("> ") + line, _, err := br.ReadLine() + if err != nil { + if err != io.EOF { + stats.Record(ctx, MErrors.M(1)) + } + return err + } + + out, err := processLine(ctx, line) + if err != nil { + stats.Record(ctx, MErrors.M(1)) + return err + } + fmt.Printf("< %s\n\n", out) + return nil +} + +// processLine takes in a line of text and +// transforms it. Currently it just capitalizes it. +func processLine(ctx context.Context, in []byte) (out []byte, err error) { + startTime := time.Now() + defer func() { + ms := float64(time.Since(startTime).Nanoseconds()) / 1e6 + stats.Record(ctx, MLinesIn.M(1), MLatencyMs.M(ms), MLineLengths.M(int64(len(in)))) + }() + + return bytes.ToUpper(in), nil +} From c437049becfaaa55b8584ef3064db4cf4712b362 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 21 Aug 2018 15:06:40 -0700 Subject: [PATCH 054/212] Add Graphite exporter to README; fix AWS link --- README.md | 4 +++- examples/quickstart/stats.go | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index bc50ee44c..0cdfcea9e 100644 --- a/README.md +++ b/README.md @@ -55,6 +55,7 @@ can implement their own exporters by implementing the exporter interfaces * [Jaeger][exporter-jaeger] for traces * [AWS X-Ray][exporter-xray] for traces * [Datadog][exporter-datadog] for stats and traces +* [Graphite][exporter-graphite] for stats ## Overview @@ -255,5 +256,6 @@ release in which the functionality was marked *Deprecated*. 
[exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver [exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin [exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger -[exporter-xray]: https://github.com/census-instrumentation/opencensus-go-exporter-aws +[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog +[exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 9be111357..19ddd4869 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -1,4 +1,4 @@ -// Copyright 2017, OpenCensus Authors +// Copyright 2018, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. From 6ce7b575fc2d218f79e0eb8b8dd72441df5f3b5d Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 28 Aug 2018 15:24:41 -0700 Subject: [PATCH 055/212] Add warnings about using AlwaysSample in a production app (#882) --- examples/http/helloworld_client/main.go | 4 +++- examples/http/helloworld_server/main.go | 4 +++- exporter/jaeger/example/main.go | 4 +++- exporter/zipkin/example/main.go | 4 +++- trace/doc.go | 2 ++ trace/sampling.go | 7 +++---- 6 files changed, 17 insertions(+), 8 deletions(-) diff --git a/examples/http/helloworld_client/main.go b/examples/http/helloworld_client/main.go index 3b3186b52..0a0753659 100644 --- a/examples/http/helloworld_client/main.go +++ b/examples/http/helloworld_client/main.go @@ -34,7 +34,9 @@ func main() { view.RegisterExporter(exporter) trace.RegisterExporter(exporter) - // Always trace for this demo. + // Always trace for this demo. In a production application, you should + // configure this to a trace.ProbabilitySampler set at the desired + // probability. 
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // Report stats at every second. diff --git a/examples/http/helloworld_server/main.go b/examples/http/helloworld_server/main.go index 3f042c15a..1c99e428f 100644 --- a/examples/http/helloworld_server/main.go +++ b/examples/http/helloworld_server/main.go @@ -41,7 +41,9 @@ func main() { view.RegisterExporter(exporter) trace.RegisterExporter(exporter) - // Always trace for this demo. + // Always trace for this demo. In a production application, you should + // configure this to a trace.ProbabilitySampler set at the desired + // probability. trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) // Report stats at every second. diff --git a/exporter/jaeger/example/main.go b/exporter/jaeger/example/main.go index 3cab8785d..303bc6ea1 100644 --- a/exporter/jaeger/example/main.go +++ b/exporter/jaeger/example/main.go @@ -40,7 +40,9 @@ func main() { } trace.RegisterExporter(exporter) - // For demoing purposes, always sample. + // For demoing purposes, always sample. In a production application, you should + // configure this to a trace.ProbabilitySampler set at the desired + // probability. trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) ctx, span := trace.StartSpan(ctx, "/foo") diff --git a/exporter/zipkin/example/main.go b/exporter/zipkin/example/main.go index 24ec878f7..9466c9809 100644 --- a/exporter/zipkin/example/main.go +++ b/exporter/zipkin/example/main.go @@ -41,7 +41,9 @@ func main() { exporter := zipkin.NewExporter(reporter, localEndpoint) trace.RegisterExporter(exporter) - // For example purposes, sample every trace. + // For example purposes, sample every trace. In a production application, you should + // configure this to a trace.ProbabilitySampler set at the desired + // probability. 
trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) ctx := context.Background() diff --git a/trace/doc.go b/trace/doc.go index db00044b1..04b1ee4f3 100644 --- a/trace/doc.go +++ b/trace/doc.go @@ -32,6 +32,8 @@ to sample a subset of traces, or use AlwaysSample to collect a trace on every ru trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) +Be careful about using trace.AlwaysSample in a production application with +significant traffic: a new trace will be started and exported for every request. Adding Spans to a Trace diff --git a/trace/sampling.go b/trace/sampling.go index 313f8b68e..71c10f9e3 100644 --- a/trace/sampling.go +++ b/trace/sampling.go @@ -20,10 +20,6 @@ import ( const defaultSamplingProbability = 1e-4 -func newDefaultSampler() Sampler { - return ProbabilitySampler(defaultSamplingProbability) -} - // Sampler decides whether a trace should be sampled and exported. type Sampler func(SamplingParameters) SamplingDecision @@ -62,6 +58,9 @@ func ProbabilitySampler(fraction float64) Sampler { } // AlwaysSample returns a Sampler that samples every trace. +// Be careful about using this sampler in a production application with +// significant traffic: a new trace will be started and exported for every +// request. func AlwaysSample() Sampler { return func(p SamplingParameters) SamplingDecision { return SamplingDecision{Sample: true} From 8fb3517747f9853edf7838e24139e84a8b60dbaa Mon Sep 17 00:00:00 2001 From: Harsh Agarwal Date: Wed, 5 Sep 2018 00:13:10 +0530 Subject: [PATCH 056/212] Typecast num into int64 (#894) * zpages: fix int comparison that overflows on 32bit architectures In the count formatting routines, a comparison between an int and 1e12 overflowed, since the max range of an int on a 32bit architecture is 2147483647 which is less than even 1e10. Fix this by a cast to int64 and then a low-to-high range comparison. 
Fixes #893 --- zpages/templates.go | 22 +++++++++++++--------- 1 file changed, 13 insertions(+), 9 deletions(-) diff --git a/zpages/templates.go b/zpages/templates.go index d60a12d57..efc216d62 100644 --- a/zpages/templates.go +++ b/zpages/templates.go @@ -58,20 +58,24 @@ func parseTemplate(name string) *template.Template { } func countFormatter(num int) string { - if num == 0 { + if num <= 0 { return " " } var floatVal float64 var suffix string - if num >= 1e12 { - floatVal = float64(num) / 1e9 - suffix = " T " - } else if num >= 1e9 { - floatVal = float64(num) / 1e9 - suffix = " G " - } else if num >= 1e6 { - floatVal = float64(num) / 1e6 + + num64 := int64(num) + + switch { + case num64 <= 1e6: + floatVal = float64(num64) / 1e3 suffix = " M " + case num64 <= 1e9: + floatVal = float64(num64) / 1e6 + suffix = " G " + default: + floatVal = float64(num64) / 1e9 + suffix = " T " } if floatVal != 0 { From ae28ecc801ded796dc73a419587e8941d125867d Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Tue, 4 Sep 2018 15:56:09 -0700 Subject: [PATCH 057/212] zpages: fix countFormatter code and add tests (#896) Fix countFormatter code due to a bug that I inadvertently introduced with a suggestion in PR 894, due to change of signs: from >= to <= it should have just been the int64 cast. However, also fix a long-standing bug in the original code for the case num >= 1e12, where the divisor was copy-pasted as 1e9 instead of 1e12 for Tera* values. This change also adds tests to ensure such problems could be caught in the first place and that we never regress. 
--- zpages/formatter_test.go | 40 ++++++++++++++++++++++++++++++++++++++++ zpages/templates.go | 17 ++++++++--------- 2 files changed, 48 insertions(+), 9 deletions(-) create mode 100644 zpages/formatter_test.go diff --git a/zpages/formatter_test.go b/zpages/formatter_test.go new file mode 100644 index 000000000..33e2b2ab9 --- /dev/null +++ b/zpages/formatter_test.go @@ -0,0 +1,40 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package zpages + +import "testing" + +func TestCountFormatter(t *testing.T) { + tests := []struct { + in int + want string + }{ + {-1, " "}, + {0, " "}, + {1, "1"}, + {1024, "1024"}, + {1e5, "100000"}, + {1e6, "1.000 M "}, + {1e9, "1.000 G "}, + {1e8 + 2e9, "2.100 G "}, + } + + for _, tt := range tests { + if g, w := countFormatter(tt.in), tt.want; g != w { + t.Errorf("%d got %q want %q", tt.in, g, w) + } + } +} diff --git a/zpages/templates.go b/zpages/templates.go index efc216d62..9c3b1fc0e 100644 --- a/zpages/templates.go +++ b/zpages/templates.go @@ -66,16 +66,15 @@ func countFormatter(num int) string { num64 := int64(num) - switch { - case num64 <= 1e6: - floatVal = float64(num64) / 1e3 - suffix = " M " - case num64 <= 1e9: - floatVal = float64(num64) / 1e6 - suffix = " G " - default: - floatVal = float64(num64) / 1e9 + if num64 >= 1e12 { + floatVal = float64(num64) / 1e12 suffix = " T " + } else if num64 >= 1e9 { + floatVal = float64(num64) / 1e9 + suffix = " G " + } else if 
num64 >= 1e6 { + floatVal = float64(num64) / 1e6 + suffix = " M " } if floatVal != 0 { From 126790a5e96eb1675db2621d7cd8a36677c7cce0 Mon Sep 17 00:00:00 2001 From: Andrei Date: Wed, 5 Sep 2018 05:17:52 +0100 Subject: [PATCH 058/212] vendor: remove invalid symbolic link for Thrift that broke Glide imports The vendored Thrift dependency was improperly added leaving an invalid symbolic link that broke Glide imports. This change removes that symbolic link. Fixes #886 --- vendor/git.apache.org/thrift.git/tutorial/erl/server.sh | 1 - 1 file changed, 1 deletion(-) delete mode 120000 vendor/git.apache.org/thrift.git/tutorial/erl/server.sh diff --git a/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh b/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh deleted file mode 120000 index 26b3c58e4..000000000 --- a/vendor/git.apache.org/thrift.git/tutorial/erl/server.sh +++ /dev/null @@ -1 +0,0 @@ -client.sh \ No newline at end of file From 7c764632b5a454dfa7c14e9ad2bf05218fceeedf Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 5 Sep 2018 20:34:27 -0700 Subject: [PATCH 059/212] Support http_server_route tag (#879) --- plugin/ochttp/example_test.go | 26 ++++++++---- plugin/ochttp/route.go | 51 ++++++++++++++++++++++ plugin/ochttp/route_test.go | 80 +++++++++++++++++++++++++++++++++++ plugin/ochttp/server.go | 20 ++++++--- plugin/ochttp/server_test.go | 9 ++-- plugin/ochttp/stats.go | 5 +++ trace/config.go | 3 +- 7 files changed, 175 insertions(+), 19 deletions(-) create mode 100644 plugin/ochttp/route.go create mode 100644 plugin/ochttp/route_test.go diff --git a/plugin/ochttp/example_test.go b/plugin/ochttp/example_test.go index 7a0c452ce..bf060d6c2 100644 --- a/plugin/ochttp/example_test.go +++ b/plugin/ochttp/example_test.go @@ -25,13 +25,18 @@ import ( ) func ExampleTransport() { + // import ( + // "go.opencensus.io/plugin/ochttp" + // "go.opencensus.io/stats/view" + // ) + if err := view.Register( - // Register to a few default views. 
+ // Register a few default views. ochttp.ClientRequestCountByMethod, ochttp.ClientResponseCountByStatusCode, ochttp.ClientLatencyView, - // Register to a custom view. + // Register a custom view. &view.View{ Name: "httpclient_latency_by_hostpath", TagKeys: []tag.Key{ochttp.Host, ochttp.Path}, @@ -45,22 +50,27 @@ func ExampleTransport() { client := &http.Client{ Transport: &ochttp.Transport{}, } - _ = client // use client to perform requests + + // Use client to perform requests. + _ = client } var usersHandler http.Handler func ExampleHandler() { - // Enables OpenCensus for the default serve mux. - // By default, B3 propagation is used. - http.Handle("/users", usersHandler) + // import "go.opencensus.io/plugin/ochttp" + + http.Handle("/users", ochttp.WithRouteTag(usersHandler, "/users")) + + // If no handler is specified, the default mux is used. log.Fatal(http.ListenAndServe("localhost:8080", &ochttp.Handler{})) } func ExampleHandler_mux() { - mux := http.NewServeMux() - mux.Handle("/users", usersHandler) + // import "go.opencensus.io/plugin/ochttp" + mux := http.NewServeMux() + mux.Handle("/users", ochttp.WithRouteTag(usersHandler, "/users")) log.Fatal(http.ListenAndServe("localhost:8080", &ochttp.Handler{ Handler: mux, Propagation: &b3.HTTPFormat{}, diff --git a/plugin/ochttp/route.go b/plugin/ochttp/route.go new file mode 100644 index 000000000..dbe22d586 --- /dev/null +++ b/plugin/ochttp/route.go @@ -0,0 +1,51 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "net/http" + + "go.opencensus.io/tag" +) + +// WithRouteTag returns an http.Handler that records stats with the +// http_server_route tag set to the given value. +func WithRouteTag(handler http.Handler, route string) http.Handler { + return taggedHandlerFunc(func(w http.ResponseWriter, r *http.Request) []tag.Mutator { + addRoute := []tag.Mutator{tag.Upsert(KeyServerRoute, route)} + ctx, _ := tag.New(r.Context(), addRoute...) + r = r.WithContext(ctx) + handler.ServeHTTP(w, r) + return addRoute + }) +} + +// taggedHandlerFunc is a http.Handler that returns tags describing the +// processing of the request. These tags will be recorded along with the +// measures in this package at the end of the request. +type taggedHandlerFunc func(w http.ResponseWriter, r *http.Request) []tag.Mutator + +func (h taggedHandlerFunc) ServeHTTP(w http.ResponseWriter, r *http.Request) { + tags := h(w, r) + if a, ok := r.Context().Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tags...) + } +} + +type addedTagsKey struct{} + +type addedTags struct { + t []tag.Mutator +} diff --git a/plugin/ochttp/route_test.go b/plugin/ochttp/route_test.go new file mode 100644 index 000000000..a9793eb0b --- /dev/null +++ b/plugin/ochttp/route_test.go @@ -0,0 +1,80 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/plugin/ochttp" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +func TestWithRouteTag(t *testing.T) { + v := &view.View{ + Name: "request_total", + Measure: ochttp.ServerLatency, + Aggregation: view.Count(), + TagKeys: []tag.Key{ochttp.KeyServerRoute}, + } + view.Register(v) + var e testStatsExporter + view.RegisterExporter(&e) + defer view.UnregisterExporter(&e) + + mux := http.NewServeMux() + handler := ochttp.WithRouteTag(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(204) + }), "/a/") + mux.Handle("/a/", handler) + plugin := ochttp.Handler{Handler: mux} + req, _ := http.NewRequest("GET", "/a/b/c", nil) + rr := httptest.NewRecorder() + plugin.ServeHTTP(rr, req) + if got, want := rr.Code, 204; got != want { + t.Fatalf("Unexpected response, got %d; want %d", got, want) + } + + view.Unregister(v) // trigger exporting + + got := e.rowsForView("request_total") + want := []*view.Row{ + {Data: &view.CountData{Value: 1}, Tags: []tag.Tag{{Key: ochttp.KeyServerRoute, Value: "/a/"}}}, + } + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("Unexpected view data exported, -got, +want: %s", diff) + } +} + +type testStatsExporter struct { + vd []*view.Data +} + +func (t *testStatsExporter) ExportView(d *view.Data) { + t.vd = append(t.vd, d) +} + +func (t *testStatsExporter) rowsForView(name string) []*view.Row { + var rows []*view.Row + for _, d := range t.vd { + if d.View.Name == name { + rows = append(rows, d.Rows...) 
+ } + } + return rows +} diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 72aa8c2d7..ea2e3e288 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -69,15 +69,16 @@ type Handler struct { } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { - var traceEnd, statsEnd func() - r, traceEnd = h.startTrace(w, r) + var tags addedTags + r, traceEnd := h.startTrace(w, r) defer traceEnd() - w, statsEnd = h.startStats(w, r) - defer statsEnd() + w, statsEnd := h.startStats(w, r) + defer statsEnd(&tags) handler := h.Handler if handler == nil { handler = http.DefaultServeMux } + r = r.WithContext(context.WithValue(r.Context(), addedTagsKey{}, &tags)) handler.ServeHTTP(w, r) } @@ -123,7 +124,7 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) return h.Propagation.SpanContextFromRequest(r) } -func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func()) { +func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), tag.Upsert(Host, r.URL.Host), tag.Upsert(Path, r.URL.Path), @@ -157,7 +158,9 @@ type trackingResponseWriter struct { // Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -func (t *trackingResponseWriter) end() { +var logTagsErrorOnce sync.Once + +func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { t.statusCode = 200 @@ -173,7 +176,10 @@ func (t *trackingResponseWriter) end() { if t.reqSize >= 0 { m = append(m, ServerRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) + allTags := make([]tag.Mutator, len(tags.t)+1) + allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) + copy(allTags[1:], tags.t) + ctx, _ := tag.New(t.ctx, allTags...) stats.Record(ctx, m...) 
}) } diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 29ba795f4..3b0d50479 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -65,11 +65,14 @@ func TestHandlerStatsCollection(t *testing.T) { body := bytes.NewBuffer(make([]byte, test.reqSize)) r := httptest.NewRequest(test.method, test.target, body) w := httptest.NewRecorder() + mux := http.NewServeMux() + mux.Handle("/request/", httpHandler(test.statusCode, test.respSize)) h := &Handler{ - Handler: httpHandler(test.statusCode, test.respSize), + Handler: mux, + StartOptions: trace.StartOptions{ + Sampler: trace.NeverSample(), + }, } - h.StartOptions.Sampler = trace.NeverSample() - for i := 0; i < test.count; i++ { h.ServeHTTP(w, r) totalCount++ diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index 19a882500..21d651230 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -60,6 +60,11 @@ var ( // Method is the HTTP method of the request, capitalized (GET, POST, etc.). Method, _ = tag.NewKey("http.method") + + // KeyServerRoute is a low cardinality string representing the logical + // handler of the request. This is usually the pattern registered on the a + // ServeMux (or similar string). + KeyServerRoute, _ = tag.NewKey("http_server_route") ) // Default distributions used by views in this package. diff --git a/trace/config.go b/trace/config.go index e464671c7..0816892ea 100644 --- a/trace/config.go +++ b/trace/config.go @@ -15,8 +15,9 @@ package trace import ( - "go.opencensus.io/trace/internal" "sync" + + "go.opencensus.io/trace/internal" ) // Config represents the global tracing configuration. 
From 00883d3a6928aa4879511c061b136e1d77a8c0aa Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 6 Sep 2018 06:38:23 +0200 Subject: [PATCH 060/212] ochttp.Transport should not modify request (#898) Fixes: #892 --- plugin/ochttp/trace.go | 10 ++++++++++ plugin/ochttp/trace_test.go | 20 ++++++++++++++++++++ 2 files changed, 30 insertions(+) diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 980b6390f..18b828577 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -66,6 +66,16 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { } if t.format != nil { + // SpanContextToRequest will modify its Request argument, which is + // contrary to the contract for http.RoundTripper, so we need to + // pass it a copy of the Request. + // However, the Request struct itself was already copied by + // the WithContext calls above and so we just need to copy the header. + header := make(http.Header) + for k, v := range req.Header { + header[k] = v + } + req.Header = header t.format.SpanContextToRequest(span.SpanContext(), req) } diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index eb1728f5c..b31fa7a1b 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -79,6 +79,26 @@ func (t testPropagator) SpanContextToRequest(sc trace.SpanContext, req *http.Req req.Header.Set("trace", hex.EncodeToString(buf.Bytes())) } +func TestTransport_RoundTrip_Race(t *testing.T) { + // This tests that we don't modify the request in accordance with the + // specification for http.RoundTripper. + // We attempt to trigger a race by reading the request from a separate + // goroutine. If the request is modified by Transport, this should trigger + // the race detector. 
+ + transport := &testTransport{ch: make(chan *http.Request, 1)} + rt := &Transport{ + Propagation: &testPropagator{}, + Base: transport, + } + req, _ := http.NewRequest("GET", "http://foo.com", nil) + go func() { + fmt.Println(*req) + }() + rt.RoundTrip(req) + _ = <-transport.ch +} + func TestTransport_RoundTrip(t *testing.T) { ctx := context.Background() ctx, parent := trace.StartSpan(ctx, "parent") From d3ed4d20652bc008af31608017c90aa135d7cdea Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 7 Sep 2018 01:00:06 -0400 Subject: [PATCH 061/212] Add Tracestate to SpanContext (#890) * Add Tracestate to SpanContext * Moved to new package trace/tracestate. Fixed syntax and code-style issues. Fixed review comments. Made Tracestate immutable added regex for key/value validation (thx @reyang) * Rename constructors. * Minor review comment. * Few fixes. - renamed methods and fields according to go style - Removed capitalized letters from error strings. - simplified Entry type. * Merged two constructors into one. Also allowed to pass an empty entries to create nil tracestate. * Changed tracestate constructor. * Fixed error reported by go vet ./... --- trace/trace.go | 2 + trace/trace_test.go | 11 +- trace/tracestate/tracestate.go | 142 +++++++++++++ trace/tracestate/tracestate_test.go | 303 ++++++++++++++++++++++++++++ 4 files changed, 456 insertions(+), 2 deletions(-) create mode 100644 trace/tracestate/tracestate.go create mode 100644 trace/tracestate/tracestate_test.go diff --git a/trace/trace.go b/trace/trace.go index 887e90be1..77578a3c5 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -25,6 +25,7 @@ import ( "time" "go.opencensus.io/internal" + "go.opencensus.io/trace/tracestate" ) // Span represents a span of a trace. 
It has an associated SpanContext, and @@ -88,6 +89,7 @@ type SpanContext struct { TraceID TraceID SpanID SpanID TraceOptions TraceOptions + Tracestate *tracestate.Tracestate } type contextKey struct{} diff --git a/trace/trace_test.go b/trace/trace_test.go index 234531b97..852b43301 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -20,11 +20,14 @@ import ( "reflect" "testing" "time" + + "go.opencensus.io/trace/tracestate" ) var ( - tid = TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} - sid = SpanID{1, 2, 4, 8, 16, 32, 64, 128} + tid = TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} + sid = SpanID{1, 2, 4, 8, 16, 32, 64, 128} + testTracestate, _ = tracestate.New(nil, tracestate.Entry{Key: "foo", Value: "bar"}) ) func init() { @@ -70,6 +73,9 @@ func checkChild(p SpanContext, c *Span) error { if got, want := c.spanContext.TraceOptions, p.TraceOptions; got != want { return fmt.Errorf("got child trace options %d, want %d", got, want) } + if got, want := c.spanContext.Tracestate, p.Tracestate; got != want { + return fmt.Errorf("got child tracestate %v, want %v", got, want) + } return nil } @@ -204,6 +210,7 @@ func TestStartSpanWithRemoteParent(t *testing.T) { TraceID: tid, SpanID: sid, TraceOptions: 0x1, + Tracestate: testTracestate, } ctx, _ = StartSpanWithRemoteParent(context.Background(), "startSpanWithRemoteParent", sc) if err := checkChild(sc, FromContext(ctx)); err != nil { diff --git a/trace/tracestate/tracestate.go b/trace/tracestate/tracestate.go new file mode 100644 index 000000000..a3232e2d4 --- /dev/null +++ b/trace/tracestate/tracestate.go @@ -0,0 +1,142 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracestate + +import ( + "fmt" + "regexp" +) + +const ( + keyMaxSize = 256 + valueMaxSize = 256 + maxKeyValuePairs = 32 +) + +const ( + keyWithoutVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,255}` + keyWithVendorFormat = `[a-z][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}` + keyFormat = `(` + keyWithoutVendorFormat + `)|(` + keyWithVendorFormat + `)` + valueFormat = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]` +) + +var keyValidationRegExp = regexp.MustCompile(`^(` + keyFormat + `)$`) +var valueValidationRegExp = regexp.MustCompile(`^(` + valueFormat + `)$`) + +// Tracestate represents tracing-system specific context in a list of key-value pairs. Tracestate allows different +// vendors propagate additional information and inter-operate with their legacy Id formats. +type Tracestate struct { + entries []Entry +} + +// Entry represents one key-value pair in a list of key-value pair of Tracestate. +type Entry struct { + // Key is an opaque string up to 256 characters printable. It MUST begin with a lowercase letter, + // and can only contain lowercase letters a-z, digits 0-9, underscores _, dashes -, asterisks *, and + // forward slashes /. + Key string + + // Value is an opaque string up to 256 characters printable ASCII RFC0020 characters (i.e., the + // range 0x20 to 0x7E) except comma , and =. + Value string +} + +// Entries returns a slice of Entry. 
+func (ts *Tracestate) Entries() []Entry { + return ts.entries +} + +func (ts *Tracestate) remove(key string) *Entry { + for index, entry := range ts.entries { + if entry.Key == key { + ts.entries = append(ts.entries[:index], ts.entries[index+1:]...) + return &entry + } + } + return nil +} + +func (ts *Tracestate) add(entries []Entry) error { + for _, entry := range entries { + ts.remove(entry.Key) + } + if len(ts.entries)+len(entries) > maxKeyValuePairs { + return fmt.Errorf("adding %d key-value pairs to current %d pairs exceeds the limit of %d", + len(entries), len(ts.entries), maxKeyValuePairs) + } + ts.entries = append(entries, ts.entries...) + return nil +} + +func isValid(entry Entry) bool { + return keyValidationRegExp.MatchString(entry.Key) && + valueValidationRegExp.MatchString(entry.Value) +} + +func containsDuplicateKey(entries ...Entry) (string, bool) { + keyMap := make(map[string]int) + for _, entry := range entries { + if _, ok := keyMap[entry.Key]; ok { + return entry.Key, true + } + keyMap[entry.Key] = 1 + } + return "", false +} + +func areEntriesValid(entries ...Entry) (*Entry, bool) { + for _, entry := range entries { + if !isValid(entry) { + return &entry, false + } + } + return nil, true +} + +// New creates a Tracestate object from a parent and/or entries (key-value pair). +// Entries from the parent are copied if present. The entries passed to this function +// are inserted in front of those copied from the parent. If an entry copied from the +// parent contains the same key as one of the entry in entries then the entry copied +// from the parent is removed. See add func. +// +// An error is returned with nil Tracestate if +// 1. one or more entry in entries is invalid. +// 2. two or more entries in the input entries have the same key. +// 3. the number of entries combined from the parent and the input entries exceeds maxKeyValuePairs. +// (duplicate entry is counted only once). 
+func New(parent *Tracestate, entries ...Entry) (*Tracestate, error) { + if parent == nil && len(entries) == 0 { + return nil, nil + } + if entry, ok := areEntriesValid(entries...); !ok { + return nil, fmt.Errorf("key-value pair {%s, %s} is invalid", entry.Key, entry.Value) + } + + if key, duplicate := containsDuplicateKey(entries...); duplicate { + return nil, fmt.Errorf("contains duplicate keys (%s)", key) + } + + tracestate := Tracestate{} + + if parent != nil && len(parent.entries) > 0 { + tracestate.entries = append([]Entry{}, parent.entries...) + } + + err := tracestate.add(entries) + if err != nil { + return nil, err + } + return &tracestate, nil +} diff --git a/trace/tracestate/tracestate_test.go b/trace/tracestate/tracestate_test.go new file mode 100644 index 000000000..f4b26357b --- /dev/null +++ b/trace/tracestate/tracestate_test.go @@ -0,0 +1,303 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package tracestate + +import ( + "fmt" + "testing" +) + +func checkFront(t *testing.T, tracestate *Tracestate, wantKey, testname string) { + gotKey := tracestate.entries[0].Key + if gotKey != wantKey { + t.Errorf("test:%s: first entry in the list: got %q want %q", testname, gotKey, wantKey) + } +} + +func checkBack(t *testing.T, tracestate *Tracestate, wantKey, testname string) { + gotKey := tracestate.entries[len(tracestate.entries)-1].Key + if gotKey != wantKey { + t.Errorf("test:%s: last entry in the list: got %q want %q", testname, gotKey, wantKey) + } +} + +func checkSize(t *testing.T, tracestate *Tracestate, wantSize int, testname string) { + if gotSize := len(tracestate.entries); gotSize != wantSize { + t.Errorf("test:%s: size of the list: got %q want %q", testname, gotSize, wantSize) + } +} + +func (ts *Tracestate) get(key string) (string, bool) { + if ts == nil { + return "", false + } + for _, entry := range ts.entries { + if entry.Key == key { + return entry.Value, true + } + } + return "", false +} + +func checkKeyValue(t *testing.T, tracestate *Tracestate, key, wantValue, testname string) { + wantOk := true + if wantValue == "" { + wantOk = false + } + gotValue, gotOk := tracestate.get(key) + if wantOk != gotOk || gotValue != wantValue { + t.Errorf("test:%s: get value for key=%s failed: got %q want %q", testname, key, gotValue, wantValue) + } +} + +func checkError(t *testing.T, tracestate *Tracestate, err error, testname, msg string) { + if err != nil { + t.Errorf("test:%s: %s: tracestate=%v, error= %v", testname, msg, tracestate, err) + } +} + +func wantError(t *testing.T, tracestate *Tracestate, err error, testname, msg string) { + if err == nil { + t.Errorf("test:%s: %s: tracestate=%v, error=%v", testname, msg, tracestate, err) + } +} + +func TestCreateWithNullParent(t *testing.T) { + key1, value1 := "hello", "world" + testname := "TestCreateWithNullParent" + + entry := Entry{key1, value1} + tracestate, err := New(nil, entry) + checkError(t, 
tracestate, err, testname, "create failed from null parent") + checkKeyValue(t, tracestate, key1, value1, testname) +} + +func TestCreateFromParentWithSingleKey(t *testing.T) { + key1, value1, key2, value2 := "hello", "world", "foo", "bar" + testname := "TestCreateFromParentWithSingleKey" + + entry1 := Entry{key1, value1} + entry2 := Entry{key2, value2} + parent, _ := New(nil, entry1) + tracestate, err := New(parent, entry2) + + checkError(t, tracestate, err, testname, "create failed from parent with single key") + checkKeyValue(t, tracestate, key2, value2, testname) + checkFront(t, tracestate, key2, testname) + checkBack(t, tracestate, key1, testname) +} + +func TestCreateFromParentWithDoubleKeys(t *testing.T) { + key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "bar", "baz" + testname := "TestCreateFromParentWithDoubleKeys" + + entry1 := Entry{key1, value1} + entry2 := Entry{key2, value2} + entry3 := Entry{key3, value3} + parent, _ := New(nil, entry2, entry1) + tracestate, err := New(parent, entry3) + + checkError(t, tracestate, err, testname, "create failed from parent with double keys") + checkKeyValue(t, tracestate, key3, value3, testname) + checkFront(t, tracestate, key3, testname) + checkBack(t, tracestate, key1, testname) +} + +func TestCreateFromParentWithExistingKey(t *testing.T) { + key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "hello", "baz" + testname := "TestCreateFromParentWithExistingKey" + + entry1 := Entry{key1, value1} + entry2 := Entry{key2, value2} + entry3 := Entry{key3, value3} + parent, _ := New(nil, entry2, entry1) + tracestate, err := New(parent, entry3) + + checkError(t, tracestate, err, testname, "create failed with an existing key") + checkKeyValue(t, tracestate, key3, value3, testname) + checkFront(t, tracestate, key3, testname) + checkBack(t, tracestate, key2, testname) + checkSize(t, tracestate, 2, testname) +} + +func TestImplicitImmutableTracestate(t *testing.T) { + key1, 
value1, key2, value2, key3, value3 := "hello", "world", "hello", "bar", "foo", "baz" + testname := "TestImplicitImmutableTracestate" + + entry1 := Entry{key1, value1} + entry2 := Entry{key2, value2} + parent, _ := New(nil, entry1) + tracestate, err := New(parent, entry2) + + checkError(t, tracestate, err, testname, "create failed") + checkKeyValue(t, tracestate, key2, value2, testname) + checkKeyValue(t, parent, key2, value1, testname) + + // Get and update entries. + entries := tracestate.Entries() + entry := Entry{key3, value3} + entries = append(entries, entry) + + // Check Tracestate does not have key3. + checkKeyValue(t, tracestate, key3, "", testname) +} + +func TestKeyWithValidChar(t *testing.T) { + testname := "TestKeyWithValidChar" + + arrayRune := []rune("") + for c := 'a'; c <= 'z'; c++ { + arrayRune = append(arrayRune, c) + } + for c := '0'; c <= '9'; c++ { + arrayRune = append(arrayRune, c) + } + arrayRune = append(arrayRune, '_') + arrayRune = append(arrayRune, '-') + arrayRune = append(arrayRune, '*') + arrayRune = append(arrayRune, '/') + key := string(arrayRune) + entry := Entry{key, "world"} + tracestate, err := New(nil, entry) + + checkError(t, tracestate, err, testname, "create failed when the key contains all valid characters") +} + +func TestKeyWithInvalidChar(t *testing.T) { + testname := "TestKeyWithInvalidChar" + + keys := []string{"1ab", "1ab2", "Abc", " abc", "a=b"} + + for _, key := range keys { + entry := Entry{key, "world"} + tracestate, err := New(nil, entry) + wantError(t, tracestate, err, testname, fmt.Sprintf( + "create did not err with invalid key=%q", key)) + } +} + +func TestNilKey(t *testing.T) { + testname := "TestNilKey" + + entry := Entry{"", "world"} + tracestate, err := New(nil, entry) + wantError(t, tracestate, err, testname, "create did not err when the key is nil (\"\")") +} + +func TestValueWithInvalidChar(t *testing.T) { + testname := "TestValueWithInvalidChar" + + keys := []string{"A=B", "A,B", "AB "} + + for _, 
value := range keys { + entry := Entry{"hello", value} + tracestate, err := New(nil, entry) + wantError(t, tracestate, err, testname, + fmt.Sprintf("create did not err when the value is invalid (%q)", value)) + } +} + +func TestNilValue(t *testing.T) { + testname := "TestNilValue" + + tracestate, err := New(nil, Entry{"hello", ""}) + wantError(t, tracestate, err, testname, "create did not err when the value is nil (\"\")") +} + +func TestInvalidKeyLen(t *testing.T) { + testname := "TestInvalidKeyLen" + + arrayRune := []rune("") + for i := 0; i <= keyMaxSize+1; i++ { + arrayRune = append(arrayRune, 'a') + } + key := string(arrayRune) + tracestate, err := New(nil, Entry{key, "world"}) + + wantError(t, tracestate, err, testname, + fmt.Sprintf("create did not err when the length (%d) of the key is larger than max (%d)", + len(key), keyMaxSize)) +} + +func TestInvalidValueLen(t *testing.T) { + testname := "TestInvalidValueLen" + + arrayRune := []rune("") + for i := 0; i <= valueMaxSize+1; i++ { + arrayRune = append(arrayRune, 'a') + } + value := string(arrayRune) + tracestate, err := New(nil, Entry{"hello", value}) + + wantError(t, tracestate, err, testname, + fmt.Sprintf("create did not err when the length (%d) of the value is larger than max (%d)", + len(value), valueMaxSize)) +} + +func TestCreateFromArrayWithOverLimitKVPairs(t *testing.T) { + testname := "TestCreateFromArrayWithOverLimitKVPairs" + + entries := []Entry{} + for i := 0; i <= maxKeyValuePairs; i++ { + key := fmt.Sprintf("a%db", i) + entry := Entry{key, "world"} + entries = append(entries, entry) + } + tracestate, err := New(nil, entries...) + wantError(t, tracestate, err, testname, + fmt.Sprintf("create did not err when the number (%d) of key-value pairs is larger than max (%d)", + len(entries), maxKeyValuePairs)) +} + +func TestCreateFromEmptyArray(t *testing.T) { + testname := "TestCreateFromEmptyArray" + + tracestate, err := New(nil, nil...) 
+ checkError(t, tracestate, err, testname, + "failed to create nil tracestate") +} + +func TestCreateFromParentWithOverLimitKVPairs(t *testing.T) { + testname := "TestCreateFromParentWithOverLimitKVPairs" + + entries := []Entry{} + for i := 0; i < maxKeyValuePairs; i++ { + key := fmt.Sprintf("a%db", i) + entry := Entry{key, "world"} + entries = append(entries, entry) + } + parent, err := New(nil, entries...) + + checkError(t, parent, err, testname, fmt.Sprintf("create failed to add %d key-value pair", maxKeyValuePairs)) + + // Add one more to go over the limit + key := fmt.Sprintf("a%d", maxKeyValuePairs) + tracestate, err := New(parent, Entry{key, "world"}) + wantError(t, tracestate, err, testname, + fmt.Sprintf("create did not err when attempted to exceed the key-value pair limit of %d", maxKeyValuePairs)) +} + +func TestCreateFromArrayWithDuplicateKeys(t *testing.T) { + key1, value1, key2, value2, key3, value3 := "hello", "world", "foo", "bar", "hello", "baz" + testname := "TestCreateFromArrayWithDuplicateKeys" + + entry1 := Entry{key1, value1} + entry2 := Entry{key2, value2} + entry3 := Entry{key3, value3} + tracestate, err := New(nil, entry1, entry2, entry3) + + wantError(t, tracestate, err, testname, + "create did not err when entries contained duplicate keys") +} From b11f239c032624b045c4c2bfd3d1287b4012ce89 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Sat, 8 Sep 2018 08:43:59 +0200 Subject: [PATCH 062/212] Tracestate.Entries should work on nil *Tracestate (#902) --- trace/tracestate/tracestate.go | 3 +++ trace/tracestate/tracestate_test.go | 10 ++++++++++ 2 files changed, 13 insertions(+) diff --git a/trace/tracestate/tracestate.go b/trace/tracestate/tracestate.go index a3232e2d4..2345dd379 100644 --- a/trace/tracestate/tracestate.go +++ b/trace/tracestate/tracestate.go @@ -55,6 +55,9 @@ type Entry struct { // Entries returns a slice of Entry. 
func (ts *Tracestate) Entries() []Entry { + if ts == nil { + return nil + } return ts.entries } diff --git a/trace/tracestate/tracestate_test.go b/trace/tracestate/tracestate_test.go index f4b26357b..db76d9c3f 100644 --- a/trace/tracestate/tracestate_test.go +++ b/trace/tracestate/tracestate_test.go @@ -301,3 +301,13 @@ func TestCreateFromArrayWithDuplicateKeys(t *testing.T) { wantError(t, tracestate, err, testname, "create did not err when entries contained duplicate keys") } + +func TestEntriesWithNil(t *testing.T) { + ts, err := New(nil) + if err != nil { + t.Fatal(err) + } + if got, want := len(ts.Entries()), 0; got != want { + t.Errorf("zero value should have no entries, got %v; want %v", got, want) + } +} From 748e5b4b56f0d2b43a9c8c56ed5ec727b3b9b218 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 10 Sep 2018 22:57:17 +0200 Subject: [PATCH 063/212] Bumb version and dependency versions (#906) --- go.mod | 10 +++++----- go.sum | 21 +++++++++++++++++++++ opencensus.go | 2 +- 3 files changed, 27 insertions(+), 6 deletions(-) create mode 100644 go.sum diff --git a/go.mod b/go.mod index 7a1ccc661..ae7cbc15c 100644 --- a/go.mod +++ b/go.mod @@ -1,7 +1,7 @@ module go.opencensus.io require ( - git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827 + git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/golang/protobuf v1.2.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 @@ -10,11 +10,11 @@ require ( github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 - golang.org/x/net v0.0.0-20180821023952-922f4815f713 + golang.org/x/net v0.0.0-20180906233101-161cd47e91fd golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f - golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339 + golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e 
golang.org/x/text v0.3.0 - google.golang.org/api v0.0.0-20180818000503-e21acd801f91 - google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 + google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf + google.golang.org/genproto v0.0.0-20180831171423-11092d34479b google.golang.org/grpc v1.14.0 ) diff --git a/go.sum b/go.sum new file mode 100644 index 000000000..91d0af1e4 --- /dev/null +++ b/go.sum @@ -0,0 +1,21 @@ +git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys 
v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= diff --git a/opencensus.go b/opencensus.go index 3b4e0c65a..985297ee1 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.16.0" + return "0.17.0" } From f21fe3feadc5461b952191052818685a410428d4 Mon Sep 17 00:00:00 2001 From: Rafael Jesus Date: Mon, 10 Sep 2018 22:59:32 +0200 Subject: [PATCH 064/212] Fix http status code string format (#905) --- plugin/ochttp/server_test.go | 10 +++++----- plugin/ochttp/trace.go | 34 +++++++++++++++++----------------- plugin/ochttp/trace_test.go | 20 ++++++++++---------- 3 files changed, 32 insertions(+), 32 deletions(-) diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 3b0d50479..29468cbe4 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -290,11 +290,11 @@ func TestEnsureTrackingResponseWriterSetsStatusCode(t *testing.T) { res *http.Response want trace.Status }{ - {res: &http.Response{StatusCode: 200}, want: trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}}, - {res: &http.Response{StatusCode: 500}, want: trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}}, - {res: 
&http.Response{StatusCode: 403}, want: trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}}, - {res: &http.Response{StatusCode: 401}, want: trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}}, - {res: &http.Response{StatusCode: 429}, want: trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `"RESOURCE_EXHAUSTED"`}}, + {res: &http.Response{StatusCode: 200}, want: trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, + {res: &http.Response{StatusCode: 500}, want: trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, + {res: &http.Response{StatusCode: 403}, want: trace.Status{Code: trace.StatusCodePermissionDenied, Message: `PERMISSION_DENIED`}}, + {res: &http.Response{StatusCode: 401}, want: trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `UNAUTHENTICATED`}}, + {res: &http.Response{StatusCode: 429}, want: trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `RESOURCE_EXHAUSTED`}}, } for _, tt := range tests { diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 18b828577..819a2d5ff 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -196,23 +196,23 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { } var codeToStr = map[int32]string{ - trace.StatusCodeOK: `"OK"`, - trace.StatusCodeCancelled: `"CANCELLED"`, - trace.StatusCodeUnknown: `"UNKNOWN"`, - trace.StatusCodeInvalidArgument: `"INVALID_ARGUMENT"`, - trace.StatusCodeDeadlineExceeded: `"DEADLINE_EXCEEDED"`, - trace.StatusCodeNotFound: `"NOT_FOUND"`, - trace.StatusCodeAlreadyExists: `"ALREADY_EXISTS"`, - trace.StatusCodePermissionDenied: `"PERMISSION_DENIED"`, - trace.StatusCodeResourceExhausted: `"RESOURCE_EXHAUSTED"`, - trace.StatusCodeFailedPrecondition: `"FAILED_PRECONDITION"`, - trace.StatusCodeAborted: `"ABORTED"`, - trace.StatusCodeOutOfRange: `"OUT_OF_RANGE"`, - trace.StatusCodeUnimplemented: `"UNIMPLEMENTED"`, - trace.StatusCodeInternal: 
`"INTERNAL"`, - trace.StatusCodeUnavailable: `"UNAVAILABLE"`, - trace.StatusCodeDataLoss: `"DATA_LOSS"`, - trace.StatusCodeUnauthenticated: `"UNAUTHENTICATED"`, + trace.StatusCodeOK: `OK`, + trace.StatusCodeCancelled: `CANCELLED`, + trace.StatusCodeUnknown: `UNKNOWN`, + trace.StatusCodeInvalidArgument: `INVALID_ARGUMENT`, + trace.StatusCodeDeadlineExceeded: `DEADLINE_EXCEEDED`, + trace.StatusCodeNotFound: `NOT_FOUND`, + trace.StatusCodeAlreadyExists: `ALREADY_EXISTS`, + trace.StatusCodePermissionDenied: `PERMISSION_DENIED`, + trace.StatusCodeResourceExhausted: `RESOURCE_EXHAUSTED`, + trace.StatusCodeFailedPrecondition: `FAILED_PRECONDITION`, + trace.StatusCodeAborted: `ABORTED`, + trace.StatusCodeOutOfRange: `OUT_OF_RANGE`, + trace.StatusCodeUnimplemented: `UNIMPLEMENTED`, + trace.StatusCodeInternal: `INTERNAL`, + trace.StatusCodeUnavailable: `UNAVAILABLE`, + trace.StatusCodeDataLoss: `DATA_LOSS`, + trace.StatusCodeUnauthenticated: `UNAUTHENTICATED`, } func isHealthEndpoint(path string) bool { diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index b31fa7a1b..a14c866a7 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -520,16 +520,16 @@ func TestStatusUnitTest(t *testing.T) { in int want trace.Status }{ - {200, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}}, - {204, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}}, - {100, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}}, - {500, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}}, - {404, trace.Status{Code: trace.StatusCodeNotFound, Message: `"NOT_FOUND"`}}, - {600, trace.Status{Code: trace.StatusCodeUnknown, Message: `"UNKNOWN"`}}, - {401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `"UNAUTHENTICATED"`}}, - {403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `"PERMISSION_DENIED"`}}, - {301, trace.Status{Code: trace.StatusCodeOK, Message: `"OK"`}}, - {501, trace.Status{Code: 
trace.StatusCodeUnimplemented, Message: `"UNIMPLEMENTED"`}}, + {200, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, + {204, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, + {100, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, + {500, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, + {404, trace.Status{Code: trace.StatusCodeNotFound, Message: `NOT_FOUND`}}, + {600, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, + {401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `UNAUTHENTICATED`}}, + {403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `PERMISSION_DENIED`}}, + {301, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, + {501, trace.Status{Code: trace.StatusCodeUnimplemented, Message: `UNIMPLEMENTED`}}, } for _, tt := range tests { From 53d438757d994ef2e7624cad3de6ac36764496fa Mon Sep 17 00:00:00 2001 From: Jean de Klerk Date: Tue, 11 Sep 2018 17:36:22 -0700 Subject: [PATCH 065/212] doc: add SetReportingPeriod recommendations (#910) --- stats/view/worker.go | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/stats/view/worker.go b/stats/view/worker.go index fef7bf513..9255d27d2 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -116,8 +116,12 @@ func record(tags *tag.Map, ms interface{}) { } // SetReportingPeriod sets the interval between reporting aggregated views in -// the program. If duration is less than or -// equal to zero, it enables the default behavior. +// the program. If duration is less than or equal to zero, it enables the +// default behavior. +// +// Note: each exporter makes different promises about what the lowest supported +// duration is. For example, the Stackdriver exporter recommends a value no +// lower than 1 minute. Consult each exporter per your needs. func SetReportingPeriod(d time.Duration) { // TODO(acetechnologist): ensure that the duration d is more than a certain // value. e.g. 
1s From 209434aebbce1f721cedc64c2c3ca402585bf70c Mon Sep 17 00:00:00 2001 From: Matt Ho Date: Thu, 13 Sep 2018 10:06:18 -0700 Subject: [PATCH 066/212] #762 enhanced output of example PrintExporter with a focus on clarity (#911) --- README.md | 1 + examples/exporter/exporter.go | 75 ++++++++++++++++++++++++- examples/http/helloworld_server/main.go | 7 +++ 3 files changed, 80 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 0cdfcea9e..e3a338e11 100644 --- a/README.md +++ b/README.md @@ -259,3 +259,4 @@ release in which the functionality was marked *Deprecated*. [exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite + diff --git a/examples/exporter/exporter.go b/examples/exporter/exporter.go index f9093103f..43bd73a66 100644 --- a/examples/exporter/exporter.go +++ b/examples/exporter/exporter.go @@ -15,22 +15,91 @@ package exporter // import "go.opencensus.io/examples/exporter" import ( - "log" + "encoding/hex" + "fmt" + "regexp" + "time" "go.opencensus.io/stats/view" "go.opencensus.io/trace" ) +// indent these many spaces +const indent = " " + +// reZero provides a simple way to detect an empty ID +var reZero = regexp.MustCompile(`^0+$`) + // PrintExporter is a stats and trace exporter that logs // the exported data to the console. +// +// The intent is help new users familiarize themselves with the +// capabilities of opencensus. +// +// This should NOT be used for production workloads. type PrintExporter struct{} // ExportView logs the view data. 
func (e *PrintExporter) ExportView(vd *view.Data) { - log.Println(vd) + for _, row := range vd.Rows { + fmt.Printf("%v %-45s", vd.End.Format("15:04:05"), vd.View.Name) + + switch v := row.Data.(type) { + case *view.DistributionData: + fmt.Printf("distribution: min=%.1f max=%.1f mean=%.1f", v.Min, v.Max, v.Mean) + case *view.CountData: + fmt.Printf("count: value=%v", v.Value) + case *view.SumData: + fmt.Printf("sum: value=%v", v.Value) + case *view.LastValueData: + fmt.Printf("last: value=%v", v.Value) + } + fmt.Println() + + for _, tag := range row.Tags { + fmt.Printf("%v- %v=%v\n", indent, tag.Key.Name(), tag.Value) + } + } } // ExportSpan logs the trace span. func (e *PrintExporter) ExportSpan(vd *trace.SpanData) { - log.Println(vd) + var ( + traceID = hex.EncodeToString(vd.SpanContext.TraceID[:]) + spanID = hex.EncodeToString(vd.SpanContext.SpanID[:]) + parentSpanID = hex.EncodeToString(vd.ParentSpanID[:]) + ) + fmt.Println() + fmt.Println("#----------------------------------------------") + fmt.Println() + fmt.Println("TraceID: ", traceID) + fmt.Println("SpanID: ", spanID) + if !reZero.MatchString(parentSpanID) { + fmt.Println("ParentSpanID:", parentSpanID) + } + + fmt.Println() + fmt.Printf("Span: %v\n", vd.Name) + fmt.Printf("Status: %v [%v]\n", vd.Status.Message, vd.Status.Code) + fmt.Printf("Elapsed: %v\n", vd.EndTime.Sub(vd.StartTime).Round(time.Millisecond)) + + if len(vd.Annotations) > 0 { + fmt.Println() + fmt.Println("Annotations:") + for _, item := range vd.Annotations { + fmt.Print(indent, item.Message) + for k, v := range item.Attributes { + fmt.Printf(" %v=%v", k, v) + } + fmt.Println() + } + } + + if len(vd.Attributes) > 0 { + fmt.Println() + fmt.Println("Attributes:") + for k, v := range vd.Attributes { + fmt.Printf("%v- %v=%v\n", indent, k, v) + } + } } diff --git a/examples/http/helloworld_server/main.go b/examples/http/helloworld_server/main.go index 1c99e428f..16ed55261 100644 --- a/examples/http/helloworld_server/main.go +++ 
b/examples/http/helloworld_server/main.go @@ -54,6 +54,13 @@ func main() { http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { fmt.Fprintf(w, "hello world") + // Provide an example of how spans can be annotated with metadata + _, span := trace.StartSpan(req.Context(), "child") + defer span.End() + span.Annotate([]trace.Attribute{trace.StringAttribute("key", "value")}, "something happened") + span.AddAttributes(trace.StringAttribute("hello", "world")) + time.Sleep(time.Millisecond * 125) + r, _ := http.NewRequest("GET", "https://example.com", nil) // Propagate the trace header info in the outgoing requests. From 79993219becaa7e29e3b60cb67f5b8e82dee11d6 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 13 Sep 2018 14:35:57 -0400 Subject: [PATCH 067/212] Tracestate propagate (#901) * Propagate tracestate * Fixed review comments. * Fixed leading/trailing OWS removal. * Refactor to create separate package to inject/extract tracestate for http. * Fix review comments. * removed unnecessary check and import comment. * Revert "removed unnecessary check and import comment." This reverts commit 5349341a2f537cca53fda10db25fdd5260afe445. * Revert "Fix review comments." This reverts commit 40c285864c24b750071b0fdec0d150b0fdad0b85. * Revert "Refactor to create separate package to inject/extract tracestate for http." This reverts commit 5574ce881c784466edb353f6e89d0b3d83d0bbf2. * shorten variable name. 
--- .../propagation/tracecontext/propagation.go | 73 ++++++++- .../tracecontext/propagation_test.go | 152 ++++++++++++++++++ 2 files changed, 219 insertions(+), 6 deletions(-) diff --git a/plugin/ochttp/propagation/tracecontext/propagation.go b/plugin/ochttp/propagation/tracecontext/propagation.go index e4f44eeab..a989e8a7b 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation.go +++ b/plugin/ochttp/propagation/tracecontext/propagation.go @@ -24,14 +24,21 @@ import ( "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" + "go.opencensus.io/trace/tracestate" + "regexp" ) const ( - supportedVersion = 0 - maxVersion = 254 - header = "traceparent" + supportedVersion = 0 + maxVersion = 254 + maxTracestateLen = 512 + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" + trimOWSRegexFmt = `^[\x09\x20]*(.*[^\x20\x09])[\x09\x20]*$` ) +var trimOWSRegExp = regexp.MustCompile(trimOWSRegexFmt) + var _ propagation.HTTPFormat = (*HTTPFormat)(nil) // HTTPFormat implements the TraceContext trace propagation format. @@ -39,7 +46,7 @@ type HTTPFormat struct{} // SpanContextFromRequest extracts a span context from incoming requests. func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(header) + h := req.Header.Get(traceparentHeader) if h == "" { return trace.SpanContext{}, false } @@ -87,15 +94,69 @@ func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanCon return trace.SpanContext{}, false } + sc.Tracestate = tracestateFromRequest(req) return sc, true } -// SpanContextToRequest modifies the given request to include a header. +// TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. +// Revisit to return additional boolean value to indicate parsing error when following issues +// are resolved. 
+// https://github.com/w3c/distributed-tracing/issues/172 +// https://github.com/w3c/distributed-tracing/issues/175 +func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { + h := req.Header.Get(tracestateHeader) + if h == "" { + return nil + } + + var entries []tracestate.Entry + pairs := strings.Split(h, ",") + hdrLenWithoutOWS := len(pairs) - 1 // Number of commas + for _, pair := range pairs { + matches := trimOWSRegExp.FindStringSubmatch(pair) + if matches == nil { + return nil + } + pair = matches[1] + hdrLenWithoutOWS += len(pair) + if hdrLenWithoutOWS > maxTracestateLen { + return nil + } + kv := strings.Split(pair, "=") + if len(kv) != 2 { + return nil + } + entries = append(entries, tracestate.Entry{Key: kv[0], Value: kv[1]}) + } + ts, err := tracestate.New(nil, entries...) + if err != nil { + return nil + } + + return ts +} + +func tracestateToRequest(sc trace.SpanContext, req *http.Request) { + var pairs = make([]string, 0, len(sc.Tracestate.Entries())) + if sc.Tracestate != nil { + for _, entry := range sc.Tracestate.Entries() { + pairs = append(pairs, strings.Join([]string{entry.Key, entry.Value}, "=")) + } + h := strings.Join(pairs, ",") + + if h != "" && len(h) <= maxTracestateLen { + req.Header.Set(tracestateHeader, h) + } + } +} + +// SpanContextToRequest modifies the given request to include traceparent and tracestate headers. 
func (f *HTTPFormat) SpanContextToRequest(sc trace.SpanContext, req *http.Request) { h := fmt.Sprintf("%x-%x-%x-%x", []byte{supportedVersion}, sc.TraceID[:], sc.SpanID[:], []byte{byte(sc.TraceOptions)}) - req.Header.Set(header, h) + req.Header.Set(traceparentHeader, h) + tracestateToRequest(sc, req) } diff --git a/plugin/ochttp/propagation/tracecontext/propagation_test.go b/plugin/ochttp/propagation/tracecontext/propagation_test.go index a9f02762f..c8ef00196 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation_test.go +++ b/plugin/ochttp/propagation/tracecontext/propagation_test.go @@ -15,11 +15,29 @@ package tracecontext import ( + "fmt" "net/http" "reflect" "testing" "go.opencensus.io/trace" + "go.opencensus.io/trace/tracestate" + "strings" +) + +var ( + tpHeader = "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01" + traceID = trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54} + spanID = trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183} + traceOpt = trace.TraceOptions(1) + oversizeValue = strings.Repeat("a", maxTracestateLen/2) + oversizeEntry1 = tracestate.Entry{Key: "foo", Value: oversizeValue} + oversizeEntry2 = tracestate.Entry{Key: "hello", Value: oversizeValue} + entry1 = tracestate.Entry{Key: "foo", Value: "bar"} + entry2 = tracestate.Entry{Key: "hello", Value: "world example"} + oversizeTs, _ = tracestate.New(nil, oversizeEntry1, oversizeEntry2) + defaultTs, _ = tracestate.New(nil, nil...) 
+ nonDefaultTs, _ = tracestate.New(nil, entry1, entry2) ) func TestHTTPFormat_FromRequest(t *testing.T) { @@ -113,3 +131,137 @@ func TestHTTPFormat_ToRequest(t *testing.T) { }) } } + +func TestHTTPFormatTracestate_FromRequest(t *testing.T) { + scWithNonDefaultTracestate := trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: traceOpt, + Tracestate: nonDefaultTs, + } + + scWithDefaultTracestate := trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: traceOpt, + Tracestate: defaultTs, + } + + tests := []struct { + name string + tpHeader string + tsHeader string + wantSc trace.SpanContext + wantOk bool + }{ + { + name: "tracestate invalid entries delimiter", + tpHeader: tpHeader, + tsHeader: "foo=bar;hello=world", + wantSc: scWithDefaultTracestate, + wantOk: true, + }, + { + name: "tracestate invalid key-value delimiter", + tpHeader: tpHeader, + tsHeader: "foo=bar,hello-world", + wantSc: scWithDefaultTracestate, + wantOk: true, + }, + { + name: "tracestate invalid value character", + tpHeader: tpHeader, + tsHeader: "foo=bar,hello=world example \u00a0 ", + wantSc: scWithDefaultTracestate, + wantOk: true, + }, + { + name: "tracestate blank key-value", + tpHeader: tpHeader, + tsHeader: "foo=bar, ", + wantSc: scWithDefaultTracestate, + wantOk: true, + }, + { + name: "tracestate oversize header", + tpHeader: tpHeader, + tsHeader: fmt.Sprintf("foo=%s,hello=%s", oversizeValue, oversizeValue), + wantSc: scWithDefaultTracestate, + wantOk: true, + }, + { + name: "tracestate valid", + tpHeader: tpHeader, + tsHeader: "foo=bar , hello=world example", + wantSc: scWithNonDefaultTracestate, + wantOk: true, + }, + } + + f := &HTTPFormat{} + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + req, _ := http.NewRequest("GET", "http://example.com", nil) + req.Header.Set("traceparent", tt.tpHeader) + req.Header.Set("tracestate", tt.tsHeader) + + gotSc, gotOk := f.SpanContextFromRequest(req) + if !reflect.DeepEqual(gotSc, tt.wantSc) { 
+ t.Errorf("HTTPFormat.FromRequest() gotTs = %v, want %v", gotSc.Tracestate, tt.wantSc.Tracestate) + } + if gotOk != tt.wantOk { + t.Errorf("HTTPFormat.FromRequest() gotOk = %v, want %v", gotOk, tt.wantOk) + } + }) + } +} + +func TestHTTPFormatTracestate_ToRequest(t *testing.T) { + tests := []struct { + name string + sc trace.SpanContext + wantHeader string + }{ + { + name: "valid span context with default tracestate", + sc: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: traceOpt, + }, + wantHeader: "", + }, + { + name: "valid span context with non default tracestate", + sc: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: traceOpt, + Tracestate: nonDefaultTs, + }, + wantHeader: "foo=bar,hello=world example", + }, + { + name: "valid span context with oversize tracestate", + sc: trace.SpanContext{ + TraceID: traceID, + SpanID: spanID, + TraceOptions: traceOpt, + Tracestate: oversizeTs, + }, + wantHeader: "", + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + f := &HTTPFormat{} + req, _ := http.NewRequest("GET", "http://example.com", nil) + f.SpanContextToRequest(tt.sc, req) + + h := req.Header.Get("tracestate") + if got, want := h, tt.wantHeader; got != want { + t.Errorf("HTTPFormat.ToRequest() tracestate header = %v, want %v", got, want) + } + }) + } +} From 8d56cf0ddb38dcaf22ea369dc8d86f461b24e10f Mon Sep 17 00:00:00 2001 From: Yang Song Date: Thu, 13 Sep 2018 15:36:33 -0700 Subject: [PATCH 068/212] Bump up version. (#914) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index 985297ee1..62f03486a 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.17.0" + return "0.18.0" } From 689e87b7b388b3d9173b3a5bc410349591a4bdef Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 13 Sep 2018 17:36:05 -0700 Subject: [PATCH 069/212] Upgrade Go versions used in CI; fix AppVeyor (#907) --- .travis.yml | 3 +-- appveyor.yml | 12 ++++++------ exporter/zipkin/zipkin_test.go | 10 +++++----- go.sum | 15 +++++++++++++++ 4 files changed, 27 insertions(+), 13 deletions(-) diff --git a/.travis.yml b/.travis.yml index 2d6daa6b2..b9ef77c4a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -2,7 +2,7 @@ language: go go: # 1.8 is tested by AppVeyor - - 1.10.x + - 1.11.x go_import_path: go.opencensus.io @@ -18,7 +18,6 @@ before_script: script: - embedmd -d README.md # Ensure embedded code is up-to-date - - dep ensure -v - go build ./... # Ensure dependency updates don't break build - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - go vet ./... diff --git a/appveyor.yml b/appveyor.yml index 5aa067183..98057888a 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -5,8 +5,10 @@ platform: x64 clone_folder: c:\gopath\src\go.opencensus.io environment: - GOPATH: c:\gopath - GOVERSION: 1.8 + GOPATH: 'c:\gopath' + GOVERSION: '1.11' + GO111MODULE: 'on' + CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% @@ -18,7 +20,5 @@ deploy: false test_script: - cd %APPVEYOR_BUILD_FOLDER% - - gofmt -w . - - go get -v -t .\... - - go test -race -v .\... - - go vet .\... + - go build -v .\... + - go test -v .\... 
# No -race because cgo is disabled diff --git a/exporter/zipkin/zipkin_test.go b/exporter/zipkin/zipkin_test.go index 24194e02c..2d5f81cc1 100644 --- a/exporter/zipkin/zipkin_test.go +++ b/exporter/zipkin/zipkin_test.go @@ -110,11 +110,11 @@ func TestExport(t *testing.T) { }, }, Tags: map[string]string{ - "stringkey": "value", - "intkey": "42", - "boolkey1": "true", - "boolkey2": "false", - "error": "INVALID_ARGUMENT", + "stringkey": "value", + "intkey": "42", + "boolkey1": "true", + "boolkey2": "false", + "error": "INVALID_ARGUMENT", "opencensus.status_description": "error", }, }, diff --git a/go.sum b/go.sum index 91d0af1e4..f36080d15 100644 --- a/go.sum +++ b/go.sum @@ -1,21 +1,36 @@ git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= github.com/prometheus/client_golang v0.8.0/go.mod 
h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= 
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= From 332f678cadb824cb710f7dc5c3f93235ec572690 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 17 Sep 2018 15:31:31 -0700 Subject: [PATCH 070/212] Check measures on record rather than measurement creation (#916) Previously, we short-circuited recording with a check at measurement creation time. This could produce incorrect behavior if users construct measurements ahead of time and record them multiple times. This change moves the check into the record call itself. --- exporter/prometheus/prometheus_test.go | 2 +- go.mod | 1 + go.sum | 2 + stats/measure.go | 17 +++++++- stats/measure_float64.go | 22 +---------- stats/measure_int64.go | 22 +---------- stats/record.go | 14 ++++--- stats/view/doc.go | 55 +++++++++++++------------- stats/view/view_test.go | 36 +++++++++++++++++ 9 files changed, 96 insertions(+), 75 deletions(-) diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 371da56d6..69985209f 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -307,7 +307,7 @@ func TestCumulativenessFromHistograms(t *testing.T) { } ctx := context.Background() - ms := make([]stats.Measurement, len(values)) + ms := make([]stats.Measurement, 0, len(values)) for _, value := range values { mx := m.M(value) ms = append(ms, mx) diff --git a/go.mod b/go.mod index ae7cbc15c..f9cc7692b 100644 --- a/go.mod +++ b/go.mod @@ -4,6 +4,7 @@ require ( git.apache.org/thrift.git 
v0.0.0-20180902110319-2566ecd5d999 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/golang/protobuf v1.2.0 + github.com/google/go-cmp v0.2.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/openzipkin/zipkin-go v0.1.1 github.com/prometheus/client_golang v0.8.0 diff --git a/go.sum b/go.sum index f36080d15..eab7da109 100644 --- a/go.sum +++ b/go.sum @@ -5,6 +5,8 @@ github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLM github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= +github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= diff --git a/stats/measure.go b/stats/measure.go index 7b4b49c67..64d02b196 100644 --- a/stats/measure.go +++ b/stats/measure.go @@ -68,6 +68,21 @@ func (m *measureDescriptor) subscribed() bool { return atomic.LoadInt32(&m.subs) == 1 } +// Name returns the name of the measure. +func (m *measureDescriptor) Name() string { + return m.name +} + +// Description returns the description of the measure. +func (m *measureDescriptor) Description() string { + return m.description +} + +// Unit returns the unit of the measure. 
+func (m *measureDescriptor) Unit() string { + return m.unit +} + var ( mu sync.RWMutex measures = make(map[string]*measureDescriptor) @@ -94,7 +109,7 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor { // provides M to convert an int64 into a measurement. type Measurement struct { v float64 - m Measure + m *measureDescriptor } // Value returns the value of the Measurement as a float64. diff --git a/stats/measure_float64.go b/stats/measure_float64.go index da4b5a83b..acedb21c4 100644 --- a/stats/measure_float64.go +++ b/stats/measure_float64.go @@ -17,31 +17,13 @@ package stats // Float64Measure is a measure for float64 values. type Float64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Float64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. -func (m *Float64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. -func (m *Float64Measure) Unit() string { - return m.md.unit + *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - if !m.md.subscribed() { - return Measurement{} - } - return Measurement{m: m, v: v} + return Measurement{m: m.measureDescriptor, v: v} } // Float64 creates a new measure for float64 values. diff --git a/stats/measure_int64.go b/stats/measure_int64.go index 5fedaad05..c4243ba74 100644 --- a/stats/measure_int64.go +++ b/stats/measure_int64.go @@ -17,31 +17,13 @@ package stats // Int64Measure is a measure for int64 values. type Int64Measure struct { - md *measureDescriptor -} - -// Name returns the name of the measure. -func (m *Int64Measure) Name() string { - return m.md.name -} - -// Description returns the description of the measure. -func (m *Int64Measure) Description() string { - return m.md.description -} - -// Unit returns the unit of the measure. 
-func (m *Int64Measure) Unit() string { - return m.md.unit + *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. func (m *Int64Measure) M(v int64) Measurement { - if !m.md.subscribed() { - return Measurement{} - } - return Measurement{m: m, v: float64(v)} + return Measurement{m: m.measureDescriptor, v: float64(v)} } // Int64 creates a new measure for int64 values. diff --git a/stats/record.go b/stats/record.go index 98865ff69..9a6d7fea1 100644 --- a/stats/record.go +++ b/stats/record.go @@ -30,15 +30,19 @@ func init() { } } -// Record records one or multiple measurements with the same tags at once. +// Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { + recorder := internal.DefaultRecorder + if recorder == nil { + return + } if len(ms) == 0 { return } - var record bool + record := false for _, m := range ms { - if (m != Measurement{}) { + if m.m.subscribed() { record = true break } @@ -46,7 +50,5 @@ func Record(ctx context.Context, ms ...Measurement) { if !record { return } - if internal.DefaultRecorder != nil { - internal.DefaultRecorder(tag.FromContext(ctx), ms) - } + recorder(tag.FromContext(ctx), ms) } diff --git a/stats/view/doc.go b/stats/view/doc.go index 856fb4e15..dced225c3 100644 --- a/stats/view/doc.go +++ b/stats/view/doc.go @@ -13,33 +13,34 @@ // limitations under the License. // -/* -Package view contains support for collecting and exposing aggregates over stats. - -In order to collect measurements, views need to be defined and registered. -A view allows recorded measurements to be filtered and aggregated over a time window. - -All recorded measurements can be filtered by a list of tags. - -OpenCensus provides several aggregation methods: count, distribution and sum. -Count aggregation only counts the number of measurement points. 
Distribution -aggregation provides statistical summary of the aggregated data. Sum distribution -sums up the measurement points. Aggregations are cumulative. - -Users can dynamically create and delete views. - -Libraries can export their own views and claim the view names -by registering them themselves. - -Exporting - -Collected and aggregated data can be exported to a metric collection -backend by registering its exporter. - -Multiple exporters can be registered to upload the data to various -different backends. Users need to unregister the exporters once they -no longer are needed. -*/ +// Package view contains support for collecting and exposing aggregates over stats. +// +// In order to collect measurements, views need to be defined and registered. +// A view allows recorded measurements to be filtered and aggregated. +// +// All recorded measurements can be grouped by a list of tags. +// +// OpenCensus provides several aggregation methods: Count, Distribution and Sum. +// +// Count only counts the number of measurement points recorded. +// Distribution provides statistical summary of the aggregated data by counting +// how many recorded measurements fall into each bucket. +// Sum adds up the measurement values. +// LastValue just keeps track of the most recently recorded measurement value. +// All aggregations are cumulative. +// +// Views can be registerd and unregistered at any time during program execution. +// +// Libraries can define views but it is recommended that in most cases registering +// views be left up to applications. +// +// Exporting +// +// Collected and aggregated data can be exported to a metric collection +// backend by registering its exporter. +// +// Multiple exporters can be registered to upload the data to various +// different back ends. 
package view // import "go.opencensus.io/stats/view" // TODO(acetechnologist): Add a link to the language independent OpenCensus diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 45a4bd341..c99927779 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -399,3 +399,39 @@ func TestRegisterUnregisterParity(t *testing.T) { } } } + +func TestRegisterAfterMeasurement(t *testing.T) { + // Tests that we can register views after measurements are created and + // they still take effect. + + m := stats.Int64(t.Name(), "", stats.UnitDimensionless) + mm := m.M(1) + ctx := context.Background() + + stats.Record(ctx, mm) + v := &View{ + Measure: m, + Aggregation: Count(), + } + if err := Register(v); err != nil { + t.Fatal(err) + } + + rows, err := RetrieveData(v.Name) + if err != nil { + t.Fatal(err) + } + if len(rows) > 0 { + t.Error("View should not have data") + } + + stats.Record(ctx, mm) + + rows, err = RetrieveData(v.Name) + if err != nil { + t.Fatal(err) + } + if len(rows) == 0 { + t.Error("View should have data") + } +} From 3d73a6ce623ed9ea81d474461e9aea31c78b483b Mon Sep 17 00:00:00 2001 From: Ashwin Chandrasekar <42815627+achandras@users.noreply.github.com> Date: Tue, 18 Sep 2018 20:10:58 -0400 Subject: [PATCH 071/212] Jaeger: allow other endpoint URL paths than the default (#915) --- exporter/jaeger/jaeger.go | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index 34f69e455..d108cc039 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -37,8 +37,14 @@ const defaultServiceName = "OpenCensus" type Options struct { // Endpoint is the Jaeger HTTP Thrift endpoint. // For example, http://localhost:14268. + // + // Deprecated: Use CollectorEndpoint instead. Endpoint string + // CollectorEndpoint is the full url to the Jaeger HTTP Thrift collector. 
+ // For example, http://localhost:14268/api/traces + CollectorEndpoint string + // AgentEndpoint instructs exporter to send spans to jaeger-agent at this address. // For example, localhost:6831. AgentEndpoint string @@ -68,15 +74,18 @@ type Options struct { // NewExporter returns a trace.Exporter implementation that exports // the collected spans to Jaeger. func NewExporter(o Options) (*Exporter, error) { - endpoint := o.Endpoint - if endpoint == "" && o.AgentEndpoint == "" { + if o.Endpoint == "" && o.CollectorEndpoint == "" && o.AgentEndpoint == "" { return nil, errors.New("missing endpoint for Jaeger exporter") } + var endpoint string var client *agentClientUDP var err error - if endpoint != "" { - endpoint = endpoint + "/api/traces?format=jaeger.thrift" + if o.Endpoint != "" { + endpoint = o.Endpoint + "/api/traces?format=jaeger.thrift" + log.Printf("Endpoint has been deprecated. Please use CollectorEndpoint instead.") + } else if o.CollectorEndpoint != "" { + endpoint = o.CollectorEndpoint } else { client, err = newAgentClientUDP(o.AgentEndpoint, udpPacketMaxLength) if err != nil { From 3500eaa0b5052e968a22daaf854342bfd8717854 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Di=C3=B3genes=20Falc=C3=A3o?= Date: Wed, 19 Sep 2018 14:41:26 -0300 Subject: [PATCH 072/212] Remove unused `exporterutil` package (#919) Fixes: #904 --- exporterutil/version.go | 27 --------------------------- 1 file changed, 27 deletions(-) delete mode 100644 exporterutil/version.go diff --git a/exporterutil/version.go b/exporterutil/version.go deleted file mode 100644 index 8572061fe..000000000 --- a/exporterutil/version.go +++ /dev/null @@ -1,27 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package exporterutil contains common utilities for exporter implementations. -// -// Deprecated: Don't use this package. -package exporterutil - -import "go.opencensus.io" - -// Version is the current release version of OpenCensus in use. It is made -// available for exporters to include in User-Agent-like metadata. -// Deprecated: Use opencensus.Version(). -var Version = opencensus.Version() - -// TODO(jbd): Remove this package at the next release. From 572ae0b6fd8649e8c977bf23283f20b8beb607ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Di=C3=B3genes=20Falc=C3=A3o?= Date: Thu, 20 Sep 2018 14:21:59 -0300 Subject: [PATCH 073/212] [travis] Run test suite against 386 arch (#920) --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index b9ef77c4a..73c8571c3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -22,5 +22,6 @@ script: - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - go vet ./... - go test -v -race $PKGS # Run all the tests with the race detector enabled + - GOARCH=386 go test -v $PKGS # Run all tests against a 386 architecture - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' - go run internal/check/version.go From 2269ed7aa5777d1157c9674a6079207a0f32b68a Mon Sep 17 00:00:00 2001 From: JBD Date: Fri, 21 Sep 2018 15:54:12 -0700 Subject: [PATCH 074/212] Add stats.RecordWithTags (#918) Fixes #834. 
--- plugin/ocgrpc/stats_common.go | 15 +++++++++------ plugin/ochttp/client_stats.go | 5 +++-- plugin/ochttp/server.go | 3 +-- stats/record.go | 14 ++++++++++++++ 4 files changed, 27 insertions(+), 10 deletions(-) diff --git a/plugin/ocgrpc/stats_common.go b/plugin/ocgrpc/stats_common.go index 119bbda9b..1737809e7 100644 --- a/plugin/ocgrpc/stats_common.go +++ b/plugin/ocgrpc/stats_common.go @@ -142,18 +142,21 @@ func handleRPCEnd(ctx context.Context, s *stats.End) { latencyMillis := float64(elapsedTime) / float64(time.Millisecond) if s.Client { - ctx, _ = tag.New(ctx, - tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st)) - ocstats.Record(ctx, + ocstats.RecordWithTags(ctx, + []tag.Mutator{ + tag.Upsert(KeyClientMethod, methodName(d.method)), + tag.Upsert(KeyClientStatus, st), + }, ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), ClientRoundtripLatency.M(latencyMillis)) } else { - ctx, _ = tag.New(ctx, tag.Upsert(KeyServerStatus, st)) - ocstats.Record(ctx, + ocstats.RecordWithTags(ctx, + []tag.Mutator{ + tag.Upsert(KeyServerStatus, st), + }, ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), diff --git a/plugin/ochttp/client_stats.go b/plugin/ochttp/client_stats.go index 9b286b929..4a1ea9332 100644 --- a/plugin/ochttp/client_stats.go +++ b/plugin/ochttp/client_stats.go @@ -99,8 +99,9 @@ func (t *tracker) end() { if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } - ctx, _ := tag.New(t.ctx, tag.Upsert(StatusCode, strconv.Itoa(t.statusCode))) - stats.Record(ctx, m...) + stats.RecordWithTags(t.ctx, []tag.Mutator{ + tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + }, m...) 
}) } diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index ea2e3e288..0ecbba37e 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -179,8 +179,7 @@ func (t *trackingResponseWriter) end(tags *addedTags) { allTags := make([]tag.Mutator, len(tags.t)+1) allTags[0] = tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)) copy(allTags[1:], tags.t) - ctx, _ := tag.New(t.ctx, allTags...) - stats.Record(ctx, m...) + stats.RecordWithTags(t.ctx, allTags, m...) }) } diff --git a/stats/record.go b/stats/record.go index 9a6d7fea1..e489009cb 100644 --- a/stats/record.go +++ b/stats/record.go @@ -52,3 +52,17 @@ func Record(ctx context.Context, ms ...Measurement) { } recorder(tag.FromContext(ctx), ms) } + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. +func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + ctx, err := tag.New(ctx, mutators...) + if err != nil { + return err + } + Record(ctx, ms...) + return nil +} From 944633c701cda8be3f5de1ed5cd3c584c22639cf Mon Sep 17 00:00:00 2001 From: JBD Date: Fri, 21 Sep 2018 16:43:24 -0700 Subject: [PATCH 075/212] Add Honeycomb exporter to README (#925) --- README.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index e3a338e11..d3fbc32b8 100644 --- a/README.md +++ b/README.md @@ -56,6 +56,7 @@ can implement their own exporters by implementing the exporter interfaces * [AWS X-Ray][exporter-xray] for traces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats +* [HoneyComb][exporter-honeycomb] for traces ## Overview @@ -259,4 +260,4 @@ release in which the functionality was marked *Deprecated*. 
[exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite - +[exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter From c50972bda57246b02f6bbb5aa53c6e3572b16a47 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 24 Sep 2018 17:10:49 -0700 Subject: [PATCH 076/212] Bring ochttp client views in line with spec (#929) --- go.mod | 4 + go.sum | 9 ++ plugin/ochttp/client_stats.go | 11 ++- plugin/ochttp/client_test.go | 99 ++++++++++++++++++- plugin/ochttp/example_test.go | 13 ++- .../propagation/tracecontext/propagation.go | 2 +- .../tracecontext/propagation_test.go | 2 +- plugin/ochttp/stats.go | 96 ++++++++++++++++-- plugin/ochttp/stats_test.go | 88 +++++++++++++++++ 9 files changed, 304 insertions(+), 20 deletions(-) create mode 100644 plugin/ochttp/stats_test.go diff --git a/go.mod b/go.mod index f9cc7692b..1236f4c2f 100644 --- a/go.mod +++ b/go.mod @@ -3,8 +3,11 @@ module go.opencensus.io require ( git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 + github.com/ghodss/yaml v1.0.0 // indirect + github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 + github.com/grpc-ecosystem/grpc-gateway v1.5.0 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/openzipkin/zipkin-go v0.1.1 github.com/prometheus/client_golang v0.8.0 @@ -18,4 +21,5 @@ require ( google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf google.golang.org/genproto v0.0.0-20180831171423-11092d34479b google.golang.org/grpc v1.14.0 + gopkg.in/yaml.v2 v2.2.1 // indirect ) diff --git a/go.sum b/go.sum index eab7da109..6765bc031 100644 --- a/go.sum +++ b/go.sum @@ -3,10 +3,16 @@ git.apache.org/thrift.git 
v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0i git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= +github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= +github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= @@ -36,3 +42,6 @@ google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXt google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/plugin/ochttp/client_stats.go b/plugin/ochttp/client_stats.go index 4a1ea9332..066ebb87f 100644 --- a/plugin/ochttp/client_stats.go +++ b/plugin/ochttp/client_stats.go @@ -34,8 +34,11 @@ type statsTransport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), + tag.Upsert(KeyClientHost, req.URL.Host), tag.Upsert(Host, req.URL.Host), + tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), + tag.Upsert(KeyClientMethod, req.Method), tag.Upsert(Method, req.Method)) req = req.WithContext(ctx) track := &tracker{ @@ -92,15 +95,21 @@ var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { + latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) m := []stats.Measurement{ - ClientLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), + ClientSentBytes.M(t.reqSize), + ClientReceivedBytes.M(t.respSize), + ClientRoundtripLatency.M(latencyMs), + ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), } if t.reqSize >= 0 { m = append(m, ClientRequestBytes.M(t.reqSize)) } + stats.RecordWithTags(t.ctx, []tag.Mutator{ tag.Upsert(StatusCode, strconv.Itoa(t.statusCode)), + tag.Upsert(KeyClientStatus, strconv.Itoa(t.statusCode)), }, m...) 
}) } diff --git a/plugin/ochttp/client_test.go b/plugin/ochttp/client_test.go index 5bed75f34..97d15ab97 100644 --- a/plugin/ochttp/client_test.go +++ b/plugin/ochttp/client_test.go @@ -30,7 +30,104 @@ import ( const reqCount = 5 -func TestClient(t *testing.T) { +func TestClientNew(t *testing.T) { + server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { + resp.Write([]byte("Hello, world!")) + })) + defer server.Close() + + if err := view.Register( + ochttp.ClientSentBytesDistribution, + ochttp.ClientReceivedBytesDistribution, + ochttp.ClientRoundtripLatencyDistribution, + ochttp.ClientCompletedCount, + ); err != nil { + t.Fatalf("Failed to register ochttp.DefaultClientViews error: %v", err) + } + + views := []string{ + "opencensus.io/http/client/sent_bytes", + "opencensus.io/http/client/received_bytes", + "opencensus.io/http/client/roundtrip_latency", + "opencensus.io/http/client/completed_count", + } + for _, name := range views { + v := view.Find(name) + if v == nil { + t.Errorf("view not found %q", name) + continue + } + } + + var wg sync.WaitGroup + var tr ochttp.Transport + errs := make(chan error, reqCount) + wg.Add(reqCount) + + for i := 0; i < reqCount; i++ { + go func() { + defer wg.Done() + req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) + if err != nil { + errs <- fmt.Errorf("error creating request: %v", err) + } + resp, err := tr.RoundTrip(req) + if err != nil { + errs <- fmt.Errorf("response error: %v", err) + } + if err := resp.Body.Close(); err != nil { + errs <- fmt.Errorf("error closing response body: %v", err) + } + if got, want := resp.StatusCode, 200; got != want { + errs <- fmt.Errorf("resp.StatusCode=%d; wantCount %d", got, want) + } + }() + } + + go func() { + wg.Wait() + close(errs) + }() + + for err := range errs { + if err != nil { + t.Fatal(err) + } + } + + for _, viewName := range views { + v := view.Find(viewName) + if v == nil { + t.Errorf("view not found %q", 
viewName) + continue + } + rows, err := view.RetrieveData(v.Name) + if err != nil { + t.Error(err) + continue + } + if got, want := len(rows), 1; got != want { + t.Errorf("len(%q) = %d; want %d", viewName, got, want) + continue + } + data := rows[0].Data + var count int64 + switch data := data.(type) { + case *view.CountData: + count = data.Value + case *view.DistributionData: + count = data.Count + default: + t.Errorf("Unkown data type: %v", data) + continue + } + if got := count; got != reqCount { + t.Fatalf("%s = %d; want %d", viewName, got, reqCount) + } + } +} + +func TestClientOld(t *testing.T) { server := httptest.NewServer(http.HandlerFunc(func(resp http.ResponseWriter, req *http.Request) { resp.Write([]byte("Hello, world!")) })) diff --git a/plugin/ochttp/example_test.go b/plugin/ochttp/example_test.go index bf060d6c2..bb115abb9 100644 --- a/plugin/ochttp/example_test.go +++ b/plugin/ochttp/example_test.go @@ -32,15 +32,14 @@ func ExampleTransport() { if err := view.Register( // Register a few default views. - ochttp.ClientRequestCountByMethod, - ochttp.ClientResponseCountByStatusCode, - ochttp.ClientLatencyView, - + ochttp.ClientSentBytesDistribution, + ochttp.ClientReceivedBytesDistribution, + ochttp.ClientRoundtripLatencyDistribution, // Register a custom view. 
&view.View{ - Name: "httpclient_latency_by_hostpath", - TagKeys: []tag.Key{ochttp.Host, ochttp.Path}, - Measure: ochttp.ClientLatency, + Name: "httpclient_latency_by_path", + TagKeys: []tag.Key{ochttp.KeyClientPath}, + Measure: ochttp.ClientRoundtripLatency, Aggregation: ochttp.DefaultLatencyDistribution, }, ); err != nil { diff --git a/plugin/ochttp/propagation/tracecontext/propagation.go b/plugin/ochttp/propagation/tracecontext/propagation.go index a989e8a7b..f6faff079 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation.go +++ b/plugin/ochttp/propagation/tracecontext/propagation.go @@ -20,12 +20,12 @@ import ( "encoding/hex" "fmt" "net/http" + "regexp" "strings" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" "go.opencensus.io/trace/tracestate" - "regexp" ) const ( diff --git a/plugin/ochttp/propagation/tracecontext/propagation_test.go b/plugin/ochttp/propagation/tracecontext/propagation_test.go index c8ef00196..5b2a2d72d 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation_test.go +++ b/plugin/ochttp/propagation/tracecontext/propagation_test.go @@ -18,11 +18,11 @@ import ( "fmt" "net/http" "reflect" + "strings" "testing" "go.opencensus.io/trace" "go.opencensus.io/trace/tracestate" - "strings" ) var ( diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index 21d651230..d99b98d12 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -22,10 +22,32 @@ import ( // The following client HTTP measures are supported for use in custom views. var ( - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. 
+ ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + // Deprecated: Use ClientSentBytes. + ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + // Deprecated: Use ClientReceivedBytes. ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) + // Deprecated: Use ClientRoundtripLatency. + ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) +) + +var ( + ClientSentBytes = stats.Int64( + "opencensus.io/http/client/sent_bytes", + "Total bytes sent in request body (not including headers)", + stats.UnitBytes, + ) + ClientReceivedBytes = stats.Int64( + "opencensus.io/http/client/received_bytes", + "Total bytes received in response bodies (not including headers but including error responses with bodies)", + stats.UnitBytes, + ) + ClientRoundtripLatency = stats.Float64( + "opencensus.io/http/client/roundtrip_latency", + "Time between first byte of request headers sent to last byte of response received, or terminal error", + stats.UnitMilliseconds, + ) ) // The following server HTTP measures are supported for use in custom views: @@ -67,6 +89,18 @@ var ( KeyServerRoute, _ = tag.NewKey("http_server_route") ) +// Client tag keys. +var ( + // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). + KeyClientMethod, _ = tag.NewKey("http_client_method") + // KeyClientPath is the URL path (not including query string). + KeyClientPath, _ = tag.NewKey("http_client_path") + // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. 
+ KeyClientStatus, _ = tag.NewKey("http_client_status") + // KeyClientHost is the value of the request Host header. + KeyClientHost, _ = tag.NewKey("http_client_host") +) + // Default distributions used by views in this package. var ( DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) @@ -74,8 +108,43 @@ var ( ) // Package ochttp provides some convenience views. -// You need to register the views for data to actually be collected. +// You still need to register these views for data to actually be collected. var ( + ClientSentBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/sent_bytes", + Measure: ClientSentBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes sent in request body (not including headers), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientReceivedBytesDistribution = &view.View{ + Name: "opencensus.io/http/client/received_bytes", + Measure: ClientReceivedBytes, + Aggregation: DefaultSizeDistribution, + Description: "Total bytes received in response bodies (not including headers but including error responses with bodies), by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientRoundtripLatencyDistribution = &view.View{ + Name: "opencensus.io/http/client/roundtrip_latency", + Measure: ClientRoundtripLatency, + Aggregation: DefaultLatencyDistribution, + Description: "End-to-end latency, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } + + ClientCompletedCount = &view.View{ + Name: "opencensus.io/http/client/completed_count", + Measure: ClientRoundtripLatency, + Aggregation: view.Count(), + Description: "Count of completed requests, by HTTP method and response status", + TagKeys: []tag.Key{KeyClientMethod, KeyClientStatus}, + } +) + +var ( + // Deprecated: 
No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ Name: "opencensus.io/http/client/request_count", Description: "Count of HTTP requests started", @@ -83,43 +152,50 @@ var ( Aggregation: view.Count(), } + // Deprecated: Use ClientSentBytesDistribution. ClientRequestBytesView = &view.View{ Name: "opencensus.io/http/client/request_bytes", Description: "Size distribution of HTTP request body", - Measure: ClientRequestBytes, + Measure: ClientSentBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientReceivedBytesDistribution. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", - Measure: ClientResponseBytes, + Measure: ClientReceivedBytes, Aggregation: DefaultSizeDistribution, } + // Deprecated: Use ClientRoundtripLatencyDistribution. ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: DefaultLatencyDistribution, } + // Deprecated: Use ClientCompletedCount. ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", TagKeys: []tag.Key{Method}, - Measure: ClientRequestCount, + Measure: ClientSentBytes, Aggregation: view.Count(), } + // Deprecated: Use ClientCompletedCount. 
ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", TagKeys: []tag.Key{StatusCode}, - Measure: ClientLatency, + Measure: ClientRoundtripLatency, Aggregation: view.Count(), } +) +var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", Description: "Count of HTTP requests started", @@ -166,6 +242,7 @@ var ( ) // DefaultClientViews are the default client views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultClientViews = []*view.View{ ClientRequestCountView, ClientRequestBytesView, @@ -176,6 +253,7 @@ var DefaultClientViews = []*view.View{ } // DefaultServerViews are the default server views provided by this package. +// Deprecated: No replacement. Register the views you would like individually. var DefaultServerViews = []*view.View{ ServerRequestCountView, ServerRequestBytesView, diff --git a/plugin/ochttp/stats_test.go b/plugin/ochttp/stats_test.go new file mode 100644 index 000000000..ce3621bf3 --- /dev/null +++ b/plugin/ochttp/stats_test.go @@ -0,0 +1,88 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package ochttp + +import ( + "reflect" + "strings" + "testing" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +func TestClientViews(t *testing.T) { + for _, v := range []*view.View{ + ClientSentBytesDistribution, + ClientReceivedBytesDistribution, + ClientRoundtripLatencyDistribution, + ClientCompletedCount, + } { + + if v.Measure == nil { + t.Fatalf("nil measure: %v", v) + } + if m := v.Measure.Name(); !strings.HasPrefix(m, "opencensus.io/http/client/") { + t.Errorf("Unexpected measure name prefix: %v", v) + } + if v.Name == "" { + t.Errorf("Empty name: %v", v) + } + if !strings.HasPrefix(v.Name, "opencensus.io/http/client/") { + t.Errorf("Unexpected prefix: %s", v.Name) + } + if v.Description == "" { + t.Errorf("Empty description: %s", v.Name) + } + if !reflect.DeepEqual(v.TagKeys, []tag.Key{KeyClientMethod, KeyClientStatus}) { + t.Errorf("Unexpected tags for client view %s: %v", v.Name, v.TagKeys) + } + if strings.HasSuffix(v.Description, ".") { + t.Errorf("View description should not end with a period: %s", v.Name) + } + } +} + +func TestClientTagKeys(t *testing.T) { + for _, k := range []tag.Key{ + KeyClientStatus, + KeyClientMethod, + KeyClientHost, + KeyClientPath, + } { + if !strings.HasPrefix(k.Name(), "http_client_") { + t.Errorf("Unexpected prefix: %s", k.Name()) + } + } +} + +func TestClientMeasures(t *testing.T) { + for _, m := range []stats.Measure{ + ClientSentBytes, + ClientReceivedBytes, + ClientRoundtripLatency, + } { + if !strings.HasPrefix(m.Name(), "opencensus.io/http/client/") { + t.Errorf("Unexpected prefix: %v", m) + } + if strings.HasSuffix(m.Description(), ".") { + t.Errorf("View description should not end with a period: %s", m.Name()) + } + if len(m.Unit()) == 0 { + t.Errorf("No unit: %s", m.Name()) + } + } +} From 2b3f77574c056d7abc6d38e16bea03533908e85c Mon Sep 17 00:00:00 2001 From: "Julian V. 
Modesto" Date: Tue, 25 Sep 2018 13:05:41 -0400 Subject: [PATCH 077/212] Set status code attribute for ochttp.Server (#933) --- plugin/ochttp/server.go | 1 + 1 file changed, 1 insertion(+) diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 0ecbba37e..492b74b9e 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -168,6 +168,7 @@ func (t *trackingResponseWriter) end(tags *addedTags) { span := trace.FromContext(t.ctx) span.SetStatus(TraceStatus(t.statusCode, t.statusLine)) + span.AddAttributes(trace.Int64Attribute(StatusCodeAttribute, int64(t.statusCode))) m := []stats.Measurement{ ServerLatency.M(float64(time.Since(t.start)) / float64(time.Millisecond)), From ae11cd04b7789fa938bb4f0e696fd6bd76463fa4 Mon Sep 17 00:00:00 2001 From: Reiley Yang Date: Thu, 27 Sep 2018 10:25:11 -0700 Subject: [PATCH 078/212] Improve W3C Trace Context compliance (#934) --- .../propagation/tracecontext/propagation.go | 59 +++++++++++++------ .../tracecontext/propagation_test.go | 22 +++---- 2 files changed, 53 insertions(+), 28 deletions(-) diff --git a/plugin/ochttp/propagation/tracecontext/propagation.go b/plugin/ochttp/propagation/tracecontext/propagation.go index f6faff079..65ab1e996 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation.go +++ b/plugin/ochttp/propagation/tracecontext/propagation.go @@ -20,6 +20,7 @@ import ( "encoding/hex" "fmt" "net/http" + "net/textproto" "regexp" "strings" @@ -46,48 +47,54 @@ type HTTPFormat struct{} // SpanContextFromRequest extracts a span context from incoming requests. 
func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanContext, ok bool) { - h := req.Header.Get(traceparentHeader) - if h == "" { + h, ok := getRequestHeader(req, traceparentHeader, false) + if !ok { return trace.SpanContext{}, false } sections := strings.Split(h, "-") - if len(sections) < 3 { + if len(sections) < 4 { return trace.SpanContext{}, false } + if len(sections[0]) != 2 { + return trace.SpanContext{}, false + } ver, err := hex.DecodeString(sections[0]) if err != nil { return trace.SpanContext{}, false } - if len(ver) == 0 || int(ver[0]) > supportedVersion || int(ver[0]) > maxVersion { + version := int(ver[0]) + if version > maxVersion { return trace.SpanContext{}, false } - tid, err := hex.DecodeString(sections[1]) - if err != nil { + if version == 0 && len(sections) != 4 { return trace.SpanContext{}, false } - if len(tid) != 16 { + + if len(sections[1]) != 32 { + return trace.SpanContext{}, false + } + tid, err := hex.DecodeString(sections[1]) + if err != nil { return trace.SpanContext{}, false } copy(sc.TraceID[:], tid) - sid, err := hex.DecodeString(sections[2]) - if err != nil { + if len(sections[2]) != 16 { return trace.SpanContext{}, false } - if len(sid) != 8 { + sid, err := hex.DecodeString(sections[2]) + if err != nil { return trace.SpanContext{}, false } copy(sc.SpanID[:], sid) - if len(sections) == 4 { - opts, err := hex.DecodeString(sections[3]) - if err != nil || len(opts) < 1 { - return trace.SpanContext{}, false - } - sc.TraceOptions = trace.TraceOptions(opts[0]) + opts, err := hex.DecodeString(sections[3]) + if err != nil || len(opts) < 1 { + return trace.SpanContext{}, false } + sc.TraceOptions = trace.TraceOptions(opts[0]) // Don't allow all zero trace or span ID. 
if sc.TraceID == [16]byte{} || sc.SpanID == [8]byte{} { @@ -98,13 +105,31 @@ func (f *HTTPFormat) SpanContextFromRequest(req *http.Request) (sc trace.SpanCon return sc, true } +// getRequestHeader returns a combined header field according to RFC7230 section 3.2.2. +// If commaSeparated is true, multiple header fields with the same field name using be +// combined using ",". +// If no header was found using the given name, "ok" would be false. +// If more than one headers was found using the given name, while commaSeparated is false, +// "ok" would be false. +func getRequestHeader(req *http.Request, name string, commaSeparated bool) (hdr string, ok bool) { + v := req.Header[textproto.CanonicalMIMEHeaderKey(name)] + switch len(v) { + case 0: + return "", false + case 1: + return v[0], true + default: + return strings.Join(v, ","), commaSeparated + } +} + // TODO(rghetia): return an empty Tracestate when parsing tracestate header encounters an error. // Revisit to return additional boolean value to indicate parsing error when following issues // are resolved. 
// https://github.com/w3c/distributed-tracing/issues/172 // https://github.com/w3c/distributed-tracing/issues/175 func tracestateFromRequest(req *http.Request) *tracestate.Tracestate { - h := req.Header.Get(tracestateHeader) + h, _ := getRequestHeader(req, tracestateHeader, true) if h == "" { return nil } diff --git a/plugin/ochttp/propagation/tracecontext/propagation_test.go b/plugin/ochttp/propagation/tracecontext/propagation_test.go index 5b2a2d72d..996cfa883 100644 --- a/plugin/ochttp/propagation/tracecontext/propagation_test.go +++ b/plugin/ochttp/propagation/tracecontext/propagation_test.go @@ -48,10 +48,14 @@ func TestHTTPFormat_FromRequest(t *testing.T) { wantOk bool }{ { - name: "unsupported version", + name: "future version", header: "02-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01", - wantSc: trace.SpanContext{}, - wantOk: false, + wantSc: trace.SpanContext{ + TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54}, + SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183}, + TraceOptions: trace.TraceOptions(1), + }, + wantOk: true, }, { name: "zero trace ID and span ID", @@ -70,17 +74,13 @@ func TestHTTPFormat_FromRequest(t *testing.T) { wantOk: true, }, { - name: "no options", + name: "missing options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7", - wantSc: trace.SpanContext{ - TraceID: trace.TraceID{75, 249, 47, 53, 119, 179, 77, 166, 163, 206, 146, 157, 14, 14, 71, 54}, - SpanID: trace.SpanID{0, 240, 103, 170, 11, 169, 2, 183}, - TraceOptions: trace.TraceOptions(0), - }, - wantOk: true, + wantSc: trace.SpanContext{}, + wantOk: false, }, { - name: "missing options", + name: "empty options", header: "00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-", wantSc: trace.SpanContext{}, wantOk: false, From d5b2c1264a6e63d82d7b9527b34c0b36206627cb Mon Sep 17 00:00:00 2001 From: JBD Date: Thu, 27 Sep 2018 15:28:52 -0700 Subject: [PATCH 079/212] Fix Honeycomb capitalization (#936) --- 
README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index d3fbc32b8..e3d3770ce 100644 --- a/README.md +++ b/README.md @@ -56,7 +56,7 @@ can implement their own exporters by implementing the exporter interfaces * [AWS X-Ray][exporter-xray] for traces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats -* [HoneyComb][exporter-honeycomb] for traces +* [Honeycomb][exporter-honeycomb] for traces ## Overview From c20af95cab3c2d48b965ad7d5aea0b3185f06af4 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 3 Oct 2018 10:49:40 -0700 Subject: [PATCH 080/212] Don't rely on global trace.ApplyConfig in tests (#938) Some tests are relying on previous tests changing the global sampler leading to random failures when tests are run in isolation. Set an explicit sampler rather than relying on globals. --- plugin/ochttp/propagation_test.go | 3 +-- .../span_annotating_client_trace_test.go | 9 +++++-- plugin/ochttp/trace_test.go | 24 ++++++++++--------- 3 files changed, 21 insertions(+), 15 deletions(-) diff --git a/plugin/ochttp/propagation_test.go b/plugin/ochttp/propagation_test.go index f4645db39..9ebfd89f4 100644 --- a/plugin/ochttp/propagation_test.go +++ b/plugin/ochttp/propagation_test.go @@ -36,8 +36,7 @@ func TestRoundTripAllFormats(t *testing.T) { } ctx := context.Background() - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - ctx, span := trace.StartSpan(ctx, "test") + ctx, span := trace.StartSpan(ctx, "test", trace.WithSampler(trace.AlwaysSample())) sc := span.SpanContext() wantStr := fmt.Sprintf("trace_id=%x, span_id=%x, options=%d", sc.TraceID, sc.SpanID, sc.TraceOptions) defer span.End() diff --git a/plugin/ochttp/span_annotating_client_trace_test.go b/plugin/ochttp/span_annotating_client_trace_test.go index 9f0da0c51..3336d7ec9 100644 --- a/plugin/ochttp/span_annotating_client_trace_test.go +++ b/plugin/ochttp/span_annotating_client_trace_test.go @@ -36,7 
+36,12 @@ func TestSpanAnnotatingClientTrace(t *testing.T) { trace.RegisterExporter(recorder) - tr := ochttp.Transport{NewClientTrace: ochttp.NewSpanAnnotatingClientTrace} + tr := ochttp.Transport{ + NewClientTrace: ochttp.NewSpanAnnotatingClientTrace, + StartOptions: trace.StartOptions{ + Sampler: trace.AlwaysSample(), + }, + } req, err := http.NewRequest("POST", server.URL, strings.NewReader("req-body")) if err != nil { @@ -55,7 +60,7 @@ func TestSpanAnnotatingClientTrace(t *testing.T) { } if got, want := len(recorder.spans), 1; got != want { - t.Errorf("span count=%d; want=%d", got, want) + t.Fatalf("span count=%d; want=%d", got, want) } var annotations []string diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index a14c866a7..ea9b77c05 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -200,8 +200,6 @@ func (c *collector) ExportSpan(s *trace.SpanData) { } func TestEndToEnd(t *testing.T) { - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - tc := []struct { name string handler *Handler @@ -246,12 +244,10 @@ func TestEndToEnd(t *testing.T) { // Start the server. serverDone := make(chan struct{}) serverReturn := make(chan time.Time) + tt.handler.StartOptions.Sampler = trace.AlwaysSample() url := serveHTTP(tt.handler, serverDone, serverReturn) - // Start a root Span in the client. - ctx, root := trace.StartSpan( - context.Background(), - "top-level") + ctx := context.Background() // Make the request. 
req, err := http.NewRequest( http.MethodPost, @@ -261,7 +257,11 @@ func TestEndToEnd(t *testing.T) { t.Fatal(err) } req = req.WithContext(ctx) - resp, err := tt.transport.RoundTrip(req) + tt.transport.StartOptions.Sampler = trace.AlwaysSample() + c := &http.Client{ + Transport: tt.transport, + } + resp, err := c.Do(req) if err != nil { t.Fatal(err) } @@ -328,9 +328,6 @@ func TestEndToEnd(t *testing.T) { t.Errorf("len(server.Links) = %d; want %d", got, want) } else { link := server.Links[0] - if got, want := link.TraceID, root.SpanContext().TraceID; got != want { - t.Errorf("link.TraceID = %q; want %q", got, want) - } if got, want := link.Type, trace.LinkTypeChild; got != want { t.Errorf("link.Type = %v; want %v", got, want) } @@ -411,7 +408,12 @@ func TestFormatSpanName(t *testing.T) { defer server.Close() client := &http.Client{ - Transport: &Transport{FormatSpanName: formatSpanName}, + Transport: &Transport{ + FormatSpanName: formatSpanName, + StartOptions: trace.StartOptions{ + Sampler: trace.AlwaysSample(), + }, + }, } tests := []struct { From fb92c347f423413c4522dc97dcd0d464ab30e469 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 4 Oct 2018 15:52:02 -0700 Subject: [PATCH 081/212] Fix some doc lint warnings (#939) --- plugin/ochttp/span_annotating_client_trace.go | 2 ++ plugin/ochttp/stats.go | 1 + trace/tracestate/tracestate.go | 2 ++ 3 files changed, 5 insertions(+) diff --git a/plugin/ochttp/span_annotating_client_trace.go b/plugin/ochttp/span_annotating_client_trace.go index 7aa03cd5d..05c6c56cc 100644 --- a/plugin/ochttp/span_annotating_client_trace.go +++ b/plugin/ochttp/span_annotating_client_trace.go @@ -29,6 +29,8 @@ type spanAnnotator struct { // TODO: Remove NewSpanAnnotator at the next release. +// NewSpanAnnotator returns a httptrace.ClientTrace which annotates +// all emitted httptrace events on the provided Span. 
// Deprecated: Use NewSpanAnnotatingClientTrace instead func NewSpanAnnotator(r *http.Request, s *trace.Span) *httptrace.ClientTrace { return NewSpanAnnotatingClientTrace(r, s) diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index d99b98d12..46dcc8e57 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -32,6 +32,7 @@ var ( ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) ) +// Client measures supported for use in custom views. var ( ClientSentBytes = stats.Int64( "opencensus.io/http/client/sent_bytes", diff --git a/trace/tracestate/tracestate.go b/trace/tracestate/tracestate.go index 2345dd379..2d6c713eb 100644 --- a/trace/tracestate/tracestate.go +++ b/trace/tracestate/tracestate.go @@ -12,6 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. +// Package tracestate implements support for the Tracestate header of the +// W3C TraceContext propagation format. package tracestate import ( From f261ec04be89980b6d3eb26e798159db82f20599 Mon Sep 17 00:00:00 2001 From: JBD Date: Mon, 8 Oct 2018 10:58:52 -0700 Subject: [PATCH 082/212] Allow user to set start options perf HTTP request (#924) Fixes #869. --- plugin/ochttp/client.go | 12 +++++++++++- plugin/ochttp/server.go | 14 ++++++++++++-- 2 files changed, 23 insertions(+), 3 deletions(-) diff --git a/plugin/ochttp/client.go b/plugin/ochttp/client.go index 68faf24f5..da815b2a7 100644 --- a/plugin/ochttp/client.go +++ b/plugin/ochttp/client.go @@ -47,6 +47,10 @@ type Transport struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // NameFromRequest holds the function to use for generating the span name // from the information found in the outgoing HTTP Request. 
By default the // name equals the URL Path. @@ -75,11 +79,17 @@ func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { if spanNameFormatter == nil { spanNameFormatter = spanNameFromURL } + + startOpts := t.StartOptions + if t.GetStartOptions != nil { + startOpts = t.GetStartOptions(req) + } + rt = &traceTransport{ base: rt, format: format, startOptions: trace.StartOptions{ - Sampler: t.StartOptions.Sampler, + Sampler: startOpts.Sampler, SpanKind: trace.SpanKindClient, }, formatSpanName: spanNameFormatter, diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 492b74b9e..ff72de97a 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -56,6 +56,10 @@ type Handler struct { // for spans started by this transport. StartOptions trace.StartOptions + // GetStartOptions allows to set start options per request. If set, + // StartOptions is going to be ignored. + GetStartOptions func(*http.Request) trace.StartOptions + // IsPublicEndpoint should be set to true for publicly accessible HTTP(S) // servers. 
If true, any trace metadata set on the incoming request will // be added as a linked trace instead of being added as a parent of the @@ -93,15 +97,21 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ name = h.FormatSpanName(r) } ctx := r.Context() + + startOpts := h.StartOptions + if h.GetStartOptions != nil { + startOpts = h.GetStartOptions(r) + } + var span *trace.Span sc, ok := h.extractSpanContext(r) if ok && !h.IsPublicEndpoint { ctx, span = trace.StartSpanWithRemoteParent(ctx, name, sc, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer)) } else { ctx, span = trace.StartSpan(ctx, name, - trace.WithSampler(h.StartOptions.Sampler), + trace.WithSampler(startOpts.Sampler), trace.WithSpanKind(trace.SpanKindServer), ) if ok { From b6be004b5cc2139ee0d2ec8444bd22e73f6bf6d8 Mon Sep 17 00:00:00 2001 From: JBD Date: Mon, 8 Oct 2018 14:51:26 -0700 Subject: [PATCH 083/212] Always end execution tracer task (#943) Fixes #942. --- trace/trace.go | 9 ++++++--- trace/trace_test.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+), 3 deletions(-) diff --git a/trace/trace.go b/trace/trace.go index 77578a3c5..9e5e5f033 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -243,13 +243,16 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa // End ends the span. 
func (s *Span) End() { + if s == nil { + return + } + if s.executionTracerTaskEnd != nil { + s.executionTracerTaskEnd() + } if !s.IsRecordingEvents() { return } s.endOnce.Do(func() { - if s.executionTracerTaskEnd != nil { - s.executionTracerTaskEnd() - } exp, _ := exporters.Load().(exportersMap) mustExport := s.spanContext.IsSampled() && len(exp) > 0 if s.spanStore != nil || mustExport { diff --git a/trace/trace_test.go b/trace/trace_test.go index 852b43301..0c25cc7f3 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -18,6 +18,7 @@ import ( "context" "fmt" "reflect" + "sync/atomic" "testing" "time" @@ -675,3 +676,39 @@ func TestStartSpanAfterEnd(t *testing.T) { t.Errorf("span-2.ParentSpanID=%q; want %q (span1.SpanID)", got, want) } } + +func TestNilSpanEnd(t *testing.T) { + var span *Span + span.End() +} + +func TestExecutionTracerTaskEnd(t *testing.T) { + var n uint64 + executionTracerTaskEnd := func() { + atomic.AddUint64(&n, 1) + } + + var spans []*Span + _, span := StartSpan(context.Background(), "foo", WithSampler(NeverSample())) + span.executionTracerTaskEnd = executionTracerTaskEnd + spans = append(spans, span) // never sample + + _, span = StartSpanWithRemoteParent(context.Background(), "foo", SpanContext{ + TraceID: TraceID{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, + SpanID: SpanID{0, 1, 2, 3, 4, 5, 6, 7}, + TraceOptions: 0, + }) + span.executionTracerTaskEnd = executionTracerTaskEnd + spans = append(spans, span) // parent not sampled + + _, span = StartSpan(context.Background(), "foo", WithSampler(AlwaysSample())) + span.executionTracerTaskEnd = executionTracerTaskEnd + spans = append(spans, span) // always sample + + for _, span := range spans { + span.End() + } + if got, want := n, uint64(len(spans)); got != want { + t.Fatalf("Execution tracer task ended for %v spans; want %v", got, want) + } +} From ae36bd8445ff9cde752c94cf0d4f3f9dced1facc Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 9 Oct 2018 12:06:01 -0400 
Subject: [PATCH 084/212] Exemplar implementation (#917) See: stats/Exemplars.md in opencensus-specs Fixes: #873 --- exemplar/exemplar.go | 78 ++++++++++++++++ go.sum | 1 + plugin/ocgrpc/server_stats_handler_test.go | 4 - stats/doc.go | 38 +++++--- stats/internal/record.go | 2 +- stats/record.go | 3 +- stats/view/aggregation_data.go | 92 ++++++++++++------- stats/view/aggregation_data_test.go | 84 +++++++++++++++++ stats/view/benchmark_test.go | 2 + stats/view/collector.go | 6 +- stats/view/view.go | 6 +- stats/view/view_test.go | 33 ++++--- stats/view/worker.go | 8 +- stats/view/worker_commands.go | 15 +++- tag/context.go | 28 +++++- tag/context_test.go | 44 +++++++++ trace/exemplar.go | 43 +++++++++ trace/exemplar_test.go | 100 +++++++++++++++++++++ 18 files changed, 514 insertions(+), 73 deletions(-) create mode 100644 exemplar/exemplar.go create mode 100644 tag/context_test.go create mode 100644 trace/exemplar.go create mode 100644 trace/exemplar_test.go diff --git a/exemplar/exemplar.go b/exemplar/exemplar.go new file mode 100644 index 000000000..e676df837 --- /dev/null +++ b/exemplar/exemplar.go @@ -0,0 +1,78 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar implements support for exemplars. Exemplars are additional +// data associated with each measurement. +// +// Their purpose it to provide an example of the kind of thing +// (request, RPC, trace span, etc.) 
that resulted in that measurement.
+package exemplar
+
+import (
+	"context"
+	"time"
+)
+
+const (
+	KeyTraceID   = "trace_id"
+	KeySpanID    = "span_id"
+	KeyPrefixTag = "tag:"
+)
+
+// Exemplar is an example data point associated with each bucket of a
+// distribution type aggregation.
+type Exemplar struct {
+	Value       float64     // the value that was recorded
+	Timestamp   time.Time   // the time the value was recorded
+	Attachments Attachments // attachments (if any)
+}
+
+// Attachments is a map of extra values associated with a recorded data point.
+// The map should only be mutated from AttachmentExtractor functions.
+type Attachments map[string]string
+
+// AttachmentExtractor is a function capable of extracting exemplar attachments
+// from the context used to record measurements.
+// The map passed to the function should be mutated and returned. It will
+// initially be nil: the first AttachmentExtractor that would like to add keys to the
+// map is responsible for initializing it.
+type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments
+
+var extractors []AttachmentExtractor
+
+// RegisterAttachmentExtractor registers the given extractor associated with the exemplar
+// type name.
+//
+// Extractors will be used to attempt to extract exemplars from the context
+// associated with each recorded measurement.
+//
+// Packages that support exemplars should register their extractor functions on
+// initialization.
+//
+// RegisterAttachmentExtractor should not be called after any measurements have
+// been recorded.
+func RegisterAttachmentExtractor(e AttachmentExtractor) {
+	extractors = append(extractors, e)
+}
+
+// AttachmentsFromContext extracts exemplars from the given context.
+// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an
+// unspecified order to add attachments to the exemplar.
+func AttachmentsFromContext(ctx context.Context) Attachments { + var a Attachments + for _, extractor := range extractors { + a = extractor(ctx, a) + } + return a +} diff --git a/go.sum b/go.sum index 6765bc031..3e0bab884 100644 --- a/go.sum +++ b/go.sum @@ -31,6 +31,7 @@ golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go index 3837758ad..1b96edf88 100644 --- a/plugin/ocgrpc/server_stats_handler_test.go +++ b/plugin/ocgrpc/server_stats_handler_test.go @@ -324,10 +324,6 @@ func TestServerDefaultCollections(t *testing.T) { } } -func newCountData(v int) *view.CountData { - return &view.CountData{Value: int64(v)} -} - func newDistributionData(countPerBucket []int64, count int64, min, max, mean, sumOfSquaredDev float64) *view.DistributionData { return &view.DistributionData{ Count: count, diff --git a/stats/doc.go b/stats/doc.go index 7a8a62c14..00d473ee0 100644 --- a/stats/doc.go +++ b/stats/doc.go @@ -21,35 +21,49 @@ aggregate the collected data, and export the aggregated data. Measures -A measure represents a type of metric to be tracked and recorded. +A measure represents a type of data point to be tracked and recorded. 
For example, latency, request Mb/s, and response Mb/s are measures to collect from a server. -Each measure needs to be registered before being used. Measure -constructors such as Int64 and Float64 automatically +Measure constructors such as Int64 and Float64 automatically register the measure by the given name. Each registered measure needs to be unique by name. Measures also have a description and a unit. -Libraries can define and export measures for their end users to -create views and collect instrumentation data. +Libraries can define and export measures. Application authors can then +create views and collect and break down measures by the tags they are +interested in. Recording measurements Measurement is a data point to be collected for a measure. For example, for a latency (ms) measure, 100 is a measurement that represents a 100ms -latency event. Users collect data points on the existing measures with +latency event. Measurements are created from measures with the current context. Tags from the current context are recorded with the measurements if they are any. -Recorded measurements are dropped immediately if user is not aggregating -them via views. Users don't necessarily need to conditionally enable/disable +Recorded measurements are dropped immediately if no views are registered for them. +There is usually no need to conditionally enable and disable recording to reduce cost. Recording of measurements is cheap. -Libraries can always record measurements, and end-users can later decide +Libraries can always record measurements, and applications can later decide on which measurements they want to collect by registering views. This allows libraries to turn on the instrumentation by default. + +Exemplars + +For a given recorded measurement, the associated exemplar is a diagnostic map +that gives more information about the measurement. + +When aggregated using a Distribution aggregation, an exemplar is kept for each +bucket in the Distribution. 
This allows you to easily find an example of a +measurement that fell into each bucket. + +For example, if you also use the OpenCensus trace package and you +record a measurement with a context that contains a sampled trace span, +then the trace span will be added to the exemplar associated with the measurement. + +When exported to a supporting back end, you should be able to easily navigate +to example traces that fell into each bucket in the Distribution. + */ package stats // import "go.opencensus.io/stats" - -// TODO(acetechnologist): Add a link to the language independent OpenCensus -// spec when it is available. diff --git a/stats/internal/record.go b/stats/internal/record.go index 6341eb2ad..ed5455205 100644 --- a/stats/internal/record.go +++ b/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(*tag.Map, interface{}) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/stats/record.go b/stats/record.go index e489009cb..0aced02c3 100644 --- a/stats/record.go +++ b/stats/record.go @@ -18,6 +18,7 @@ package stats import ( "context" + "go.opencensus.io/exemplar" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -50,7 +51,7 @@ func Record(ctx context.Context, ms ...Measurement) { if !record { return } - recorder(tag.FromContext(ctx), ms) + recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) } // RecordWithTags records one or multiple measurements at once. 
diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go index 88c500bff..960b94601 100644 --- a/stats/view/aggregation_data.go +++ b/stats/view/aggregation_data.go @@ -17,6 +17,8 @@ package view import ( "math" + + "go.opencensus.io/exemplar" ) // AggregationData represents an aggregated value from a collection. @@ -24,7 +26,7 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(v float64) + addSample(e *exemplar.Exemplar) clone() AggregationData equal(other AggregationData) bool } @@ -41,7 +43,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(v float64) { +func (a *CountData) addSample(_ *exemplar.Exemplar) { a.Value = a.Value + 1 } @@ -68,8 +70,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(f float64) { - a.Value += f +func (a *SumData) addSample(e *exemplar.Exemplar) { + a.Value += e.Value } func (a *SumData) clone() AggregationData { @@ -88,22 +90,30 @@ func (a *SumData) equal(other AggregationData) bool { // Distribution aggregation. // // Most users won't directly access distribution data. +// +// For a distribution with N bounds, the associated DistributionData will have +// N+1 buckets. 
type DistributionData struct { - Count int64 // number of data points aggregated - Min float64 // minimum value in the distribution - Max float64 // max value in the distribution - Mean float64 // mean of the distribution - SumOfSquaredDev float64 // sum of the squared deviation from the mean - CountPerBucket []int64 // number of occurrences per bucket - bounds []float64 // histogram distribution of the values + Count int64 // number of data points aggregated + Min float64 // minimum value in the distribution + Max float64 // max value in the distribution + Mean float64 // mean of the distribution + SumOfSquaredDev float64 // sum of the squared deviation from the mean + CountPerBucket []int64 // number of occurrences per bucket + // ExemplarsPerBucket is slice the same length as CountPerBucket containing + // an exemplar for the associated bucket, or nil. + ExemplarsPerBucket []*exemplar.Exemplar + bounds []float64 // histogram distribution of the values } func newDistributionData(bounds []float64) *DistributionData { + bucketCount := len(bounds) + 1 return &DistributionData{ - CountPerBucket: make([]int64, len(bounds)+1), - bounds: bounds, - Min: math.MaxFloat64, - Max: math.SmallestNonzeroFloat64, + CountPerBucket: make([]int64, bucketCount), + ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), + bounds: bounds, + Min: math.MaxFloat64, + Max: math.SmallestNonzeroFloat64, } } @@ -119,7 +129,8 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(f float64) { +func (a *DistributionData) addSample(e *exemplar.Exemplar) { + f := e.Value if f < a.Min { a.Min = f } @@ -127,7 +138,7 @@ func (a *DistributionData) addSample(f float64) { a.Max = f } a.Count++ - a.incrementBucketCount(f) + a.addToBucket(e) if a.Count == 1 { a.Mean = f @@ -139,26 +150,43 @@ func (a *DistributionData) addSample(f float64) { a.SumOfSquaredDev = a.SumOfSquaredDev + 
(f-oldMean)*(f-a.Mean) } -func (a *DistributionData) incrementBucketCount(f float64) { - if len(a.bounds) == 0 { - a.CountPerBucket[0]++ - return - } - +func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { + var count *int64 + var ex **exemplar.Exemplar for i, b := range a.bounds { - if f < b { - a.CountPerBucket[i]++ - return + if e.Value < b { + count = &a.CountPerBucket[i] + ex = &a.ExemplarsPerBucket[i] + break } } - a.CountPerBucket[len(a.bounds)]++ + if count == nil { + count = &a.CountPerBucket[len(a.bounds)] + ex = &a.ExemplarsPerBucket[len(a.bounds)] + } + *count++ + *ex = maybeRetainExemplar(*ex, e) +} + +func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { + if old == nil { + return cur + } + + // Heuristic to pick the "better" exemplar: first keep the one with a + // sampled trace attachment, if neither have a trace attachment, pick the + // one with more attachments. + _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] + if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { + return cur + } + return old } func (a *DistributionData) clone() AggregationData { - counts := make([]int64, len(a.CountPerBucket)) - copy(counts, a.CountPerBucket) c := *a - c.CountPerBucket = counts + c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) + c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) 
return &c } @@ -190,8 +218,8 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64) { - l.Value = v +func (l *LastValueData) addSample(e *exemplar.Exemplar) { + l.Value = e.Value } func (l *LastValueData) clone() AggregationData { diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go index 359ffa656..9b12b8537 100644 --- a/stats/view/aggregation_data_test.go +++ b/stats/view/aggregation_data_test.go @@ -18,6 +18,11 @@ package view import ( "reflect" "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go.opencensus.io/exemplar" ) func TestDataClone(t *testing.T) { @@ -59,3 +64,82 @@ func TestDataClone(t *testing.T) { }) } } + +func TestDistributionData_addSample(t *testing.T) { + dd := newDistributionData([]float64{0, 1, 2}) + t1, _ := time.Parse("Mon Jan 2 15:04:05 -0700 MST 2006", "Mon Jan 2 15:04:05 -0700 MST 2006") + e1 := &exemplar.Exemplar{ + Attachments: exemplar.Attachments{ + "tag:X": "Y", + "tag:A": "B", + }, + Timestamp: t1, + Value: 0.5, + } + dd.addSample(e1) + + want := &DistributionData{ + Count: 1, + CountPerBucket: []int64{0, 1, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{nil, e1, nil, nil}, + Max: 0.5, + Min: 0.5, + Mean: 0.5, + SumOfSquaredDev: 0, + } + if diff := cmpDD(dd, want); diff != "" { + t.Fatalf("Unexpected DistributionData -got +want: %s", diff) + } + + t2 := t1.Add(time.Microsecond) + e2 := &exemplar.Exemplar{ + Attachments: exemplar.Attachments{ + "tag:X": "Y", + }, + Timestamp: t2, + Value: 0.7, + } + dd.addSample(e2) + + // Previous exemplar should be preserved, since it has more annotations. 
+ want = &DistributionData{ + Count: 2, + CountPerBucket: []int64{0, 2, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{nil, e1, nil, nil}, + Max: 0.7, + Min: 0.5, + Mean: 0.6, + SumOfSquaredDev: 0, + } + if diff := cmpDD(dd, want); diff != "" { + t.Fatalf("Unexpected DistributionData -got +want: %s", diff) + } + + t3 := t2.Add(time.Microsecond) + e3 := &exemplar.Exemplar{ + Attachments: exemplar.Attachments{ + exemplar.KeyTraceID: "abcd", + }, + Timestamp: t3, + Value: 0.2, + } + dd.addSample(e3) + + // Exemplar should be replaced since it has a trace_id. + want = &DistributionData{ + Count: 3, + CountPerBucket: []int64{0, 3, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{nil, e3, nil, nil}, + Max: 0.7, + Min: 0.2, + Mean: 0.4666666666666667, + SumOfSquaredDev: 0, + } + if diff := cmpDD(dd, want); diff != "" { + t.Fatalf("Unexpected DistributionData -got +want: %s", diff) + } +} + +func cmpDD(got, want *DistributionData) string { + return cmp.Diff(got, want, cmpopts.IgnoreFields(DistributionData{}, "SumOfSquaredDev"), cmpopts.IgnoreUnexported(DistributionData{})) +} diff --git a/stats/view/benchmark_test.go b/stats/view/benchmark_test.go index f5d0efa02..0f195d43b 100644 --- a/stats/view/benchmark_test.go +++ b/stats/view/benchmark_test.go @@ -19,6 +19,7 @@ import ( "context" "fmt" "testing" + "time" "go.opencensus.io/stats" "go.opencensus.io/tag" @@ -84,6 +85,7 @@ func BenchmarkRecordReqCommand(b *testing.B) { m.M(1), }, tm: tag.FromContext(ctxs[i%len(ctxs)]), + t: time.Now(), } record.handleCommand(w) } diff --git a/stats/view/collector.go b/stats/view/collector.go index 250395db2..32415d485 100644 --- a/stats/view/collector.go +++ b/stats/view/collector.go @@ -18,6 +18,8 @@ package view import ( "sort" + "go.opencensus.io/exemplar" + "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" ) @@ -31,13 +33,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64) { +func (c *collector) addSample(s string, e 
*exemplar.Exemplar) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v) + aggregator.addSample(e) } // collectRows returns a snapshot of the collected Row values. diff --git a/stats/view/view.go b/stats/view/view.go index 22323e2c5..c2a08af67 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -23,6 +23,8 @@ import ( "sync/atomic" "time" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -127,12 +129,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val) + v.collector.addSample(sig, e) } // A Data is a set of rows about usage of the single measure associated diff --git a/stats/view/view_test.go b/stats/view/view_test.go index c99927779..445e56a1e 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -19,6 +19,8 @@ import ( "context" "testing" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -65,7 +67,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, }, @@ -80,13 +82,13 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, + Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, 
{ []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, }, @@ -104,25 +106,25 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 other"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, + Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &DistributionData{ - 1, 5, 5, 5, 0, []int64{0, 1}, []float64{2}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, }, @@ -142,19 +144,19 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}}, &DistributionData{ - 2, 1, 5, 3, 8, []int64{1, 1}, []float64{2}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is another very long value key"}}, &DistributionData{ - 1, 1, 1, 1, 0, []int64{1, 0}, []float64{2}, + Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: 
[]float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}, {Key: k2, Value: "v2 is a very long value key"}}, &DistributionData{ - 4, 1, 5, 3, 2.66666666666667 * 3, []int64{1, 3}, []float64{2}, + Count: 4, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 2.66666666666667 * 3, CountPerBucket: []int64{1, 3}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, }, }, }, @@ -173,7 +175,11 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tc.label, err) } - view.addSample(tag.FromContext(ctx), r.f) + e := &exemplar.Exemplar{ + Value: r.f, + Attachments: exemplar.AttachmentsFromContext(ctx), + } + view.addSample(tag.FromContext(ctx), e) } gotRows := view.collectedRows() @@ -289,7 +295,10 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tt.label, err) } - view.addSample(tag.FromContext(ctx), r.f) + e := &exemplar.Exemplar{ + Value: r.f, + } + view.addSample(tag.FromContext(ctx), e) } gotRows := view.collectedRows() diff --git a/stats/view/worker.go b/stats/view/worker.go index 9255d27d2..63b0ee3cc 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -107,10 +107,12 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}) { +func record(tags *tag.Map, ms interface{}, attachments map[string]string) { req := &recordReq{ - tm: tags, - ms: ms.([]stats.Measurement), + tm: tags, + ms: ms.([]stats.Measurement), + attachments: attachments, + t: time.Now(), } defaultWorker.c <- req } diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index 06c3c5464..b38f26f42 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -21,6 +21,8 @@ import ( "strings" "time" + "go.opencensus.io/exemplar" + "go.opencensus.io/stats" "go.opencensus.io/stats/internal" 
"go.opencensus.io/tag" @@ -140,8 +142,10 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { // recordReq is the command to record data related to multiple measures // at once. type recordReq struct { - tm *tag.Map - ms []stats.Measurement + tm *tag.Map + ms []stats.Measurement + attachments map[string]string + t time.Time } func (cmd *recordReq) handleCommand(w *worker) { @@ -151,7 +155,12 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value()) + e := &exemplar.Exemplar{ + Value: m.Value(), + Timestamp: cmd.t, + Attachments: cmd.attachments, + } + v.addSample(cmd.tm, e) } } } diff --git a/tag/context.go b/tag/context.go index ed528bcb3..dcc13f498 100644 --- a/tag/context.go +++ b/tag/context.go @@ -15,7 +15,11 @@ package tag -import "context" +import ( + "context" + + "go.opencensus.io/exemplar" +) // FromContext returns the tag map stored in the context. func FromContext(ctx context.Context) *Map { @@ -39,3 +43,25 @@ func NewContext(ctx context.Context, m *Map) context.Context { type ctxKey struct{} var mapCtxKey = ctxKey{} + +func init() { + exemplar.RegisterAttachmentExtractor(extractTagsAttachments) +} + +func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + m := FromContext(ctx) + if m == nil { + return a + } + if len(m.m) == 0 { + return a + } + if a == nil { + a = make(map[string]string) + } + + for k, v := range m.m { + a[exemplar.KeyPrefixTag+k.Name()] = v + } + return a +} diff --git a/tag/context_test.go b/tag/context_test.go new file mode 100644 index 000000000..e85b1c40c --- /dev/null +++ b/tag/context_test.go @@ -0,0 +1,44 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +import ( + "context" + "testing" +) + +func TestExtractTagsAttachment(t *testing.T) { + // We can't depend on the stats of view package without creating a + // dependency cycle. + + var m map[string]string + ctx := context.Background() + + res := extractTagsAttachments(ctx, m) + if res != nil { + t.Fatalf("res = %v; want nil", res) + } + + k, _ := NewKey("test") + ctx, _ = New(ctx, Insert(k, "test123")) + res = extractTagsAttachments(ctx, m) + if res == nil { + t.Fatal("res = nil") + } + if got, want := res["tag:test"], "test123"; got != want { + t.Fatalf("res[Tags:test] = %v; want %v", got, want) + } +} diff --git a/trace/exemplar.go b/trace/exemplar.go new file mode 100644 index 000000000..416d80590 --- /dev/null +++ b/trace/exemplar.go @@ -0,0 +1,43 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "context" + "encoding/hex" + + "go.opencensus.io/exemplar" +) + +func init() { + exemplar.RegisterAttachmentExtractor(attachSpanContext) +} + +func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { + span := FromContext(ctx) + if span == nil { + return a + } + sc := span.SpanContext() + if !sc.IsSampled() { + return a + } + if a == nil { + a = make(exemplar.Attachments) + } + a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) + a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) + return a +} diff --git a/trace/exemplar_test.go b/trace/exemplar_test.go new file mode 100644 index 000000000..de27631f9 --- /dev/null +++ b/trace/exemplar_test.go @@ -0,0 +1,100 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace_test + +import ( + "context" + "testing" + + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/trace" +) + +func TestTraceExemplar(t *testing.T) { + m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) + v := &view.View{ + Measure: m, + Aggregation: view.Distribution(0, 1, 2, 3), + } + view.Register(v) + ctx := context.Background() + ctx, span := trace.StartSpan(ctx, t.Name(), trace.WithSampler(trace.AlwaysSample())) + stats.Record(ctx, m.M(1.5)) + span.End() + + rows, err := view.RetrieveData(v.Name) + if err != nil { + t.Fatal(err) + } + if len(rows) == 0 { + t.Fatal("len(rows) = 0; want > 0") + } + dd := rows[0].Data.(*view.DistributionData) + if got := len(dd.ExemplarsPerBucket); got < 3 { + t.Fatalf("len(dd.ExemplarsPerBucket) = %d; want >= 2", got) + } + exemplar := dd.ExemplarsPerBucket[2] + if exemplar == nil { + t.Fatal("Expected exemplar") + } + if got, want := exemplar.Value, 1.5; got != want { + t.Fatalf("exemplar.Value = %v; got %v", got, want) + } + if _, ok := exemplar.Attachments["trace_id"]; !ok { + t.Fatalf("exemplar.Attachments = %v; want trace_id key", exemplar.Attachments) + } + if _, ok := exemplar.Attachments["span_id"]; !ok { + t.Fatalf("exemplar.Attachments = %v; want span_id key", exemplar.Attachments) + } +} + +func TestTraceExemplar_notSampled(t *testing.T) { + m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) + v := &view.View{ + Measure: m, + Aggregation: view.Distribution(0, 1, 2, 3), + } + view.Register(v) + ctx := context.Background() + ctx, span := trace.StartSpan(ctx, t.Name(), trace.WithSampler(trace.NeverSample())) + stats.Record(ctx, m.M(1.5)) + span.End() + + rows, err := view.RetrieveData(v.Name) + if err != nil { + t.Fatal(err) + } + if len(rows) == 0 { + t.Fatal("len(rows) = 0; want > 0") + } + dd := rows[0].Data.(*view.DistributionData) + if got := len(dd.ExemplarsPerBucket); got < 3 { + t.Fatalf("len(buckets) = %d; want >= 2", got) + } + 
exemplar := dd.ExemplarsPerBucket[2] + if exemplar == nil { + t.Fatal("Expected exemplar") + } + if got, want := exemplar.Value, 1.5; got != want { + t.Fatalf("exemplar.Value = %v; got %v", got, want) + } + if _, ok := exemplar.Attachments["trace_id"]; ok { + t.Fatalf("exemplar.Attachments = %v; want no trace_id", exemplar.Attachments) + } + if _, ok := exemplar.Attachments["span_id"]; ok { + t.Fatalf("exemplar.Attachments = %v; want span_id key", exemplar.Attachments) + } +} From 6d8bb17cc31254e34ffe1b5e2a10abe839139feb Mon Sep 17 00:00:00 2001 From: Philippe MARTIN Date: Mon, 15 Oct 2018 18:36:07 +0200 Subject: [PATCH 085/212] Correct some typos on README (#947) --- README.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/README.md b/README.md index e3d3770ce..97d66983d 100644 --- a/README.md +++ b/README.md @@ -7,7 +7,7 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. -Currently it consists of three major components: tags, stats, and tracing. +Currently it consists of three major components: tags, stats and tracing. ## Installation @@ -38,7 +38,7 @@ integration with your RPC framework: * [Redis goredis/redis](https://godoc.org/github.com/orijtech/redis) * [Memcache](https://godoc.org/github.com/orijtech/gomemcache) -If you're a framework not listed here, you could either implement your own middleware for your +If you're using a framework not listed here, you could either implement your own middleware for your framework or use [custom stats](#stats) and [spans](#spans) directly in your application. ## Exporters @@ -74,7 +74,7 @@ in the same process or can be encoded to be transmitted on the wire. Usually, th be handled by an integration plugin, e.g. `ocgrpc.ServerHandler` and `ocgrpc.ClientHandler` for gRPC. -Package tag allows adding or modifying tags in the current context. +Package `tag` allows adding or modifying tags in the current context. 
[embedmd]:# (internal/readme/tags.go new) ```go @@ -178,8 +178,8 @@ Spans can have parents or can be root spans if they don't have any parents. The current span is propagated in-process and across the network to allow associating new child spans with the parent. -In the same process, context.Context is used to propagate spans. -trace.StartSpan creates a new span as a root if the current context +In the same process, `context.Context` is used to propagate spans. +`trace.StartSpan` creates a new span as a root if the current context doesn't contain a span. Or, it creates a child of the span that is already in current context. The returned context can be used to keep propagating the newly created span in the current context. @@ -195,8 +195,8 @@ defer span.End() Across the network, OpenCensus provides different propagation methods for different protocols. -* gRPC integrations uses the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). -* HTTP integrations uses Zipkin's [B3](https://github.com/openzipkin/b3-propagation) +* gRPC integrations use the OpenCensus' [binary propagation format](https://godoc.org/go.opencensus.io/trace/propagation). +* HTTP integrations use Zipkin's [B3](https://github.com/openzipkin/b3-propagation) by default but can be configured to use a custom propagation method by setting another [propagation.HTTPFormat](https://godoc.org/go.opencensus.io/trace/propagation#HTTPFormat). 
From 1eb9a13c7dd02141e065a665f6bf5c99a090a16a Mon Sep 17 00:00:00 2001 From: Philippe MARTIN Date: Mon, 15 Oct 2018 20:34:46 +0200 Subject: [PATCH 086/212] set countFormatter param to int64 (#948) --- zpages/formatter_test.go | 5 ++++- zpages/templates.go | 22 +++++++++++++--------- 2 files changed, 17 insertions(+), 10 deletions(-) diff --git a/zpages/formatter_test.go b/zpages/formatter_test.go index 33e2b2ab9..ea27a2abf 100644 --- a/zpages/formatter_test.go +++ b/zpages/formatter_test.go @@ -19,7 +19,7 @@ import "testing" func TestCountFormatter(t *testing.T) { tests := []struct { - in int + in int64 want string }{ {-1, " "}, @@ -30,6 +30,9 @@ func TestCountFormatter(t *testing.T) { {1e6, "1.000 M "}, {1e9, "1.000 G "}, {1e8 + 2e9, "2.100 G "}, + {1e12, "1.000 T "}, + {1e15, "1.000 P "}, + {1e18, "1.000 E "}, } for _, tt := range tests { diff --git a/zpages/templates.go b/zpages/templates.go index 9c3b1fc0e..9746f5660 100644 --- a/zpages/templates.go +++ b/zpages/templates.go @@ -57,23 +57,27 @@ func parseTemplate(name string) *template.Template { return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text))) } -func countFormatter(num int) string { +func countFormatter(num int64) string { if num <= 0 { return " " } var floatVal float64 var suffix string - num64 := int64(num) - - if num64 >= 1e12 { - floatVal = float64(num64) / 1e12 + if num >= 1e18 { + floatVal = float64(num) / 1e18 + suffix = " E " + } else if num >= 1e15 { + floatVal = float64(num) / 1e15 + suffix = " P " + } else if num >= 1e12 { + floatVal = float64(num) / 1e12 suffix = " T " - } else if num64 >= 1e9 { - floatVal = float64(num64) / 1e9 + } else if num >= 1e9 { + floatVal = float64(num) / 1e9 suffix = " G " - } else if num64 >= 1e6 { - floatVal = float64(num64) / 1e6 + } else if num >= 1e6 { + floatVal = float64(num) / 1e6 suffix = " M " } From ac292c6ab0bf90d19f4d9993dbedc5e906e3e9bf Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sat, 20 Oct 2018 16:37:24 -0700 
Subject: [PATCH 087/212] zpages: fix Snapshot int->*int64 changes + tests on count The signature for countFormatter's arguments was changed in PR #948 from int to int64 but unfortunately we forgot to update the int fields from int to the new type. Issue #948's recommendation was to use uint64 since that affords us more time to overflow e.g. even at 25QPs it'd take a couple of millenia to overflow with uint64. This change completes the change of types of the int variables but also adds tests that make use of the template functions and a snapshot and then compare the output or fail. Previously the tests complained but since they were HTTP tests not attached to an output buffer that could be inspected, the bug innocently crept away in standard output and the tests incorrectly passed as in: * https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/builds/19498802#L700 * https://travis-ci.org/census-instrumentation/opencensus-go/builds/441324783#L1183 This problem was reported by an OpenCensus-Service user in * https://github.com/census-instrumentation/opencensus-service/issues/123 Fixes #895 Fixes #951 --- zpages/formatter_test.go | 3 +- zpages/rpcz.go | 20 ++++---- zpages/rpcz_test.go | 2 +- zpages/templates.go | 2 +- zpages/templates_test.go | 99 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 zpages/templates_test.go diff --git a/zpages/formatter_test.go b/zpages/formatter_test.go index ea27a2abf..92f5a8f4d 100644 --- a/zpages/formatter_test.go +++ b/zpages/formatter_test.go @@ -19,10 +19,9 @@ import "testing" func TestCountFormatter(t *testing.T) { tests := []struct { - in int64 + in uint64 want string }{ - {-1, " "}, {0, " "}, {1, "1"}, {1024, "1024"}, diff --git a/zpages/rpcz.go b/zpages/rpcz.go index 30193d1db..dee28f982 100644 --- a/zpages/rpcz.go +++ b/zpages/rpcz.go @@ -170,9 +170,9 @@ type statSnapshot struct { // TODO: compute hour/minute values from cumulative Method string Received 
bool - CountMinute int - CountHour int - CountTotal int + CountMinute uint64 + CountHour uint64 + CountTotal uint64 AvgLatencyMinute time.Duration AvgLatencyHour time.Duration AvgLatencyTotal time.Duration @@ -185,9 +185,9 @@ type statSnapshot struct { OutputRateMinute float64 OutputRateHour float64 OutputRateTotal float64 - ErrorsMinute int - ErrorsHour int - ErrorsTotal int + ErrorsMinute uint64 + ErrorsHour uint64 + ErrorsTotal uint64 } type methodKey struct { @@ -267,7 +267,7 @@ func (s snapExporter) ExportView(vd *view.Data) { } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyClientStatus && tag.Value != "OK" { - s.ErrorsTotal += int(count) + s.ErrorsTotal += uint64(count) } } @@ -281,7 +281,7 @@ func (s snapExporter) ExportView(vd *view.Data) { s.InputRateTotal = computeRate(0, sum) case ocgrpc.ClientSentMessagesPerRPCView: - s.CountTotal = int(count) + s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ClientReceivedMessagesPerRPCView: @@ -294,7 +294,7 @@ func (s snapExporter) ExportView(vd *view.Data) { } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyServerStatus && tag.Value != "OK" { - s.ErrorsTotal += int(count) + s.ErrorsTotal += uint64(count) } } @@ -305,7 +305,7 @@ func (s snapExporter) ExportView(vd *view.Data) { s.OutputRateTotal = computeRate(0, sum) case ocgrpc.ServerReceivedMessagesPerRPCView: - s.CountTotal = int(count) + s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ServerSentMessagesPerRPCView: diff --git a/zpages/rpcz_test.go b/zpages/rpcz_test.go index 9764f6fb3..5a5f13b65 100644 --- a/zpages/rpcz_test.go +++ b/zpages/rpcz_test.go @@ -49,7 +49,7 @@ func TestRpcz(t *testing.T) { t.Fatal("Expected method stats not recorded") } - if got, want := snapshot.CountTotal, 1; got != want { + if got, want := snapshot.CountTotal, uint64(1); got != want { t.Errorf("snapshot.CountTotal = %d; want %d", got, want) } } diff --git a/zpages/templates.go b/zpages/templates.go index 
9746f5660..6675b0ab0 100644 --- a/zpages/templates.go +++ b/zpages/templates.go @@ -57,7 +57,7 @@ func parseTemplate(name string) *template.Template { return template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text))) } -func countFormatter(num int64) string { +func countFormatter(num uint64) string { if num <= 0 { return " " } diff --git a/zpages/templates_test.go b/zpages/templates_test.go new file mode 100644 index 000000000..e3adf6cf5 --- /dev/null +++ b/zpages/templates_test.go @@ -0,0 +1,99 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package zpages + +import ( + "bytes" + "html/template" + "testing" +) + +const tmplBody = ` + {{.Method}} + + {{.CountMinute|count}} + {{.CountHour|count}} + {{.CountTotal|count}} + {{.AvgLatencyMinute|ms}} + {{.AvgLatencyHour|ms}} + {{.AvgLatencyTotal|ms}} + {{.RPCRateMinute|rate}} + {{.RPCRateHour|rate}} + {{.RPCRateTotal|rate}} + {{.InputRateMinute|datarate}} + {{.InputRateHour|datarate}} + {{.InputRateTotal|datarate}} + {{.OutputRateMinute|datarate}} + {{.OutputRateHour|datarate}} + {{.OutputRateTotal|datarate}} + {{.ErrorsMinute|count}} + {{.ErrorsHour|count}} + {{.ErrorsTotal|count}} +` + +var tmpl = template.Must(template.New("countTest").Funcs(templateFunctions).Parse(tmplBody)) + +func TestTemplateFuncs(t *testing.T) { + buf := new(bytes.Buffer) + sshot := &statSnapshot{ + Method: "Foo", + CountMinute: 1e9, + CountHour: 5000, + CountTotal: 1e12, + AvgLatencyMinute: 10000, + AvgLatencyHour: 1000, + AvgLatencyTotal: 20000, + RPCRateMinute: 2000, + RPCRateHour: 5000, + RPCRateTotal: 75000, + InputRateMinute: 75000, + InputRateHour: 75000, + InputRateTotal: 75000, + OutputRateMinute: 75000, + OutputRateHour: 75000, + OutputRateTotal: 75000, + ErrorsMinute: 120000000, + ErrorsHour: 75000000, + ErrorsTotal: 7500000, + } + if err := tmpl.Execute(buf, sshot); err != nil { + t.Fatalf("Failed to execute template: %v", err) + } + want := ` + Foo + + 1.000 G + 5000 + 1.000 T + 0.010 + 0.001 + 0.020 + 2000.000 + 5000.000 + 75000.000 + 0.075 + 0.075 + 0.075 + 0.075 + 0.075 + 0.075 + 120.000 M + 75.000 M + 7.500 M +` + if g, w := buf.String(), want; g != w { + t.Errorf("Output mismatch:\nGot:\n\t%s\nWant:\n\t%s", g, w) + } +} From 486d66aed0ffe5d13ed9f0d59f96fa761cd0abcf Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sun, 21 Oct 2018 18:24:13 -0700 Subject: [PATCH 088/212] zpages: fix Snapshot int->*int64 changes + tests on count (#952) The signature for countFormatter's arguments was changed in PR #948 from int to int64 but unfortunately we forgot to update 
the int fields from int to the new type. Issue #948's recommendation was to use uint64 since that affords us more time to overflow e.g. even at 25QPs it'd take a couple of millenia to overflow with uint64. This change completes the change of types of the int variables but also adds tests that make use of the template functions and a snapshot and then compare the output or fail. Previously the tests complained but since they were HTTP tests not attached to an output buffer that could be inspected, the bug innocently crept away in standard output and the tests incorrectly passed as in: * https://ci.appveyor.com/project/opencensusgoteam/opencensus-go/builds/19498802#L700 * https://travis-ci.org/census-instrumentation/opencensus-go/builds/441324783#L1183 This problem was reported by an OpenCensus-Service user in * https://github.com/census-instrumentation/opencensus-service/issues/123 Fixes #895 Fixes #951 --- zpages/formatter_test.go | 3 +- zpages/rpcz.go | 20 ++++---- zpages/rpcz_test.go | 2 +- zpages/templates.go | 2 +- zpages/templates_test.go | 99 ++++++++++++++++++++++++++++++++++++++++ 5 files changed, 112 insertions(+), 14 deletions(-) create mode 100644 zpages/templates_test.go diff --git a/zpages/formatter_test.go b/zpages/formatter_test.go index ea27a2abf..92f5a8f4d 100644 --- a/zpages/formatter_test.go +++ b/zpages/formatter_test.go @@ -19,10 +19,9 @@ import "testing" func TestCountFormatter(t *testing.T) { tests := []struct { - in int64 + in uint64 want string }{ - {-1, " "}, {0, " "}, {1, "1"}, {1024, "1024"}, diff --git a/zpages/rpcz.go b/zpages/rpcz.go index 30193d1db..dee28f982 100644 --- a/zpages/rpcz.go +++ b/zpages/rpcz.go @@ -170,9 +170,9 @@ type statSnapshot struct { // TODO: compute hour/minute values from cumulative Method string Received bool - CountMinute int - CountHour int - CountTotal int + CountMinute uint64 + CountHour uint64 + CountTotal uint64 AvgLatencyMinute time.Duration AvgLatencyHour time.Duration AvgLatencyTotal time.Duration @@ 
-185,9 +185,9 @@ type statSnapshot struct { OutputRateMinute float64 OutputRateHour float64 OutputRateTotal float64 - ErrorsMinute int - ErrorsHour int - ErrorsTotal int + ErrorsMinute uint64 + ErrorsHour uint64 + ErrorsTotal uint64 } type methodKey struct { @@ -267,7 +267,7 @@ func (s snapExporter) ExportView(vd *view.Data) { } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyClientStatus && tag.Value != "OK" { - s.ErrorsTotal += int(count) + s.ErrorsTotal += uint64(count) } } @@ -281,7 +281,7 @@ func (s snapExporter) ExportView(vd *view.Data) { s.InputRateTotal = computeRate(0, sum) case ocgrpc.ClientSentMessagesPerRPCView: - s.CountTotal = int(count) + s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ClientReceivedMessagesPerRPCView: @@ -294,7 +294,7 @@ func (s snapExporter) ExportView(vd *view.Data) { } for _, tag := range row.Tags { if tag.Key == ocgrpc.KeyServerStatus && tag.Value != "OK" { - s.ErrorsTotal += int(count) + s.ErrorsTotal += uint64(count) } } @@ -305,7 +305,7 @@ func (s snapExporter) ExportView(vd *view.Data) { s.OutputRateTotal = computeRate(0, sum) case ocgrpc.ServerReceivedMessagesPerRPCView: - s.CountTotal = int(count) + s.CountTotal = uint64(count) s.RPCRateTotal = computeRate(0, count) case ocgrpc.ServerSentMessagesPerRPCView: diff --git a/zpages/rpcz_test.go b/zpages/rpcz_test.go index 9764f6fb3..5a5f13b65 100644 --- a/zpages/rpcz_test.go +++ b/zpages/rpcz_test.go @@ -49,7 +49,7 @@ func TestRpcz(t *testing.T) { t.Fatal("Expected method stats not recorded") } - if got, want := snapshot.CountTotal, 1; got != want { + if got, want := snapshot.CountTotal, uint64(1); got != want { t.Errorf("snapshot.CountTotal = %d; want %d", got, want) } } diff --git a/zpages/templates.go b/zpages/templates.go index 9746f5660..6675b0ab0 100644 --- a/zpages/templates.go +++ b/zpages/templates.go @@ -57,7 +57,7 @@ func parseTemplate(name string) *template.Template { return 
template.Must(template.New(name).Funcs(templateFunctions).Parse(string(text))) } -func countFormatter(num int64) string { +func countFormatter(num uint64) string { if num <= 0 { return " " } diff --git a/zpages/templates_test.go b/zpages/templates_test.go new file mode 100644 index 000000000..e3adf6cf5 --- /dev/null +++ b/zpages/templates_test.go @@ -0,0 +1,99 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package zpages + +import ( + "bytes" + "html/template" + "testing" +) + +const tmplBody = ` + {{.Method}} + + {{.CountMinute|count}} + {{.CountHour|count}} + {{.CountTotal|count}} + {{.AvgLatencyMinute|ms}} + {{.AvgLatencyHour|ms}} + {{.AvgLatencyTotal|ms}} + {{.RPCRateMinute|rate}} + {{.RPCRateHour|rate}} + {{.RPCRateTotal|rate}} + {{.InputRateMinute|datarate}} + {{.InputRateHour|datarate}} + {{.InputRateTotal|datarate}} + {{.OutputRateMinute|datarate}} + {{.OutputRateHour|datarate}} + {{.OutputRateTotal|datarate}} + {{.ErrorsMinute|count}} + {{.ErrorsHour|count}} + {{.ErrorsTotal|count}} +` + +var tmpl = template.Must(template.New("countTest").Funcs(templateFunctions).Parse(tmplBody)) + +func TestTemplateFuncs(t *testing.T) { + buf := new(bytes.Buffer) + sshot := &statSnapshot{ + Method: "Foo", + CountMinute: 1e9, + CountHour: 5000, + CountTotal: 1e12, + AvgLatencyMinute: 10000, + AvgLatencyHour: 1000, + AvgLatencyTotal: 20000, + RPCRateMinute: 2000, + RPCRateHour: 5000, + RPCRateTotal: 75000, + 
InputRateMinute: 75000, + InputRateHour: 75000, + InputRateTotal: 75000, + OutputRateMinute: 75000, + OutputRateHour: 75000, + OutputRateTotal: 75000, + ErrorsMinute: 120000000, + ErrorsHour: 75000000, + ErrorsTotal: 7500000, + } + if err := tmpl.Execute(buf, sshot); err != nil { + t.Fatalf("Failed to execute template: %v", err) + } + want := ` + Foo + + 1.000 G + 5000 + 1.000 T + 0.010 + 0.001 + 0.020 + 2000.000 + 5000.000 + 75000.000 + 0.075 + 0.075 + 0.075 + 0.075 + 0.075 + 0.075 + 120.000 M + 75.000 M + 7.500 M +` + if g, w := buf.String(), want; g != w { + t.Errorf("Output mismatch:\nGot:\n\t%s\nWant:\n\t%s", g, w) + } +} From 96e75b88df843315da521168a0e3b11792088728 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Mon, 22 Oct 2018 16:41:13 -0700 Subject: [PATCH 089/212] Bump version to 0.19.0 (#955) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index 62f03486a..7faf9e821 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.18.0" + return "0.19.0" } From 4f7fcb478fc0e34627b4da801e1502ee05a87127 Mon Sep 17 00:00:00 2001 From: Fabian Reinartz Date: Mon, 29 Oct 2018 16:37:51 +0100 Subject: [PATCH 090/212] Add resource package (#878) * Add resource package This package adds resources as a core concept to OpenCensus. 
Signed-off-by: Fabian Reinartz * Address comments Signed-off-by: Fabian Reinartz * Added and fixed tests Signed-off-by: Fabian Reinartz * Make Detectall unexported Signed-off-by: Fabian Reinartz * Address comments Signed-off-by: Fabian Reinartz * Rename Tags to Labels, expose envvar names Signed-off-by: Fabian Reinartz * Adjust label parsing to new syntax spec Signed-off-by: Fabian Reinartz * Fix replace error Signed-off-by: Fabian Reinartz * Address comments Signed-off-by: Fabian Reinartz * Address comments Signed-off-by: Fabian Reinartz --- resource/resource.go | 163 ++++++++++++++++++++++++++++++++++++++ resource/resource_test.go | 163 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 326 insertions(+) create mode 100644 resource/resource.go create mode 100644 resource/resource_test.go diff --git a/resource/resource.go b/resource/resource.go new file mode 100644 index 000000000..ec89b216c --- /dev/null +++ b/resource/resource.go @@ -0,0 +1,163 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resource provides functionality for resource, which capture +// identifying information about the entities for which signals are exported. 
+package resource + +import ( + "context" + "fmt" + "os" + "regexp" + "sort" + "strconv" + "strings" +) + +const ( + EnvVarType = "OC_RESOURCE_TYPE" + EnvVarLabels = "OC_RESOURCE_LABELS" +) + +// Resource describes an entity about which identifying information and metadata is exposed. +// For example, a type "k8s.io/container" may hold labels describing the pod name and namespace. +type Resource struct { + Type string + Labels map[string]string +} + +// EncodeLabels encodes a labels map to a string as provided via the OC_RESOURCE_LABELS environment variable. +func EncodeLabels(labels map[string]string) string { + sortedKeys := make([]string, 0, len(labels)) + for k := range labels { + sortedKeys = append(sortedKeys, k) + } + sort.Strings(sortedKeys) + + s := "" + for i, k := range sortedKeys { + if i > 0 { + s += "," + } + s += k + "=" + strconv.Quote(labels[k]) + } + return s +} + +var labelRegex = regexp.MustCompile(`^\s*([[:ascii:]]{1,256}?)=("[[:ascii:]]{0,256}?")\s*,`) + +// DecodeLabels decodes a serialized label map as used in the OC_RESOURCE_LABELS variable. +// A list of labels of the form `="",="",...` is accepted. +// Domain names and paths are accepted as label keys. +// Most users will want to use FromEnv instead. 
+func DecodeLabels(s string) (map[string]string, error) { + m := map[string]string{} + // Ensure a trailing comma, which allows us to keep the regex simpler + s = strings.TrimRight(strings.TrimSpace(s), ",") + "," + + for len(s) > 0 { + match := labelRegex.FindStringSubmatch(s) + if len(match) == 0 { + return nil, fmt.Errorf("invalid label formatting, remainder: %s", s) + } + v := match[2] + if v == "" { + v = match[3] + } else { + var err error + if v, err = strconv.Unquote(v); err != nil { + return nil, fmt.Errorf("invalid label formatting, remainder: %s, err: %s", s, err) + } + } + m[match[1]] = v + + s = s[len(match[0]):] + } + return m, nil +} + +// FromEnv is a detector that loads resource information from the OC_RESOURCE_TYPE +// and OC_RESOURCE_labelS environment variables. +func FromEnv(context.Context) (*Resource, error) { + res := &Resource{ + Type: strings.TrimSpace(os.Getenv(EnvVarType)), + } + labels := strings.TrimSpace(os.Getenv(EnvVarLabels)) + if labels == "" { + return res, nil + } + var err error + if res.Labels, err = DecodeLabels(labels); err != nil { + return nil, err + } + return res, nil +} + +var _ Detector = FromEnv + +// merge resource information from b into a. In case of a collision, a takes precedence. +func merge(a, b *Resource) *Resource { + if a == nil { + return b + } + if b == nil { + return a + } + res := &Resource{ + Type: a.Type, + Labels: map[string]string{}, + } + if res.Type == "" { + res.Type = b.Type + } + for k, v := range b.Labels { + res.Labels[k] = v + } + // Labels from resource a overwrite labels from resource b. + for k, v := range a.Labels { + res.Labels[k] = v + } + return res +} + +// Detector attempts to detect resource information. +// If the detector cannot find resource information, the returned resource is nil but no +// error is returned. +// An error is only returned on unexpected failures. 
+type Detector func(context.Context) (*Resource, error) + +// MultiDetector returns a Detector that calls all input detectors in order and +// merges each result with the previous one. In case a type of label key is already set, +// the first set value is takes precedence. +// It returns on the first error that a sub-detector encounters. +func MultiDetector(detectors ...Detector) Detector { + return func(ctx context.Context) (*Resource, error) { + return detectAll(ctx, detectors...) + } +} + +// detectall calls all input detectors sequentially an merges each result with the previous one. +// It returns on the first error that a sub-detector encounters. +func detectAll(ctx context.Context, detectors ...Detector) (*Resource, error) { + var res *Resource + for _, d := range detectors { + r, err := d(ctx) + if err != nil { + return nil, err + } + res = merge(res, r) + } + return res, nil +} diff --git a/resource/resource_test.go b/resource/resource_test.go new file mode 100644 index 000000000..77fef040e --- /dev/null +++ b/resource/resource_test.go @@ -0,0 +1,163 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource + +import ( + "context" + "errors" + "fmt" + "reflect" + "testing" +) + +func TestMerge(t *testing.T) { + cases := []struct { + a, b, want *Resource + }{ + { + a: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1", "b": "2"}, + }, + b: &Resource{ + Type: "t2", + Labels: map[string]string{"a": "1", "b": "3", "c": "4"}, + }, + want: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1", "b": "2", "c": "4"}, + }, + }, + { + a: nil, + b: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1"}, + }, + want: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1"}, + }, + }, + { + a: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1"}, + }, + b: nil, + want: &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1"}, + }, + }, + } + for i, c := range cases { + t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { + res := merge(c.a, c.b) + if !reflect.DeepEqual(res, c.want) { + t.Fatalf("unwanted result: want %+v, got %+v", c.want, res) + } + }) + } +} + +func TestDecodeLabels(t *testing.T) { + cases := []struct { + encoded string + wantLabels map[string]string + wantFail bool + }{ + { + encoded: `example.org/test-1="test $ \"" , Abc="Def"`, + wantLabels: map[string]string{"example.org/test-1": "test $ \"", "Abc": "Def"}, + }, { + encoded: `single="key"`, + wantLabels: map[string]string{"single": "key"}, + }, + {encoded: `invalid-char-ü="test"`, wantFail: true}, + {encoded: `invalid-char="ü-test"`, wantFail: true}, + {encoded: `missing="trailing-quote`, wantFail: true}, + {encoded: `missing=leading-quote"`, wantFail: true}, + {encoded: `extra="chars", a`, wantFail: true}, + } + for i, c := range cases { + t.Run(fmt.Sprintf("case-%d", i), func(t *testing.T) { + res, err := DecodeLabels(c.encoded) + if err != nil && !c.wantFail { + t.Fatalf("unwanted error: %s", err) + } + if c.wantFail && err == nil { + t.Fatalf("wanted failure but got none, result: %v", res) + } + if !reflect.DeepEqual(res, c.wantLabels) 
{ + t.Fatalf("wanted result %v, got %v", c.wantLabels, res) + } + }) + } +} + +func TestEncodeLabels(t *testing.T) { + got := EncodeLabels(map[string]string{ + "example.org/test-1": "test ¥ \"", + "un": "quøted", + "Abc": "Def", + }) + if want := `Abc="Def",example.org/test-1="test ¥ \"",un="quøted"`; got != want { + t.Fatalf("got %q, want %q", got, want) + } +} + +func TestMultiDetector(t *testing.T) { + got, err := MultiDetector( + func(context.Context) (*Resource, error) { + return &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1", "b": "2"}, + }, nil + }, + func(context.Context) (*Resource, error) { + return &Resource{ + Type: "t2", + Labels: map[string]string{"a": "11", "c": "3"}, + }, nil + }, + )(context.Background()) + if err != nil { + t.Fatalf("unexpected error: %s", err) + } + want := &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1", "b": "2", "c": "3"}, + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("unexpected resource: want %v, got %v", want, got) + } + + wantErr := errors.New("err1") + got, err = MultiDetector( + func(context.Context) (*Resource, error) { + return &Resource{ + Type: "t1", + Labels: map[string]string{"a": "1", "b": "2"}, + }, nil + }, + func(context.Context) (*Resource, error) { + return nil, wantErr + }, + )(context.Background()) + if err != wantErr { + t.Fatalf("unexpected error: want %v, got %v", wantErr, err) + } +} From 94fd1d08391b28f9c9f721b2a67fcf57bfd0a70e Mon Sep 17 00:00:00 2001 From: Johan Brandhorst Date: Sun, 4 Nov 2018 20:14:41 +0000 Subject: [PATCH 091/212] exporter/prometheus: provide ConstLabels (#962) Allows the user to configure labels consistent across all views for the exporter. 
Fixes #961 --- exporter/prometheus/prometheus.go | 9 +-- exporter/prometheus/prometheus_test.go | 91 ++++++++++++++++++++++++++ 2 files changed, 96 insertions(+), 4 deletions(-) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 50665dcb1..0266d9d7c 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -44,9 +44,10 @@ type Exporter struct { // Options contains options for configuring the exporter. type Options struct { - Namespace string - Registry *prometheus.Registry - OnError func(err error) + Namespace string + Registry *prometheus.Registry + OnError func(err error) + ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. } // NewExporter returns an exporter that exports stats to Prometheus. @@ -80,7 +81,7 @@ func (c *collector) registerViews(views ...*view.View) { viewName(c.opts.Namespace, view), view.Description, tagKeysToLabels(view.TagKeys), - nil, + c.opts.ConstLabels, ) c.registeredViewsMu.Lock() c.registeredViews[sig] = desc diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 69985209f..e6a0f4dbe 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -342,3 +342,94 @@ func TestCumulativenessFromHistograms(t *testing.T) { t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want) } } + +func TestConstLabelsIncluded(t *testing.T) { + constLabels := prometheus.Labels{ + "service": "spanner", + } + measureLabel, _ := tag.NewKey("method") + + exporter, err := NewExporter(Options{ + ConstLabels: constLabels, + }) + if err != nil { + t.Fatalf("failed to create prometheus exporter: %v", err) + } + view.RegisterExporter(exporter) + defer view.UnregisterExporter(exporter) + + names := []string{"foo", "bar", "baz"} + + var measures mSlice + for _, name := range names { + measures.createAndAppend("tests/"+name, name, "") + } + + var vc vCreator + for _, m := range measures { + 
vc.createAndAppend(m.Name(), m.Description(), []tag.Key{measureLabel}, m, view.Count()) + } + + if err := view.Register(vc...); err != nil { + t.Fatalf("failed to create views: %v", err) + } + defer view.Unregister(vc...) + + view.SetReportingPeriod(time.Millisecond) + + ctx, _ := tag.New(context.Background(), tag.Upsert(measureLabel, "issue961")) + for _, m := range measures { + stats.Record(ctx, m.M(1)) + } + + srv := httptest.NewServer(exporter) + defer srv.Close() + + var i int + var output string + for { + time.Sleep(10 * time.Millisecond) + if i == 1000 { + t.Fatal("no output at /metrics (10s wait)") + } + i++ + + resp, err := http.Get(srv.URL) + if err != nil { + t.Fatalf("failed to get /metrics: %v", err) + } + + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %v", err) + } + resp.Body.Close() + + output = string(body) + if output != "" { + break + } + } + + if strings.Contains(output, "collected before with the same name and label values") { + t.Fatal("metric name and labels being duplicated but must be unique") + } + + if strings.Contains(output, "error(s) occurred") { + t.Fatal("error reported by prometheus registry") + } + + want := `# HELP tests_bar bar +# TYPE tests_bar counter +tests_bar{method="issue961",service="spanner"} 1 +# HELP tests_baz baz +# TYPE tests_baz counter +tests_baz{method="issue961",service="spanner"} 1 +# HELP tests_foo foo +# TYPE tests_foo counter +tests_foo{method="issue961",service="spanner"} 1 +` + if output != want { + t.Fatal("output differed from expected") + } +} From 91a0276ece6ad4cbdc4b46116f88d2b47a5f58e5 Mon Sep 17 00:00:00 2001 From: Johan Brandhorst Date: Mon, 12 Nov 2018 17:26:09 +0000 Subject: [PATCH 092/212] Update README link to database/sql wrapper (#972) This was incorrectly pointing to @basvanbeek's repo, which has since been merged into opencensus-integrations. 
--- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 97d66983d..b8a5107bf 100644 --- a/README.md +++ b/README.md @@ -29,7 +29,7 @@ integration with your RPC framework: * [net/http](https://godoc.org/go.opencensus.io/plugin/ochttp) * [gRPC](https://godoc.org/go.opencensus.io/plugin/ocgrpc) -* [database/sql](https://godoc.org/github.com/basvanbeek/ocsql) +* [database/sql](https://godoc.org/github.com/opencensus-integrations/ocsql) * [Go kit](https://godoc.org/github.com/go-kit/kit/tracing/opencensus) * [Groupcache](https://godoc.org/github.com/orijtech/groupcache) * [Caddy webserver](https://godoc.org/github.com/orijtech/caddy) From a91cf186985104201e26eb267eac146a531c321b Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 20 Nov 2018 12:29:42 -0800 Subject: [PATCH 093/212] Add metric API (#975) --- metric/doc.go | 16 +++ metric/metric.go | 55 +++++++++ metric/point.go | 239 +++++++++++++++++++++++++++++++++++++ metric/producer.go | 84 +++++++++++++ metric/producer_test.go | 43 +++++++ metric/type_string.go | 16 +++ metric/unit.go | 25 ++++ metric/valuetype_string.go | 16 +++ tag/map_codec.go | 3 + 9 files changed, 497 insertions(+) create mode 100644 metric/doc.go create mode 100644 metric/metric.go create mode 100644 metric/point.go create mode 100644 metric/producer.go create mode 100644 metric/producer_test.go create mode 100644 metric/type_string.go create mode 100644 metric/unit.go create mode 100644 metric/valuetype_string.go diff --git a/metric/doc.go b/metric/doc.go new file mode 100644 index 000000000..c0b2f5d11 --- /dev/null +++ b/metric/doc.go @@ -0,0 +1,16 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metric contains a data model and exporter support for metrics. +package metric // import "go.opencensus.io/metric" diff --git a/metric/metric.go b/metric/metric.go new file mode 100644 index 000000000..74b8e2b52 --- /dev/null +++ b/metric/metric.go @@ -0,0 +1,55 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "time" + + "go.opencensus.io/resource" +) + +// LabelValue represents the value of a label. A missing value (nil) is distinct +// from an empty string value. +type LabelValue *string + +// NewLabelValue creates a new non-nil LabelValue that represents the given string. +func NewLabelValue(val string) LabelValue { + return &val +} + +// Descriptor holds metadata about a metric. 
+type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []string // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. +type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/metric/point.go b/metric/point.go new file mode 100644 index 000000000..1a433af03 --- /dev/null +++ b/metric/point.go @@ -0,0 +1,239 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "time" + + "go.opencensus.io/exemplar" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // ValueType is the type of value held in this point. + ValueType ValueType + // Value is the value of this point. 
Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// ValueType is the type of value held in a point. +type ValueType int + +// Value types. New value types may be added. +const ( + ValueTypeFloat64 ValueType = iota + ValueTypeInt64 + ValueTypeDistribution + ValueTypeSummary +) + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + ValueType: ValueTypeFloat64, + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + ValueType: ValueTypeInt64, + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + ValueType: ValueTypeDistribution, + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + ValueType: ValueTypeSummary, + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. +// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. 
+func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. +type BucketOptions struct { + // Bounds specifies a set of buckets with arbitrary upper-bounds. 
+ // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *exemplar.Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. + Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. 
+const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) + +// IsGuage returns true if the metric type represents a gauge-type value. +func (t Type) IsGuage() bool { + switch t { + case TypeGaugeInt64, TypeGaugeFloat64, TypeGaugeDistribution: + return true + default: + return false + } +} + +// ValueType returns the type of value of the points of metrics of the receiver +// type. +func (t Type) ValueType() ValueType { + switch t { + case TypeGaugeFloat64, TypeCumulativeFloat64: + return ValueTypeFloat64 + case TypeGaugeDistribution, TypeCumulativeDistribution: + return ValueTypeDistribution + case TypeGaugeInt64, TypeCumulativeInt64: + return ValueTypeInt64 + case TypeSummary: + return ValueTypeSummary + default: + panic("unexpected metric.Type value") + } +} diff --git a/metric/producer.go b/metric/producer.go new file mode 100644 index 000000000..7902324f7 --- /dev/null +++ b/metric/producer.go @@ -0,0 +1,84 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "sync" +) + +// Producer is a source of metrics. +type Producer interface { + // Read should return the current values of all metrics supported by this + // metric provider. + // The returned metrics should be unique for each combination of name and + // resource. 
+ Read() []*Metric +} + +// Registry maintains a set of metric producers for exporting. Most users will +// rely on the DefaultRegistry. +type Registry struct { + mu sync.RWMutex + sources map[*uintptr]Producer + ind uint64 +} + +var _ Producer = (*Registry)(nil) + +// NewRegistry creates a new Registry. +func NewRegistry() *Registry { + m := &Registry{ + sources: make(map[*uintptr]Producer), + ind: 0, + } + return m +} + +// Read returns all the metrics from all the metric produces in this registry. +func (m *Registry) Read() []*Metric { + m.mu.RLock() + defer m.mu.RUnlock() + ms := make([]*Metric, 0, len(m.sources)) + for _, s := range m.sources { + ms = append(ms, s.Read()...) + } + return ms +} + +// AddProducer adds a producer to this registry. +func (m *Registry) AddProducer(source Producer) (remove func()) { + m.mu.Lock() + defer m.mu.Unlock() + if source == m { + panic("attempt to add registry to itself") + } + tok := new(uintptr) + m.sources[tok] = source + return func() { + m.mu.Lock() + defer m.mu.Unlock() + delete(m.sources, tok) + } +} + +var defaultReg = NewRegistry() + +// DefaultRegistry returns the default, global metric registry for the current +// process. +// Most applications will rely on this registry but libraries should not assume +// the default registry is used. +func DefaultRegistry() *Registry { + return defaultReg +} diff --git a/metric/producer_test.go b/metric/producer_test.go new file mode 100644 index 000000000..eb7ffe4f0 --- /dev/null +++ b/metric/producer_test.go @@ -0,0 +1,43 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "testing" +) + +func TestRegistry_AddProducer(t *testing.T) { + r := NewRegistry() + m1 := &Metric{ + Descriptor: Descriptor{ + Name: "test", + Unit: UnitDimensionless, + }, + } + remove := r.AddProducer(&constProducer{m1}) + if got, want := len(r.Read()), 1; got != want { + t.Fatal("Expected to read a single metric") + } + remove() + if got, want := len(r.Read()), 0; got != want { + t.Fatal("Expected to read no metrics") + } +} + +type constProducer []*Metric + +func (cp constProducer) Read() []*Metric { + return cp +} diff --git a/metric/type_string.go b/metric/type_string.go new file mode 100644 index 000000000..ae5be36b1 --- /dev/null +++ b/metric/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metric + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/metric/unit.go b/metric/unit.go new file mode 100644 index 000000000..7372b592b --- /dev/null +++ b/metric/unit.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/metric/valuetype_string.go b/metric/valuetype_string.go new file mode 100644 index 000000000..9fa4a3fad --- /dev/null +++ b/metric/valuetype_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type ValueType"; DO NOT EDIT. + +package metric + +import "strconv" + +const _ValueType_name = "ValueTypeFloat64ValueTypeInt64ValueTypeDistributionValueTypeSummary" + +var _ValueType_index = [...]uint8{0, 16, 30, 51, 67} + +func (i ValueType) String() string { + if i < 0 || i >= ValueType(len(_ValueType_index)-1) { + return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] +} diff --git a/tag/map_codec.go b/tag/map_codec.go index 3e998950c..e88e72777 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -162,6 +162,9 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. 
func Encode(m *Map) []byte { + if m == nil { + return nil + } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } From 296c89cfe1ba025d7618edc49b98c9611a94dd3b Mon Sep 17 00:00:00 2001 From: JBD Date: Wed, 21 Nov 2018 11:19:40 -0800 Subject: [PATCH 094/212] Revert "Add metric API (#975)" (#981) This reverts commit a91cf186985104201e26eb267eac146a531c321b. The API was merged without an API review. --- metric/doc.go | 16 --- metric/metric.go | 55 --------- metric/point.go | 239 ------------------------------------- metric/producer.go | 84 ------------- metric/producer_test.go | 43 ------- metric/type_string.go | 16 --- metric/unit.go | 25 ---- metric/valuetype_string.go | 16 --- tag/map_codec.go | 3 - 9 files changed, 497 deletions(-) delete mode 100644 metric/doc.go delete mode 100644 metric/metric.go delete mode 100644 metric/point.go delete mode 100644 metric/producer.go delete mode 100644 metric/producer_test.go delete mode 100644 metric/type_string.go delete mode 100644 metric/unit.go delete mode 100644 metric/valuetype_string.go diff --git a/metric/doc.go b/metric/doc.go deleted file mode 100644 index c0b2f5d11..000000000 --- a/metric/doc.go +++ /dev/null @@ -1,16 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package metric contains a data model and exporter support for metrics. 
-package metric // import "go.opencensus.io/metric" diff --git a/metric/metric.go b/metric/metric.go deleted file mode 100644 index 74b8e2b52..000000000 --- a/metric/metric.go +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "time" - - "go.opencensus.io/resource" -) - -// LabelValue represents the value of a label. A missing value (nil) is distinct -// from an empty string value. -type LabelValue *string - -// NewLabelValue creates a new non-nil LabelValue that represents the given string. -func NewLabelValue(val string) LabelValue { - return &val -} - -// Descriptor holds metadata about a metric. -type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []string // label keys -} - -// Metric represents a quantity measured against a resource with different -// label value combinations. -type Metric struct { - Descriptor Descriptor // metric descriptor - Resource *resource.Resource // resource against which this was measured - TimeSeries []*TimeSeries // one time series for each combination of label values -} - -// TimeSeries is a sequence of points associated with a combination of label -// values. 
-type TimeSeries struct { - LabelValues []LabelValue // label values, same order as keys in the metric descriptor - Points []Point // points sequence - StartTime time.Time // time we started recording this time series -} diff --git a/metric/point.go b/metric/point.go deleted file mode 100644 index 1a433af03..000000000 --- a/metric/point.go +++ /dev/null @@ -1,239 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "time" - - "go.opencensus.io/exemplar" -) - -// Point is a single data point of a time series. -type Point struct { - // Time is the point in time that this point represents in a time series. - Time time.Time - // ValueType is the type of value held in this point. - ValueType ValueType - // Value is the value of this point. Prefer using ReadValue to switching on - // the value type, since new value types might be added. - Value interface{} -} - -//go:generate stringer -type ValueType - -// ValueType is the type of value held in a point. -type ValueType int - -// Value types. New value types may be added. -const ( - ValueTypeFloat64 ValueType = iota - ValueTypeInt64 - ValueTypeDistribution - ValueTypeSummary -) - -// NewFloat64Point creates a new Point holding a float64 value. -func NewFloat64Point(t time.Time, val float64) Point { - return Point{ - ValueType: ValueTypeFloat64, - Value: val, - Time: t, - } -} - -// NewInt64Point creates a new Point holding an int64 value. 
-func NewInt64Point(t time.Time, val int64) Point { - return Point{ - ValueType: ValueTypeInt64, - Value: val, - Time: t, - } -} - -// NewDistributionPoint creates a new Point holding a Distribution value. -func NewDistributionPoint(t time.Time, val *Distribution) Point { - return Point{ - ValueType: ValueTypeDistribution, - Value: val, - Time: t, - } -} - -// NewSummaryPoint creates a new Point holding a Summary value. -func NewSummaryPoint(t time.Time, val *Summary) Point { - return Point{ - ValueType: ValueTypeSummary, - Value: val, - Time: t, - } -} - -// ValueVisitor allows reading the value of a point. -type ValueVisitor interface { - VisitFloat64Value(float64) - VisitInt64Value(int64) - VisitDistributionValue(*Distribution) - VisitSummaryValue(*Summary) -} - -// ReadValue accepts a ValueVisitor and calls the appropriate method with the -// value of this point. -// Consumers of Point should use this in preference to switching on the type -// of the value directly, since new value types may be added. -func (p Point) ReadValue(vv ValueVisitor) { - switch v := p.Value.(type) { - case int64: - vv.VisitInt64Value(v) - case float64: - vv.VisitFloat64Value(v) - case *Distribution: - vv.VisitDistributionValue(v) - case *Summary: - vv.VisitSummaryValue(v) - default: - panic("unexpected value type") - } -} - -// Distribution contains summary statistics for a population of values. It -// optionally contains a histogram representing the distribution of those -// values across a set of buckets. -type Distribution struct { - // Count is the number of values in the population. Must be non-negative. This value - // must equal the sum of the values in bucket_counts if a histogram is - // provided. - Count int64 - // Sum is the sum of the values in the population. If count is zero then this field - // must be zero. - Sum float64 - // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the - // population. 
For values x_i this is: - // - // Sum[i=1..n]((x_i - mean)^2) - // - // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition - // describes Welford's method for accumulating this sum in one pass. - // - // If count is zero then this field must be zero. - SumOfSquaredDeviation float64 - // BucketOptions describes the bounds of the histogram buckets in this - // distribution. - // - // A Distribution may optionally contain a histogram of the values in the - // population. - // - // If nil, there is no associated histogram. - BucketOptions *BucketOptions - // Bucket If the distribution does not have a histogram, then omit this field. - // If there is a histogram, then the sum of the values in the Bucket counts - // must equal the value in the count field of the distribution. - Buckets []Bucket -} - -// BucketOptions describes the bounds of the histogram buckets in this -// distribution. -type BucketOptions struct { - // Bounds specifies a set of buckets with arbitrary upper-bounds. - // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket - // index i are: - // - // [0, Bounds[i]) for i == 0 - // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 - // [Bounds[i-1], +infinity) for i == N-1 - Bounds []float64 -} - -// Bucket represents a single bucket (value range) in a distribution. -type Bucket struct { - // Count is the number of values in each bucket of the histogram, as described in - // bucket_bounds. - Count int64 - // Exemplar associated with this bucket (if any). - Exemplar *exemplar.Exemplar -} - -// Summary is a representation of percentiles. -type Summary struct { - // Count is the cumulative count (if available). - Count int64 - // Sum is the cumulative sum of values (if available). - Sum float64 - // HasCountAndSum is true if Count and Sum are available. - HasCountAndSum bool - // Snapshot represents percentiles calculated over an arbitrary time window. 
- // The values in this struct can be reset at arbitrary unknown times, with - // the requirement that all of them are reset at the same time. - Snapshot Snapshot -} - -// Snapshot represents percentiles over an arbitrary time. -// The values in this struct can be reset at arbitrary unknown times, with -// the requirement that all of them are reset at the same time. -type Snapshot struct { - // Count is the number of values in the snapshot. Optional since some systems don't - // expose this. Set to 0 if not available. - Count int64 - // Sum is the sum of values in the snapshot. Optional since some systems don't - // expose this. If count is 0 then this field must be zero. - Sum float64 - // Percentiles is a map from percentile (range (0-100.0]) to the value of - // the percentile. - Percentiles map[float64]float64 -} - -//go:generate stringer -type Type - -// Type is the overall type of metric, including its value type and whether it -// represents a cumulative total (since the start time) or if it represents a -// gauge value. -type Type int - -// Metric types. -const ( - TypeGaugeInt64 Type = iota - TypeGaugeFloat64 - TypeGaugeDistribution - TypeCumulativeInt64 - TypeCumulativeFloat64 - TypeCumulativeDistribution - TypeSummary -) - -// IsGuage returns true if the metric type represents a gauge-type value. -func (t Type) IsGuage() bool { - switch t { - case TypeGaugeInt64, TypeGaugeFloat64, TypeGaugeDistribution: - return true - default: - return false - } -} - -// ValueType returns the type of value of the points of metrics of the receiver -// type. 
-func (t Type) ValueType() ValueType { - switch t { - case TypeGaugeFloat64, TypeCumulativeFloat64: - return ValueTypeFloat64 - case TypeGaugeDistribution, TypeCumulativeDistribution: - return ValueTypeDistribution - case TypeGaugeInt64, TypeCumulativeInt64: - return ValueTypeInt64 - case TypeSummary: - return ValueTypeSummary - default: - panic("unexpected metric.Type value") - } -} diff --git a/metric/producer.go b/metric/producer.go deleted file mode 100644 index 7902324f7..000000000 --- a/metric/producer.go +++ /dev/null @@ -1,84 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package metric - -import ( - "sync" -) - -// Producer is a source of metrics. -type Producer interface { - // Read should return the current values of all metrics supported by this - // metric provider. - // The returned metrics should be unique for each combination of name and - // resource. - Read() []*Metric -} - -// Registry maintains a set of metric producers for exporting. Most users will -// rely on the DefaultRegistry. -type Registry struct { - mu sync.RWMutex - sources map[*uintptr]Producer - ind uint64 -} - -var _ Producer = (*Registry)(nil) - -// NewRegistry creates a new Registry. -func NewRegistry() *Registry { - m := &Registry{ - sources: make(map[*uintptr]Producer), - ind: 0, - } - return m -} - -// Read returns all the metrics from all the metric produces in this registry. 
-func (m *Registry) Read() []*Metric { - m.mu.RLock() - defer m.mu.RUnlock() - ms := make([]*Metric, 0, len(m.sources)) - for _, s := range m.sources { - ms = append(ms, s.Read()...) - } - return ms -} - -// AddProducer adds a producer to this registry. -func (m *Registry) AddProducer(source Producer) (remove func()) { - m.mu.Lock() - defer m.mu.Unlock() - if source == m { - panic("attempt to add registry to itself") - } - tok := new(uintptr) - m.sources[tok] = source - return func() { - m.mu.Lock() - defer m.mu.Unlock() - delete(m.sources, tok) - } -} - -var defaultReg = NewRegistry() - -// DefaultRegistry returns the default, global metric registry for the current -// process. -// Most applications will rely on this registry but libraries should not assume -// the default registry is used. -func DefaultRegistry() *Registry { - return defaultReg -} diff --git a/metric/producer_test.go b/metric/producer_test.go deleted file mode 100644 index eb7ffe4f0..000000000 --- a/metric/producer_test.go +++ /dev/null @@ -1,43 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric - -import ( - "testing" -) - -func TestRegistry_AddProducer(t *testing.T) { - r := NewRegistry() - m1 := &Metric{ - Descriptor: Descriptor{ - Name: "test", - Unit: UnitDimensionless, - }, - } - remove := r.AddProducer(&constProducer{m1}) - if got, want := len(r.Read()), 1; got != want { - t.Fatal("Expected to read a single metric") - } - remove() - if got, want := len(r.Read()), 0; got != want { - t.Fatal("Expected to read no metrics") - } -} - -type constProducer []*Metric - -func (cp constProducer) Read() []*Metric { - return cp -} diff --git a/metric/type_string.go b/metric/type_string.go deleted file mode 100644 index ae5be36b1..000000000 --- a/metric/type_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type Type"; DO NOT EDIT. - -package metric - -import "strconv" - -const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" - -var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} - -func (i Type) String() string { - if i < 0 || i >= Type(len(_Type_index)-1) { - return "Type(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _Type_name[_Type_index[i]:_Type_index[i+1]] -} diff --git a/metric/unit.go b/metric/unit.go deleted file mode 100644 index 7372b592b..000000000 --- a/metric/unit.go +++ /dev/null @@ -1,25 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package metric - -// Unit is a string encoded according to the case-sensitive abbreviations from the -// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html -type Unit string - -const ( - UnitDimensionless Unit = "1" - UnitBytes Unit = "By" - UnitMilliseconds Unit = "ms" -) diff --git a/metric/valuetype_string.go b/metric/valuetype_string.go deleted file mode 100644 index 9fa4a3fad..000000000 --- a/metric/valuetype_string.go +++ /dev/null @@ -1,16 +0,0 @@ -// Code generated by "stringer -type ValueType"; DO NOT EDIT. - -package metric - -import "strconv" - -const _ValueType_name = "ValueTypeFloat64ValueTypeInt64ValueTypeDistributionValueTypeSummary" - -var _ValueType_index = [...]uint8{0, 16, 30, 51, 67} - -func (i ValueType) String() string { - if i < 0 || i >= ValueType(len(_ValueType_index)-1) { - return "ValueType(" + strconv.FormatInt(int64(i), 10) + ")" - } - return _ValueType_name[_ValueType_index[i]:_ValueType_index[i+1]] -} diff --git a/tag/map_codec.go b/tag/map_codec.go index e88e72777..3e998950c 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -162,9 +162,6 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. 
func Encode(m *Map) []byte { - if m == nil { - return nil - } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } From fa1e35e3ffe868399138d03aaf2e949d52884f1b Mon Sep 17 00:00:00 2001 From: JBD Date: Wed, 21 Nov 2018 13:58:46 -0800 Subject: [PATCH 095/212] Keep pointer receiver name consistency for stats.Measure types (#982) Fixing the receiver name inconsistency from: type Float64Measure func Float64(name, description, unit string) *Float64Measure func (m Float64Measure) Description() string func (m *Float64Measure) M(v float64) Measurement func (m Float64Measure) Name() string func (m Float64Measure) Unit() string to: type Float64Measure func Float64(name, description, unit string) *Float64Measure func (m *Float64Measure) Description() string func (m *Float64Measure) M(v float64) Measurement func (m *Float64Measure) Name() string func (m *Float64Measure) Unit() string Fixes #974. --- stats/measure.go | 20 +++----------------- stats/measure_float64.go | 23 +++++++++++++++++++++-- stats/measure_int64.go | 23 +++++++++++++++++++++-- stats/record.go | 2 +- 4 files changed, 46 insertions(+), 22 deletions(-) diff --git a/stats/measure.go b/stats/measure.go index 64d02b196..1ffd3cefc 100644 --- a/stats/measure.go +++ b/stats/measure.go @@ -68,21 +68,6 @@ func (m *measureDescriptor) subscribed() bool { return atomic.LoadInt32(&m.subs) == 1 } -// Name returns the name of the measure. -func (m *measureDescriptor) Name() string { - return m.name -} - -// Description returns the description of the measure. -func (m *measureDescriptor) Description() string { - return m.description -} - -// Unit returns the unit of the measure. -func (m *measureDescriptor) Unit() string { - return m.unit -} - var ( mu sync.RWMutex measures = make(map[string]*measureDescriptor) @@ -108,8 +93,9 @@ func registerMeasureHandle(name, desc, unit string) *measureDescriptor { // provides methods to create measurements of their kind. 
For example, Int64Measure // provides M to convert an int64 into a measurement. type Measurement struct { - v float64 - m *measureDescriptor + v float64 + m Measure + desc *measureDescriptor } // Value returns the value of the Measurement as a float64. diff --git a/stats/measure_float64.go b/stats/measure_float64.go index acedb21c4..f02c1eda8 100644 --- a/stats/measure_float64.go +++ b/stats/measure_float64.go @@ -17,13 +17,17 @@ package stats // Float64Measure is a measure for float64 values. type Float64Measure struct { - *measureDescriptor + desc *measureDescriptor } // M creates a new float64 measurement. // Use Record to record measurements. func (m *Float64Measure) M(v float64) Measurement { - return Measurement{m: m.measureDescriptor, v: v} + return Measurement{ + m: m, + desc: m.desc, + v: v, + } } // Float64 creates a new measure for float64 values. @@ -34,3 +38,18 @@ func Float64(name, description, unit string) *Float64Measure { mi := registerMeasureHandle(name, description, unit) return &Float64Measure{mi} } + +// Name returns the name of the measure. +func (m *Float64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Float64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Float64Measure) Unit() string { + return m.desc.unit +} diff --git a/stats/measure_int64.go b/stats/measure_int64.go index c4243ba74..d101d7973 100644 --- a/stats/measure_int64.go +++ b/stats/measure_int64.go @@ -17,13 +17,17 @@ package stats // Int64Measure is a measure for int64 values. type Int64Measure struct { - *measureDescriptor + desc *measureDescriptor } // M creates a new int64 measurement. // Use Record to record measurements. 
func (m *Int64Measure) M(v int64) Measurement { - return Measurement{m: m.measureDescriptor, v: float64(v)} + return Measurement{ + m: m, + desc: m.desc, + v: float64(v), + } } // Int64 creates a new measure for int64 values. @@ -34,3 +38,18 @@ func Int64(name, description, unit string) *Int64Measure { mi := registerMeasureHandle(name, description, unit) return &Int64Measure{mi} } + +// Name returns the name of the measure. +func (m *Int64Measure) Name() string { + return m.desc.name +} + +// Description returns the description of the measure. +func (m *Int64Measure) Description() string { + return m.desc.description +} + +// Unit returns the unit of the measure. +func (m *Int64Measure) Unit() string { + return m.desc.unit +} diff --git a/stats/record.go b/stats/record.go index 0aced02c3..86f491e22 100644 --- a/stats/record.go +++ b/stats/record.go @@ -43,7 +43,7 @@ func Record(ctx context.Context, ms ...Measurement) { } record := false for _, m := range ms { - if m.m.subscribed() { + if m.desc.subscribed() { record = true break } From aeef0d75ef15b93e190debdf6d665717f52205be Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 28 Nov 2018 16:47:30 -0800 Subject: [PATCH 096/212] Fix prometheus exporter when tags are missing. (#989) * Fix prometheus exporter when tags are missing. * Change the TestViewMeasureWithoutTag to force sanitization. * Revert the dead code removing. * Add more tag combinations and in different orders. 
--- exporter/prometheus/prometheus.go | 21 ++++++-- exporter/prometheus/prometheus_test.go | 71 ++++++++++++++++++++++++++ 2 files changed, 87 insertions(+), 5 deletions(-) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 0266d9d7c..ca930eb1e 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -208,7 +208,7 @@ func (c *collector) Collect(ch chan<- prometheus.Metric) { func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { switch data := row.Data.(type) { case *view.CountData: - return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...) case *view.DistributionData: points := make(map[float64]uint64) @@ -235,13 +235,13 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) cumCount += uint64(data.CountPerBucket[i]) points[b] = cumCount } - return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags)...) + return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...) case *view.SumData: - return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...) case *view.LastValueData: - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags)...) + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...) 
default: return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) @@ -272,10 +272,21 @@ func newCollector(opts Options, registrar *prometheus.Registry) *collector { } } -func tagValues(t []tag.Tag) []string { +func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string { var values []string + // Add empty string for all missing keys in the tags map. + idx := 0 for _, t := range t { + for t.Key != expectedKeys[idx] { + idx++ + values = append(values, "") + } values = append(values, t.Value) + idx++ + } + for idx < len(expectedKeys) { + idx++ + values = append(values, "") } return values } diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index e6a0f4dbe..67b05f020 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -433,3 +433,74 @@ tests_foo{method="issue961",service="spanner"} 1 t.Fatal("output differed from expected") } } + +func TestViewMeasureWithoutTag(t *testing.T) { + exporter, err := NewExporter(Options{}) + if err != nil { + t.Fatalf("failed to create prometheus exporter: %v", err) + } + view.RegisterExporter(exporter) + defer view.UnregisterExporter(exporter) + m := stats.Int64("tests/foo", "foo", stats.UnitDimensionless) + k1, _ := tag.NewKey("key/1") + k2, _ := tag.NewKey("key/2") + k3, _ := tag.NewKey("key/3") + k4, _ := tag.NewKey("key/4") + k5, _ := tag.NewKey("key/5") + randomKey, _ := tag.NewKey("issue659") + v := &view.View{ + Name: m.Name(), + Description: m.Description(), + TagKeys: []tag.Key{k2, k5, k3, k1, k4}, // Ensure view has a tag + Measure: m, + Aggregation: view.Count(), + } + if err := view.Register(v); err != nil { + t.Fatalf("failed to create views: %v", err) + } + defer view.Unregister(v) + view.SetReportingPeriod(time.Millisecond) + // Make a measure without some tags in the view. 
+ ctx1, _ := tag.New(context.Background(), tag.Upsert(k4, "issue659"), tag.Upsert(randomKey, "value"), tag.Upsert(k2, "issue659")) + stats.Record(ctx1, m.M(1)) + ctx2, _ := tag.New(context.Background(), tag.Upsert(k5, "issue659"), tag.Upsert(k3, "issue659"), tag.Upsert(k1, "issue659")) + stats.Record(ctx2, m.M(2)) + srv := httptest.NewServer(exporter) + defer srv.Close() + var i int + var output string + for { + time.Sleep(10 * time.Millisecond) + if i == 1000 { + t.Fatal("no output at /metrics (10s wait)") + } + i++ + resp, err := http.Get(srv.URL) + if err != nil { + t.Fatalf("failed to get /metrics: %v", err) + } + body, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("failed to read body: %v", err) + } + resp.Body.Close() + output = string(body) + if output != "" { + break + } + } + if strings.Contains(output, "collected before with the same name and label values") { + t.Fatal("metric name and labels being duplicated but must be unique") + } + if strings.Contains(output, "error(s) occurred") { + t.Fatal("error reported by prometheus registry") + } + want := `# HELP tests_foo foo +# TYPE tests_foo counter +tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1 +tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1 +` + if output != want { + t.Fatalf("output differed from expected output: %s want: %s", output, want) + } +} From 8b019f31bc1c8ea10debf1f91ae4ce3276fca09f Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Wed, 28 Nov 2018 16:57:06 -0800 Subject: [PATCH 097/212] Remove dead code in Prometheus exporter. 
(#990) --- exporter/prometheus/prometheus.go | 8 -------- 1 file changed, 8 deletions(-) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index ca930eb1e..968409816 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -255,14 +255,6 @@ func tagKeysToLabels(keys []tag.Key) (labels []string) { return labels } -func tagsToLabels(tags []tag.Tag) []string { - var names []string - for _, tag := range tags { - names = append(names, internal.Sanitize(tag.Key.Name())) - } - return names -} - func newCollector(opts Options, registrar *prometheus.Registry) *collector { return &collector{ reg: registrar, From aab39bd6a98b853ab66c8a564f5d6cfcad59ce8a Mon Sep 17 00:00:00 2001 From: easy Date: Tue, 4 Dec 2018 13:35:38 +1100 Subject: [PATCH 098/212] Make our user-agent consistent with other user-agents. (#991) --- internal/internal.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/internal/internal.go b/internal/internal.go index e1d1238d0..2d6baf177 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -23,7 +23,7 @@ import ( // UserAgent is the user agent to be added to the outgoing // requests from the exporters. -var UserAgent = fmt.Sprintf("opencensus-go [%s]", opencensus.Version()) +var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // MonotonicEndTime returns the end time at present // but offset from start, monotonically. 
From 9f101dc03858797a09dc3bb9ee7cf8ebf83d770c Mon Sep 17 00:00:00 2001 From: Le Van Nghia Date: Sat, 15 Dec 2018 01:55:05 +0900 Subject: [PATCH 099/212] Improve the accuracy of the response size in ochttp (#997) * Improve the accuracy of the response size in ochttp * Ignore the ContentLength from the response when the request method is HEAD --- plugin/ochttp/client_stats.go | 26 +++++++++++++++++--------- 1 file changed, 17 insertions(+), 9 deletions(-) diff --git a/plugin/ochttp/client_stats.go b/plugin/ochttp/client_stats.go index 066ebb87f..b432316e0 100644 --- a/plugin/ochttp/client_stats.go +++ b/plugin/ochttp/client_stats.go @@ -61,6 +61,9 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.statusCode = resp.StatusCode + if req.Method != "HEAD" { + track.respContentLength = resp.ContentLength + } if resp.Body == nil { track.end() } else { @@ -82,13 +85,14 @@ func (t statsTransport) CancelRequest(req *http.Request) { } type tracker struct { - ctx context.Context - respSize int64 - reqSize int64 - start time.Time - body io.ReadCloser - statusCode int - endOnce sync.Once + ctx context.Context + respSize int64 + respContentLength int64 + reqSize int64 + start time.Time + body io.ReadCloser + statusCode int + endOnce sync.Once } var _ io.ReadCloser = (*tracker)(nil) @@ -96,9 +100,13 @@ var _ io.ReadCloser = (*tracker)(nil) func (t *tracker) end() { t.endOnce.Do(func() { latencyMs := float64(time.Since(t.start)) / float64(time.Millisecond) + respSize := t.respSize + if t.respSize == 0 && t.respContentLength > 0 { + respSize = t.respContentLength + } m := []stats.Measurement{ ClientSentBytes.M(t.reqSize), - ClientReceivedBytes.M(t.respSize), + ClientReceivedBytes.M(respSize), ClientRoundtripLatency.M(latencyMs), ClientLatency.M(latencyMs), ClientResponseBytes.M(t.respSize), @@ -116,9 +124,9 @@ func (t *tracker) end() { func (t *tracker) Read(b []byte) (int, error) { n, err := t.body.Read(b) + t.respSize += 
int64(n) switch err { case nil: - t.respSize += int64(n) return n, nil case io.EOF: t.end() From 8b95f1c9dfabe8d924e11cf1c30c71c19a7ac35c Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Fri, 14 Dec 2018 13:10:46 -0800 Subject: [PATCH 100/212] Add metric data model and export support (#977) --- metric/metricdata/doc.go | 19 +++ metric/metricdata/label.go | 28 ++++ metric/metricdata/metric.go | 46 +++++++ metric/metricdata/point.go | 195 +++++++++++++++++++++++++++ metric/metricdata/type_string.go | 16 +++ metric/metricdata/unit.go | 25 ++++ metric/metricexport/doc.go | 19 +++ metric/metricexport/producer.go | 100 ++++++++++++++ metric/metricexport/producer_test.go | 45 +++++++ tag/map_codec.go | 3 + 10 files changed, 496 insertions(+) create mode 100644 metric/metricdata/doc.go create mode 100644 metric/metricdata/label.go create mode 100644 metric/metricdata/metric.go create mode 100644 metric/metricdata/point.go create mode 100644 metric/metricdata/type_string.go create mode 100644 metric/metricdata/unit.go create mode 100644 metric/metricexport/doc.go create mode 100644 metric/metricexport/producer.go create mode 100644 metric/metricexport/producer_test.go diff --git a/metric/metricdata/doc.go b/metric/metricdata/doc.go new file mode 100644 index 000000000..52a7b3bf8 --- /dev/null +++ b/metric/metricdata/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+// Package metricdata contains the metrics data model.
+//
+// This is an EXPERIMENTAL package, and may change in arbitrary ways without
+// notice.
+package metricdata // import "go.opencensus.io/metric/metricdata"
diff --git a/metric/metricdata/label.go b/metric/metricdata/label.go
new file mode 100644
index 000000000..87c55b9c8
--- /dev/null
+++ b/metric/metricdata/label.go
@@ -0,0 +1,28 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricdata
+
+// LabelValue represents the value of a label.
+// The zero value represents a missing label value, which may be treated
+// differently to an empty string value by some back ends.
+type LabelValue struct {
+	Value   string // string value of the label
+	Present bool   // flag that indicates whether a value is present or not
+}
+
+// NewLabelValue creates a new non-nil LabelValue that represents the given string.
+func NewLabelValue(val string) LabelValue {
+	return LabelValue{Value: val, Present: true}
+}
diff --git a/metric/metricdata/metric.go b/metric/metricdata/metric.go
new file mode 100644
index 000000000..6ccdec583
--- /dev/null
+++ b/metric/metricdata/metric.go
@@ -0,0 +1,46 @@
+// Copyright 2018, OpenCensus Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/resource" +) + +// Descriptor holds metadata about a metric. +type Descriptor struct { + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []string // label keys +} + +// Metric represents a quantity measured against a resource with different +// label value combinations. +type Metric struct { + Descriptor Descriptor // metric descriptor + Resource *resource.Resource // resource against which this was measured + TimeSeries []*TimeSeries // one time series for each combination of label values +} + +// TimeSeries is a sequence of points associated with a combination of label +// values. +type TimeSeries struct { + LabelValues []LabelValue // label values, same order as keys in the metric descriptor + Points []Point // points sequence + StartTime time.Time // time we started recording this time series +} diff --git a/metric/metricdata/point.go b/metric/metricdata/point.go new file mode 100644 index 000000000..2c2a87156 --- /dev/null +++ b/metric/metricdata/point.go @@ -0,0 +1,195 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricdata + +import ( + "time" + + "go.opencensus.io/exemplar" +) + +// Point is a single data point of a time series. +type Point struct { + // Time is the point in time that this point represents in a time series. + Time time.Time + // Value is the value of this point. Prefer using ReadValue to switching on + // the value type, since new value types might be added. + Value interface{} +} + +//go:generate stringer -type ValueType + +// NewFloat64Point creates a new Point holding a float64 value. +func NewFloat64Point(t time.Time, val float64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewInt64Point creates a new Point holding an int64 value. +func NewInt64Point(t time.Time, val int64) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewDistributionPoint creates a new Point holding a Distribution value. +func NewDistributionPoint(t time.Time, val *Distribution) Point { + return Point{ + Value: val, + Time: t, + } +} + +// NewSummaryPoint creates a new Point holding a Summary value. +func NewSummaryPoint(t time.Time, val *Summary) Point { + return Point{ + Value: val, + Time: t, + } +} + +// ValueVisitor allows reading the value of a point. +type ValueVisitor interface { + VisitFloat64Value(float64) + VisitInt64Value(int64) + VisitDistributionValue(*Distribution) + VisitSummaryValue(*Summary) +} + +// ReadValue accepts a ValueVisitor and calls the appropriate method with the +// value of this point. 
+// Consumers of Point should use this in preference to switching on the type +// of the value directly, since new value types may be added. +func (p Point) ReadValue(vv ValueVisitor) { + switch v := p.Value.(type) { + case int64: + vv.VisitInt64Value(v) + case float64: + vv.VisitFloat64Value(v) + case *Distribution: + vv.VisitDistributionValue(v) + case *Summary: + vv.VisitSummaryValue(v) + default: + panic("unexpected value type") + } +} + +// Distribution contains summary statistics for a population of values. It +// optionally contains a histogram representing the distribution of those +// values across a set of buckets. +type Distribution struct { + // Count is the number of values in the population. Must be non-negative. This value + // must equal the sum of the values in bucket_counts if a histogram is + // provided. + Count int64 + // Sum is the sum of the values in the population. If count is zero then this field + // must be zero. + Sum float64 + // SumOfSquaredDeviation is the sum of squared deviations from the mean of the values in the + // population. For values x_i this is: + // + // Sum[i=1..n]((x_i - mean)^2) + // + // Knuth, "The Art of Computer Programming", Vol. 2, page 323, 3rd edition + // describes Welford's method for accumulating this sum in one pass. + // + // If count is zero then this field must be zero. + SumOfSquaredDeviation float64 + // BucketOptions describes the bounds of the histogram buckets in this + // distribution. + // + // A Distribution may optionally contain a histogram of the values in the + // population. + // + // If nil, there is no associated histogram. + BucketOptions *BucketOptions + // Bucket If the distribution does not have a histogram, then omit this field. + // If there is a histogram, then the sum of the values in the Bucket counts + // must equal the value in the count field of the distribution. + Buckets []Bucket +} + +// BucketOptions describes the bounds of the histogram buckets in this +// distribution. 
+type BucketOptions struct { + // Bounds specifies a set of bucket upper bounds. + // This defines len(bounds) + 1 (= N) buckets. The boundaries for bucket + // index i are: + // + // [0, Bounds[i]) for i == 0 + // [Bounds[i-1], Bounds[i]) for 0 < i < N-1 + // [Bounds[i-1], +infinity) for i == N-1 + Bounds []float64 +} + +// Bucket represents a single bucket (value range) in a distribution. +type Bucket struct { + // Count is the number of values in each bucket of the histogram, as described in + // bucket_bounds. + Count int64 + // Exemplar associated with this bucket (if any). + Exemplar *exemplar.Exemplar +} + +// Summary is a representation of percentiles. +type Summary struct { + // Count is the cumulative count (if available). + Count int64 + // Sum is the cumulative sum of values (if available). + Sum float64 + // HasCountAndSum is true if Count and Sum are available. + HasCountAndSum bool + // Snapshot represents percentiles calculated over an arbitrary time window. + // The values in this struct can be reset at arbitrary unknown times, with + // the requirement that all of them are reset at the same time. + Snapshot Snapshot +} + +// Snapshot represents percentiles over an arbitrary time. +// The values in this struct can be reset at arbitrary unknown times, with +// the requirement that all of them are reset at the same time. +type Snapshot struct { + // Count is the number of values in the snapshot. Optional since some systems don't + // expose this. Set to 0 if not available. + Count int64 + // Sum is the sum of values in the snapshot. Optional since some systems don't + // expose this. If count is 0 then this field must be zero. + Sum float64 + // Percentiles is a map from percentile (range (0-100.0]) to the value of + // the percentile. 
+ Percentiles map[float64]float64 +} + +//go:generate stringer -type Type + +// Type is the overall type of metric, including its value type and whether it +// represents a cumulative total (since the start time) or if it represents a +// gauge value. +type Type int + +// Metric types. +const ( + TypeGaugeInt64 Type = iota + TypeGaugeFloat64 + TypeGaugeDistribution + TypeCumulativeInt64 + TypeCumulativeFloat64 + TypeCumulativeDistribution + TypeSummary +) diff --git a/metric/metricdata/type_string.go b/metric/metricdata/type_string.go new file mode 100644 index 000000000..c3f8ec27b --- /dev/null +++ b/metric/metricdata/type_string.go @@ -0,0 +1,16 @@ +// Code generated by "stringer -type Type"; DO NOT EDIT. + +package metricdata + +import "strconv" + +const _Type_name = "TypeGaugeInt64TypeGaugeFloat64TypeGaugeDistributionTypeCumulativeInt64TypeCumulativeFloat64TypeCumulativeDistributionTypeSummary" + +var _Type_index = [...]uint8{0, 14, 30, 51, 70, 91, 117, 128} + +func (i Type) String() string { + if i < 0 || i >= Type(len(_Type_index)-1) { + return "Type(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _Type_name[_Type_index[i]:_Type_index[i+1]] +} diff --git a/metric/metricdata/unit.go b/metric/metricdata/unit.go new file mode 100644 index 000000000..72887d2ee --- /dev/null +++ b/metric/metricdata/unit.go @@ -0,0 +1,25 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metricdata + +// Unit is a string encoded according to the case-sensitive abbreviations from the +// Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html +type Unit string + +const ( + UnitDimensionless Unit = "1" + UnitBytes Unit = "By" + UnitMilliseconds Unit = "ms" +) diff --git a/metric/metricexport/doc.go b/metric/metricexport/doc.go new file mode 100644 index 000000000..df632a792 --- /dev/null +++ b/metric/metricexport/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metricexport contains support for exporting metric data. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metricexport // import "go.opencensus.io/metric/metricexport" diff --git a/metric/metricexport/producer.go b/metric/metricexport/producer.go new file mode 100644 index 000000000..0b651f7b0 --- /dev/null +++ b/metric/metricexport/producer.go @@ -0,0 +1,100 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metricexport
+
+import (
+	"go.opencensus.io/metric/metricdata"
+	"sync"
+	"sync/atomic"
+)
+
+// Producer is a source of metrics.
+type Producer interface {
+	// Read should return the current values of all metrics supported by this
+	// metric provider.
+	// The returned metrics should be unique for each combination of name and
+	// resource.
+	Read() []*metricdata.Metric
+}
+
+// Registry maintains a set of metric producers for exporting. Most users will
+// rely on the DefaultRegistry.
+type Registry struct {
+	mu    sync.RWMutex
+	state atomic.Value
+}
+
+type registryState struct {
+	producers map[Producer]struct{}
+}
+
+// NewRegistry creates a new Registry.
+func NewRegistry() *Registry {
+	m := &Registry{}
+	m.state.Store(&registryState{
+		producers: make(map[Producer]struct{}),
+	})
+	return m
+}
+
+// ReadAll returns all the metrics from all the metric producers in this registry.
+func (m *Registry) ReadAll() []*metricdata.Metric {
+	s := m.state.Load().(*registryState)
+	ms := make([]*metricdata.Metric, 0, len(s.producers))
+	for p := range s.producers {
+		ms = append(ms, p.Read()...)
+	}
+	return ms
+}
+
+// AddProducer adds a producer to this registry.
+func (m *Registry) AddProducer(p Producer) { + m.mu.Lock() + defer m.mu.Unlock() + newState := ®istryState{ + make(map[Producer]struct{}), + } + state := m.state.Load().(*registryState) + for producer := range state.producers { + newState.producers[producer] = struct{}{} + } + newState.producers[p] = struct{}{} + m.state.Store(newState) +} + +// RemoveProducer removes the given producer from this registry. +func (m *Registry) RemoveProducer(p Producer) { + m.mu.Lock() + defer m.mu.Unlock() + newState := ®istryState{ + make(map[Producer]struct{}), + } + state := m.state.Load().(*registryState) + for producer := range state.producers { + newState.producers[producer] = struct{}{} + } + delete(newState.producers, p) + m.state.Store(newState) +} + +var defaultReg = NewRegistry() + +// DefaultRegistry returns the default, global metric registry for the current +// process. +// Most applications will rely on this registry but libraries should not assume +// the default registry is used. +func DefaultRegistry() *Registry { + return defaultReg +} diff --git a/metric/metricexport/producer_test.go b/metric/metricexport/producer_test.go new file mode 100644 index 000000000..7aa653f63 --- /dev/null +++ b/metric/metricexport/producer_test.go @@ -0,0 +1,45 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metricexport + +import ( + "go.opencensus.io/metric/metricdata" + "testing" +) + +func TestRegistry_AddProducer(t *testing.T) { + r := NewRegistry() + m1 := &metricdata.Metric{ + Descriptor: metricdata.Descriptor{ + Name: "test", + Unit: metricdata.UnitDimensionless, + }, + } + p := &constProducer{m1} + r.AddProducer(p) + if got, want := len(r.ReadAll()), 1; got != want { + t.Fatal("Expected to read a single metric") + } + r.RemoveProducer(p) + if got, want := len(r.ReadAll()), 0; got != want { + t.Fatal("Expected to read no metrics") + } +} + +type constProducer []*metricdata.Metric + +func (cp constProducer) Read() []*metricdata.Metric { + return cp +} diff --git a/tag/map_codec.go b/tag/map_codec.go index 3e998950c..e88e72777 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -162,6 +162,9 @@ func (eg *encoderGRPC) bytes() []byte { // Encode encodes the tag map into a []byte. It is useful to propagate // the tag maps on wire in binary format. func Encode(m *Map) []byte { + if m == nil { + return nil + } eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } From df705edf02be0c9ee837ec472f5a549573be64ed Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Thu, 20 Dec 2018 10:41:30 -0800 Subject: [PATCH 101/212] Upgrade dependencies; resolve test issue with Prom exporter (#1000) --- exporter/prometheus/prometheus_test.go | 33 ++++++++--------- go.mod | 35 ++++++++++-------- go.sum | 50 ++++++++++++++++++++++++++ 3 files changed, 88 insertions(+), 30 deletions(-) diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 67b05f020..80157145e 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -17,6 +17,7 @@ package prometheus import ( "context" "fmt" + "github.com/google/go-cmp/cmp" "io/ioutil" "net/http" "net/http/httptest" @@ -294,16 +295,16 @@ func TestCumulativenessFromHistograms(t *testing.T) { // 100: [] | 0 + prev(i) = 0 + 4 = 4 // 250: [187.12, 199.9, 245.67] 
| 3 + prev(i) = 3 + 4 = 7 wantLines := []string{ - `cash_register_bucket{le="1"} 1`, - `cash_register_bucket{le="5"} 2`, - `cash_register_bucket{le="10"} 3`, - `cash_register_bucket{le="20"} 4`, - `cash_register_bucket{le="50"} 4`, - `cash_register_bucket{le="100"} 4`, - `cash_register_bucket{le="250"} 7`, - `cash_register_bucket{le="+Inf"} 7`, + `cash_register_bucket{le="1.0"} 1.0`, + `cash_register_bucket{le="5.0"} 2.0`, + `cash_register_bucket{le="10.0"} 3.0`, + `cash_register_bucket{le="20.0"} 4.0`, + `cash_register_bucket{le="50.0"} 4.0`, + `cash_register_bucket{le="100.0"} 4.0`, + `cash_register_bucket{le="250.0"} 7.0`, + `cash_register_bucket{le="+Inf"} 7.0`, `cash_register_sum 654.0799999999999`, // Summation of the input values - `cash_register_count 7`, + `cash_register_count 7.0`, } ctx := context.Background() @@ -421,16 +422,16 @@ func TestConstLabelsIncluded(t *testing.T) { want := `# HELP tests_bar bar # TYPE tests_bar counter -tests_bar{method="issue961",service="spanner"} 1 +tests_bar{method="issue961",service="spanner"} 1.0 # HELP tests_baz baz # TYPE tests_baz counter -tests_baz{method="issue961",service="spanner"} 1 +tests_baz{method="issue961",service="spanner"} 1.0 # HELP tests_foo foo # TYPE tests_foo counter -tests_foo{method="issue961",service="spanner"} 1 +tests_foo{method="issue961",service="spanner"} 1.0 ` - if output != want { - t.Fatal("output differed from expected") + if diff := cmp.Diff(output, want); diff != "" { + t.Fatalf("output differed from expected -got +want: %s", diff) } } @@ -497,8 +498,8 @@ func TestViewMeasureWithoutTag(t *testing.T) { } want := `# HELP tests_foo foo # TYPE tests_foo counter -tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1 -tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1 +tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1.0 +tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1.0 ` if output != 
want { t.Fatalf("output differed from expected output: %s want: %s", output, want) diff --git a/go.mod b/go.mod index 1236f4c2f..3da37b230 100644 --- a/go.mod +++ b/go.mod @@ -1,25 +1,32 @@ module go.opencensus.io require ( - git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 + cloud.google.com/go v0.34.0 // indirect + git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/ghodss/yaml v1.0.0 // indirect - github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b // indirect + github.com/golang/mock v1.2.0 // indirect github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.5.0 // indirect + github.com/grpc-ecosystem/grpc-gateway v1.6.2 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/openzipkin/zipkin-go v0.1.1 - github.com/prometheus/client_golang v0.8.0 + github.com/openzipkin/zipkin-go v0.1.3 + github.com/prometheus/client_golang v0.9.2 github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e - github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 - golang.org/x/net v0.0.0-20180906233101-161cd47e91fd - golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f - golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e + github.com/prometheus/common v0.0.0-20181218105931-67670fe90761 + github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a + golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 // indirect + golang.org/x/net v0.0.0-20181217023233-e147a9138326 + golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect + golang.org/x/sync v0.0.0-20181108010431-42b317875d0f + golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 golang.org/x/text v0.3.0 - google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf - google.golang.org/genproto v0.0.0-20180831171423-11092d34479b - google.golang.org/grpc v1.14.0 
- gopkg.in/yaml.v2 v2.2.1 // indirect + golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e // indirect + google.golang.org/api v0.0.0-20181220000619-583d854617af + google.golang.org/appengine v1.3.0 // indirect + google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb + google.golang.org/grpc v1.17.0 + gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect + gopkg.in/yaml.v2 v2.2.2 // indirect + honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 // indirect ) diff --git a/go.sum b/go.sum index 3e0bab884..90a87b1df 100644 --- a/go.sum +++ b/go.sum @@ -1,48 +1,98 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a h1:ROdxmPITVHDxX22/qs2j1XXyJ/riQh8MB4UeWA4ZfVw= +git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod 
h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/openzipkin/zipkin-go v0.1.3 h1:36hTtUTQR/vPX7YVJo2PYexSbHdAJiAkDrjuXw/YlYQ= +github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/prometheus/client_golang v0.8.0 h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.2 
h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= +github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/common v0.0.0-20181218105931-67670fe90761 h1:z6tvbDJ5OLJ48FFmnksv04a78maSTRBUIhkdHYV5Y98= +github.com/prometheus/common v0.0.0-20181218105931-67670fe90761/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= +github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181217023233-e147a9138326 h1:iCzOf0xz39Tstp+Tu/WwyGjUXCk34QhQORRxBeXXTA4= +golang.org/x/net v0.0.0-20181217023233-e147a9138326/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod 
h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= +google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb h1:dQshZyyJ5W/Xk8myF4GKBak1pZW6EywJuQ8+44EQhGA= +google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 25040cf06cf283c0b4f2c3f3bb5a325818f7643e Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 21 Nov 2018 10:09:54 -0800 Subject: [PATCH 102/212] Return an error if negative bucket bounds are provided --- stats/view/view.go | 9 +++++++++ stats/view/view_test.go | 30 ++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) diff --git a/stats/view/view.go b/stats/view/view.go index c2a08af67..02644e1f4 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -17,6 +17,7 @@ package view import ( "bytes" + "errors" "fmt" "reflect" "sort" @@ -69,6 +70,8 @@ func (v *View) same(other *View) bool { v.Measure.Name() == other.Measure.Name() } +var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") + // canonicalize canonicalizes v by setting explicit // defaults for Name and Description and sorting the TagKeys func (v *View) canonicalize() error { @@ -90,6 +93,12 @@ func (v *View) canonicalize() error { sort.Slice(v.TagKeys, func(i, j int) bool { return v.TagKeys[i].Name() < v.TagKeys[j].Name() }) + sort.Float64s(v.Aggregation.Buckets) + for _, b := range v.Aggregation.Buckets { + if b < 0 { + return ErrNegativeBucketBounds + } + } return nil } diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 445e56a1e..756319572 100644 --- a/stats/view/view_test.go +++ 
b/stats/view/view_test.go @@ -19,6 +19,8 @@ import ( "context" "testing" + "github.com/google/go-cmp/cmp" + "go.opencensus.io/exemplar" "go.opencensus.io/stats" @@ -444,3 +446,31 @@ func TestRegisterAfterMeasurement(t *testing.T) { t.Error("View should have data") } } + +func TestViewRegister_negativeBucketBounds(t *testing.T) { + m := stats.Int64("TestViewRegister_negativeBucketBounds", "", "") + v := &View{ + Measure: m, + Aggregation: Distribution(-1, 2), + } + err := Register(v) + if err != ErrNegativeBucketBounds { + t.Errorf("Expected ErrNegativeBucketBounds, got %v", err) + } +} + +func TestViewRegister_sortBuckets(t *testing.T) { + m := stats.Int64("TestViewRegister_sortBuckets", "", "") + v := &View{ + Measure: m, + Aggregation: Distribution(2, 1), + } + err := Register(v) + if err != nil { + t.Fatalf("Unexpected err %s", err) + } + want := []float64{1, 2} + if diff := cmp.Diff(v.Aggregation.Buckets, want); diff != "" { + t.Errorf("buckets differ -got +want: %s", diff) + } +} From 950a67f393d867cfbe91414063b69e511f42fefb Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Wed, 19 Dec 2018 18:06:23 -0800 Subject: [PATCH 103/212] Fix data race when registering views multiple times Fixed by moving view canonicalization into worker. 
--- stats/view/view_test.go | 12 ++++++++++++ stats/view/worker.go | 5 ----- stats/view/worker_commands.go | 6 ++++++ 3 files changed, 18 insertions(+), 5 deletions(-) diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 756319572..d50379fef 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -459,6 +459,18 @@ func TestViewRegister_negativeBucketBounds(t *testing.T) { } } +func TestViewRegister_zeroBucketBounds(t *testing.T) { + m := stats.Int64("TestViewRegister_negativeBucketBounds", "", "") + v := &View{ + Measure: m, + Aggregation: Distribution(0, 2), + } + err := Register(v) + if err != nil { + t.Errorf("Expected no error, got %v", err) + } +} + func TestViewRegister_sortBuckets(t *testing.T) { m := stats.Int64("TestViewRegister_sortBuckets", "", "") v := &View{ diff --git a/stats/view/worker.go b/stats/view/worker.go index 63b0ee3cc..0069e4bc1 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -64,11 +64,6 @@ func Find(name string) (v *View) { // Register begins collecting data for the given views. // Once a view is registered, it reports data to the registered exporters. 
func Register(views ...*View) error { - for _, v := range views { - if err := v.canonicalize(); err != nil { - return err - } - } req := ®isterViewReq{ views: views, err: make(chan error), diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index b38f26f42..f71ec1eb0 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -58,6 +58,12 @@ type registerViewReq struct { } func (cmd *registerViewReq) handleCommand(w *worker) { + for _, v := range cmd.views { + if err := v.canonicalize(); err != nil { + cmd.err <- err + return + } + } var errstr []string for _, view := range cmd.views { vi, err := w.tryRegisterView(view) From 6e8ca2ce486b96f131e9b5e2304d5e51403634f5 Mon Sep 17 00:00:00 2001 From: Vijay Samuel Date: Thu, 17 Jan 2019 14:25:07 -0800 Subject: [PATCH 104/212] Guard bundler from buffering too many messages in memory (#1011) --- exporter/jaeger/jaeger.go | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index d108cc039..9a433d9bc 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -69,6 +69,9 @@ type Options struct { // Process contains the information about the exporting process. Process Process + + //BufferMaxCount defines the total number of traces that can be buffered in memory + BufferMaxCount int } // NewExporter returns a trace.Exporter implementation that exports @@ -126,6 +129,14 @@ func NewExporter(o Options) (*Exporter, error) { onError(err) } }) + + // Set BufferedByteLimit with the total number of spans that are permissible to be held in memory. + // This needs to be done since the size of messages is always set to 1. Failing to set this would allow + // 1G messages to be held in memory since that is the default value of BufferedByteLimit. 
+ if o.BufferMaxCount != 0 { + bundler.BufferedByteLimit = o.BufferMaxCount + } + e.bundler = bundler return e, nil } From 2f39cd45ab48b7dc7e5b0d7393bae371e201b1c4 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 17 Jan 2019 14:29:00 -0800 Subject: [PATCH 105/212] Update internal import. (#1014) --- go.sum | 1 + internal/internal.go | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/go.sum b/go.sum index 90a87b1df..baf0a1da7 100644 --- a/go.sum +++ b/go.sum @@ -67,6 +67,7 @@ golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 h1:MXtOG7w2ND9qNCUZSDBGll/SpVIq7ftozR9I8/JGBHY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/internal/internal.go b/internal/internal.go index 2d6baf177..9a638781c 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -18,7 +18,7 @@ import ( "fmt" "time" - "go.opencensus.io" + opencensus "go.opencensus.io" ) // UserAgent is the user agent to be added to the outgoing From 2b5032d79456124f42db6b7eb19ac6c155449dc2 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Fri, 14 Dec 2018 16:12:30 -0500 Subject: [PATCH 106/212] Add metric package containing Gauge support --- metric/doc.go | 19 ++ .../producer_test.go => examples_test.go} | 37 ++-- metric/gauge.go | 196 ++++++++++++++++++ metric/gauge_test.go | 183 ++++++++++++++++ metric/metricdata/unit.go | 2 + metric/metricexport/producer.go | 72 ------- 
metric/registry.go | 81 ++++++++ 7 files changed, 495 insertions(+), 95 deletions(-) create mode 100644 metric/doc.go rename metric/{metricexport/producer_test.go => examples_test.go} (52%) create mode 100644 metric/gauge.go create mode 100644 metric/gauge_test.go create mode 100644 metric/registry.go diff --git a/metric/doc.go b/metric/doc.go new file mode 100644 index 000000000..485ee8f58 --- /dev/null +++ b/metric/doc.go @@ -0,0 +1,19 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package metric support for gauge metrics. +// +// This is an EXPERIMENTAL package, and may change in arbitrary ways without +// notice. +package metric // import "go.opencensus.io/metric" diff --git a/metric/metricexport/producer_test.go b/metric/examples_test.go similarity index 52% rename from metric/metricexport/producer_test.go rename to metric/examples_test.go index 7aa653f63..b510ce8c1 100644 --- a/metric/metricexport/producer_test.go +++ b/metric/examples_test.go @@ -12,34 +12,25 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package metricexport +package metric_test import ( + "net/http" + + "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" - "testing" ) -func TestRegistry_AddProducer(t *testing.T) { - r := NewRegistry() - m1 := &metricdata.Metric{ - Descriptor: metricdata.Descriptor{ - Name: "test", - Unit: metricdata.UnitDimensionless, - }, - } - p := &constProducer{m1} - r.AddProducer(p) - if got, want := len(r.ReadAll()), 1; got != want { - t.Fatal("Expected to read a single metric") - } - r.RemoveProducer(p) - if got, want := len(r.ReadAll()), 0; got != want { - t.Fatal("Expected to read no metrics") - } -} +func ExampleRegistry_AddInt64Gauge() { + r := metric.NewRegistry() + // TODO: allow exporting from a registry -type constProducer []*metricdata.Metric + g := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") -func (cp constProducer) Read() []*metricdata.Metric { - return cp + http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) { + e := g.GetEntry(metricdata.NewLabelValue(request.Method)) + e.Add(1) + defer e.Add(-1) + // process request ... + }) } diff --git a/metric/gauge.go b/metric/gauge.go new file mode 100644 index 000000000..c64a9f074 --- /dev/null +++ b/metric/gauge.go @@ -0,0 +1,196 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metric + +import ( + "math" + "sync" + "sync/atomic" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/metric/metricdata" +) + +// gauge represents a quantity that can go up an down, for example queue depth +// or number of outstanding requests. +// +// gauge maintains a value for each combination of of label values passed to +// the Set or Add methods. +// +// gauge should not be used directly, use Float64Gauge or Int64Gauge. +type gauge struct { + vals sync.Map + desc metricdata.Descriptor + start time.Time + keys []string + isFloat bool +} + +type gaugeEntry interface { + read(t time.Time) metricdata.Point +} + +// Read returns the current values of the gauge as a metric for export. +func (g *gauge) read() *metricdata.Metric { + now := time.Now() + m := &metricdata.Metric{ + Descriptor: g.desc, + } + g.vals.Range(func(k, v interface{}) bool { + entry := v.(gaugeEntry) + key := k.(string) + labelVals := g.labelValues(key) + m.TimeSeries = append(m.TimeSeries, &metricdata.TimeSeries{ + StartTime: now, // Gauge value is instantaneous. + LabelValues: labelVals, + Points: []metricdata.Point{ + entry.read(now), + }, + }) + return true + }) + return m +} + +func (g *gauge) mapKey(labelVals []metricdata.LabelValue) string { + vb := &tagencoding.Values{} + for _, v := range labelVals { + b := make([]byte, 1, len(v.Value)+1) + if v.Present { + b[0] = 1 + b = append(b, []byte(v.Value)...) 
+ } + vb.WriteValue(b) + } + return string(vb.Bytes()) +} + +func (g *gauge) labelValues(s string) []metricdata.LabelValue { + vals := make([]metricdata.LabelValue, 0, len(g.keys)) + vb := &tagencoding.Values{Buffer: []byte(s)} + for range g.keys { + v := vb.ReadValue() + if v[0] == 0 { + vals = append(vals, metricdata.LabelValue{}) + } else { + vals = append(vals, metricdata.NewLabelValue(string(v[1:]))) + } + } + return vals +} + +func (g *gauge) entryForValues(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) interface{} { + if len(labelVals) != len(g.keys) { + panic("must supply the same number of label values as keys used to construct this gauge") + } + mapKey := g.mapKey(labelVals) + if entry, ok := g.vals.Load(mapKey); ok { + return entry + } + entry, _ := g.vals.LoadOrStore(mapKey, newEntry()) + return entry +} + +// Float64Gauge represents a float64 value that can go up and down. +// +// Float64Gauge maintains a float64 value for each combination of of label values +// passed to the Set or Add methods. +type Float64Gauge struct { + g gauge +} + +// Float64Entry represents a single value of the gauge corresponding to a set +// of label values. +type Float64Entry struct { + val uint64 // needs to be uint64 for atomic access, interpret with math.Float64frombits +} + +func (e *Float64Entry) read(t time.Time) metricdata.Point { + v := math.Float64frombits(atomic.LoadUint64(&e.val)) + if v < 0 { + v = 0 + } + return metricdata.NewFloat64Point(t, v) +} + +// GetEntry returns a gauge entry where each key for this gauge has the value +// given. +// +// The number of label values supplied must be exactly the same as the number +// of keys supplied when this gauge was created. +func (g *Float64Gauge) GetEntry(labelVals ...metricdata.LabelValue) *Float64Entry { + return g.g.entryForValues(labelVals, func() gaugeEntry { + return &Float64Entry{} + }).(*Float64Entry) +} + +// Set sets the gauge entry value to val. 
+func (e *Float64Entry) Set(val float64) { + atomic.StoreUint64(&e.val, math.Float64bits(val)) +} + +// Add increments the gauge entry value by val. +func (e *Float64Entry) Add(val float64) { + var swapped bool + for !swapped { + oldVal := atomic.LoadUint64(&e.val) + newVal := math.Float64bits(math.Float64frombits(oldVal) + val) + swapped = atomic.CompareAndSwapUint64(&e.val, oldVal, newVal) + } +} + +// Int64Gauge represents a int64 gauge value that can go up and down. +// +// Int64Gauge maintains an int64 value for each combination of label values passed to the +// Set or Add methods. +type Int64Gauge struct { + g gauge +} + +// Int64GaugeEntry represents a single value of the gauge corresponding to a set +// of label values. +type Int64GaugeEntry struct { + val int64 +} + +func (e *Int64GaugeEntry) read(t time.Time) metricdata.Point { + v := atomic.LoadInt64(&e.val) + if v < 0 { + v = 0.0 + } + return metricdata.NewInt64Point(t, v) +} + +// GetEntry returns a gauge entry where each key for this gauge has the value +// given. +// +// The number of label values supplied must be exactly the same as the number +// of keys supplied when this gauge was created. +func (g *Int64Gauge) GetEntry(labelVals ...metricdata.LabelValue) *Int64GaugeEntry { + return g.g.entryForValues(labelVals, func() gaugeEntry { + return &Int64GaugeEntry{} + }).(*Int64GaugeEntry) +} + +// Set sets the value of the gauge entry to the provided value. +func (e *Int64GaugeEntry) Set(val int64) { + atomic.StoreInt64(&e.val, val) +} + +// Add increments the current gauge entry value by val, which may be negative. 
+func (e *Int64GaugeEntry) Add(val int64) { + atomic.AddInt64(&e.val, val) +} diff --git a/metric/gauge_test.go b/metric/gauge_test.go new file mode 100644 index 000000000..e5f8f2e9e --- /dev/null +++ b/metric/gauge_test.go @@ -0,0 +1,183 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "fmt" + "go.opencensus.io/metric/metricdata" + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" +) + +func TestGauge(t *testing.T) { + r := NewRegistry() + f := r.AddFloat64Gauge("TestGauge", "", "", "k1", "k2") + f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}).Set(5) + f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) + f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) + f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")).Add(1) + m := r.ReadAll() + want := []*metricdata.Metric{ + { + Descriptor: metricdata.Descriptor{ + Name: "TestGauge", + LabelKeys: []string{"k1", "k2"}, + }, + TimeSeries: []*metricdata.TimeSeries{ + { + LabelValues: []metricdata.LabelValue{ + {}, {}, + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 5), + }, + }, + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("k1v1"), + {}, + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 2), + }, + }, + { + LabelValues: []metricdata.LabelValue{ + 
metricdata.NewLabelValue("k1v2"), + metricdata.NewLabelValue("k2v2"), + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 1), + }, + }, + }, + }, + } + canonicalize(m) + canonicalize(want) + if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { + t.Errorf("-got +want: %s", diff) + } +} + +func TestFloat64Entry_Add(t *testing.T) { + r := NewRegistry() + g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + g.GetEntry().Add(0) + ms := r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { + t.Errorf("value = %v, want %v", got, want) + } + g.GetEntry().Add(1) + ms = r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 1.0; got != want { + t.Errorf("value = %v, want %v", got, want) + } + g.GetEntry().Add(-1) + ms = r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { + r := NewRegistry() + g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + g.GetEntry().Add(-1.0) + ms := r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestInt64GaugeEntry_Add(t *testing.T) { + r := NewRegistry() + g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + g.GetEntry().Add(0) + ms := r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { + t.Errorf("value = %v, want %v", got, want) + } + g.GetEntry().Add(1) + ms = r.ReadAll() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { + r := NewRegistry() + g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + g.GetEntry().Add(-1) + ms := r.ReadAll() + if 
got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestMapKey(t *testing.T) { + cases := [][]metricdata.LabelValue{ + {}, + {metricdata.LabelValue{}}, + {metricdata.NewLabelValue("")}, + {metricdata.NewLabelValue("-")}, + {metricdata.NewLabelValue(",")}, + {metricdata.NewLabelValue("v1"), metricdata.NewLabelValue("v2")}, + {metricdata.NewLabelValue("v1"), metricdata.LabelValue{}}, + {metricdata.NewLabelValue("v1"), metricdata.LabelValue{}, metricdata.NewLabelValue(string([]byte{0}))}, + {metricdata.LabelValue{}, metricdata.LabelValue{}}, + } + for i, tc := range cases { + t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { + g := &gauge{ + keys: make([]string, len(tc)), + } + mk := g.mapKey(tc) + vals := g.labelValues(mk) + if diff := cmp.Diff(vals, tc); diff != "" { + t.Errorf("values differ after serialization -got +want: %s", diff) + } + }) + } +} + +func ignoreTimes(_, _ time.Time) bool { + return true +} + +func canonicalize(ms []*metricdata.Metric) { + for _, m := range ms { + sort.Slice(m.TimeSeries, func(i, j int) bool { + // sort time series by their label values + iLabels := m.TimeSeries[i].LabelValues + jLabels := m.TimeSeries[j].LabelValues + for k := 0; k < len(iLabels); k++ { + if !iLabels[k].Present { + if jLabels[k].Present { + return true + } + } else if !jLabels[k].Present { + return false + } else { + return iLabels[k].Value < jLabels[k].Value + } + } + panic("should have returned") + }) + } +} diff --git a/metric/metricdata/unit.go b/metric/metricdata/unit.go index 72887d2ee..b483a1371 100644 --- a/metric/metricdata/unit.go +++ b/metric/metricdata/unit.go @@ -18,6 +18,8 @@ package metricdata // Unified Code for Units of Measure: http://unitsofmeasure.org/ucum.html type Unit string +// Predefined units. To record against a unit not represented here, create your +// own Unit type constant from a string. 
const ( UnitDimensionless Unit = "1" UnitBytes Unit = "By" diff --git a/metric/metricexport/producer.go b/metric/metricexport/producer.go index 0b651f7b0..077b9fcf6 100644 --- a/metric/metricexport/producer.go +++ b/metric/metricexport/producer.go @@ -16,8 +16,6 @@ package metricexport import ( "go.opencensus.io/metric/metricdata" - "sync" - "sync/atomic" ) // Producer is a source of metrics. @@ -28,73 +26,3 @@ type Producer interface { // resource. Read() []*metricdata.Metric } - -// Registry maintains a set of metric producers for exporting. Most users will -// rely on the DefaultRegistry. -type Registry struct { - mu sync.RWMutex - state atomic.Value -} - -type registryState struct { - producers map[Producer]struct{} -} - -// NewRegistry creates a new Registry. -func NewRegistry() *Registry { - m := &Registry{} - m.state.Store(®istryState{ - producers: make(map[Producer]struct{}), - }) - return m -} - -// Read returns all the metrics from all the metric produces in this registry. -func (m *Registry) ReadAll() []*metricdata.Metric { - s := m.state.Load().(*registryState) - ms := make([]*metricdata.Metric, 0, len(s.producers)) - for p := range s.producers { - ms = append(ms, p.Read()...) - } - return ms -} - -// AddProducer adds a producer to this registry. -func (m *Registry) AddProducer(p Producer) { - m.mu.Lock() - defer m.mu.Unlock() - newState := ®istryState{ - make(map[Producer]struct{}), - } - state := m.state.Load().(*registryState) - for producer := range state.producers { - newState.producers[producer] = struct{}{} - } - newState.producers[p] = struct{}{} - m.state.Store(newState) -} - -// RemoveProducer removes the given producer from this registry. 
-func (m *Registry) RemoveProducer(p Producer) { - m.mu.Lock() - defer m.mu.Unlock() - newState := ®istryState{ - make(map[Producer]struct{}), - } - state := m.state.Load().(*registryState) - for producer := range state.producers { - newState.producers[producer] = struct{}{} - } - delete(newState.producers, p) - m.state.Store(newState) -} - -var defaultReg = NewRegistry() - -// DefaultRegistry returns the default, global metric registry for the current -// process. -// Most applications will rely on this registry but libraries should not assume -// the default registry is used. -func DefaultRegistry() *Registry { - return defaultReg -} diff --git a/metric/registry.go b/metric/registry.go new file mode 100644 index 000000000..ac39e421e --- /dev/null +++ b/metric/registry.go @@ -0,0 +1,81 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "go.opencensus.io/metric/metricdata" + "log" + "time" +) + +// Registry creates and manages a set of gauges. +// External synchronization is required if you want to add gauges to the same +// registry from multiple goroutines. +type Registry struct { + gauges map[string]*gauge +} + +// NewRegistry initializes a new Registry. +func NewRegistry() *Registry { + return &Registry{ + gauges: make(map[string]*gauge), + } +} + +// AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. 
+func (r *Registry) AddFloat64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) *Float64Gauge { + f := &Float64Gauge{ + g: gauge{ + isFloat: true, + }, + } + r.initGauge(&f.g, labelKeys, name, description, unit) + return f +} + +// AddInt64Gauge creates and adds a new int64-valued gauge to this registry. +func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) *Int64Gauge { + i := &Int64Gauge{} + r.initGauge(&i.g, labelKeys, name, description, unit) + return i +} + +func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) *gauge { + existing, ok := r.gauges[name] + if ok { + if existing.isFloat != g.isFloat { + log.Panicf("Gauge with name %s already exists with a different type", name) + } + } + g.keys = labelKeys + g.start = time.Now() + g.desc = metricdata.Descriptor{ + Name: name, + Description: description, + Unit: unit, + LabelKeys: labelKeys, + } + r.gauges[name] = g + return g +} + +// ReadAll reads all gauges in this registry and returns their values as metrics. +func (r *Registry) ReadAll() []*metricdata.Metric { + ms := make([]*metricdata.Metric, 0, len(r.gauges)) + for _, g := range r.gauges { + ms = append(ms, g.read()) + } + return ms +} From 6f50dd46d1afc6f1e8062f91b24800d0eb46d54b Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 24 Jan 2019 08:48:02 -0800 Subject: [PATCH 107/212] Bump up the version to 0.20.0 (#1016) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index 7faf9e821..a52dcd8c6 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. 
func Version() string { - return "0.19.0" + return "0.20.0" } From 1d7c80bdb10d66ba5297d42d81b432f8c7efeedd Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 25 Jan 2019 09:36:21 -0800 Subject: [PATCH 108/212] fix build error. (#1019) * fix build error. * build using go 11. * update go.mod and go.sum * update thrift to 0.12.0 * update only thrift version in go.mod --- .travis.yml | 1 + go.mod | 2 +- go.sum | 5 ----- 3 files changed, 2 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index 73c8571c3..39186b745 100644 --- a/.travis.yml +++ b/.travis.yml @@ -18,6 +18,7 @@ before_script: script: - embedmd -d README.md # Ensure embedded code is up-to-date + - export GO111MODULE=on - go build ./... # Ensure dependency updates don't break build - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - go vet ./... diff --git a/go.mod b/go.mod index 3da37b230..b1e2981df 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.opencensus.io require ( cloud.google.com/go v0.34.0 // indirect - git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a + git.apache.org/thrift.git v0.12.0 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/ghodss/yaml v1.0.0 // indirect github.com/golang/mock v1.2.0 // indirect diff --git a/go.sum b/go.sum index baf0a1da7..0502a9902 100644 --- a/go.sum +++ b/go.sum @@ -1,10 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -git.apache.org/thrift.git v0.0.0-20180807212849-6e67faa92827/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999 h1:sihTnRgTOUSCQz0iS0pjZuFQy/z7GXCJgSBg3+rZKHw= -git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= -git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a 
h1:ROdxmPITVHDxX22/qs2j1XXyJ/riQh8MB4UeWA4ZfVw= -git.apache.org/thrift.git v0.0.0-20181218151757-9b75e4fe745a/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= From 4baeb639308e8502f1bd4d713236b7978ab8f125 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 25 Jan 2019 10:30:53 -0800 Subject: [PATCH 109/212] Add option to configure limits for attributes, annotations, message events and links. (#1015) --- trace/config.go | 38 +++++++++++++++++++++++ trace/config_test.go | 72 +++++++++++++++++++++++++++++++++++++++----- trace/trace.go | 8 +++-- 3 files changed, 108 insertions(+), 10 deletions(-) diff --git a/trace/config.go b/trace/config.go index 0816892ea..775f8274f 100644 --- a/trace/config.go +++ b/trace/config.go @@ -27,10 +27,36 @@ type Config struct { // IDGenerator is for internal use only. 
IDGenerator internal.IDGenerator + + // MaxAnnotationEventsPerSpan is max number of annotation events per span + MaxAnnotationEventsPerSpan int + + // MaxMessageEventsPerSpan is max number of message events per span + MaxMessageEventsPerSpan int + + // MaxAnnotationEventsPerSpan is max number of attributes per span + MaxAttributesPerSpan int + + // MaxLinksPerSpan is max number of links per span + MaxLinksPerSpan int } var configWriteMu sync.Mutex +const ( + // DefaultMaxAnnotationEventsPerSpan is default max number of annotation events per span + DefaultMaxAnnotationEventsPerSpan = 32 + + // DefaultMaxMessageEventsPerSpan is default max number of message events per span + DefaultMaxMessageEventsPerSpan = 128 + + // DefaultMaxAttributesPerSpan is default max number of attributes per span + DefaultMaxAttributesPerSpan = 32 + + // DefaultMaxLinksPerSpan is default max number of links per span + DefaultMaxLinksPerSpan = 32 +) + // ApplyConfig applies changes to the global tracing configuration. // // Fields not provided in the given config are going to be preserved. 
@@ -44,5 +70,17 @@ func ApplyConfig(cfg Config) { if cfg.IDGenerator != nil { c.IDGenerator = cfg.IDGenerator } + if cfg.MaxAnnotationEventsPerSpan > 0 { + c.MaxAnnotationEventsPerSpan = cfg.MaxAnnotationEventsPerSpan + } + if cfg.MaxMessageEventsPerSpan > 0 { + c.MaxMessageEventsPerSpan = cfg.MaxMessageEventsPerSpan + } + if cfg.MaxAttributesPerSpan > 0 { + c.MaxAttributesPerSpan = cfg.MaxAttributesPerSpan + } + if cfg.MaxLinksPerSpan > 0 { + c.MaxLinksPerSpan = cfg.MaxLinksPerSpan + } config.Store(&c) } diff --git a/trace/config_test.go b/trace/config_test.go index 8495d8137..547b817d0 100644 --- a/trace/config_test.go +++ b/trace/config_test.go @@ -19,15 +19,71 @@ import ( "testing" ) -func TestApplyZeroConfig(t *testing.T) { +func TestApplyConfig(t *testing.T) { + testCfgs := []Config{ + {}, + { + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 2, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 4, + }, + { + MaxAttributesPerSpan: -1, + MaxAnnotationEventsPerSpan: 3, + MaxMessageEventsPerSpan: -3, + MaxLinksPerSpan: 5, + }} cfg := config.Load().(*Config) - ApplyConfig(Config{}) - currentCfg := config.Load().(*Config) + wantCfgs := []Config{ + { + DefaultSampler: cfg.DefaultSampler, + IDGenerator: cfg.IDGenerator, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + }, + { + DefaultSampler: cfg.DefaultSampler, + IDGenerator: cfg.IDGenerator, + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 2, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 4, + }, + { + DefaultSampler: cfg.DefaultSampler, + IDGenerator: cfg.IDGenerator, + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 3, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 5, + }} + + for i, newCfg := range testCfgs { + ApplyConfig(newCfg) + gotCfg := config.Load().(*Config) + wantCfg := wantCfgs[i] + + if got, want := 
reflect.ValueOf(gotCfg.DefaultSampler).Pointer(), reflect.ValueOf(wantCfg.DefaultSampler).Pointer(); got != want { + t.Fatalf("testId = %d config.DefaultSampler = %#v; want %#v", i, got, want) + } + if got, want := gotCfg.IDGenerator, wantCfg.IDGenerator; got != want { + t.Fatalf("testId = %d config.IDGenerator = %#v; want %#v", i, got, want) + } + if got, want := gotCfg.MaxAttributesPerSpan, wantCfg.MaxAttributesPerSpan; got != want { + t.Fatalf("testId = %d config.MaxAttributesPerSpan = %#v; want %#v", i, got, want) + } + if got, want := gotCfg.MaxLinksPerSpan, wantCfg.MaxLinksPerSpan; got != want { + t.Fatalf("testId = %d config.MaxLinksPerSpan = %#v; want %#v", i, got, want) + } + if got, want := gotCfg.MaxAnnotationEventsPerSpan, wantCfg.MaxAnnotationEventsPerSpan; got != want { + t.Fatalf("testId = %d config.MaxAnnotationEventsPerSpan = %#v; want %#v", i, got, want) + } + if got, want := gotCfg.MaxMessageEventsPerSpan, wantCfg.MaxMessageEventsPerSpan; got != want { + t.Fatalf("testId = %d config.MaxMessageEventsPerSpan = %#v; want %#v", i, got, want) + } - if got, want := reflect.ValueOf(currentCfg.DefaultSampler).Pointer(), reflect.ValueOf(cfg.DefaultSampler).Pointer(); got != want { - t.Fatalf("config.DefaultSampler = %#v; want %#v", got, want) - } - if got, want := currentCfg.IDGenerator, cfg.IDGenerator; got != want { - t.Fatalf("config.IDGenerator = %#v; want %#v", got, want) } } diff --git a/trace/trace.go b/trace/trace.go index 9e5e5f033..ab7f6f46a 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -468,8 +468,12 @@ func init() { gen.spanIDInc |= 1 config.Store(&Config{ - DefaultSampler: ProbabilitySampler(defaultSamplingProbability), - IDGenerator: gen, + DefaultSampler: ProbabilitySampler(defaultSamplingProbability), + IDGenerator: gen, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: 
DefaultMaxLinksPerSpan, }) } From 50686e2a1af8be72538901d5f9e77b38e10b52d7 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 25 Jan 2019 12:01:37 -0800 Subject: [PATCH 110/212] enforce attribute limit (#1020) --- go.mod | 1 + go.sum | 2 ++ trace/export.go | 7 ++++--- trace/lrumap.go | 37 +++++++++++++++++++++++++++++++++++++ trace/trace.go | 37 ++++++++++++++++++++++++++++--------- trace/trace_test.go | 31 +++++++++++++++++++++++++++++++ 6 files changed, 103 insertions(+), 12 deletions(-) create mode 100644 trace/lrumap.go diff --git a/go.mod b/go.mod index b1e2981df..0d7796924 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 github.com/grpc-ecosystem/grpc-gateway v1.6.2 // indirect + github.com/hashicorp/golang-lru v0.5.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/openzipkin/zipkin-go v0.1.3 github.com/prometheus/client_golang v0.9.2 diff --git a/go.sum b/go.sum index 0502a9902..3e6193e2b 100644 --- a/go.sum +++ b/go.sum @@ -18,6 +18,8 @@ github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5a github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= diff --git a/trace/export.go b/trace/export.go index 77a8c7357..07bf2d008 100644 
--- a/trace/export.go +++ b/trace/export.go @@ -81,9 +81,10 @@ type SpanData struct { // from StartTime by the duration of the span. EndTime time.Time // The values of Attributes each have type string, bool, or int64. - Attributes map[string]interface{} - Annotations []Annotation - MessageEvents []MessageEvent + Attributes map[string]interface{} + DroppedAttributeCount int + Annotations []Annotation + MessageEvents []MessageEvent Status Links []Link HasRemoteParent bool diff --git a/trace/lrumap.go b/trace/lrumap.go new file mode 100644 index 000000000..3f80a3368 --- /dev/null +++ b/trace/lrumap.go @@ -0,0 +1,37 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace + +import ( + "github.com/hashicorp/golang-lru/simplelru" +) + +type lruMap struct { + simpleLruMap *simplelru.LRU + droppedCount int +} + +func newLruMap(size int) *lruMap { + lm := &lruMap{} + lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + return lm +} + +func (lm *lruMap) add(key, value interface{}) { + evicted := lm.simpleLruMap.Add(key, value) + if evicted { + lm.droppedCount++ + } +} diff --git a/trace/trace.go b/trace/trace.go index ab7f6f46a..0c32420ee 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -42,6 +42,11 @@ type Span struct { data *SpanData mu sync.Mutex // protects the contents of *data (but not the pointer value.) spanContext SpanContext + + // lruAttributes are capped at configured limit. 
When the capacity is reached an oldest entry + // is removed to create room for a new entry. + lruAttributes *lruMap + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once @@ -226,6 +231,7 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa Name: name, HasRemoteParent: remoteParent, } + span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) if hasParent { span.data.ParentSpanID = parent.SpanID } @@ -276,11 +282,9 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.data.Attributes != nil { - sd.Attributes = make(map[string]interface{}) - for k, v := range s.data.Attributes { - sd.Attributes[k] = v - } + if s.lruAttributes.simpleLruMap.Len() > 0 { + sd.Attributes = s.lruAttributesToAttributeMap() + sd.DroppedAttributeCount = s.lruAttributes.droppedCount } s.mu.Unlock() return &sd @@ -314,6 +318,24 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } +func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { + attributes := make(map[string]interface{}) + for _, key := range s.lruAttributes.simpleLruMap.Keys() { + value, ok := s.lruAttributes.simpleLruMap.Get(key) + if ok { + keyStr := key.(string) + attributes[keyStr] = value + } + } + return attributes +} + +func (s *Span) copyToCappedAttributes(attributes []Attribute) { + for _, a := range attributes { + s.lruAttributes.add(a.key, a.value) + } +} + // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. 
@@ -322,10 +344,7 @@ func (s *Span) AddAttributes(attributes ...Attribute) { return } s.mu.Lock() - if s.data.Attributes == nil { - s.data.Attributes = make(map[string]interface{}) - } - copyAttributes(s.data.Attributes, attributes) + s.copyToCappedAttributes(attributes) s.mu.Unlock() } diff --git a/trace/trace_test.go b/trace/trace_test.go index 0c25cc7f3..4e45abda5 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -386,6 +386,37 @@ func TestSetSpanAttributes(t *testing.T) { } } +func TestSetSpanAttributesOverLimit(t *testing.T) { + cfg := Config{MaxAttributesPerSpan: 2} + ApplyConfig(cfg) + + span := startSpan(StartOptions{}) + span.AddAttributes(StringAttribute("key1", "value1")) + span.AddAttributes(StringAttribute("key2", "value2")) + span.AddAttributes(StringAttribute("key1", "value3")) // Replace key1. + span.AddAttributes(StringAttribute("key4", "value4")) // Remove key2 and add key4 + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + want := &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + Attributes: map[string]interface{}{"key1": "value3", "key4": "value4"}, + HasRemoteParent: true, + DroppedAttributeCount: 1, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("exporting span: got %#v want %#v", got, want) + } +} + func TestAnnotations(t *testing.T) { span := startSpan(StartOptions{}) span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) From 3abc75f40a2357dfd55e20f1631a181c31cacb04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=C3=A1rk=20S=C3=A1gi-Kaz=C3=A1r?= Date: Fri, 25 Jan 2019 23:38:41 +0100 Subject: [PATCH 111/212] Allow setting the http route from HTTP handlers (#1017) --- plugin/ochttp/route.go | 10 ++++++++++ plugin/ochttp/route_test.go | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 47 insertions(+) diff --git a/plugin/ochttp/route.go b/plugin/ochttp/route.go index dbe22d586..5e6a34307 100644 
--- a/plugin/ochttp/route.go +++ b/plugin/ochttp/route.go @@ -15,11 +15,21 @@ package ochttp import ( + "context" "net/http" "go.opencensus.io/tag" ) +// SetRoute sets the http_server_route tag to the given value. +// It's useful when an HTTP framework does not support the http.Handler interface +// and using WithRouteTag is not an option, but provides a way to hook into the request flow. +func SetRoute(ctx context.Context, route string) { + if a, ok := ctx.Value(addedTagsKey{}).(*addedTags); ok { + a.t = append(a.t, tag.Upsert(KeyServerRoute, route)) + } +} + // WithRouteTag returns an http.Handler that records stats with the // http_server_route tag set to the given value. func WithRouteTag(handler http.Handler, route string) http.Handler { diff --git a/plugin/ochttp/route_test.go b/plugin/ochttp/route_test.go index a9793eb0b..2efe23fc5 100644 --- a/plugin/ochttp/route_test.go +++ b/plugin/ochttp/route_test.go @@ -61,6 +61,43 @@ func TestWithRouteTag(t *testing.T) { } } +func TestSetRoute(t *testing.T) { + v := &view.View{ + Name: "request_total", + Measure: ochttp.ServerLatency, + Aggregation: view.Count(), + TagKeys: []tag.Key{ochttp.KeyServerRoute}, + } + view.Register(v) + var e testStatsExporter + view.RegisterExporter(&e) + defer view.UnregisterExporter(&e) + + mux := http.NewServeMux() + handler := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + ochttp.SetRoute(r.Context(), "/a/") + w.WriteHeader(204) + }) + mux.Handle("/a/", handler) + plugin := ochttp.Handler{Handler: mux} + req, _ := http.NewRequest("GET", "/a/b/c", nil) + rr := httptest.NewRecorder() + plugin.ServeHTTP(rr, req) + if got, want := rr.Code, 204; got != want { + t.Fatalf("Unexpected response, got %d; want %d", got, want) + } + + view.Unregister(v) // trigger exporting + + got := e.rowsForView("request_total") + want := []*view.Row{ + {Data: &view.CountData{Value: 1}, Tags: []tag.Tag{{Key: ochttp.KeyServerRoute, Value: "/a/"}}}, + } + if diff := cmp.Diff(got, want); diff 
!= "" { + t.Errorf("Unexpected view data exported, -got, +want: %s", diff) + } +} + type testStatsExporter struct { vd []*view.Data } From ad83cdacffdbd053b1629e6174360e60486d8cac Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 28 Jan 2019 15:21:43 -0800 Subject: [PATCH 112/212] Enforce annotation limit. (#1021) * Enforce annotation limit. * Add test for annotations over limit. * fix gofmt. --- trace/evictedqueue.go | 38 ++++++++++++++++++++++ trace/evictedqueue_test.go | 65 ++++++++++++++++++++++++++++++++++++++ trace/export.go | 13 ++++---- trace/trace.go | 21 ++++++++++-- trace/trace_test.go | 39 +++++++++++++++++++++++ 5 files changed, 168 insertions(+), 8 deletions(-) create mode 100644 trace/evictedqueue.go create mode 100644 trace/evictedqueue_test.go diff --git a/trace/evictedqueue.go b/trace/evictedqueue.go new file mode 100644 index 000000000..ffc264f23 --- /dev/null +++ b/trace/evictedqueue.go @@ -0,0 +1,38 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) *evictedQueue { + eq := &evictedQueue{ + capacity: capacity, + queue: make([]interface{}, 0), + } + + return eq +} + +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + eq.queue = eq.queue[1:] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/trace/evictedqueue_test.go b/trace/evictedqueue_test.go new file mode 100644 index 000000000..0e10fa5db --- /dev/null +++ b/trace/evictedqueue_test.go @@ -0,0 +1,65 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package trace + +import ( + "reflect" + "testing" +) + +func init() { +} + +func TestAdd(t *testing.T) { + q := newEvictedQueue(3) + q.add("value1") + q.add("value2") + if wantLen, gotLen := 2, len(q.queue); wantLen != gotLen { + t.Errorf("got queue length %d want %d", gotLen, wantLen) + } +} + +func (eq *evictedQueue) queueToArray() []string { + arr := make([]string, 0) + for _, value := range eq.queue { + arr = append(arr, value.(string)) + } + return arr +} + +func TestDropCount(t *testing.T) { + q := newEvictedQueue(3) + q.add("value1") + q.add("value2") + q.add("value3") + q.add("value1") + q.add("value4") + if wantLen, gotLen := 3, len(q.queue); wantLen != gotLen { + t.Errorf("got queue length %d want %d", gotLen, wantLen) + } + if wantDropCount, gotDropCount := 2, q.droppedCount; wantDropCount != gotDropCount { + t.Errorf("got drop count %d want %d", gotDropCount, wantDropCount) + } + wantArr := []string{"value3", "value1", "value4"} + gotArr := q.queueToArray() + + if wantLen, gotLen := len(wantArr), len(gotArr); gotLen != wantLen { + t.Errorf("got array len %d want %d", gotLen, wantLen) + } + + if !reflect.DeepEqual(gotArr, wantArr) { + t.Errorf("got array = %#v; want %#v", gotArr, wantArr) + } +} diff --git a/trace/export.go b/trace/export.go index 07bf2d008..8fa02e260 100644 --- a/trace/export.go +++ b/trace/export.go @@ -81,11 +81,12 @@ type SpanData struct { // from StartTime by the duration of the span. EndTime time.Time // The values of Attributes each have type string, bool, or int64. 
- Attributes map[string]interface{} - DroppedAttributeCount int - Annotations []Annotation - MessageEvents []MessageEvent + Attributes map[string]interface{} + Annotations []Annotation + MessageEvents []MessageEvent Status - Links []Link - HasRemoteParent bool + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int } diff --git a/trace/trace.go b/trace/trace.go index 0c32420ee..610ae88d0 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -47,6 +47,9 @@ type Span struct { // is removed to create room for a new entry. lruAttributes *lruMap + // annotations are stored in FIFO queue capped by configured limit. + annotations *evictedQueue + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. *spanStore endOnce sync.Once @@ -232,6 +235,8 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa HasRemoteParent: remoteParent, } span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) + span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + if hasParent { span.data.ParentSpanID = parent.SpanID } @@ -286,6 +291,10 @@ func (s *Span) makeSpanData() *SpanData { sd.Attributes = s.lruAttributesToAttributeMap() sd.DroppedAttributeCount = s.lruAttributes.droppedCount } + if len(s.annotations.queue) > 0 { + sd.Annotations = s.interfaceArrayToAnnotationArray() + sd.DroppedAnnotationCount = s.annotations.droppedCount + } s.mu.Unlock() return &sd } @@ -318,6 +327,14 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } +func (s *Span) interfaceArrayToAnnotationArray() []Annotation { + annotationArr := make([]Annotation, 0) + for _, value := range s.annotations.queue { + annotationArr = append(annotationArr, value.(Annotation)) + } + return annotationArr +} + func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}) for _, key := range s.lruAttributes.simpleLruMap.Keys() { @@ -364,7 +381,7 @@ func (s *Span) 
lazyPrintfInternal(attributes []Attribute, format string, a ...in m = make(map[string]interface{}) copyAttributes(m, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: msg, Attributes: m, @@ -380,7 +397,7 @@ func (s *Span) printStringInternal(attributes []Attribute, str string) { a = make(map[string]interface{}) copyAttributes(a, attributes) } - s.data.Annotations = append(s.data.Annotations, Annotation{ + s.annotations.add(Annotation{ Time: now, Message: str, Attributes: a, diff --git a/trace/trace_test.go b/trace/trace_test.go index 4e45abda5..ad8b1d1d4 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -451,6 +451,45 @@ func TestAnnotations(t *testing.T) { } } +func TestAnnotationsOverLimit(t *testing.T) { + cfg := Config{MaxAnnotationEventsPerSpan: 2} + ApplyConfig(cfg) + span := startSpan(StartOptions{}) + span.Annotatef([]Attribute{StringAttribute("key4", "value4")}, "%d", 1) + span.Annotate([]Attribute{StringAttribute("key3", "value3")}, "Annotate oldest") + span.Annotatef([]Attribute{StringAttribute("key1", "value1")}, "%f", 1.5) + span.Annotate([]Attribute{StringAttribute("key2", "value2")}, "Annotate") + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + for i := range got.Annotations { + if !checkTime(&got.Annotations[i].Time) { + t.Error("exporting span: expected nonzero Annotation Time") + } + } + + want := &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + Annotations: []Annotation{ + {Message: "1.500000", Attributes: map[string]interface{}{"key1": "value1"}}, + {Message: "Annotate", Attributes: map[string]interface{}{"key2": "value2"}}, + }, + DroppedAnnotationCount: 2, + HasRemoteParent: true, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("exporting span: got %#v want %#v", got, want) + } +} + func TestMessageEvents(t *testing.T) { span := 
startSpan(StartOptions{}) span.AddMessageReceiveEvent(3, 400, 300) From 3cbb6ce43853260092b2858e37d5b692b57b432b Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 28 Jan 2019 16:46:58 -0800 Subject: [PATCH 113/212] Enforce message event and link limits. (#1022) --- trace/export.go | 10 +++--- trace/trace.go | 38 +++++++++++++++++++-- trace/trace_test.go | 82 +++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 123 insertions(+), 7 deletions(-) diff --git a/trace/export.go b/trace/export.go index 8fa02e260..05832a97f 100644 --- a/trace/export.go +++ b/trace/export.go @@ -85,8 +85,10 @@ type SpanData struct { Annotations []Annotation MessageEvents []MessageEvent Status - Links []Link - HasRemoteParent bool - DroppedAttributeCount int - DroppedAnnotationCount int + Links []Link + HasRemoteParent bool + DroppedAttributeCount int + DroppedAnnotationCount int + DroppedMessageEventCount int + DroppedLinkCount int } diff --git a/trace/trace.go b/trace/trace.go index 610ae88d0..f156a208b 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -50,6 +50,12 @@ type Span struct { // annotations are stored in FIFO queue capped by configured limit. annotations *evictedQueue + // messageEvents are stored in FIFO queue capped by configured limit. + messageEvents *evictedQueue + + // links are stored in FIFO queue capped by configured limit. + links *evictedQueue + // spanStore is the spanStore this span belongs to, if any, otherwise it is nil. 
*spanStore endOnce sync.Once @@ -236,6 +242,8 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa } span.lruAttributes = newLruMap(cfg.MaxAttributesPerSpan) span.annotations = newEvictedQueue(cfg.MaxAnnotationEventsPerSpan) + span.messageEvents = newEvictedQueue(cfg.MaxMessageEventsPerSpan) + span.links = newEvictedQueue(cfg.MaxLinksPerSpan) if hasParent { span.data.ParentSpanID = parent.SpanID @@ -295,6 +303,14 @@ func (s *Span) makeSpanData() *SpanData { sd.Annotations = s.interfaceArrayToAnnotationArray() sd.DroppedAnnotationCount = s.annotations.droppedCount } + if len(s.messageEvents.queue) > 0 { + sd.MessageEvents = s.interfaceArrayToMessageEventArray() + sd.DroppedMessageEventCount = s.messageEvents.droppedCount + } + if len(s.links.queue) > 0 { + sd.Links = s.interfaceArrayToLinksArray() + sd.DroppedLinkCount = s.links.droppedCount + } s.mu.Unlock() return &sd } @@ -327,6 +343,22 @@ func (s *Span) SetStatus(status Status) { s.mu.Unlock() } +func (s *Span) interfaceArrayToLinksArray() []Link { + linksArr := make([]Link, 0) + for _, value := range s.links.queue { + linksArr = append(linksArr, value.(Link)) + } + return linksArr +} + +func (s *Span) interfaceArrayToMessageEventArray() []MessageEvent { + messageEventArr := make([]MessageEvent, 0) + for _, value := range s.messageEvents.queue { + messageEventArr = append(messageEventArr, value.(MessageEvent)) + } + return messageEventArr +} + func (s *Span) interfaceArrayToAnnotationArray() []Annotation { annotationArr := make([]Annotation, 0) for _, value := range s.annotations.queue { @@ -434,7 +466,7 @@ func (s *Span) AddMessageSendEvent(messageID, uncompressedByteSize, compressedBy } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeSent, MessageID: messageID, @@ -456,7 +488,7 @@ func (s *Span) AddMessageReceiveEvent(messageID, uncompressedByteSize, 
compresse } now := time.Now() s.mu.Lock() - s.data.MessageEvents = append(s.data.MessageEvents, MessageEvent{ + s.messageEvents.add(MessageEvent{ Time: now, EventType: MessageEventTypeRecv, MessageID: messageID, @@ -472,7 +504,7 @@ func (s *Span) AddLink(l Link) { return } s.mu.Lock() - s.data.Links = append(s.data.Links, l) + s.links.add(l) s.mu.Unlock() } diff --git a/trace/trace_test.go b/trace/trace_test.go index ad8b1d1d4..91725fc53 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -524,6 +524,45 @@ func TestMessageEvents(t *testing.T) { } } +func TestMessageEventsOverLimit(t *testing.T) { + cfg := Config{MaxMessageEventsPerSpan: 2} + ApplyConfig(cfg) + span := startSpan(StartOptions{}) + span.AddMessageReceiveEvent(5, 300, 120) + span.AddMessageSendEvent(4, 100, 50) + span.AddMessageReceiveEvent(3, 400, 300) + span.AddMessageSendEvent(1, 200, 100) + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + for i := range got.MessageEvents { + if !checkTime(&got.MessageEvents[i].Time) { + t.Error("exporting span: expected nonzero MessageEvent Time") + } + } + + want := &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + MessageEvents: []MessageEvent{ + {EventType: 2, MessageID: 0x3, UncompressedByteSize: 0x190, CompressedByteSize: 0x12c}, + {EventType: 1, MessageID: 0x1, UncompressedByteSize: 0xc8, CompressedByteSize: 0x64}, + }, + DroppedMessageEventCount: 2, + HasRemoteParent: true, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("exporting span: got %#v want %#v", got, want) + } +} + func TestSetSpanName(t *testing.T) { want := "SpanName-1" span := startSpan(StartOptions{}) @@ -633,6 +672,49 @@ func TestAddLink(t *testing.T) { } } +func TestAddLinkOverLimit(t *testing.T) { + cfg := Config{MaxLinksPerSpan: 1} + ApplyConfig(cfg) + span := startSpan(StartOptions{}) + span.AddLink(Link{ + TraceID: tid, + SpanID: sid, + Type: LinkTypeParent, + 
Attributes: map[string]interface{}{"key4": "value4"}, + }) + span.AddLink(Link{ + TraceID: tid, + SpanID: sid, + Type: LinkTypeParent, + Attributes: map[string]interface{}{"key5": "value5"}, + }) + got, err := endSpan(span) + if err != nil { + t.Fatal(err) + } + + want := &SpanData{ + SpanContext: SpanContext{ + TraceID: tid, + SpanID: SpanID{}, + TraceOptions: 0x1, + }, + ParentSpanID: sid, + Name: "span0", + Links: []Link{{ + TraceID: tid, + SpanID: sid, + Type: 2, + Attributes: map[string]interface{}{"key5": "value5"}, + }}, + DroppedLinkCount: 1, + HasRemoteParent: true, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("exporting span: got %#v want %#v", got, want) + } +} + func TestUnregisterExporter(t *testing.T) { var te testExporter RegisterExporter(&te) From 7a32882185281dc6ea972e9b80d6a58448359977 Mon Sep 17 00:00:00 2001 From: rghetia Date: Sat, 2 Feb 2019 17:24:15 -0800 Subject: [PATCH 114/212] Add support for child span count. (#1023) --- trace/export.go | 3 +++ trace/trace.go | 10 ++++++++++ trace/trace_test.go | 31 +++++++++++++++++++++++++++++++ 3 files changed, 44 insertions(+) diff --git a/trace/export.go b/trace/export.go index 05832a97f..e0d9a4b99 100644 --- a/trace/export.go +++ b/trace/export.go @@ -91,4 +91,7 @@ type SpanData struct { DroppedAnnotationCount int DroppedMessageEventCount int DroppedLinkCount int + + // ChildSpanCount holds the number of child span created for this span. 
+ ChildSpanCount int } diff --git a/trace/trace.go b/trace/trace.go index f156a208b..38ead7bf0 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -170,6 +170,7 @@ func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Cont var opts StartOptions var parent SpanContext if p := FromContext(ctx); p != nil { + p.addChild() parent = p.spanContext } for _, op := range o { @@ -385,6 +386,15 @@ func (s *Span) copyToCappedAttributes(attributes []Attribute) { } } +func (s *Span) addChild() { + if !s.IsRecordingEvents() { + return + } + s.mu.Lock() + s.data.ChildSpanCount++ + s.mu.Unlock() +} + // AddAttributes sets attributes in the span. // // Existing attributes whose keys appear in the attributes parameter are overwritten. diff --git a/trace/trace_test.go b/trace/trace_test.go index 91725fc53..c2151367f 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -829,6 +829,37 @@ func TestStartSpanAfterEnd(t *testing.T) { } } +func TestChildSpanCount(t *testing.T) { + spans := make(exporter) + RegisterExporter(&spans) + defer UnregisterExporter(&spans) + ctx, span0 := StartSpan(context.Background(), "parent", WithSampler(AlwaysSample())) + ctx1, span1 := StartSpan(ctx, "span-1", WithSampler(AlwaysSample())) + _, span2 := StartSpan(ctx1, "span-2", WithSampler(AlwaysSample())) + span2.End() + span1.End() + + _, span3 := StartSpan(ctx, "span-3", WithSampler(AlwaysSample())) + span3.End() + span0.End() + UnregisterExporter(&spans) + if got, want := len(spans), 4; got != want { + t.Fatalf("len(%#v) = %d; want %d", spans, got, want) + } + if got, want := spans["span-3"].ChildSpanCount, 0; got != want { + t.Errorf("span-3.ChildSpanCount=%q; want %q", got, want) + } + if got, want := spans["span-2"].ChildSpanCount, 0; got != want { + t.Errorf("span-2.ChildSpanCount=%q; want %q", got, want) + } + if got, want := spans["span-1"].ChildSpanCount, 1; got != want { + t.Errorf("span-1.ChildSpanCount=%q; want %q", got, want) + } + if got, want := 
spans["parent"].ChildSpanCount, 2; got != want { + t.Errorf("parent.ChildSpanCount=%q; want %q", got, want) + } +} + func TestNilSpanEnd(t *testing.T) { var span *Span span.End() From ed9f155754b50f84f15997a4550ebd54667078e7 Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 5 Feb 2019 14:37:37 -0800 Subject: [PATCH 115/212] revert (#1000) and update prometheus version. (#1026) * revert (#1000) and updated prometheus version. fixes #1025 * keep the go.sum as is due to go 11 hash problem. --- exporter/prometheus/prometheus_test.go | 33 +++++++++++++------------- go.mod | 11 ++++----- 2 files changed, 20 insertions(+), 24 deletions(-) diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 80157145e..67b05f020 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -17,7 +17,6 @@ package prometheus import ( "context" "fmt" - "github.com/google/go-cmp/cmp" "io/ioutil" "net/http" "net/http/httptest" @@ -295,16 +294,16 @@ func TestCumulativenessFromHistograms(t *testing.T) { // 100: [] | 0 + prev(i) = 0 + 4 = 4 // 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7 wantLines := []string{ - `cash_register_bucket{le="1.0"} 1.0`, - `cash_register_bucket{le="5.0"} 2.0`, - `cash_register_bucket{le="10.0"} 3.0`, - `cash_register_bucket{le="20.0"} 4.0`, - `cash_register_bucket{le="50.0"} 4.0`, - `cash_register_bucket{le="100.0"} 4.0`, - `cash_register_bucket{le="250.0"} 7.0`, - `cash_register_bucket{le="+Inf"} 7.0`, + `cash_register_bucket{le="1"} 1`, + `cash_register_bucket{le="5"} 2`, + `cash_register_bucket{le="10"} 3`, + `cash_register_bucket{le="20"} 4`, + `cash_register_bucket{le="50"} 4`, + `cash_register_bucket{le="100"} 4`, + `cash_register_bucket{le="250"} 7`, + `cash_register_bucket{le="+Inf"} 7`, `cash_register_sum 654.0799999999999`, // Summation of the input values - `cash_register_count 7.0`, + `cash_register_count 7`, } ctx := context.Background() @@ -422,16 +421,16 @@ func 
TestConstLabelsIncluded(t *testing.T) { want := `# HELP tests_bar bar # TYPE tests_bar counter -tests_bar{method="issue961",service="spanner"} 1.0 +tests_bar{method="issue961",service="spanner"} 1 # HELP tests_baz baz # TYPE tests_baz counter -tests_baz{method="issue961",service="spanner"} 1.0 +tests_baz{method="issue961",service="spanner"} 1 # HELP tests_foo foo # TYPE tests_foo counter -tests_foo{method="issue961",service="spanner"} 1.0 +tests_foo{method="issue961",service="spanner"} 1 ` - if diff := cmp.Diff(output, want); diff != "" { - t.Fatalf("output differed from expected -got +want: %s", diff) + if output != want { + t.Fatal("output differed from expected") } } @@ -498,8 +497,8 @@ func TestViewMeasureWithoutTag(t *testing.T) { } want := `# HELP tests_foo foo # TYPE tests_foo counter -tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1.0 -tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1.0 +tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1 +tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1 ` if output != want { t.Fatalf("output differed from expected output: %s want: %s", output, want) diff --git a/go.mod b/go.mod index 0d7796924..cad6a096a 100644 --- a/go.mod +++ b/go.mod @@ -2,7 +2,7 @@ module go.opencensus.io require ( cloud.google.com/go v0.34.0 // indirect - git.apache.org/thrift.git v0.12.0 + git.apache.org/thrift.git v0.12.0 github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 github.com/ghodss/yaml v1.0.0 // indirect github.com/golang/mock v1.2.0 // indirect @@ -12,14 +12,11 @@ require ( github.com/hashicorp/golang-lru v0.5.0 github.com/matttproud/golang_protobuf_extensions v1.0.1 github.com/openzipkin/zipkin-go v0.1.3 - github.com/prometheus/client_golang v0.9.2 - github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 - github.com/prometheus/common v0.0.0-20181218105931-67670fe90761 - github.com/prometheus/procfs 
v0.0.0-20181204211112-1dc9a6cbc91a + github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 // indirect - golang.org/x/net v0.0.0-20181217023233-e147a9138326 + golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect - golang.org/x/sync v0.0.0-20181108010431-42b317875d0f + golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 golang.org/x/text v0.3.0 golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e // indirect From 57c09932883846047fd542903575671cb6b75070 Mon Sep 17 00:00:00 2001 From: Andrew Wilkins Date: Fri, 15 Feb 2019 02:11:55 +0800 Subject: [PATCH 116/212] RFC: add SpanContext.LocalRootSpanID (#1029) * RFC: add SpanContext.LocalRootSpanID * Move LocalRootSpanID to Span/SpanData SpanContext should only hold things that are propagated between processes, which is not the case for this field. * Fix tests --- trace/export.go | 9 +++++---- trace/trace.go | 21 +++++++++++++++------ trace/trace_test.go | 30 ++++++++++++++++++++++++++++++ 3 files changed, 50 insertions(+), 10 deletions(-) diff --git a/trace/export.go b/trace/export.go index e0d9a4b99..6fc1f4c18 100644 --- a/trace/export.go +++ b/trace/export.go @@ -73,10 +73,11 @@ func UnregisterExporter(e Exporter) { // SpanData contains all the information collected by a Span. type SpanData struct { SpanContext - ParentSpanID SpanID - SpanKind int - Name string - StartTime time.Time + ParentSpanID SpanID + LocalRootSpanID SpanID + SpanKind int + Name string + StartTime time.Time // The wall clock time of EndTime will be adjusted to always be offset // from StartTime by the duration of the span. 
EndTime time.Time diff --git a/trace/trace.go b/trace/trace.go index 38ead7bf0..549e6fb11 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -39,9 +39,10 @@ type Span struct { // It will be non-nil if we are exporting the span or recording events for it. // Otherwise, data is nil, and the Span is simply a carrier for the // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext + localRootSpanID SpanID // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry // is removed to create room for a new entry. @@ -169,14 +170,16 @@ func WithSampler(sampler Sampler) StartOption { func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext + var localRoot SpanID if p := FromContext(ctx); p != nil { p.addChild() parent = p.spanContext + localRoot = p.localRootSpanID } for _, op := range o { op(&opts) } - span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) + span := startSpanInternal(name, parent != SpanContext{}, parent, localRoot, false, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end @@ -195,15 +198,16 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont for _, op := range o { op(&opts) } - span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) + span := startSpanInternal(name, parent != SpanContext{}, parent, SpanID{}, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end return NewContext(ctx, span), span } -func startSpanInternal(name string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { +func startSpanInternal(name string, hasParent 
bool, parent SpanContext, localRoot SpanID, remoteParent bool, o StartOptions) *Span { span := &Span{} span.spanContext = parent + span.localRootSpanID = localRoot cfg := config.Load().(*Config) @@ -213,6 +217,10 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler + if localRoot == (SpanID{}) { + span.localRootSpanID = span.spanContext.SpanID + } + if !hasParent || remoteParent || o.Sampler != nil { // If this span is the child of a local span and no Sampler is set in the // options, keep the parent's TraceOptions. @@ -236,6 +244,7 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, remotePa span.data = &SpanData{ SpanContext: span.spanContext, + LocalRootSpanID: span.localRootSpanID, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, diff --git a/trace/trace_test.go b/trace/trace_test.go index c2151367f..c8f624d3b 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -230,6 +230,32 @@ func TestStartSpanWithRemoteParent(t *testing.T) { } } +func TestLocalRootSpanID(t *testing.T) { + ctx, span1 := StartSpan(context.Background(), "span1") + if span1.localRootSpanID == (SpanID{}) { + t.Errorf("exporting root span: expected nonzero localRootSpanID") + } + + _, span2 := StartSpan(ctx, "span2") + if err := checkChild(span1.spanContext, span2); err != nil { + t.Error(err) + } + if got, want := span2.localRootSpanID, span1.localRootSpanID; got != want { + t.Errorf("span2.localRootSpanID=%q; want %q (span1.localRootSpanID)", got, want) + } + + _, span3 := StartSpanWithRemoteParent(context.Background(), "span3", span2.SpanContext()) + if err := checkChild(span3.spanContext, span2); err != nil { + t.Error(err) + } + if span3.localRootSpanID == (SpanID{}) { + t.Errorf("exporting span with remote parent: expected nonzero localRootSpanID") + } + if got, want := span3.localRootSpanID, span2.localRootSpanID; got == want { + 
t.Errorf("span3.localRootSpanID=%q; expected different value to span2.localRootSpanID, got same", got) + } +} + // startSpan returns a context with a new Span that is recording events and will be exported. func startSpan(o StartOptions) *Span { _, span := StartSpanWithRemoteParent(context.Background(), "span0", @@ -274,7 +300,11 @@ func endSpan(span *Span) (*SpanData, error) { if got.SpanContext.SpanID == (SpanID{}) { return nil, fmt.Errorf("exporting span: expected nonzero SpanID") } + if got.LocalRootSpanID == (SpanID{}) { + return nil, fmt.Errorf("exporting span: expected nonzero LocalRootSpanID") + } got.SpanContext.SpanID = SpanID{} + got.LocalRootSpanID = SpanID{} if !checkTime(&got.StartTime) { return nil, fmt.Errorf("exporting span: expected nonzero StartTime") } From bf352733d4dffdd0e650c71cabc694aca654be9a Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 20 Feb 2019 11:47:58 -0800 Subject: [PATCH 117/212] Revert "RFC: add SpanContext.LocalRootSpanID (#1029)" (#1031) This reverts commit 57c09932883846047fd542903575671cb6b75070. It was merged without updating the spec and resolving https://github.com/census-instrumentation/opencensus-specs/issues/229 --- trace/export.go | 9 ++++----- trace/trace.go | 21 ++++++--------------- trace/trace_test.go | 30 ------------------------------ 3 files changed, 10 insertions(+), 50 deletions(-) diff --git a/trace/export.go b/trace/export.go index 6fc1f4c18..e0d9a4b99 100644 --- a/trace/export.go +++ b/trace/export.go @@ -73,11 +73,10 @@ func UnregisterExporter(e Exporter) { // SpanData contains all the information collected by a Span. type SpanData struct { SpanContext - ParentSpanID SpanID - LocalRootSpanID SpanID - SpanKind int - Name string - StartTime time.Time + ParentSpanID SpanID + SpanKind int + Name string + StartTime time.Time // The wall clock time of EndTime will be adjusted to always be offset // from StartTime by the duration of the span. 
EndTime time.Time diff --git a/trace/trace.go b/trace/trace.go index 549e6fb11..38ead7bf0 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -39,10 +39,9 @@ type Span struct { // It will be non-nil if we are exporting the span or recording events for it. // Otherwise, data is nil, and the Span is simply a carrier for the // SpanContext, so that the trace ID is propagated. - data *SpanData - mu sync.Mutex // protects the contents of *data (but not the pointer value.) - spanContext SpanContext - localRootSpanID SpanID + data *SpanData + mu sync.Mutex // protects the contents of *data (but not the pointer value.) + spanContext SpanContext // lruAttributes are capped at configured limit. When the capacity is reached an oldest entry // is removed to create room for a new entry. @@ -170,16 +169,14 @@ func WithSampler(sampler Sampler) StartOption { func StartSpan(ctx context.Context, name string, o ...StartOption) (context.Context, *Span) { var opts StartOptions var parent SpanContext - var localRoot SpanID if p := FromContext(ctx); p != nil { p.addChild() parent = p.spanContext - localRoot = p.localRootSpanID } for _, op := range o { op(&opts) } - span := startSpanInternal(name, parent != SpanContext{}, parent, localRoot, false, opts) + span := startSpanInternal(name, parent != SpanContext{}, parent, false, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end @@ -198,16 +195,15 @@ func StartSpanWithRemoteParent(ctx context.Context, name string, parent SpanCont for _, op := range o { op(&opts) } - span := startSpanInternal(name, parent != SpanContext{}, parent, SpanID{}, true, opts) + span := startSpanInternal(name, parent != SpanContext{}, parent, true, opts) ctx, end := startExecutionTracerTask(ctx, name) span.executionTracerTaskEnd = end return NewContext(ctx, span), span } -func startSpanInternal(name string, hasParent bool, parent SpanContext, localRoot SpanID, remoteParent bool, o StartOptions) *Span { +func startSpanInternal(name 
string, hasParent bool, parent SpanContext, remoteParent bool, o StartOptions) *Span { span := &Span{} span.spanContext = parent - span.localRootSpanID = localRoot cfg := config.Load().(*Config) @@ -217,10 +213,6 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, localRoo span.spanContext.SpanID = cfg.IDGenerator.NewSpanID() sampler := cfg.DefaultSampler - if localRoot == (SpanID{}) { - span.localRootSpanID = span.spanContext.SpanID - } - if !hasParent || remoteParent || o.Sampler != nil { // If this span is the child of a local span and no Sampler is set in the // options, keep the parent's TraceOptions. @@ -244,7 +236,6 @@ func startSpanInternal(name string, hasParent bool, parent SpanContext, localRoo span.data = &SpanData{ SpanContext: span.spanContext, - LocalRootSpanID: span.localRootSpanID, StartTime: time.Now(), SpanKind: o.SpanKind, Name: name, diff --git a/trace/trace_test.go b/trace/trace_test.go index c8f624d3b..c2151367f 100644 --- a/trace/trace_test.go +++ b/trace/trace_test.go @@ -230,32 +230,6 @@ func TestStartSpanWithRemoteParent(t *testing.T) { } } -func TestLocalRootSpanID(t *testing.T) { - ctx, span1 := StartSpan(context.Background(), "span1") - if span1.localRootSpanID == (SpanID{}) { - t.Errorf("exporting root span: expected nonzero localRootSpanID") - } - - _, span2 := StartSpan(ctx, "span2") - if err := checkChild(span1.spanContext, span2); err != nil { - t.Error(err) - } - if got, want := span2.localRootSpanID, span1.localRootSpanID; got != want { - t.Errorf("span2.localRootSpanID=%q; want %q (span1.localRootSpanID)", got, want) - } - - _, span3 := StartSpanWithRemoteParent(context.Background(), "span3", span2.SpanContext()) - if err := checkChild(span3.spanContext, span2); err != nil { - t.Error(err) - } - if span3.localRootSpanID == (SpanID{}) { - t.Errorf("exporting span with remote parent: expected nonzero localRootSpanID") - } - if got, want := span3.localRootSpanID, span2.localRootSpanID; got == want { - 
t.Errorf("span3.localRootSpanID=%q; expected different value to span2.localRootSpanID, got same", got) - } -} - // startSpan returns a context with a new Span that is recording events and will be exported. func startSpan(o StartOptions) *Span { _, span := StartSpanWithRemoteParent(context.Background(), "span0", @@ -300,11 +274,7 @@ func endSpan(span *Span) (*SpanData, error) { if got.SpanContext.SpanID == (SpanID{}) { return nil, fmt.Errorf("exporting span: expected nonzero SpanID") } - if got.LocalRootSpanID == (SpanID{}) { - return nil, fmt.Errorf("exporting span: expected nonzero LocalRootSpanID") - } got.SpanContext.SpanID = SpanID{} - got.LocalRootSpanID = SpanID{} if !checkTime(&got.StartTime) { return nil, fmt.Errorf("exporting span: expected nonzero StartTime") } From 6499bfc9eda4dee6663352fe6494c751b2f9b053 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Thu, 21 Feb 2019 11:15:21 -0800 Subject: [PATCH 118/212] Add code owners to restrict access. (#1030) --- .github/CODEOWNERS | 6 ++++++ 1 file changed, 6 insertions(+) create mode 100644 .github/CODEOWNERS diff --git a/.github/CODEOWNERS b/.github/CODEOWNERS new file mode 100644 index 000000000..184c72449 --- /dev/null +++ b/.github/CODEOWNERS @@ -0,0 +1,6 @@ +# Code owners file. +# This file controls who is tagged for review for any given pull request. + +# For anything not explicitly taken by someone else: +* @census-instrumentation/global-owners @rakyll @rghetia + From beafb2a85a579a4918ba259877a1625e9213a263 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 22 Feb 2019 15:05:22 -0800 Subject: [PATCH 119/212] Add support for Float64 Attribute. (#1033) * Add support for Float64 Attribute. * Add support for Float64 in Jaeger opencensus exporter * Add support for Float64 in Zipkin opencensus exporter * fixed a typo. 
--- exporter/jaeger/jaeger.go | 7 +++++++ exporter/jaeger/jaeger_test.go | 7 +++++-- exporter/zipkin/zipkin.go | 2 ++ exporter/zipkin/zipkin_test.go | 3 +++ trace/basetypes.go | 5 +++++ trace/benchmark_test.go | 7 +++++-- 6 files changed, 27 insertions(+), 4 deletions(-) diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index 9a433d9bc..e63a09900 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -283,6 +283,13 @@ func attributeToTag(key string, a interface{}) *gen.Tag { VLong: &v, VType: gen.TagType_LONG, } + case float64: + v := float64(value) + tag = &gen.Tag{ + Key: key, + VDouble: &v, + VType: gen.TagType_DOUBLE, + } } return tag } diff --git a/exporter/jaeger/jaeger_test.go b/exporter/jaeger/jaeger_test.go index 5d5431da0..de8ca87f4 100644 --- a/exporter/jaeger/jaeger_test.go +++ b/exporter/jaeger/jaeger_test.go @@ -49,7 +49,7 @@ func Test_bytesToInt64(t *testing.T) { for _, tt := range tests { t.Run(fmt.Sprintf("%d", tt.want), func(t *testing.T) { if got := bytesToInt64(tt.buf); got != tt.want { - t.Errorf("bytesToInt64() = %v, want %v", got, tt.want) + t.Errorf("bytesToInt64() = \n%v, \n want \n%v", got, tt.want) } }) } @@ -62,6 +62,7 @@ func Test_spanDataToThrift(t *testing.T) { keyValue := "value" resultValue := true statusCodeValue := int64(2) + doubleValue := float64(123.456) statusMessage := "error" tests := []struct { @@ -80,7 +81,8 @@ func Test_spanDataToThrift(t *testing.T) { StartTime: now, EndTime: now, Attributes: map[string]interface{}{ - "key": keyValue, + "double": doubleValue, + "key": keyValue, }, Annotations: []trace.Annotation{ { @@ -108,6 +110,7 @@ func Test_spanDataToThrift(t *testing.T) { StartTime: now.UnixNano() / 1000, Duration: 0, Tags: []*gen.Tag{ + {Key: "double", VType: gen.TagType_DOUBLE, VDouble: &doubleValue}, {Key: "key", VType: gen.TagType_STRING, VStr: &keyValue}, {Key: "status.code", VType: gen.TagType_LONG, VLong: &statusCodeValue}, {Key: "status.message", VType: gen.TagType_STRING, 
VStr: &statusMessage}, diff --git a/exporter/zipkin/zipkin.go b/exporter/zipkin/zipkin.go index 30d2fa438..69de70571 100644 --- a/exporter/zipkin/zipkin.go +++ b/exporter/zipkin/zipkin.go @@ -149,6 +149,8 @@ func zipkinSpan(s *trace.SpanData, localEndpoint *model.Endpoint) model.SpanMode } case int64: m[key] = strconv.FormatInt(v, 10) + case float64: + m[key] = strconv.FormatFloat(v, 'f', -1, 64) } } z.Tags = m diff --git a/exporter/zipkin/zipkin_test.go b/exporter/zipkin/zipkin_test.go index 2d5f81cc1..c2b5420de 100644 --- a/exporter/zipkin/zipkin_test.go +++ b/exporter/zipkin/zipkin_test.go @@ -58,6 +58,7 @@ func TestExport(t *testing.T) { "intkey": int64(42), "boolkey1": true, "boolkey2": false, + "doublekey": float64(123.456), }, MessageEvents: []trace.MessageEvent{ { @@ -77,6 +78,7 @@ func TestExport(t *testing.T) { "intkey": int64(42), "boolkey1": true, "boolkey2": false, + "doublekey": float64(123.456), }, }, }, @@ -114,6 +116,7 @@ func TestExport(t *testing.T) { "intkey": "42", "boolkey1": "true", "boolkey2": "false", + "doublekey": "123.456", "error": "INVALID_ARGUMENT", "opencensus.status_description": "error", }, diff --git a/trace/basetypes.go b/trace/basetypes.go index 01f0f9083..ed59bfbde 100644 --- a/trace/basetypes.go +++ b/trace/basetypes.go @@ -59,6 +59,11 @@ func Int64Attribute(key string, value int64) Attribute { return Attribute{key: key, value: value} } +// Float64Attribute returns a float64-valued attribute. +func Float64Attribute(key string, value float64) Attribute { + return Attribute{key: key, value: value} +} + // StringAttribute returns a string-valued attribute. 
func StringAttribute(key string, value string) Attribute { return Attribute{key: key, value: value} diff --git a/trace/benchmark_test.go b/trace/benchmark_test.go index 7e86d64d9..aa6c9e387 100644 --- a/trace/benchmark_test.go +++ b/trace/benchmark_test.go @@ -30,7 +30,7 @@ func BenchmarkStartEndSpan(b *testing.B) { }) } -func BenchmarkSpanWithAnnotations_3(b *testing.B) { +func BenchmarkSpanWithAnnotations_4(b *testing.B) { traceBenchmark(b, func(b *testing.B) { ctx := context.Background() b.ResetTimer() @@ -41,13 +41,14 @@ func BenchmarkSpanWithAnnotations_3(b *testing.B) { BoolAttribute("key1", false), StringAttribute("key2", "hello"), Int64Attribute("key3", 123), + Float64Attribute("key4", 123.456), ) span.End() } }) } -func BenchmarkSpanWithAnnotations_6(b *testing.B) { +func BenchmarkSpanWithAnnotations_8(b *testing.B) { traceBenchmark(b, func(b *testing.B) { ctx := context.Background() b.ResetTimer() @@ -61,6 +62,8 @@ func BenchmarkSpanWithAnnotations_6(b *testing.B) { StringAttribute("key4", "hello"), Int64Attribute("key5", 123), Int64Attribute("key6", 456), + Float64Attribute("key7", 123.456), + Float64Attribute("key8", 456.789), ) span.End() } From 62b306abc9d664e1185f921a82872cb9e5bf586b Mon Sep 17 00:00:00 2001 From: Stephen G Date: Tue, 26 Feb 2019 22:53:34 -0600 Subject: [PATCH 120/212] Increase time waited in TestHandlerImplementsHTTPCloseNotify (#1027) Currently this test sometimes fails with the last log message not making it. In my testing, if I decreased this value the test does this every time so I believe that increasing it will prevent it from happening. 
Fixes #926 --- plugin/ochttp/server_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 29468cbe4..614530fb5 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -543,7 +543,7 @@ func TestHandlerImplementsHTTPCloseNotify(t *testing.T) { } // Wait for a couple of milliseconds for the GoAway frames to be properly propagated - <-time.After(150 * time.Millisecond) + <-time.After(200 * time.Millisecond) wantHTTP1Log := strings.Repeat("ended\n", len(transports)) wantHTTP2Log := strings.Repeat("ended\n", len(transports)) From 144687d8128a6c935b2106cebef35215b541cd16 Mon Sep 17 00:00:00 2001 From: Philipp Hug Date: Wed, 27 Feb 2019 15:48:27 +0100 Subject: [PATCH 121/212] Use Host from Request instead of Request.URL (#1036) Request.URL contains the value sent in the HTTP request and not in the Host header. --- plugin/ochttp/client_stats.go | 4 ++-- plugin/ochttp/server.go | 2 +- plugin/ochttp/trace.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/ochttp/client_stats.go b/plugin/ochttp/client_stats.go index b432316e0..e258bcc2a 100644 --- a/plugin/ochttp/client_stats.go +++ b/plugin/ochttp/client_stats.go @@ -34,8 +34,8 @@ type statsTransport struct { // RoundTrip implements http.RoundTripper, delegating to Base and recording stats for the request. 
func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { ctx, _ := tag.New(req.Context(), - tag.Upsert(KeyClientHost, req.URL.Host), - tag.Upsert(Host, req.URL.Host), + tag.Upsert(KeyClientHost, req.Host), + tag.Upsert(Host, req.Host), tag.Upsert(KeyClientPath, req.URL.Path), tag.Upsert(Path, req.URL.Path), tag.Upsert(KeyClientMethod, req.Method), diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index ff72de97a..c65b51bbb 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -136,7 +136,7 @@ func (h *Handler) extractSpanContext(r *http.Request) (trace.SpanContext, bool) func (h *Handler) startStats(w http.ResponseWriter, r *http.Request) (http.ResponseWriter, func(tags *addedTags)) { ctx, _ := tag.New(r.Context(), - tag.Upsert(Host, r.URL.Host), + tag.Upsert(Host, r.Host), tag.Upsert(Path, r.URL.Path), tag.Upsert(Method, r.Method)) track := &trackingResponseWriter{ diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 819a2d5ff..ca312fcf4 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -151,7 +151,7 @@ func spanNameFromURL(req *http.Request) string { func requestAttrs(r *http.Request) []trace.Attribute { return []trace.Attribute{ trace.StringAttribute(PathAttribute, r.URL.Path), - trace.StringAttribute(HostAttribute, r.URL.Host), + trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), trace.StringAttribute(UserAgentAttribute, r.UserAgent()), } From 0f56738270a355db0b8f587c42f6c4e2635bca08 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 27 Feb 2019 13:23:58 -0800 Subject: [PATCH 122/212] fix inconsistent jaeger test. (#1038) * fix inconsistent jaeger test. * use the sorted 'got'. 
--- exporter/jaeger/jaeger_test.go | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/exporter/jaeger/jaeger_test.go b/exporter/jaeger/jaeger_test.go index de8ca87f4..ab3ce5adb 100644 --- a/exporter/jaeger/jaeger_test.go +++ b/exporter/jaeger/jaeger_test.go @@ -22,6 +22,7 @@ import ( gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" "go.opencensus.io/trace" + "sort" ) // TODO(jbd): Test export. @@ -130,7 +131,14 @@ func Test_spanDataToThrift(t *testing.T) { } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := spanDataToThrift(tt.data); !reflect.DeepEqual(got, tt.want) { + got := spanDataToThrift(tt.data) + sort.Slice(got.Tags, func(i, j int) bool { + return got.Tags[i].Key < got.Tags[j].Key + }) + sort.Slice(tt.want.Tags, func(i, j int) bool { + return tt.want.Tags[i].Key < tt.want.Tags[j].Key + }) + if !reflect.DeepEqual(got, tt.want) { t.Errorf("spanDataToThrift() = %v, want %v", got, tt.want) } }) From 22a302cdd30bb5e3c0c894c70decd0b53e96eda0 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 27 Feb 2019 13:52:07 -0800 Subject: [PATCH 123/212] treat 0 bucket bound as an error. (#1037) * treat 0 bucket bound as an error. * fix build error and review comment. removed more zero buckets from test and example. * drop 0 bucket silently. * add test for zero bucket drop. 
--- README.md | 4 ++-- examples/helloworld/main.go | 2 +- examples/quickstart/stats.go | 4 ++-- internal/readme/stats.go | 4 ++-- plugin/ocgrpc/client_stats_handler_test.go | 20 ++++++++++---------- plugin/ocgrpc/server_stats_handler_test.go | 20 ++++++++++---------- plugin/ocgrpc/stats_common.go | 6 +++--- plugin/ochttp/stats.go | 4 ++-- stats/view/aggregation_data_test.go | 14 +++++++------- stats/view/view.go | 12 ++++++++++++ stats/view/view_test.go | 18 +++++++++++------- trace/exemplar_test.go | 8 ++++---- 12 files changed, 66 insertions(+), 50 deletions(-) diff --git a/README.md b/README.md index b8a5107bf..3f40ed5cb 100644 --- a/README.md +++ b/README.md @@ -123,7 +123,7 @@ Currently three types of aggregations are supported: [embedmd]:# (internal/readme/stats.go aggs) ```go -distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) +distAgg := view.Distribution(1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() ``` @@ -136,7 +136,7 @@ if err := view.Register(&view.View{ Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to register view: %v", err) } diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index c93edcf29..0f26636dc 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -64,7 +64,7 @@ func main() { Description: "processed video size over time", TagKeys: []tag.Key{frontendKey}, Measure: videoSize, - Aggregation: view.Distribution(0, 1<<16, 1<<32), + Aggregation: view.Distribution(1<<16, 1<<32), }); err != nil { log.Fatalf("Cannot register view: %v", err) } diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 19ddd4869..8b4dc7af6 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -61,7 +61,7 @@ var ( // Latency in 
buckets: // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] - Aggregation: view.Distribution(0, 25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), + Aggregation: view.Distribution(25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), TagKeys: []tag.Key{KeyMethod}} LineCountView = &view.View{ @@ -83,7 +83,7 @@ var ( Description: "Groups the lengths of keys in buckets", Measure: MLineLengths, // Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] - Aggregation: view.Distribution(0, 5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), + Aggregation: view.Distribution(5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), } ) diff --git a/internal/readme/stats.go b/internal/readme/stats.go index e8a27ff98..8eecf1484 100644 --- a/internal/readme/stats.go +++ b/internal/readme/stats.go @@ -32,7 +32,7 @@ func statsExamples() { videoSize := stats.Int64("example.com/video_size", "processed video size", "MB") // START aggs - distAgg := view.Distribution(0, 1<<32, 2<<32, 3<<32) + distAgg := view.Distribution(1<<32, 2<<32, 3<<32) countAgg := view.Count() sumAgg := view.Sum() // END aggs @@ -44,7 +44,7 @@ func statsExamples() { Name: "example.com/video_size_distribution", Description: "distribution of processed video size over time", Measure: videoSize, - Aggregation: view.Distribution(0, 1<<32, 2<<32, 3<<32), + Aggregation: view.Distribution(1<<32, 2<<32, 3<<32), }); err != nil { log.Fatalf("Failed to register view: %v", err) } diff --git a/plugin/ocgrpc/client_stats_handler_test.go b/plugin/ocgrpc/client_stats_handler_test.go index 990075cdb..53f924823 100644 --- a/plugin/ocgrpc/client_stats_handler_test.go +++ b/plugin/ocgrpc/client_stats_handler_test.go @@ -80,7 +80,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 
1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), + Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, @@ -91,7 +91,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), + Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, @@ -102,7 +102,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), + Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, @@ -113,7 +113,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), + Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, @@ -157,7 +157,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), + Data: newDistributionData([]int64{0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), }, }, }, @@ -168,7 +168,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5), + Data: newDistributionData([]int64{0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 
2, 1, 2, 1.5, 0.5), }, }, }, @@ -225,7 +225,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), + Data: newDistributionData([]int64{0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), }, }, }, @@ -236,7 +236,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), + Data: newDistributionData([]int64{0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), }, }, }, @@ -247,7 +247,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2 /*16384*/, 1 /*65536*/, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), + Data: newDistributionData([]int64{0, 0, 0, 0, 2 /*16384*/, 1 /*65536*/, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), }, }, }, @@ -258,7 +258,7 @@ func TestClientDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyClientMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.666667, 2.1459558466666666e+08), + Data: newDistributionData([]int64{1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.666667, 2.1459558466666666e+08), }, }, }, diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go index 1b96edf88..cab232a68 100644 --- a/plugin/ocgrpc/server_stats_handler_test.go +++ b/plugin/ocgrpc/server_stats_handler_test.go @@ -80,7 +80,7 @@ func 
TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), + Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, @@ -91,7 +91,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), + Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 1, 1, 1, 0), }, }, }, @@ -102,7 +102,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), + Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, @@ -113,7 +113,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), + Data: newDistributionData([]int64{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 1, 10, 10, 10, 0), }, }, }, @@ -157,7 +157,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5), + Data: newDistributionData([]int64{0, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 1, 2, 1.5, 0.5), }, }, }, @@ -168,7 +168,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 
0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), + Data: newDistributionData([]int64{0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 2, 2, 3, 2.5, 0.5), }, }, }, @@ -225,7 +225,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), + Data: newDistributionData([]int64{0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 2, 1.333333333, 0.333333333*2), }, }, }, @@ -236,7 +236,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), + Data: newDistributionData([]int64{0, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 2, 3, 2.666666666, 0.333333333*2), }, }, }, @@ -247,7 +247,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.6666667, 2.1459558466666667e+08), + Data: newDistributionData([]int64{1, 1, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 1, 18432, 6485.6666667, 2.1459558466666667e+08), }, }, }, @@ -258,7 +258,7 @@ func TestServerDefaultCollections(t *testing.T) { Tags: []tag.Tag{ {Key: KeyServerMethod, Value: "package.service/method"}, }, - Data: newDistributionData([]int64{0, 0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), + Data: newDistributionData([]int64{0, 0, 0, 0, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0}, 3, 20480, 66561, 36523, 1.355519318e+09), }, }, }, diff --git a/plugin/ocgrpc/stats_common.go b/plugin/ocgrpc/stats_common.go index 1737809e7..e9991fe0f 100644 --- a/plugin/ocgrpc/stats_common.go +++ 
b/plugin/ocgrpc/stats_common.go @@ -51,9 +51,9 @@ type rpcData struct { // The following variables define the default hard-coded auxiliary data used by // both the default GRPC client and GRPC server metrics. var ( - DefaultBytesDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultMillisecondsDistribution = view.Distribution(0, 0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) - DefaultMessageCountDistribution = view.Distribution(0, 1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) + DefaultBytesDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultMillisecondsDistribution = view.Distribution(0.01, 0.05, 0.1, 0.3, 0.6, 0.8, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultMessageCountDistribution = view.Distribution(1, 2, 4, 8, 16, 32, 64, 128, 256, 512, 1024, 2048, 4096, 8192, 16384, 32768, 65536) ) // Server tags are applied to the context used to process each RPC, as well as diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index 46dcc8e57..cdf8ec21d 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -104,8 +104,8 @@ var ( // Default distributions used by views in this package. 
var ( - DefaultSizeDistribution = view.Distribution(0, 1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) - DefaultLatencyDistribution = view.Distribution(0, 1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) + DefaultSizeDistribution = view.Distribution(1024, 2048, 4096, 16384, 65536, 262144, 1048576, 4194304, 16777216, 67108864, 268435456, 1073741824, 4294967296) + DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) // Package ochttp provides some convenience views. diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go index 9b12b8537..dc209cbc5 100644 --- a/stats/view/aggregation_data_test.go +++ b/stats/view/aggregation_data_test.go @@ -66,7 +66,7 @@ func TestDataClone(t *testing.T) { } func TestDistributionData_addSample(t *testing.T) { - dd := newDistributionData([]float64{0, 1, 2}) + dd := newDistributionData([]float64{1, 2}) t1, _ := time.Parse("Mon Jan 2 15:04:05 -0700 MST 2006", "Mon Jan 2 15:04:05 -0700 MST 2006") e1 := &exemplar.Exemplar{ Attachments: exemplar.Attachments{ @@ -80,8 +80,8 @@ func TestDistributionData_addSample(t *testing.T) { want := &DistributionData{ Count: 1, - CountPerBucket: []int64{0, 1, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{nil, e1, nil, nil}, + CountPerBucket: []int64{1, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{e1, nil, nil}, Max: 0.5, Min: 0.5, Mean: 0.5, @@ -104,8 +104,8 @@ func TestDistributionData_addSample(t *testing.T) { // Previous exemplar should be preserved, since it has more annotations. 
want = &DistributionData{ Count: 2, - CountPerBucket: []int64{0, 2, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{nil, e1, nil, nil}, + CountPerBucket: []int64{2, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{e1, nil, nil}, Max: 0.7, Min: 0.5, Mean: 0.6, @@ -128,8 +128,8 @@ func TestDistributionData_addSample(t *testing.T) { // Exemplar should be replaced since it has a trace_id. want = &DistributionData{ Count: 3, - CountPerBucket: []int64{0, 3, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{nil, e3, nil, nil}, + CountPerBucket: []int64{3, 0, 0}, + ExemplarsPerBucket: []*exemplar.Exemplar{e3, nil, nil}, Max: 0.7, Min: 0.2, Mean: 0.4666666666666667, diff --git a/stats/view/view.go b/stats/view/view.go index 02644e1f4..f1c253e30 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -99,9 +99,21 @@ func (v *View) canonicalize() error { return ErrNegativeBucketBounds } } + // drop 0 bucket silently. + v.Aggregation.Buckets = dropZeroBounds(v.Aggregation.Buckets...) + return nil } +func dropZeroBounds(bounds ...float64) []float64 { + for i, bound := range bounds { + if bound > 0 { + return bounds[i:] + } + } + return []float64{} +} + // viewInternal is the internal representation of a View. type viewInternal struct { view *View // view is the canonicalized View definition associated with this view. 
diff --git a/stats/view/view_test.go b/stats/view/view_test.go index d50379fef..550110862 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -459,23 +459,27 @@ func TestViewRegister_negativeBucketBounds(t *testing.T) { } } -func TestViewRegister_zeroBucketBounds(t *testing.T) { - m := stats.Int64("TestViewRegister_negativeBucketBounds", "", "") +func TestViewRegister_sortBuckets(t *testing.T) { + m := stats.Int64("TestViewRegister_sortBuckets", "", "") v := &View{ Measure: m, - Aggregation: Distribution(0, 2), + Aggregation: Distribution(2, 1), } err := Register(v) if err != nil { - t.Errorf("Expected no error, got %v", err) + t.Fatalf("Unexpected err %s", err) + } + want := []float64{1, 2} + if diff := cmp.Diff(v.Aggregation.Buckets, want); diff != "" { + t.Errorf("buckets differ -got +want: %s", diff) } } -func TestViewRegister_sortBuckets(t *testing.T) { - m := stats.Int64("TestViewRegister_sortBuckets", "", "") +func TestViewRegister_dropZeroBuckets(t *testing.T) { + m := stats.Int64("TestViewRegister_dropZeroBuckets", "", "") v := &View{ Measure: m, - Aggregation: Distribution(2, 1), + Aggregation: Distribution(2, 0, 1), } err := Register(v) if err != nil { diff --git a/trace/exemplar_test.go b/trace/exemplar_test.go index de27631f9..5d6f62d5c 100644 --- a/trace/exemplar_test.go +++ b/trace/exemplar_test.go @@ -27,7 +27,7 @@ func TestTraceExemplar(t *testing.T) { m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) v := &view.View{ Measure: m, - Aggregation: view.Distribution(0, 1, 2, 3), + Aggregation: view.Distribution(1, 2, 3), } view.Register(v) ctx := context.Background() @@ -46,7 +46,7 @@ func TestTraceExemplar(t *testing.T) { if got := len(dd.ExemplarsPerBucket); got < 3 { t.Fatalf("len(dd.ExemplarsPerBucket) = %d; want >= 2", got) } - exemplar := dd.ExemplarsPerBucket[2] + exemplar := dd.ExemplarsPerBucket[1] if exemplar == nil { t.Fatal("Expected exemplar") } @@ -65,7 +65,7 @@ func TestTraceExemplar_notSampled(t 
*testing.T) { m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) v := &view.View{ Measure: m, - Aggregation: view.Distribution(0, 1, 2, 3), + Aggregation: view.Distribution(1, 2, 3), } view.Register(v) ctx := context.Background() @@ -84,7 +84,7 @@ func TestTraceExemplar_notSampled(t *testing.T) { if got := len(dd.ExemplarsPerBucket); got < 3 { t.Fatalf("len(buckets) = %d; want >= 2", got) } - exemplar := dd.ExemplarsPerBucket[2] + exemplar := dd.ExemplarsPerBucket[1] if exemplar == nil { t.Fatal("Expected exemplar") } From dc0ac5c2c858512c103d555317a3039ede8637fa Mon Sep 17 00:00:00 2001 From: Emmanuel T Odeke Date: Sat, 2 Mar 2019 01:27:55 -0800 Subject: [PATCH 124/212] exporter/jaeger: ensure non-zero status code sets error tag During conversion from OpenCensus-Go SpanData to Jaeger spans, ensure that if a Status' code is non-zero (0 being "OK" according to the OpenCensus data model), that we set the "error" tag on the corresponding Jaeger span. Fixes #1041 --- exporter/jaeger/jaeger.go | 11 +++++++++++ exporter/jaeger/jaeger_test.go | 4 +++- 2 files changed, 14 insertions(+), 1 deletion(-) diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index e63a09900..322735a10 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -192,6 +192,11 @@ func (e *Exporter) ExportSpan(data *trace.SpanData) { // TODO(jbd): Handle oversized bundlers. } +// As per the OpenCensus Status code mapping in +// https://opencensus.io/tracing/span/status/ +// the status is OK if the code is 0. +const opencensusStatusCodeOK = 0 + func spanDataToThrift(data *trace.SpanData) *gen.Span { tags := make([]*gen.Tag, 0, len(data.Attributes)) for k, v := range data.Attributes { @@ -206,6 +211,12 @@ func spanDataToThrift(data *trace.SpanData) *gen.Span { attributeToTag("status.message", data.Status.Message), ) + // Ensure that if Status.Code is not OK, that we set the "error" tag on the Jaeger span. 
+ // See Issue https://github.com/census-instrumentation/opencensus-go/issues/1041 + if data.Status.Code != opencensusStatusCodeOK { + tags = append(tags, attributeToTag("error", true)) + } + var logs []*gen.Log for _, a := range data.Annotations { fields := make([]*gen.Tag, 0, len(a.Attributes)) diff --git a/exporter/jaeger/jaeger_test.go b/exporter/jaeger/jaeger_test.go index ab3ce5adb..f93b5dd0e 100644 --- a/exporter/jaeger/jaeger_test.go +++ b/exporter/jaeger/jaeger_test.go @@ -64,6 +64,7 @@ func Test_spanDataToThrift(t *testing.T) { resultValue := true statusCodeValue := int64(2) doubleValue := float64(123.456) + boolTrue := true statusMessage := "error" tests := []struct { @@ -113,6 +114,7 @@ func Test_spanDataToThrift(t *testing.T) { Tags: []*gen.Tag{ {Key: "double", VType: gen.TagType_DOUBLE, VDouble: &doubleValue}, {Key: "key", VType: gen.TagType_STRING, VStr: &keyValue}, + {Key: "error", VType: gen.TagType_BOOL, VBool: &boolTrue}, {Key: "status.code", VType: gen.TagType_LONG, VLong: &statusCodeValue}, {Key: "status.message", VType: gen.TagType_STRING, VStr: &statusMessage}, }, @@ -139,7 +141,7 @@ func Test_spanDataToThrift(t *testing.T) { return tt.want.Tags[i].Key < tt.want.Tags[j].Key }) if !reflect.DeepEqual(got, tt.want) { - t.Errorf("spanDataToThrift() = %v, want %v", got, tt.want) + t.Errorf("spanDataToThrift()\nGot:\n%v\nWant;\n%v", got, tt.want) } }) } From f305e5c4e2cf345eba88de13d10de1126fa45a61 Mon Sep 17 00:00:00 2001 From: rghetia Date: Sun, 3 Mar 2019 22:22:19 -0800 Subject: [PATCH 125/212] add metric producer manager. (#1039) * add metric producer manager. * added immutable test and removed contains method. * fix fmt and lint. * fix immutability. * add nil check and fix test. * fix review comment. 
--- metric/producer/manager.go | 89 +++++++++++ metric/producer/manager_test.go | 147 ++++++++++++++++++ metric/{metricexport => producer}/producer.go | 4 +- 3 files changed, 238 insertions(+), 2 deletions(-) create mode 100644 metric/producer/manager.go create mode 100644 metric/producer/manager_test.go rename metric/{metricexport => producer}/producer.go (93%) diff --git a/metric/producer/manager.go b/metric/producer/manager.go new file mode 100644 index 000000000..35d888154 --- /dev/null +++ b/metric/producer/manager.go @@ -0,0 +1,89 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package producer + +import ( + "sync" +) + +type manager struct { + mu sync.RWMutex + producers map[Producer]struct{} +} + +var prodMgr *manager +var once sync.Once + +func getManager() *manager { + once.Do(func() { + prodMgr = &manager{} + prodMgr.producers = make(map[Producer]struct{}) + }) + return prodMgr +} + +// Add adds the producer to the manager if it is not already present. +// The manager maintains the list of active producers. It provides +// this list to a reader to read metrics from each producer and then export. +func Add(producer Producer) { + if producer == nil { + return + } + pm := getManager() + pm.add(producer) +} + +// Delete deletes the producer from the manager if it is present. 
+func Delete(producer Producer) { + if producer == nil { + return + } + pm := getManager() + pm.delete(producer) +} + +// GetAll returns a slice of all producer currently registered with +// the manager. For each call it generates a new slice. The slice +// should not be cached as registration may change at any time. It is +// typically called periodically by exporter to read metrics from +// the producers. +func GetAll() []Producer { + pm := getManager() + return pm.getAll() +} + +func (pm *manager) getAll() []Producer { + pm.mu.Lock() + defer pm.mu.Unlock() + producers := make([]Producer, len(pm.producers)) + i := 0 + for producer := range pm.producers { + producers[i] = producer + i++ + } + return producers +} + +func (pm *manager) add(producer Producer) { + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} +} + +func (pm *manager) delete(producer Producer) { + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) +} diff --git a/metric/producer/manager_test.go b/metric/producer/manager_test.go new file mode 100644 index 000000000..baf038abb --- /dev/null +++ b/metric/producer/manager_test.go @@ -0,0 +1,147 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package producer + +import ( + "testing" + + "go.opencensus.io/metric/metricdata" +) + +type testProducer struct { + name string +} + +var ( + myProd1 = newTestProducer("foo") + myProd2 = newTestProducer("bar") + myProd3 = newTestProducer("foobar") +) + +func newTestProducer(name string) *testProducer { + return &testProducer{name} +} + +func (mp *testProducer) Read() []*metricdata.Metric { + return nil +} + +func TestAdd(t *testing.T) { + Add(myProd1) + Add(myProd2) + + got := GetAll() + want := []*testProducer{myProd1, myProd2} + checkSlice(got, want, t) + deleteAll() +} + +func TestAddExisting(t *testing.T) { + Add(myProd1) + Add(myProd2) + Add(myProd1) + + got := GetAll() + want := []*testProducer{myProd2, myProd1} + checkSlice(got, want, t) + deleteAll() +} + +func TestAddNil(t *testing.T) { + Add(nil) + + got := GetAll() + want := []*testProducer{} + checkSlice(got, want, t) + deleteAll() +} + +func TestDelete(t *testing.T) { + Add(myProd1) + Add(myProd2) + Add(myProd3) + Delete(myProd2) + + got := GetAll() + want := []*testProducer{myProd1, myProd3} + checkSlice(got, want, t) + deleteAll() +} + +func TestDeleteNonExisting(t *testing.T) { + Add(myProd1) + Add(myProd3) + Delete(myProd2) + + got := GetAll() + want := []*testProducer{myProd1, myProd3} + checkSlice(got, want, t) + deleteAll() +} + +func TestDeleteNil(t *testing.T) { + Add(myProd1) + Add(myProd3) + Delete(nil) + + got := GetAll() + want := []*testProducer{myProd1, myProd3} + checkSlice(got, want, t) + deleteAll() +} + +func TestGetAllNil(t *testing.T) { + got := GetAll() + want := []*testProducer{} + checkSlice(got, want, t) + deleteAll() +} + +func TestImmutableProducerList(t *testing.T) { + Add(myProd1) + Add(myProd2) + + producersToMutate := GetAll() + producersToMutate[0] = myProd3 + got := GetAll() + want := []*testProducer{myProd1, myProd2} + checkSlice(got, want, t) + deleteAll() +} + +func checkSlice(got []Producer, want []*testProducer, t *testing.T) { + gotLen := len(got) + wantLen := 
len(want) + if gotLen != wantLen { + t.Errorf("got len: %d want: %d\n", gotLen, wantLen) + } else { + gotMap := map[Producer]struct{}{} + for i := 0; i < gotLen; i++ { + gotMap[got[i]] = struct{}{} + } + for i := 0; i < wantLen; i++ { + delete(gotMap, want[i]) + } + if len(gotMap) > 0 { + t.Errorf("got %v, want %v\n", got, want) + } + } +} + +func deleteAll() { + Delete(myProd1) + Delete(myProd2) + Delete(myProd3) +} diff --git a/metric/metricexport/producer.go b/metric/producer/producer.go similarity index 93% rename from metric/metricexport/producer.go rename to metric/producer/producer.go index 077b9fcf6..637ab972d 100644 --- a/metric/metricexport/producer.go +++ b/metric/producer/producer.go @@ -1,4 +1,4 @@ -// Copyright 2018, OpenCensus Authors +// Copyright 2019, OpenCensus Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package metricexport +package producer import ( "go.opencensus.io/metric/metricdata" From 8734d3b4deb5b1369308eb92b16ca7c7dd5ff343 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 4 Mar 2019 15:49:55 -0800 Subject: [PATCH 126/212] Make Add, Delete, GetAll associated with producer.Manager (#1047) fixes #1045 --- metric/producer/manager.go | 55 ++++++++++++----------------- metric/producer/manager_test.go | 61 +++++++++++++++++---------------- 2 files changed, 53 insertions(+), 63 deletions(-) diff --git a/metric/producer/manager.go b/metric/producer/manager.go index 35d888154..8d335c704 100644 --- a/metric/producer/manager.go +++ b/metric/producer/manager.go @@ -18,53 +18,54 @@ import ( "sync" ) -type manager struct { +// Manager maintains a list of active producers. Producers can register +// with the manager to allow readers to read all metrics provided by them. 
+// Readers can retrieve all producers registered with the manager, +// read metrics from the producers and export them. +type Manager struct { mu sync.RWMutex producers map[Producer]struct{} } -var prodMgr *manager +var prodMgr *Manager var once sync.Once -func getManager() *manager { +// GlobalManager is a single instance of producer manager +// that is used by all producers and all readers. +func GlobalManager() *Manager { once.Do(func() { - prodMgr = &manager{} + prodMgr = &Manager{} prodMgr.producers = make(map[Producer]struct{}) }) return prodMgr } -// Add adds the producer to the manager if it is not already present. -// The manager maintains the list of active producers. It provides -// this list to a reader to read metrics from each producer and then export. -func Add(producer Producer) { +// AddProducer adds the producer to the Manager if it is not already present. +func (pm *Manager) AddProducer(producer Producer) { if producer == nil { return } - pm := getManager() - pm.add(producer) + pm.mu.Lock() + defer pm.mu.Unlock() + pm.producers[producer] = struct{}{} } -// Delete deletes the producer from the manager if it is present. -func Delete(producer Producer) { +// DeleteProducer deletes the producer from the Manager if it is present. +func (pm *Manager) DeleteProducer(producer Producer) { if producer == nil { return } - pm := getManager() - pm.delete(producer) + pm.mu.Lock() + defer pm.mu.Unlock() + delete(pm.producers, producer) } // GetAll returns a slice of all producer currently registered with -// the manager. For each call it generates a new slice. The slice +// the Manager. For each call it generates a new slice. The slice // should not be cached as registration may change at any time. It is // typically called periodically by exporter to read metrics from // the producers. 
-func GetAll() []Producer { - pm := getManager() - return pm.getAll() -} - -func (pm *manager) getAll() []Producer { +func (pm *Manager) GetAll() []Producer { pm.mu.Lock() defer pm.mu.Unlock() producers := make([]Producer, len(pm.producers)) @@ -75,15 +76,3 @@ func (pm *manager) getAll() []Producer { } return producers } - -func (pm *manager) add(producer Producer) { - pm.mu.Lock() - defer pm.mu.Unlock() - pm.producers[producer] = struct{}{} -} - -func (pm *manager) delete(producer Producer) { - pm.mu.Lock() - defer pm.mu.Unlock() - delete(pm.producers, producer) -} diff --git a/metric/producer/manager_test.go b/metric/producer/manager_test.go index baf038abb..d4050e2bd 100644 --- a/metric/producer/manager_test.go +++ b/metric/producer/manager_test.go @@ -28,6 +28,7 @@ var ( myProd1 = newTestProducer("foo") myProd2 = newTestProducer("bar") myProd3 = newTestProducer("foobar") + pm = GlobalManager() ) func newTestProducer(name string) *testProducer { @@ -39,83 +40,83 @@ func (mp *testProducer) Read() []*metricdata.Metric { } func TestAdd(t *testing.T) { - Add(myProd1) - Add(myProd2) + pm.AddProducer(myProd1) + pm.AddProducer(myProd2) - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd1, myProd2} checkSlice(got, want, t) deleteAll() } func TestAddExisting(t *testing.T) { - Add(myProd1) - Add(myProd2) - Add(myProd1) + pm.AddProducer(myProd1) + pm.AddProducer(myProd2) + pm.AddProducer(myProd1) - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd2, myProd1} checkSlice(got, want, t) deleteAll() } func TestAddNil(t *testing.T) { - Add(nil) + pm.AddProducer(nil) - got := GetAll() + got := pm.GetAll() want := []*testProducer{} checkSlice(got, want, t) deleteAll() } func TestDelete(t *testing.T) { - Add(myProd1) - Add(myProd2) - Add(myProd3) - Delete(myProd2) + pm.AddProducer(myProd1) + pm.AddProducer(myProd2) + pm.AddProducer(myProd3) + pm.DeleteProducer(myProd2) - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd1, 
myProd3} checkSlice(got, want, t) deleteAll() } func TestDeleteNonExisting(t *testing.T) { - Add(myProd1) - Add(myProd3) - Delete(myProd2) + pm.AddProducer(myProd1) + pm.AddProducer(myProd3) + pm.DeleteProducer(myProd2) - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd1, myProd3} checkSlice(got, want, t) deleteAll() } func TestDeleteNil(t *testing.T) { - Add(myProd1) - Add(myProd3) - Delete(nil) + pm.AddProducer(myProd1) + pm.AddProducer(myProd3) + pm.DeleteProducer(nil) - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd1, myProd3} checkSlice(got, want, t) deleteAll() } func TestGetAllNil(t *testing.T) { - got := GetAll() + got := pm.GetAll() want := []*testProducer{} checkSlice(got, want, t) deleteAll() } func TestImmutableProducerList(t *testing.T) { - Add(myProd1) - Add(myProd2) + pm.AddProducer(myProd1) + pm.AddProducer(myProd2) - producersToMutate := GetAll() + producersToMutate := pm.GetAll() producersToMutate[0] = myProd3 - got := GetAll() + got := pm.GetAll() want := []*testProducer{myProd1, myProd2} checkSlice(got, want, t) deleteAll() @@ -141,7 +142,7 @@ func checkSlice(got []Producer, want []*testProducer, t *testing.T) { } func deleteAll() { - Delete(myProd1) - Delete(myProd2) - Delete(myProd3) + pm.DeleteProducer(myProd1) + pm.DeleteProducer(myProd2) + pm.DeleteProducer(myProd3) } From 127ad9a0bda233d0ee48bfc85b433680af5e3750 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 11 Mar 2019 15:48:28 -0700 Subject: [PATCH 127/212] ochttp plugin: add remote span as parent link instead of child link. 
(#1055) --- plugin/ochttp/server.go | 2 +- plugin/ochttp/trace_test.go | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index c65b51bbb..5fe15e89f 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -118,7 +118,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ span.AddLink(trace.Link{ TraceID: sc.TraceID, SpanID: sc.SpanID, - Type: trace.LinkTypeChild, + Type: trace.LinkTypeParent, Attributes: nil, }) } diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index ea9b77c05..8cc9cf7b9 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -328,7 +328,7 @@ func TestEndToEnd(t *testing.T) { t.Errorf("len(server.Links) = %d; want %d", got, want) } else { link := server.Links[0] - if got, want := link.Type, trace.LinkTypeChild; got != want { + if got, want := link.Type, trace.LinkTypeParent; got != want { t.Errorf("link.Type = %v; want %v", got, want) } } From 1c9fa349e904ab5e280e67471b53d7c5f696ef0d Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 11 Mar 2019 15:51:12 -0700 Subject: [PATCH 128/212] rename producer to metricproducer package. 
(#1053) --- metric/{producer => metricproducer}/manager.go | 2 +- metric/{producer => metricproducer}/manager_test.go | 2 +- metric/{producer => metricproducer}/producer.go | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) rename metric/{producer => metricproducer}/manager.go (98%) rename metric/{producer => metricproducer}/manager_test.go (99%) rename metric/{producer => metricproducer}/producer.go (97%) diff --git a/metric/producer/manager.go b/metric/metricproducer/manager.go similarity index 98% rename from metric/producer/manager.go rename to metric/metricproducer/manager.go index 8d335c704..ca1f39049 100644 --- a/metric/producer/manager.go +++ b/metric/metricproducer/manager.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package producer +package metricproducer import ( "sync" diff --git a/metric/producer/manager_test.go b/metric/metricproducer/manager_test.go similarity index 99% rename from metric/producer/manager_test.go rename to metric/metricproducer/manager_test.go index d4050e2bd..9d74d8678 100644 --- a/metric/producer/manager_test.go +++ b/metric/metricproducer/manager_test.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package producer +package metricproducer import ( "testing" diff --git a/metric/producer/producer.go b/metric/metricproducer/producer.go similarity index 97% rename from metric/producer/producer.go rename to metric/metricproducer/producer.go index 637ab972d..6cee9ed17 100644 --- a/metric/producer/producer.go +++ b/metric/metricproducer/producer.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package producer +package metricproducer import ( "go.opencensus.io/metric/metricdata" From 2eaaf3ad5aeaf6d5f513040ace947fad79d9da63 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Tue, 12 Mar 2019 10:09:36 -0700 Subject: [PATCH 129/212] Add makefile, enforce lint, fix lint errors. (#1044) * Add makefile, enforce lint, fix lint errors. * Remove duplicate comments. --- .travis.yml | 27 +++----- CONTRIBUTING.md | 9 ++- Makefile | 95 +++++++++++++++++++++++++++++ examples/quickstart/stats.go | 41 +++++++------ exemplar/exemplar.go | 3 +- internal/tagencoding/tagencoding.go | 5 +- internal/testpb/impl.go | 1 + internal/traceinternals.go | 1 + plugin/ochttp/stats.go | 57 ++++++++++++----- resource/resource.go | 1 + stats/internal/validation.go | 28 --------- stats/view/view.go | 22 +++++-- trace/internal/internal.go | 1 + 13 files changed, 203 insertions(+), 88 deletions(-) create mode 100644 Makefile delete mode 100644 stats/internal/validation.go diff --git a/.travis.yml b/.travis.yml index 39186b745..bd6b66ee8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,28 +1,17 @@ language: go +go_import_path: go.opencensus.io + go: - # 1.8 is tested by AppVeyor - 1.11.x -go_import_path: go.opencensus.io - -# Don't email me the results of the test runs. -notifications: - email: false +env: + global: + GO111MODULE=on before_script: - - GO_FILES=$(find . -iname '*.go' | grep -v /vendor/) # All the .go files, excluding vendor/ if any - - PKGS=$(go list ./... | grep -v /vendor/) # All the import paths, excluding vendor/ if any - - curl https://raw.githubusercontent.com/golang/dep/master/install.sh | sh # Install latest dep release - - go get github.com/rakyll/embedmd + - make install-tools script: - - embedmd -d README.md # Ensure embedded code is up-to-date - - export GO111MODULE=on - - go build ./... # Ensure dependency updates don't break build - - if [ -n "$(gofmt -s -l $GO_FILES)" ]; then echo "gofmt the following files:"; gofmt -s -l $GO_FILES; exit 1; fi - - go vet ./... 
- - go test -v -race $PKGS # Run all the tests with the race detector enabled - - GOARCH=386 go test -v $PKGS # Run all tests against a 386 architecture - - 'if [[ $TRAVIS_GO_VERSION = 1.8* ]]; then ! golint ./... | grep -vE "(_mock|_string|\.pb)\.go:"; fi' - - go run internal/check/version.go + - make travis-ci + - go run internal/check/version.go # TODO move this to makefile diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 3f3aed396..1ba3962c8 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -41,7 +41,8 @@ git remote add fork git@github.com:YOUR_GITHUB_USERNAME/opencensus-go.git Run tests: ``` -$ go test ./... +$ make install-tools # Only first time. +$ make ``` Checkout a new branch, make modifications and push the branch to your fork: @@ -54,3 +55,9 @@ $ git push fork feature ``` Open a pull request against the main opencensus-go repo. + +## General Notes +This project uses Appveyor and Travis for CI. + +The dependencies are managed with `go mod` if you work with the sources under your +`$GOPATH` you need to set the environment variable `GO111MODULE=on`. \ No newline at end of file diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..e2f2ed59e --- /dev/null +++ b/Makefile @@ -0,0 +1,95 @@ +# TODO: Fix this on windows. +ALL_SRC := $(shell find . -name '*.go' \ + -not -path './vendor/*' \ + -not -path '*/gen-go/*' \ + -type f | sort) +ALL_PKGS := $(shell go list $(sort $(dir $(ALL_SRC)))) + +GOTEST_OPT?=-v -race -timeout 30s +GOTEST_OPT_WITH_COVERAGE = $(GOTEST_OPT) -coverprofile=coverage.txt -covermode=atomic +GOTEST=go test +GOFMT=gofmt +GOLINT=golint +GOVET=go vet +EMBEDMD=embedmd +# TODO decide if we need to change these names. 
+TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" +TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" + +.DEFAULT_GOAL := fmt-lint-vet-embedmd-test + +.PHONY: fmt-lint-vet-embedmd-test +fmt-lint-vet-embedmd-test: fmt lint vet embedmd test + +# TODO enable test-with-coverage in tavis +.PHONY: travis-ci +travis-ci: fmt lint vet embedmd test test-386 + +all-pkgs: + @echo $(ALL_PKGS) | tr ' ' '\n' | sort + +all-srcs: + @echo $(ALL_SRC) | tr ' ' '\n' | sort + +.PHONY: test +test: + $(GOTEST) $(GOTEST_OPT) $(ALL_PKGS) + +.PHONY: test-386 +test-386: + GOARCH=386 $(GOTEST) -v -timeout 30s $(ALL_PKGS) + +.PHONY: test-with-coverage +test-with-coverage: + $(GOTEST) $(GOTEST_OPT_WITH_COVERAGE) $(ALL_PKGS) + +.PHONY: fmt +fmt: + @FMTOUT=`$(GOFMT) -s -l $(ALL_SRC) 2>&1`; \ + if [ "$$FMTOUT" ]; then \ + echo "$(GOFMT) FAILED => gofmt the following files:\n"; \ + echo "$$FMTOUT\n"; \ + exit 1; \ + else \ + echo "Fmt finished successfully"; \ + fi + +.PHONY: lint +lint: + @LINTOUT=`$(GOLINT) $(ALL_PKGS) | grep -v $(TRACE_ID_LINT_EXCEPTION) | grep -v $(TRACE_OPTION_LINT_EXCEPTION) 2>&1`; \ + if [ "$$LINTOUT" ]; then \ + echo "$(GOLINT) FAILED => clean the following lint errors:\n"; \ + echo "$$LINTOUT\n"; \ + exit 1; \ + else \ + echo "Lint finished successfully"; \ + fi + +.PHONY: vet +vet: + # TODO: Understand why go vet downloads "github.com/google/go-cmp v0.2.0" + @VETOUT=`$(GOVET) ./... 
| grep -v "go: downloading" 2>&1`; \ + if [ "$$VETOUT" ]; then \ + echo "$(GOVET) FAILED => go vet the following files:\n"; \ + echo "$$VETOUT\n"; \ + exit 1; \ + else \ + echo "Vet finished successfully"; \ + fi + +.PHONY: embedmd +embedmd: + @EMBEDMDOUT=`$(EMBEDMD) -d README.md 2>&1`; \ + if [ "$$EMBEDMDOUT" ]; then \ + echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ + echo "$$EMBEDMDOUT\n"; \ + exit 1; \ + else \ + echo "Embedmd finished successfully"; \ + fi + +.PHONY: install-tools +install-tools: + go get -u golang.org/x/tools/cmd/cover + go get -u golang.org/x/lint/golint + go get -u github.com/rakyll/embedmd diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 8b4dc7af6..8811ba17d 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -35,53 +35,56 @@ import ( "go.opencensus.io/zpages" ) +// Measures for the stats quickstart. var ( // The latency in milliseconds - MLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", "ms") + mLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", stats.UnitMilliseconds) // Counts the number of lines read in from standard input - MLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", "1") + mLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", stats.UnitNone) // Encounters the number of non EOF(end-of-file) errors. - MErrors = stats.Int64("repl/errors", "The number of errors encountered", "1") + mErrors = stats.Int64("repl/errors", "The number of errors encountered", stats.UnitNone) // Counts/groups the lengths of lines read in. - MLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", "By") + mLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", stats.UnitBytes) ) +// TagKeys for the stats quickstart. 
var ( - KeyMethod, _ = tag.NewKey("method") + keyMethod, _ = tag.NewKey("method") ) +// Views for the stats quickstart. var ( - LatencyView = &view.View{ + latencyView = &view.View{ Name: "demo/latency", - Measure: MLatencyMs, + Measure: mLatencyMs, Description: "The distribution of the latencies", // Latency in buckets: // [>=0ms, >=25ms, >=50ms, >=75ms, >=100ms, >=200ms, >=400ms, >=600ms, >=800ms, >=1s, >=2s, >=4s, >=6s] Aggregation: view.Distribution(25, 50, 75, 100, 200, 400, 600, 800, 1000, 2000, 4000, 6000), - TagKeys: []tag.Key{KeyMethod}} + TagKeys: []tag.Key{keyMethod}} - LineCountView = &view.View{ + lineCountView = &view.View{ Name: "demo/lines_in", - Measure: MLinesIn, + Measure: mLinesIn, Description: "The number of lines from standard input", Aggregation: view.Count(), } - ErrorCountView = &view.View{ + errorCountView = &view.View{ Name: "demo/errors", - Measure: MErrors, + Measure: mErrors, Description: "The number of errors encountered", Aggregation: view.Count(), } - LineLengthView = &view.View{ + lineLengthView = &view.View{ Name: "demo/line_lengths", Description: "Groups the lengths of keys in buckets", - Measure: MLineLengths, + Measure: mLineLengths, // Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] Aggregation: view.Distribution(5, 2000, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), } @@ -102,7 +105,7 @@ func main() { view.RegisterExporter(exporter) // Register the views - if err := view.Register(LatencyView, LineCountView, ErrorCountView, LineLengthView); err != nil { + if err := view.Register(latencyView, lineCountView, errorCountView, lineLengthView); err != nil { log.Fatalf("Failed to register views: %v", err) } @@ -128,7 +131,7 @@ func main() { // readEvaluateProcess reads a line from the input reader and // then processes it. It returns an error if any was encountered. 
func readEvaluateProcess(br *bufio.Reader) error { - ctx, err := tag.New(context.Background(), tag.Insert(KeyMethod, "repl")) + ctx, err := tag.New(context.Background(), tag.Insert(keyMethod, "repl")) if err != nil { return err } @@ -137,14 +140,14 @@ func readEvaluateProcess(br *bufio.Reader) error { line, _, err := br.ReadLine() if err != nil { if err != io.EOF { - stats.Record(ctx, MErrors.M(1)) + stats.Record(ctx, mErrors.M(1)) } return err } out, err := processLine(ctx, line) if err != nil { - stats.Record(ctx, MErrors.M(1)) + stats.Record(ctx, mErrors.M(1)) return err } fmt.Printf("< %s\n\n", out) @@ -157,7 +160,7 @@ func processLine(ctx context.Context, in []byte) (out []byte, err error) { startTime := time.Now() defer func() { ms := float64(time.Since(startTime).Nanoseconds()) / 1e6 - stats.Record(ctx, MLinesIn.M(1), MLatencyMs.M(ms), MLineLengths.M(int64(len(in)))) + stats.Record(ctx, mLinesIn.M(1), mLatencyMs.M(ms), mLineLengths.M(int64(len(in)))) }() return bytes.ToUpper(in), nil diff --git a/exemplar/exemplar.go b/exemplar/exemplar.go index e676df837..acc225af9 100644 --- a/exemplar/exemplar.go +++ b/exemplar/exemplar.go @@ -24,6 +24,7 @@ import ( "time" ) +// Exemplars keys. const ( KeyTraceID = "trace_id" KeySpanID = "span_id" @@ -66,7 +67,7 @@ func RegisterAttachmentExtractor(e AttachmentExtractor) { extractors = append(extractors, e) } -// NewFromContext extracts exemplars from the given context. +// AttachmentsFromContext extracts exemplars from the given context. // Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an // unspecified order to add attachments to the exemplar. func AttachmentsFromContext(ctx context.Context) Attachments { diff --git a/internal/tagencoding/tagencoding.go b/internal/tagencoding/tagencoding.go index 3b1af8b4b..41b2c3fc0 100644 --- a/internal/tagencoding/tagencoding.go +++ b/internal/tagencoding/tagencoding.go @@ -17,6 +17,7 @@ // used interally by the stats collector. 
package tagencoding // import "go.opencensus.io/internal/tagencoding" +// Values represent the encoded buffer for the values. type Values struct { Buffer []byte WriteIndex int @@ -31,6 +32,7 @@ func (vb *Values) growIfRequired(expected int) { } } +// WriteValue is the helper method to encode Values from map[Key][]byte. func (vb *Values) WriteValue(v []byte) { length := len(v) & 0xff vb.growIfRequired(1 + length) @@ -49,7 +51,7 @@ func (vb *Values) WriteValue(v []byte) { vb.WriteIndex += length } -// ReadValue is the helper method to read the values when decoding valuesBytes to a map[Key][]byte. +// ReadValue is the helper method to decode Values to a map[Key][]byte. func (vb *Values) ReadValue() []byte { // read length of v length := int(vb.Buffer[vb.ReadIndex]) @@ -67,6 +69,7 @@ func (vb *Values) ReadValue() []byte { return v } +// Bytes returns a reference to already written bytes in the Buffer. func (vb *Values) Bytes() []byte { return vb.Buffer[:vb.WriteIndex] } diff --git a/internal/testpb/impl.go b/internal/testpb/impl.go index 24533afcd..3ffed6a9b 100644 --- a/internal/testpb/impl.go +++ b/internal/testpb/impl.go @@ -62,6 +62,7 @@ func (s *testServer) Multiple(stream Foo_MultipleServer) error { } } +// NewTestClient returns a new TestClient. func NewTestClient(l *testing.T) (client FooClient, cleanup func()) { // initialize server listener, err := net.Listen("tcp", "localhost:0") diff --git a/internal/traceinternals.go b/internal/traceinternals.go index 553ca68dc..073af7b47 100644 --- a/internal/traceinternals.go +++ b/internal/traceinternals.go @@ -22,6 +22,7 @@ import ( // TODO(#412): remove this var Trace interface{} +// LocalSpanStoreEnabled true if the local span store is enabled. 
var LocalSpanStoreEnabled bool // BucketConfiguration stores the number of samples to store for span buckets diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index cdf8ec21d..63bbcda5e 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -20,19 +20,31 @@ import ( "go.opencensus.io/tag" ) -// The following client HTTP measures are supported for use in custom views. +// Deprecated: client HTTP measures. var ( // Deprecated: Use a Count aggregation over one of the other client measures to achieve the same effect. - ClientRequestCount = stats.Int64("opencensus.io/http/client/request_count", "Number of HTTP requests started", stats.UnitDimensionless) + ClientRequestCount = stats.Int64( + "opencensus.io/http/client/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) // Deprecated: Use ClientSentBytes. - ClientRequestBytes = stats.Int64("opencensus.io/http/client/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) + ClientRequestBytes = stats.Int64( + "opencensus.io/http/client/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) // Deprecated: Use ClientReceivedBytes. - ClientResponseBytes = stats.Int64("opencensus.io/http/client/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) + ClientResponseBytes = stats.Int64( + "opencensus.io/http/client/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) // Deprecated: Use ClientRoundtripLatency. - ClientLatency = stats.Float64("opencensus.io/http/client/latency", "End-to-end latency", stats.UnitMilliseconds) + ClientLatency = stats.Float64( + "opencensus.io/http/client/latency", + "End-to-end latency", + stats.UnitMilliseconds) ) -// Client measures supported for use in custom views. +// The following client HTTP measures are supported for use in custom views. 
var ( ClientSentBytes = stats.Int64( "opencensus.io/http/client/sent_bytes", @@ -53,10 +65,22 @@ var ( // The following server HTTP measures are supported for use in custom views: var ( - ServerRequestCount = stats.Int64("opencensus.io/http/server/request_count", "Number of HTTP requests started", stats.UnitDimensionless) - ServerRequestBytes = stats.Int64("opencensus.io/http/server/request_bytes", "HTTP request body size if set as ContentLength (uncompressed)", stats.UnitBytes) - ServerResponseBytes = stats.Int64("opencensus.io/http/server/response_bytes", "HTTP response body size (uncompressed)", stats.UnitBytes) - ServerLatency = stats.Float64("opencensus.io/http/server/latency", "End-to-end latency", stats.UnitMilliseconds) + ServerRequestCount = stats.Int64( + "opencensus.io/http/server/request_count", + "Number of HTTP requests started", + stats.UnitDimensionless) + ServerRequestBytes = stats.Int64( + "opencensus.io/http/server/request_bytes", + "HTTP request body size if set as ContentLength (uncompressed)", + stats.UnitBytes) + ServerResponseBytes = stats.Int64( + "opencensus.io/http/server/response_bytes", + "HTTP response body size (uncompressed)", + stats.UnitBytes) + ServerLatency = stats.Float64( + "opencensus.io/http/server/latency", + "End-to-end latency", + stats.UnitMilliseconds) ) // The following tags are applied to stats recorded by this package. Host, Path @@ -108,7 +132,7 @@ var ( DefaultLatencyDistribution = view.Distribution(1, 2, 3, 4, 5, 6, 8, 10, 13, 16, 20, 25, 30, 40, 50, 65, 80, 100, 130, 160, 200, 250, 300, 400, 500, 650, 800, 1000, 2000, 5000, 10000, 20000, 50000, 100000) ) -// Package ochttp provides some convenience views. +// Package ochttp provides some convenience views for client measures. // You still need to register these views for data to actually be collected. var ( ClientSentBytesDistribution = &view.View{ @@ -144,6 +168,7 @@ var ( } ) +// Deprecated: Old client Views. 
var ( // Deprecated: No direct replacement, but see ClientCompletedCount. ClientRequestCountView = &view.View{ @@ -161,7 +186,7 @@ var ( Aggregation: DefaultSizeDistribution, } - // Deprecated: Use ClientReceivedBytesDistribution. + // Deprecated: Use ClientReceivedBytesDistribution instead. ClientResponseBytesView = &view.View{ Name: "opencensus.io/http/client/response_bytes", Description: "Size distribution of HTTP response body", @@ -169,7 +194,7 @@ var ( Aggregation: DefaultSizeDistribution, } - // Deprecated: Use ClientRoundtripLatencyDistribution. + // Deprecated: Use ClientRoundtripLatencyDistribution instead. ClientLatencyView = &view.View{ Name: "opencensus.io/http/client/latency", Description: "Latency distribution of HTTP requests", @@ -177,7 +202,7 @@ var ( Aggregation: DefaultLatencyDistribution, } - // Deprecated: Use ClientCompletedCount. + // Deprecated: Use ClientCompletedCount instead. ClientRequestCountByMethod = &view.View{ Name: "opencensus.io/http/client/request_count_by_method", Description: "Client request count by HTTP method", @@ -186,7 +211,7 @@ var ( Aggregation: view.Count(), } - // Deprecated: Use ClientCompletedCount. + // Deprecated: Use ClientCompletedCount instead. ClientResponseCountByStatusCode = &view.View{ Name: "opencensus.io/http/client/response_count_by_status_code", Description: "Client response count by status code", @@ -196,6 +221,8 @@ var ( } ) +// Package ochttp provides some convenience views for server measures. +// You still need to register these views for data to actually be collected. var ( ServerRequestCountView = &view.View{ Name: "opencensus.io/http/server/request_count", diff --git a/resource/resource.go b/resource/resource.go index ec89b216c..b1764e1d3 100644 --- a/resource/resource.go +++ b/resource/resource.go @@ -26,6 +26,7 @@ import ( "strings" ) +// Environment variables used by FromEnv to decode a resource. 
const ( EnvVarType = "OC_RESOURCE_TYPE" EnvVarLabels = "OC_RESOURCE_LABELS" diff --git a/stats/internal/validation.go b/stats/internal/validation.go deleted file mode 100644 index b946667f9..000000000 --- a/stats/internal/validation.go +++ /dev/null @@ -1,28 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package internal // import "go.opencensus.io/stats/internal" - -const ( - MaxNameLength = 255 -) - -func IsPrintable(str string) bool { - for _, r := range str { - if !(r >= ' ' && r <= '~') { - return false - } - } - return true -} diff --git a/stats/view/view.go b/stats/view/view.go index f1c253e30..7372f999f 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -27,7 +27,6 @@ import ( "go.opencensus.io/exemplar" "go.opencensus.io/stats" - "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -70,6 +69,9 @@ func (v *View) same(other *View) bool { v.Measure.Name() == other.Measure.Name() } +// ErrNegativeBucketBounds error returned if histogram contains negative bounds. +// +// Deprecated: this should not be public. var ErrNegativeBucketBounds = errors.New("negative bucket bounds not supported") // canonicalize canonicalizes v by setting explicit @@ -195,11 +197,23 @@ func (r *Row) Equal(other *Row) bool { return reflect.DeepEqual(r.Tags, other.Tags) && r.Data.equal(other.Data) } +const maxNameLength = 255 + +// Returns true if the given string contains only printable characters. 
+func isPrintable(str string) bool { + for _, r := range str { + if !(r >= ' ' && r <= '~') { + return false + } + } + return true +} + func checkViewName(name string) error { - if len(name) > internal.MaxNameLength { - return fmt.Errorf("view name cannot be larger than %v", internal.MaxNameLength) + if len(name) > maxNameLength { + return fmt.Errorf("view name cannot be larger than %v", maxNameLength) } - if !internal.IsPrintable(name) { + if !isPrintable(name) { return fmt.Errorf("view name needs to be an ASCII string") } return nil diff --git a/trace/internal/internal.go b/trace/internal/internal.go index 1c8b9b34b..7e808d8f3 100644 --- a/trace/internal/internal.go +++ b/trace/internal/internal.go @@ -15,6 +15,7 @@ // Package internal provides trace internals. package internal +// IDGenerator allows custom generators for TraceId and SpanId. type IDGenerator interface { NewTraceID() [16]byte NewSpanID() [8]byte From d1aebdcb4d4866dcb036f57503d612add60f22a7 Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 12 Mar 2019 10:54:40 -0700 Subject: [PATCH 130/212] refactor ReadAll to Read and fix concurrency issue. (#1056) * refactor ReadAll to Read and fix concurrency issue. 
* use sync.Map instead of RWMutex --- metric/gauge_test.go | 36 ++++++++++++++++++++++++++++-------- metric/registry.go | 27 +++++++++++++++------------ 2 files changed, 43 insertions(+), 20 deletions(-) diff --git a/metric/gauge_test.go b/metric/gauge_test.go index e5f8f2e9e..c475a9d5f 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -31,7 +31,7 @@ func TestGauge(t *testing.T) { f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")).Add(1) - m := r.ReadAll() + m := r.Read() want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ @@ -79,17 +79,17 @@ func TestFloat64Entry_Add(t *testing.T) { r := NewRegistry() g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) g.GetEntry().Add(0) - ms := r.ReadAll() + ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", got, want) } g.GetEntry().Add(1) - ms = r.ReadAll() + ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 1.0; got != want { t.Errorf("value = %v, want %v", got, want) } g.GetEntry().Add(-1) - ms = r.ReadAll() + ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", got, want) } @@ -99,7 +99,7 @@ func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) g.GetEntry().Add(-1.0) - ms := r.ReadAll() + ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(0); got != want { t.Errorf("value = %v, want %v", got, want) } @@ -109,12 +109,12 @@ func TestInt64GaugeEntry_Add(t *testing.T) { r := NewRegistry() g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) g.GetEntry().Add(0) - ms := r.ReadAll() + ms := r.Read() if got, want 
:= ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, want %v", got, want) } g.GetEntry().Add(1) - ms = r.ReadAll() + ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { t.Errorf("value = %v, want %v", got, want) } @@ -124,7 +124,7 @@ func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) g.GetEntry().Add(-1) - ms := r.ReadAll() + ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, want %v", got, want) } @@ -156,6 +156,26 @@ func TestMapKey(t *testing.T) { } } +func TestRaceCondition(t *testing.T) { + r := NewRegistry() + + // start reader before adding Gauge metric. + var ms = []*metricdata.Metric{} + for i := 0; i < 5; i++ { + go func(k int) { + for j := 0; j < 5; j++ { + g := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j), "", metricdata.UnitDimensionless) + g.GetEntry().Add(1) + } + }(i) + } + time.Sleep(1 * time.Second) + ms = r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + func ignoreTimes(_, _ time.Time) bool { return true } diff --git a/metric/registry.go b/metric/registry.go index ac39e421e..a31baccd5 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -15,23 +15,23 @@ package metric import ( - "go.opencensus.io/metric/metricdata" "log" + "sync" "time" + + "go.opencensus.io/metric/metricdata" ) // Registry creates and manages a set of gauges. // External synchronization is required if you want to add gauges to the same // registry from multiple goroutines. type Registry struct { - gauges map[string]*gauge + gauges sync.Map } // NewRegistry initializes a new Registry. 
func NewRegistry() *Registry { - return &Registry{ - gauges: make(map[string]*gauge), - } + return &Registry{} } // AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. @@ -53,8 +53,9 @@ func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, } func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) *gauge { - existing, ok := r.gauges[name] + val, ok := r.gauges.Load(name) if ok { + existing := val.(*gauge) if existing.isFloat != g.isFloat { log.Panicf("Gauge with name %s already exists with a different type", name) } @@ -67,15 +68,17 @@ func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, descript Unit: unit, LabelKeys: labelKeys, } - r.gauges[name] = g + r.gauges.Store(name, g) return g } -// ReadAll reads all gauges in this registry and returns their values as metrics. -func (r *Registry) ReadAll() []*metricdata.Metric { - ms := make([]*metricdata.Metric, 0, len(r.gauges)) - for _, g := range r.gauges { +// Read reads all gauges in this registry and returns their values as metrics. +func (r *Registry) Read() []*metricdata.Metric { + ms := []*metricdata.Metric{} + r.gauges.Range(func(k, v interface{}) bool { + g := v.(*gauge) ms = append(ms, g.read()) - } + return true + }) return ms } From bf23ae1ffcc5e22ab26392aaf8d15e6b965675a5 Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 12 Mar 2019 14:26:37 -0700 Subject: [PATCH 131/212] During gauge creation return error instead of panic. (#1057) * During gauge creation return error instead of panic. * fix review comments. 
--- metric/error_const.go | 22 ++++++++++++++ metric/examples_test.go | 4 +-- metric/gauge.go | 38 ++++++++++++++---------- metric/gauge_test.go | 66 ++++++++++++++++++++++++++++++----------- metric/registry.go | 44 +++++++++++++++++++-------- 5 files changed, 126 insertions(+), 48 deletions(-) create mode 100644 metric/error_const.go diff --git a/metric/error_const.go b/metric/error_const.go new file mode 100644 index 000000000..a0dc8c0b9 --- /dev/null +++ b/metric/error_const.go @@ -0,0 +1,22 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metric + +import "errors" + +var ( + errGaugeExistsWithDiffType = errors.New("gauge with same name exists with a different type") + errKeyValueMismatch = errors.New("must supply the same number of label values as keys used to construct this gauge") +) diff --git a/metric/examples_test.go b/metric/examples_test.go index b510ce8c1..cc39571ba 100644 --- a/metric/examples_test.go +++ b/metric/examples_test.go @@ -25,10 +25,10 @@ func ExampleRegistry_AddInt64Gauge() { r := metric.NewRegistry() // TODO: allow exporting from a registry - g := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") + g, _ := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) { - e := g.GetEntry(metricdata.NewLabelValue(request.Method)) + e, _ := g.GetEntry(metricdata.NewLabelValue(request.Method)) e.Add(1) defer e.Add(-1) // process request ... diff --git a/metric/gauge.go b/metric/gauge.go index c64a9f074..38ff88544 100644 --- a/metric/gauge.go +++ b/metric/gauge.go @@ -32,11 +32,11 @@ import ( // // gauge should not be used directly, use Float64Gauge or Int64Gauge. 
type gauge struct { - vals sync.Map - desc metricdata.Descriptor - start time.Time - keys []string - isFloat bool + vals sync.Map + desc metricdata.Descriptor + start time.Time + keys []string + gType gaugeType } type gaugeEntry interface { @@ -92,16 +92,16 @@ func (g *gauge) labelValues(s string) []metricdata.LabelValue { return vals } -func (g *gauge) entryForValues(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) interface{} { +func (g *gauge) entryForValues(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) (interface{}, error) { if len(labelVals) != len(g.keys) { - panic("must supply the same number of label values as keys used to construct this gauge") + return nil, errKeyValueMismatch } mapKey := g.mapKey(labelVals) if entry, ok := g.vals.Load(mapKey); ok { - return entry + return entry, nil } entry, _ := g.vals.LoadOrStore(mapKey, newEntry()) - return entry + return entry, nil } // Float64Gauge represents a float64 value that can go up and down. @@ -131,10 +131,14 @@ func (e *Float64Entry) read(t time.Time) metricdata.Point { // // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. -func (g *Float64Gauge) GetEntry(labelVals ...metricdata.LabelValue) *Float64Entry { - return g.g.entryForValues(labelVals, func() gaugeEntry { +func (g *Float64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Float64Entry, error) { + entry, err := g.g.entryForValues(labelVals, func() gaugeEntry { return &Float64Entry{} - }).(*Float64Entry) + }) + if err != nil { + return nil, err + } + return entry.(*Float64Entry), nil } // Set sets the gauge entry value to val. @@ -179,10 +183,14 @@ func (e *Int64GaugeEntry) read(t time.Time) metricdata.Point { // // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. 
-func (g *Int64Gauge) GetEntry(labelVals ...metricdata.LabelValue) *Int64GaugeEntry { - return g.g.entryForValues(labelVals, func() gaugeEntry { +func (g *Int64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Int64GaugeEntry, error) { + entry, err := g.g.entryForValues(labelVals, func() gaugeEntry { return &Int64GaugeEntry{} - }).(*Int64GaugeEntry) + }) + if err != nil { + return nil, err + } + return entry.(*Int64GaugeEntry), nil } // Set sets the value of the gauge entry to the provided value. diff --git a/metric/gauge_test.go b/metric/gauge_test.go index c475a9d5f..05b9a7cbd 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -26,11 +26,15 @@ import ( func TestGauge(t *testing.T) { r := NewRegistry() - f := r.AddFloat64Gauge("TestGauge", "", "", "k1", "k2") - f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}).Set(5) - f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) - f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}).Add(1) - f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")).Add(1) + f, _ := r.AddFloat64Gauge("TestGauge", "", "", "k1", "k2") + e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) + e.Set(5) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + e.Add(1) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + e.Add(1) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) + e.Add(1) m := r.Read() want := []*metricdata.Metric{ { @@ -77,18 +81,21 @@ func TestGauge(t *testing.T) { func TestFloat64Entry_Add(t *testing.T) { r := NewRegistry() - g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) - g.GetEntry().Add(0) + g, _ := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + e, _ := g.GetEntry() + e.Add(0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", 
got, want) } - g.GetEntry().Add(1) + e, _ = g.GetEntry() + e.Add(1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 1.0; got != want { t.Errorf("value = %v, want %v", got, want) } - g.GetEntry().Add(-1) + e, _ = g.GetEntry() + e.Add(-1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), 0.0; got != want { t.Errorf("value = %v, want %v", got, want) @@ -97,8 +104,9 @@ func TestFloat64Entry_Add(t *testing.T) { func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() - g := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) - g.GetEntry().Add(-1.0) + g, _ := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + e, _ := g.GetEntry() + e.Add(-1.0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(0); got != want { t.Errorf("value = %v, want %v", got, want) @@ -107,13 +115,15 @@ func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { func TestInt64GaugeEntry_Add(t *testing.T) { r := NewRegistry() - g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) - g.GetEntry().Add(0) + g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + e, _ := g.GetEntry() + e.Add(0) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, want %v", got, want) } - g.GetEntry().Add(1) + e, _ = g.GetEntry() + e.Add(1) ms = r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(1); got != want { t.Errorf("value = %v, want %v", got, want) @@ -122,14 +132,33 @@ func TestInt64GaugeEntry_Add(t *testing.T) { func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() - g := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) - g.GetEntry().Add(-1) + g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + e, _ := g.GetEntry() + e.Add(-1) ms := r.Read() if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(0); got != want { t.Errorf("value = %v, 
want %v", got, want) } } +func TestGaugeWithSameNameDiffType(t *testing.T) { + r := NewRegistry() + r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + _, wantErr := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + if wantErr == nil { + t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + } +} + +func TestGaugeWithLabelMismatch(t *testing.T) { + r := NewRegistry() + g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless, "k1") + _, wantErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) + if wantErr == nil { + t.Errorf("got: nil, want error: %v", errKeyValueMismatch) + } +} + func TestMapKey(t *testing.T) { cases := [][]metricdata.LabelValue{ {}, @@ -164,8 +193,9 @@ func TestRaceCondition(t *testing.T) { for i := 0; i < 5; i++ { go func(k int) { for j := 0; j < 5; j++ { - g := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j), "", metricdata.UnitDimensionless) - g.GetEntry().Add(1) + g, _ := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j), "", metricdata.UnitDimensionless) + e, _ := g.GetEntry() + e.Add(1) } }(i) } diff --git a/metric/registry.go b/metric/registry.go index a31baccd5..4c0300264 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -15,7 +15,6 @@ package metric import ( - "log" "sync" "time" @@ -29,35 +28,54 @@ type Registry struct { gauges sync.Map } +type gaugeType int + +const ( + gaugeInt64 gaugeType = iota + gaugeFloat64 + derivedGaugeInt64 + derivedGaugeFloat64 +) + // NewRegistry initializes a new Registry. func NewRegistry() *Registry { return &Registry{} } // AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. 
-func (r *Registry) AddFloat64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) *Float64Gauge { +func (r *Registry) AddFloat64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Float64Gauge, error) { f := &Float64Gauge{ g: gauge{ - isFloat: true, + gType: gaugeFloat64, }, } - r.initGauge(&f.g, labelKeys, name, description, unit) - return f + _, err := r.initGauge(&f.g, labelKeys, name, description, unit) + if err != nil { + return nil, err + } + return f, nil } // AddInt64Gauge creates and adds a new int64-valued gauge to this registry. -func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) *Int64Gauge { - i := &Int64Gauge{} - r.initGauge(&i.g, labelKeys, name, description, unit) - return i +func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Int64Gauge, error) { + i := &Int64Gauge{ + g: gauge{ + gType: gaugeInt64, + }, + } + _, err := r.initGauge(&i.g, labelKeys, name, description, unit) + if err != nil { + return nil, err + } + return i, nil } -func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) *gauge { +func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) (*gauge, error) { val, ok := r.gauges.Load(name) if ok { existing := val.(*gauge) - if existing.isFloat != g.isFloat { - log.Panicf("Gauge with name %s already exists with a different type", name) + if existing.gType != g.gType { + return nil, errGaugeExistsWithDiffType } } g.keys = labelKeys @@ -69,7 +87,7 @@ func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, descript LabelKeys: labelKeys, } r.gauges.Store(name, g) - return g + return g, nil } // Read reads all gauges in this registry and returns their values as metrics. 
From 084f0af45890ac542efb278fdcb11eb19f78d664 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Tue, 12 Mar 2019 23:17:56 -0700 Subject: [PATCH 132/212] Exporter/Prometheus: Simplify histogram creation. (#1061) * Exporter/Prometheus: Simplify histogram creation. * Add a test on unordered bucket bounds. * Fix review comments. --- exporter/prometheus/prometheus.go | 21 +----- exporter/prometheus/prometheus_test.go | 89 +++++++++++++++++++++++++- 2 files changed, 90 insertions(+), 20 deletions(-) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 968409816..203bd38ad 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -21,7 +21,6 @@ import ( "fmt" "log" "net/http" - "sort" "sync" "go.opencensus.io/internal" @@ -213,25 +212,9 @@ func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) case *view.DistributionData: points := make(map[float64]uint64) // Histograms are cumulative in Prometheus. - // 1. Sort buckets in ascending order but, retain - // their indices for reverse lookup later on. - // TODO: If there is a guarantee that distribution elements - // are always sorted, then skip the sorting. - indicesMap := make(map[float64]int) - buckets := make([]float64, 0, len(v.Aggregation.Buckets)) - for i, b := range v.Aggregation.Buckets { - if _, ok := indicesMap[b]; !ok { - indicesMap[b] = i - buckets = append(buckets, b) - } - } - sort.Float64s(buckets) - - // 2. Now that the buckets are sorted by magnitude - // we can create cumulative indicesmap them back by reverse index + // Get cumulative bucket counts. 
cumCount := uint64(0) - for _, b := range buckets { - i := indicesMap[b] + for i, b := range v.Aggregation.Buckets { cumCount += uint64(data.CountPerBucket[i]) points[b] = cumCount } diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 67b05f020..4042e68b3 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -288,7 +288,94 @@ func TestCumulativenessFromHistograms(t *testing.T) { // We want the results that look like this: // 1: [0.25] | 1 + prev(i) = 1 + 0 = 1 // 5: [1.45] | 1 + prev(i) = 1 + 1 = 2 - // 10: [] | 1 + prev(i) = 1 + 2 = 3 + // 10: [7.69] | 1 + prev(i) = 1 + 2 = 3 + // 20: [12] | 1 + prev(i) = 1 + 3 = 4 + // 50: [] | 0 + prev(i) = 0 + 4 = 4 + // 100: [] | 0 + prev(i) = 0 + 4 = 4 + // 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7 + wantLines := []string{ + `cash_register_bucket{le="1"} 1`, + `cash_register_bucket{le="5"} 2`, + `cash_register_bucket{le="10"} 3`, + `cash_register_bucket{le="20"} 4`, + `cash_register_bucket{le="50"} 4`, + `cash_register_bucket{le="100"} 4`, + `cash_register_bucket{le="250"} 7`, + `cash_register_bucket{le="+Inf"} 7`, + `cash_register_sum 654.0799999999999`, // Summation of the input values + `cash_register_count 7`, + } + + ctx := context.Background() + ms := make([]stats.Measurement, 0, len(values)) + for _, value := range values { + mx := m.M(value) + ms = append(ms, mx) + } + stats.Record(ctx, ms...) 
+ + // Give the recorder ample time to process recording + <-time.After(10 * reportPeriod) + + cst := httptest.NewServer(exporter) + defer cst.Close() + res, err := http.Get(cst.URL) + if err != nil { + t.Fatalf("http.Get error: %v", err) + } + blob, err := ioutil.ReadAll(res.Body) + if err != nil { + t.Fatalf("Read body error: %v", err) + } + str := strings.Trim(string(blob), "\n") + lines := strings.Split(str, "\n") + nonComments := make([]string, 0, len(lines)) + for _, line := range lines { + if !strings.Contains(line, "#") { + nonComments = append(nonComments, line) + } + } + + got := strings.Join(nonComments, "\n") + want := strings.Join(wantLines, "\n") + if got != want { + t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want) + } +} + +func TestHistogramUnorderedBucketBounds(t *testing.T) { + exporter, err := NewExporter(Options{}) + if err != nil { + t.Fatalf("failed to create prometheus exporter: %v", err) + } + view.RegisterExporter(exporter) + reportPeriod := time.Millisecond + view.SetReportingPeriod(reportPeriod) + + m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless) + v := &view.View{ + Name: "cash/register", + Description: "this is a test", + Measure: m, + + // Intentionally used unordered and duplicated elements in the distribution + // to ensure unordered bucket bounds are handled. 
+ Aggregation: view.Distribution(10, 5, 1, 1, 50, 5, 20, 100, 250), + } + + if err := view.Register(v); err != nil { + t.Fatalf("Register error: %v", err) + } + defer view.Unregister(v) + + // Give the reporter ample time to process registration + <-time.After(10 * reportPeriod) + + values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12} + // We want the results that look like this: + // 1: [0.25] | 1 + prev(i) = 1 + 0 = 1 + // 5: [1.45] | 1 + prev(i) = 1 + 1 = 2 + // 10: [7.69] | 1 + prev(i) = 1 + 2 = 3 // 20: [12] | 1 + prev(i) = 1 + 3 = 4 // 50: [] | 0 + prev(i) = 0 + 4 = 4 // 100: [] | 0 + prev(i) = 0 + 4 = 4 From 05c3ab66b023eab41d1e5a62cb1edf6aefe55b76 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 13 Mar 2019 16:39:53 -0700 Subject: [PATCH 133/212] update link type definition to adhere to spec. (#1062) --- trace/basetypes.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/trace/basetypes.go b/trace/basetypes.go index ed59bfbde..0c54492a2 100644 --- a/trace/basetypes.go +++ b/trace/basetypes.go @@ -76,8 +76,8 @@ type LinkType int32 // LinkType values. const ( LinkTypeUnspecified LinkType = iota // The relationship of the two spans is unknown. - LinkTypeChild // The current span is a child of the linked span. - LinkTypeParent // The current span is the parent of the linked span. + LinkTypeChild // The linked span is a child of the current span. + LinkTypeParent // The linked span is the parent of the current span. ) // Link represents a reference from one span to another span. From 2656f29035855a0148283f6ea10490c176db0cd9 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 13 Mar 2019 18:10:11 -0700 Subject: [PATCH 134/212] Add support for derived gauges. (#1060) * Add support for derived gauges. * fix review comment and simplify interface. 
--- metric/error_const.go | 1 + metric/gauge.go | 76 +++++++++++++++++++++++ metric/gauge_test.go | 140 ++++++++++++++++++++++++++++++++++++++++-- metric/registry.go | 32 ++++++++++ 4 files changed, 245 insertions(+), 4 deletions(-) diff --git a/metric/error_const.go b/metric/error_const.go index a0dc8c0b9..c2bdf2b59 100644 --- a/metric/error_const.go +++ b/metric/error_const.go @@ -17,6 +17,7 @@ package metric import "errors" var ( + errInvalidParam = errors.New("invalid parameter") errGaugeExistsWithDiffType = errors.New("gauge with same name exists with a different type") errKeyValueMismatch = errors.New("must supply the same number of label values as keys used to construct this gauge") ) diff --git a/metric/gauge.go b/metric/gauge.go index 38ff88544..0f5dcba14 100644 --- a/metric/gauge.go +++ b/metric/gauge.go @@ -104,6 +104,16 @@ func (g *gauge) entryForValues(labelVals []metricdata.LabelValue, newEntry func( return entry, nil } +func (g *gauge) upsertEntry(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) error { + if len(labelVals) != len(g.keys) { + return errKeyValueMismatch + } + mapKey := g.mapKey(labelVals) + g.vals.Delete(mapKey) + g.vals.Store(mapKey, newEntry()) + return nil +} + // Float64Gauge represents a float64 value that can go up and down. // // Float64Gauge maintains a float64 value for each combination of of label values @@ -202,3 +212,69 @@ func (e *Int64GaugeEntry) Set(val int64) { func (e *Int64GaugeEntry) Add(val int64) { atomic.AddInt64(&e.val, val) } + +// Int64DerivedGauge represents int64 gauge value that is derived from an object. +// +// Int64DerivedGauge maintains objects for each combination of label values. +// These objects implement Int64DerivedGaugeInterface to read instantaneous value +// representing the object. 
+type Int64DerivedGauge struct { + g gauge +} + +type int64DerivedGaugeEntry struct { + fn func() int64 +} + +func (e *int64DerivedGaugeEntry) read(t time.Time) metricdata.Point { + return metricdata.NewInt64Point(t, e.fn()) +} + +// UpsertEntry inserts or updates a derived gauge entry for the given set of label values. +// The object for which this gauge entry is inserted or updated, must implement func() int64 +// +// It returns an error if +// 1. The number of label values supplied are not the same as the number +// of keys supplied when this gauge was created. +// 2. fn func() int64 is nil. +func (g *Int64DerivedGauge) UpsertEntry(fn func() int64, labelVals ...metricdata.LabelValue) error { + if fn == nil { + return errInvalidParam + } + return g.g.upsertEntry(labelVals, func() gaugeEntry { + return &int64DerivedGaugeEntry{fn} + }) +} + +// Float64DerivedGauge represents float64 gauge value that is derived from an object. +// +// Float64DerivedGauge maintains objects for each combination of label values. +// These objects implement Float64DerivedGaugeInterface to read instantaneous value +// representing the object. +type Float64DerivedGauge struct { + g gauge +} + +type float64DerivedGaugeEntry struct { + fn func() float64 +} + +func (e *float64DerivedGaugeEntry) read(t time.Time) metricdata.Point { + return metricdata.NewFloat64Point(t, e.fn()) +} + +// UpsertEntry inserts or updates a derived gauge entry for the given set of label values. +// The object for which this gauge entry is inserted or updated, must implement func() float64 +// +// It returns an error if +// 1. The number of label values supplied are not the same as the number +// of keys supplied when this gauge was created. +// 2. fn func() float64 is nil. 
+func (g *Float64DerivedGauge) UpsertEntry(fn func() float64, labelVals ...metricdata.LabelValue) error { + if fn == nil { + return errInvalidParam + } + return g.g.upsertEntry(labelVals, func() gaugeEntry { + return &float64DerivedGaugeEntry{fn} + }) +} diff --git a/metric/gauge_test.go b/metric/gauge_test.go index 05b9a7cbd..905e7f296 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -144,8 +144,16 @@ func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { func TestGaugeWithSameNameDiffType(t *testing.T) { r := NewRegistry() r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) - _, wantErr := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) - if wantErr == nil { + _, gotErr := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + } + _, gotErr = r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless) + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + } + _, gotErr = r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless) + if gotErr == nil { t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) } } @@ -153,8 +161,8 @@ func TestGaugeWithSameNameDiffType(t *testing.T) { func TestGaugeWithLabelMismatch(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless, "k1") - _, wantErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) - if wantErr == nil { + _, gotErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) + if gotErr == nil { t.Errorf("got: nil, want error: %v", errKeyValueMismatch) } } @@ -231,3 +239,127 @@ func canonicalize(ms []*metricdata.Metric) { }) } } + +type queueInt64 struct { + size int64 +} + +func (q *queueInt64) ToInt64() int64 { + return q.size +} + +func TestInt64DerivedGaugeEntry_Add(t *testing.T) { + r := NewRegistry() + q := &queueInt64{3} + g, _ := 
r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + err := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(3); got != want { + t.Errorf("value = %v, want %v", got, want) + } + q.size = 5 + ms = r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestInt64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { + r := NewRegistry() + g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestInt64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { + r := NewRegistry() + q := &queueInt64{3} + g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + gotErr := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1")) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestInt64DerivedGaugeEntry_Update(t *testing.T) { + r := NewRegistry() + q := &queueInt64{3} + q2 := &queueInt64{5} + g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + gotErr := g.UpsertEntry(q2.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr != nil { + t.Errorf("got: %v, want: nil", gotErr) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +type queueFloat64 struct { + size float64 +} + +func (q *queueFloat64) ToFloat64() float64 { + return q.size +} + +func TestFloat64DerivedGaugeEntry_Add(t *testing.T) 
{ + r := NewRegistry() + q := &queueFloat64{5.0} + g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + err := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } + q.size = 5 + ms = r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestFloat64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { + r := NewRegistry() + g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestFloat64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { + r := NewRegistry() + q := &queueFloat64{3} + g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + gotErr := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1")) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestFloat64DerivedGaugeEntry_Update(t *testing.T) { + r := NewRegistry() + q := &queueFloat64{3.0} + q2 := &queueFloat64{5.0} + g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + gotErr := g.UpsertEntry(q2.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr != nil { + t.Errorf("got: %v, want: nil", gotErr) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} diff --git a/metric/registry.go b/metric/registry.go index 
4c0300264..19cc8874a 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -70,6 +70,38 @@ func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, return i, nil } +// AddInt64DerivedGauge creates and adds a new derived int64-valued gauge to this registry. +// A derived gauge is convenient form of gauge where the object associated with the gauge +// provides its value by implementing func() int64. +func (r *Registry) AddInt64DerivedGauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Int64DerivedGauge, error) { + i := &Int64DerivedGauge{ + g: gauge{ + gType: derivedGaugeInt64, + }, + } + _, err := r.initGauge(&i.g, labelKeys, name, description, unit) + if err != nil { + return nil, err + } + return i, nil +} + +// AddFloat64DerivedGauge creates and adds a new derived float64-valued gauge to this registry. +// A derived gauge is convenient form of gauge where the object associated with the gauge +// provides its value by implementing func() float64. +func (r *Registry) AddFloat64DerivedGauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Float64DerivedGauge, error) { + f := &Float64DerivedGauge{ + g: gauge{ + gType: derivedGaugeFloat64, + }, + } + _, err := r.initGauge(&f.g, labelKeys, name, description, unit) + if err != nil { + return nil, err + } + return f, nil +} + func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) (*gauge, error) { val, ok := r.gauges.Load(name) if ok { From 4c51322deaf814004e48e541c32b0cf777d04f8d Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 14 Mar 2019 10:51:39 -0700 Subject: [PATCH 135/212] Fix misspell in the comments. 
(#1065) --- plugin/ochttp/client_test.go | 4 ++-- plugin/ochttp/propagation/b3/b3.go | 2 +- plugin/ochttp/server_test.go | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/plugin/ochttp/client_test.go b/plugin/ochttp/client_test.go index 97d15ab97..a7f303ece 100644 --- a/plugin/ochttp/client_test.go +++ b/plugin/ochttp/client_test.go @@ -118,7 +118,7 @@ func TestClientNew(t *testing.T) { case *view.DistributionData: count = data.Count default: - t.Errorf("Unkown data type: %v", data) + t.Errorf("Unknown data type: %v", data) continue } if got := count; got != reqCount { @@ -210,7 +210,7 @@ func TestClientOld(t *testing.T) { case *view.DistributionData: count = data.Count default: - t.Errorf("Unkown data type: %v", data) + t.Errorf("Unknown data type: %v", data) continue } if got := count; got != reqCount { diff --git a/plugin/ochttp/propagation/b3/b3.go b/plugin/ochttp/propagation/b3/b3.go index f777772ec..2f1c7f006 100644 --- a/plugin/ochttp/propagation/b3/b3.go +++ b/plugin/ochttp/propagation/b3/b3.go @@ -38,7 +38,7 @@ const ( // because there are additional fields not represented in the // OpenCensus span context. Spans created from the incoming // header will be the direct children of the client-side span. -// Similarly, reciever of the outgoing spans should use client-side +// Similarly, receiver of the outgoing spans should use client-side // span created by OpenCensus as the parent. 
type HTTPFormat struct{} diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 614530fb5..4cca1bcb5 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -110,7 +110,7 @@ func TestHandlerStatsCollection(t *testing.T) { count = int(data.Count) sum = data.Sum() default: - t.Errorf("Unkown data type: %v", data) + t.Errorf("Unknown data type: %v", data) continue } From 5439521ef327a8c875a33a20b4d3cf00a46e3819 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 14 Mar 2019 18:45:41 -0700 Subject: [PATCH 136/212] Minor cleanups in the tests. (#1066) --- plugin/ochttp/trace_test.go | 3 +-- resource/resource_test.go | 2 +- trace/tracestate/tracestate_test.go | 4 ++++ 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index 8cc9cf7b9..33df4d730 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -100,8 +100,7 @@ func TestTransport_RoundTrip_Race(t *testing.T) { } func TestTransport_RoundTrip(t *testing.T) { - ctx := context.Background() - ctx, parent := trace.StartSpan(ctx, "parent") + _, parent := trace.StartSpan(context.Background(), "parent") tests := []struct { name string parent *trace.Span diff --git a/resource/resource_test.go b/resource/resource_test.go index 77fef040e..0dd4d111a 100644 --- a/resource/resource_test.go +++ b/resource/resource_test.go @@ -146,7 +146,7 @@ func TestMultiDetector(t *testing.T) { } wantErr := errors.New("err1") - got, err = MultiDetector( + _, err = MultiDetector( func(context.Context) (*Resource, error) { return &Resource{ Type: "t1", diff --git a/trace/tracestate/tracestate_test.go b/trace/tracestate/tracestate_test.go index db76d9c3f..379ff4f70 100644 --- a/trace/tracestate/tracestate_test.go +++ b/trace/tracestate/tracestate_test.go @@ -152,6 +152,10 @@ func TestImplicitImmutableTracestate(t *testing.T) { // Check Tracestate does not have key3. 
checkKeyValue(t, tracestate, key3, "", testname) + // Check that we added the key3 in the entries + tracestate, err = New(nil, entries...) + checkError(t, tracestate, err, testname, "create failed") + checkKeyValue(t, tracestate, key3, value3, testname) } func TestKeyWithValidChar(t *testing.T) { From 3b8e2721f2c3c01fa1bf4a2e455874e7b8319cd7 Mon Sep 17 00:00:00 2001 From: Bogdan Drutu Date: Thu, 14 Mar 2019 18:58:20 -0700 Subject: [PATCH 137/212] Run command mod tiny (#1052) * Run command mod tiny * Update google.golang.org/api and grpc * Remove thrift vendor. * Use the right import for thrift. * More cleanups and update versions. * Updagrade golang. --- appveyor.yml | 1 + exporter/jaeger/agent.go | 2 +- .../jaeger/internal/gen-go/jaeger/agent.go | 2 +- .../collector-remote/collector-remote.go | 2 +- .../internal/gen-go/jaeger/jaeger-consts.go | 2 +- .../jaeger/internal/gen-go/jaeger/jaeger.go | 2 +- exporter/jaeger/jaeger.go | 2 +- go.mod | 27 +- go.sum | 122 +- vendor/git.apache.org/thrift.git/LICENSE | 239 --- vendor/git.apache.org/thrift.git/NOTICE | 5 - .../thrift.git/contrib/fb303/LICENSE | 16 - .../thrift.git/debian/copyright | 129 -- .../thrift.git/lib/dart/LICENSE_HEADER | 16 - .../lib/go/thrift/application_exception.go | 164 -- .../lib/go/thrift/binary_protocol.go | 515 ------- .../lib/go/thrift/buffered_transport.go | 92 -- .../thrift.git/lib/go/thrift/client.go | 85 -- .../lib/go/thrift/compact_protocol.go | 816 ---------- .../thrift.git/lib/go/thrift/context.go | 24 - .../lib/go/thrift/debug_protocol.go | 270 ---- .../thrift.git/lib/go/thrift/deserializer.go | 58 - .../thrift.git/lib/go/thrift/exception.go | 44 - .../thrift.git/lib/go/thrift/field.go | 79 - .../lib/go/thrift/framed_transport.go | 173 --- .../thrift.git/lib/go/thrift/http_client.go | 242 --- .../lib/go/thrift/http_transport.go | 63 - .../lib/go/thrift/iostream_transport.go | 214 --- .../thrift.git/lib/go/thrift/json_protocol.go | 584 ------- .../thrift.git/lib/go/thrift/memory_buffer.go 
| 80 - .../thrift.git/lib/go/thrift/messagetype.go | 31 - .../lib/go/thrift/multiplexed_protocol.go | 170 --- .../thrift.git/lib/go/thrift/numeric.go | 164 -- .../thrift.git/lib/go/thrift/pointerize.go | 50 - .../lib/go/thrift/processor_factory.go | 70 - .../thrift.git/lib/go/thrift/protocol.go | 179 --- .../lib/go/thrift/protocol_exception.go | 77 - .../lib/go/thrift/protocol_factory.go | 25 - .../lib/go/thrift/rich_transport.go | 68 - .../thrift.git/lib/go/thrift/serializer.go | 79 - .../thrift.git/lib/go/thrift/server.go | 35 - .../thrift.git/lib/go/thrift/server_socket.go | 134 -- .../lib/go/thrift/server_transport.go | 34 - .../lib/go/thrift/simple_json_protocol.go | 1338 ----------------- .../thrift.git/lib/go/thrift/simple_server.go | 227 --- .../thrift.git/lib/go/thrift/socket.go | 166 -- .../lib/go/thrift/ssl_server_socket.go | 112 -- .../thrift.git/lib/go/thrift/ssl_socket.go | 176 --- .../thrift.git/lib/go/thrift/transport.go | 70 - .../lib/go/thrift/transport_exception.go | 90 -- .../lib/go/thrift/transport_factory.go | 39 - .../thrift.git/lib/go/thrift/type.go | 69 - .../lib/go/thrift/zlib_transport.go | 132 -- .../git.apache.org/thrift.git/lib/hs/LICENSE | 202 --- .../thrift.git/tutorial/hs/LICENSE | 239 --- 55 files changed, 100 insertions(+), 7946 deletions(-) delete mode 100644 vendor/git.apache.org/thrift.git/LICENSE delete mode 100644 vendor/git.apache.org/thrift.git/NOTICE delete mode 100644 vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE delete mode 100644 vendor/git.apache.org/thrift.git/debian/copyright delete mode 100644 vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/client.go delete mode 100644 
vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/context.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/field.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/server.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go delete mode 100644 
vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport_exception.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/type.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go delete mode 100644 vendor/git.apache.org/thrift.git/lib/hs/LICENSE delete mode 100644 vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE diff --git a/appveyor.yml b/appveyor.yml index 98057888a..12bd7c4c7 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -12,6 +12,7 @@ environment: install: - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% + - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 - go version - go env diff --git a/exporter/jaeger/agent.go b/exporter/jaeger/agent.go index 362a571a0..4c8cd989f 100644 --- a/exporter/jaeger/agent.go +++ b/exporter/jaeger/agent.go @@ -19,7 +19,7 @@ import ( "io" "net" - "git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" ) diff --git a/exporter/jaeger/internal/gen-go/jaeger/agent.go b/exporter/jaeger/internal/gen-go/jaeger/agent.go index e89bf4994..88d2df576 100644 --- a/exporter/jaeger/internal/gen-go/jaeger/agent.go +++ b/exporter/jaeger/internal/gen-go/jaeger/agent.go @@ -8,7 +8,7 @@ import ( "context" "fmt" - 
"git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) diff --git a/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go b/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go index e367bc243..157559e0a 100755 --- a/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go +++ b/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go @@ -14,7 +14,7 @@ import ( "strconv" "strings" - "git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" ) diff --git a/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go index 80bced8dc..d2b0fa9a9 100644 --- a/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go +++ b/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go @@ -9,7 +9,7 @@ import ( "fmt" "reflect" - "git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) diff --git a/exporter/jaeger/internal/gen-go/jaeger/jaeger.go b/exporter/jaeger/internal/gen-go/jaeger/jaeger.go index 8d5d796ae..0f913633d 100644 --- a/exporter/jaeger/internal/gen-go/jaeger/jaeger.go +++ b/exporter/jaeger/internal/gen-go/jaeger/jaeger.go @@ -11,7 +11,7 @@ import ( "fmt" "reflect" - "git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" ) // (needed to ensure safety because of naive import list construction.) 
diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go index 322735a10..3574a0650 100644 --- a/exporter/jaeger/jaeger.go +++ b/exporter/jaeger/jaeger.go @@ -25,7 +25,7 @@ import ( "log" "net/http" - "git.apache.org/thrift.git/lib/go/thrift" + "github.com/apache/thrift/lib/go/thrift" gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" "go.opencensus.io/trace" "google.golang.org/api/support/bundler" diff --git a/go.mod b/go.mod index cad6a096a..b59bf6c13 100644 --- a/go.mod +++ b/go.mod @@ -1,30 +1,13 @@ module go.opencensus.io require ( - cloud.google.com/go v0.34.0 // indirect - git.apache.org/thrift.git v0.12.0 - github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 - github.com/ghodss/yaml v1.0.0 // indirect - github.com/golang/mock v1.2.0 // indirect + github.com/apache/thrift v0.12.0 github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 - github.com/grpc-ecosystem/grpc-gateway v1.6.2 // indirect github.com/hashicorp/golang-lru v0.5.0 - github.com/matttproud/golang_protobuf_extensions v1.0.1 - github.com/openzipkin/zipkin-go v0.1.3 + github.com/openzipkin/zipkin-go v0.1.6 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 - golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1 // indirect - golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 - golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890 // indirect - golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 - golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 - golang.org/x/text v0.3.0 - golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e // indirect - google.golang.org/api v0.0.0-20181220000619-583d854617af - google.golang.org/appengine v1.3.0 // indirect - google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb - google.golang.org/grpc v1.17.0 - gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect - gopkg.in/yaml.v2 v2.2.2 // indirect - honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3 // indirect + golang.org/x/net 
v0.0.0-20190311183353-d8887717615a + google.golang.org/api v0.2.0 + google.golang.org/grpc v1.19.0 ) diff --git a/go.sum b/go.sum index 3e6193e2b..300602f17 100644 --- a/go.sum +++ b/go.sum @@ -1,96 +1,150 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= +github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= +github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/ghodss/yaml v1.0.0 h1:wQHKEahhL6wmXdzwWG11gIVCkOv05bNOh+Rxn0yngAk= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= +github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= +github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= +github.com/fsnotify/fsnotify v1.4.7/go.mod 
h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= -github.com/golang/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/grpc-ecosystem/grpc-gateway v1.5.0 h1:WcmKMm43DR7RdtlkEXQJyo5ws8iTp98CyhCCbOHMvNI= -github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= +github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= +github.com/gorilla/context v1.1.1/go.mod 
h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= +github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= +github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/openzipkin/zipkin-go v0.1.1 h1:A/ADD6HaPnAKj3yS7HjGHRK77qi41Hi0DirOOIQAeIw= -github.com/openzipkin/zipkin-go v0.1.1/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/openzipkin/zipkin-go v0.1.3 h1:36hTtUTQR/vPX7YVJo2PYexSbHdAJiAkDrjuXw/YlYQ= github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= -github.com/prometheus/client_golang v0.8.0 
h1:1921Yw9Gc3iSc4VQh3PIoOqgPCZS7G/4xQNVUp8Mda8= -github.com/prometheus/client_golang v0.8.0/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.2 h1:awm861/B8OKDd2I/6o1dy3ra4BamzKhYOiGItCeZ740= -github.com/prometheus/client_golang v0.9.2/go.mod h1:OsXs2jCmiKlQ1lTBmv21f2mNfw4xf/QclQDMrYNZzcM= +github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo= +github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= +github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= +github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e h1:n/3MEhJQjQxrOUCzh1Y3Re6aJUUWRp2M9+Oc3eVn/54= -github.com/prometheus/common v0.0.0-20180801064454-c7de2306084e/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181126121408-4724e9255275/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= -github.com/prometheus/common v0.0.0-20181218105931-67670fe90761 h1:z6tvbDJ5OLJ48FFmnksv04a78maSTRBUIhkdHYV5Y98= -github.com/prometheus/common v0.0.0-20181218105931-67670fe90761/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= 
-github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273 h1:agujYaXJSxSo18YNX3jzl+4G6Bstwt+kqv47GS12uL0= -github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a h1:9a8MnZMP0X2nLJdBg+pBmGgkJlSaKC2KaQmTCk1XDtE= -github.com/prometheus/procfs v0.0.0-20181204211112-1dc9a6cbc91a/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= +github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= +github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= +github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod 
h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180821023952-922f4815f713/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd h1:nTDtHvHSdCn1m6ITfMRqtOd/9+7a3s8RBNOZ3eYZzJA= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181217023233-e147a9138326 h1:iCzOf0xz39Tstp+Tu/WwyGjUXCk34QhQORRxBeXXTA4= -golang.org/x/net v0.0.0-20181217023233-e147a9138326/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= +golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net 
v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sys v0.0.0-20180821140842-3b58ed4ad339/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= +golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e h1:o3PsSEY8E4eXWkXrIP9YJALUkVZqzHJT5DOasTyn8Vs= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod 
h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 h1:MXtOG7w2ND9qNCUZSDBGll/SpVIq7ftozR9I8/JGBHY= golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -google.golang.org/api v0.0.0-20180818000503-e21acd801f91/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf h1:rjxqQmxjyqerRKEj+tZW+MCm4LgpFXu18bsEoCMgDsk= -google.golang.org/api v0.0.0-20180910000450-7ca32eb868bf/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= +google.golang.org/api v0.2.0 
h1:B5VXkdjt7K2Gm6fGBC9C9a1OAKJDT95cTqwet+2zib0= +google.golang.org/api v0.2.0/go.mod h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b h1:lohp5blsw53GBXtLyLNaTXPXS9pJ1tiTw61ZHUoE9Qw= -google.golang.org/genproto v0.0.0-20180831171423-11092d34479b/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb h1:dQshZyyJ5W/Xk8myF4GKBak1pZW6EywJuQ8+44EQhGA= google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= -google.golang.org/grpc v1.14.0 h1:ArxJuB1NWfPY6r9Gp9gqwplT0Ge7nqv9msgu03lHLmo= -google.golang.org/grpc v1.14.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= +google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= -google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= 
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/git.apache.org/thrift.git/LICENSE b/vendor/git.apache.org/thrift.git/LICENSE deleted file mode 100644 index 3b6d7d74c..000000000 --- a/vendor/git.apache.org/thrift.git/LICENSE +++ /dev/null @@ -1,239 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. 
- -*/ -(By Douglas Crockford ) --------------------------------------------------- diff --git a/vendor/git.apache.org/thrift.git/NOTICE b/vendor/git.apache.org/thrift.git/NOTICE deleted file mode 100644 index 902dc8d31..000000000 --- a/vendor/git.apache.org/thrift.git/NOTICE +++ /dev/null @@ -1,5 +0,0 @@ -Apache Thrift -Copyright 2006-2017 The Apache Software Foundation. - -This product includes software developed at -The Apache Software Foundation (http://www.apache.org/). diff --git a/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE b/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE deleted file mode 100644 index 4eacb6431..000000000 --- a/vendor/git.apache.org/thrift.git/contrib/fb303/LICENSE +++ /dev/null @@ -1,16 +0,0 @@ -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. diff --git a/vendor/git.apache.org/thrift.git/debian/copyright b/vendor/git.apache.org/thrift.git/debian/copyright deleted file mode 100644 index 850643c9a..000000000 --- a/vendor/git.apache.org/thrift.git/debian/copyright +++ /dev/null @@ -1,129 +0,0 @@ -This package was debianized by Thrift Developer's . - - -This package and the Debian packaging is licensed under the Apache License, -see `/usr/share/common-licenses/Apache-2.0'. 
- -The following information was copied from Apache Thrift LICENSE file. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. - - --------------------------------------------------- -The following files contain some portions of code contributed under -the Thrift Software License (see doc/old-thrift-license.txt), and relicensed -under the Apache 2.0 License: - - compiler/cpp/Makefile.am - compiler/cpp/src/generate/t_cocoa_generator.cc - compiler/cpp/src/generate/t_cpp_generator.cc - compiler/cpp/src/generate/t_csharp_generator.cc - compiler/cpp/src/generate/t_erl_generator.cc - compiler/cpp/src/generate/t_hs_generator.cc - compiler/cpp/src/generate/t_java_generator.cc - compiler/cpp/src/generate/t_ocaml_generator.cc - compiler/cpp/src/generate/t_perl_generator.cc - compiler/cpp/src/generate/t_php_generator.cc - compiler/cpp/src/generate/t_py_generator.cc - compiler/cpp/src/generate/t_rb_generator.cc - compiler/cpp/src/generate/t_st_generator.cc - compiler/cpp/src/generate/t_xsd_generator.cc - compiler/cpp/src/main.cc - compiler/cpp/src/parse/t_field.h - compiler/cpp/src/parse/t_program.h - compiler/cpp/src/platform.h - compiler/cpp/src/thriftl.ll - compiler/cpp/src/thrifty.yy - lib/csharp/src/Protocol/TBinaryProtocol.cs - lib/csharp/src/Protocol/TField.cs - lib/csharp/src/Protocol/TList.cs - lib/csharp/src/Protocol/TMap.cs - lib/csharp/src/Protocol/TMessage.cs - lib/csharp/src/Protocol/TMessageType.cs - lib/csharp/src/Protocol/TProtocol.cs - 
lib/csharp/src/Protocol/TProtocolException.cs - lib/csharp/src/Protocol/TProtocolFactory.cs - lib/csharp/src/Protocol/TProtocolUtil.cs - lib/csharp/src/Protocol/TSet.cs - lib/csharp/src/Protocol/TStruct.cs - lib/csharp/src/Protocol/TType.cs - lib/csharp/src/Server/TServer.cs - lib/csharp/src/Server/TSimpleServer.cs - lib/csharp/src/Server/TThreadPoolServer.cs - lib/csharp/src/TApplicationException.cs - lib/csharp/src/Thrift.csproj - lib/csharp/src/Thrift.sln - lib/csharp/src/TProcessor.cs - lib/csharp/src/Transport/TServerSocket.cs - lib/csharp/src/Transport/TServerTransport.cs - lib/csharp/src/Transport/TSocket.cs - lib/csharp/src/Transport/TStreamTransport.cs - lib/csharp/src/Transport/TTransport.cs - lib/csharp/src/Transport/TTransportException.cs - lib/csharp/src/Transport/TTransportFactory.cs - lib/csharp/ThriftMSBuildTask/Properties/AssemblyInfo.cs - lib/csharp/ThriftMSBuildTask/ThriftBuild.cs - lib/csharp/ThriftMSBuildTask/ThriftMSBuildTask.csproj - lib/rb/lib/thrift.rb - lib/st/README - lib/st/thrift.st - test/OptionalRequiredTest.cpp - test/OptionalRequiredTest.thrift - test/ThriftTest.thrift - --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the compiler/cpp/src/md5.[ch] components: - -/* - Copyright (C) 1999, 2000, 2002 Aladdin Enterprises. All rights reserved. - - This software is provided 'as-is', without any express or implied - warranty. In no event will the authors be held liable for any damages - arising from the use of this software. 
- - Permission is granted to anyone to use this software for any purpose, - including commercial applications, and to alter it and redistribute it - freely, subject to the following restrictions: - - 1. The origin of this software must not be misrepresented; you must not - claim that you wrote the original software. If you use this software - in a product, an acknowledgment in the product documentation would be - appreciated but is not required. - 2. Altered source versions must be plainly marked as such, and must not be - misrepresented as being the original software. - 3. This notice may not be removed or altered from any source distribution. - - L. Peter Deutsch - ghost@aladdin.com - - */ - ---------------------------------------------------- -For the lib/rb/setup.rb: Copyright (c) 2000-2005 Minero Aoki, -lib/ocaml/OCamlMakefile and lib/ocaml/README-OCamlMakefile components: - Copyright (C) 1999 - 2007 Markus Mottl - -Licensed under the terms of the GNU Lesser General Public License 2.1 -(see doc/lgpl-2.1.txt for the full terms of this license) diff --git a/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER b/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER deleted file mode 100644 index 4eacb6431..000000000 --- a/vendor/git.apache.org/thrift.git/lib/dart/LICENSE_HEADER +++ /dev/null @@ -1,16 +0,0 @@ -Licensed to the Apache Software Foundation (ASF) under one -or more contributor license agreements. See the NOTICE file -distributed with this work for additional information -regarding copyright ownership. The ASF licenses this file -to you under the Apache License, Version 2.0 (the -"License"); you may not use this file except in compliance -with the License. 
You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, -software distributed under the License is distributed on an -"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -KIND, either express or implied. See the License for the -specific language governing permissions and limitations -under the License. diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go deleted file mode 100644 index b9d7eedcd..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/application_exception.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -const ( - UNKNOWN_APPLICATION_EXCEPTION = 0 - UNKNOWN_METHOD = 1 - INVALID_MESSAGE_TYPE_EXCEPTION = 2 - WRONG_METHOD_NAME = 3 - BAD_SEQUENCE_ID = 4 - MISSING_RESULT = 5 - INTERNAL_ERROR = 6 - PROTOCOL_ERROR = 7 -) - -var defaultApplicationExceptionMessage = map[int32]string{ - UNKNOWN_APPLICATION_EXCEPTION: "unknown application exception", - UNKNOWN_METHOD: "unknown method", - INVALID_MESSAGE_TYPE_EXCEPTION: "invalid message type", - WRONG_METHOD_NAME: "wrong method name", - BAD_SEQUENCE_ID: "bad sequence ID", - MISSING_RESULT: "missing result", - INTERNAL_ERROR: "unknown internal error", - PROTOCOL_ERROR: "unknown protocol error", -} - -// Application level Thrift exception -type TApplicationException interface { - TException - TypeId() int32 - Read(iprot TProtocol) error - Write(oprot TProtocol) error -} - -type tApplicationException struct { - message string - type_ int32 -} - -func (e tApplicationException) Error() string { - if e.message != "" { - return e.message - } - return defaultApplicationExceptionMessage[e.type_] -} - -func NewTApplicationException(type_ int32, message string) TApplicationException { - return &tApplicationException{message, type_} -} - -func (p *tApplicationException) TypeId() int32 { - return p.type_ -} - -func (p *tApplicationException) Read(iprot TProtocol) error { - // TODO: this should really be generated by the compiler - _, err := iprot.ReadStructBegin() - if err != nil { - return err - } - - message := "" - type_ := int32(UNKNOWN_APPLICATION_EXCEPTION) - - for { - _, ttype, id, err := iprot.ReadFieldBegin() - if err != nil { - return err - } - if ttype == STOP { - break - } - switch id { - case 1: - if ttype == STRING { - if message, err = iprot.ReadString(); err != nil { - return err - } - } else { - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return err - } - } - case 2: - if ttype == I32 { - if type_, err = iprot.ReadI32(); err != nil { - return err - } - } else { - if err = 
SkipDefaultDepth(iprot, ttype); err != nil { - return err - } - } - default: - if err = SkipDefaultDepth(iprot, ttype); err != nil { - return err - } - } - if err = iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return err - } - - p.message = message - p.type_ = type_ - - return nil -} - -func (p *tApplicationException) Write(oprot TProtocol) (err error) { - err = oprot.WriteStructBegin("TApplicationException") - if len(p.Error()) > 0 { - err = oprot.WriteFieldBegin("message", STRING, 1) - if err != nil { - return - } - err = oprot.WriteString(p.Error()) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - } - err = oprot.WriteFieldBegin("type", I32, 2) - if err != nil { - return - } - err = oprot.WriteI32(p.type_) - if err != nil { - return - } - err = oprot.WriteFieldEnd() - if err != nil { - return - } - err = oprot.WriteFieldStop() - if err != nil { - return - } - err = oprot.WriteStructEnd() - return -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go deleted file mode 100644 index de0f6a7a5..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/binary_protocol.go +++ /dev/null @@ -1,515 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" - "encoding/binary" - "errors" - "fmt" - "io" - "math" -) - -type TBinaryProtocol struct { - trans TRichTransport - origTransport TTransport - reader io.Reader - writer io.Writer - strictRead bool - strictWrite bool - buffer [64]byte -} - -type TBinaryProtocolFactory struct { - strictRead bool - strictWrite bool -} - -func NewTBinaryProtocolTransport(t TTransport) *TBinaryProtocol { - return NewTBinaryProtocol(t, false, true) -} - -func NewTBinaryProtocol(t TTransport, strictRead, strictWrite bool) *TBinaryProtocol { - p := &TBinaryProtocol{origTransport: t, strictRead: strictRead, strictWrite: strictWrite} - if et, ok := t.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(t) - } - p.reader = p.trans - p.writer = p.trans - return p -} - -func NewTBinaryProtocolFactoryDefault() *TBinaryProtocolFactory { - return NewTBinaryProtocolFactory(false, true) -} - -func NewTBinaryProtocolFactory(strictRead, strictWrite bool) *TBinaryProtocolFactory { - return &TBinaryProtocolFactory{strictRead: strictRead, strictWrite: strictWrite} -} - -func (p *TBinaryProtocolFactory) GetProtocol(t TTransport) TProtocol { - return NewTBinaryProtocol(t, p.strictRead, p.strictWrite) -} - -/** - * Writing Methods - */ - -func (p *TBinaryProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - if p.strictWrite { - version := uint32(VERSION_1) | uint32(typeId) - e := p.WriteI32(int32(version)) - if e != nil { - return e - } - e = p.WriteString(name) - if e != nil { 
- return e - } - e = p.WriteI32(seqId) - return e - } else { - e := p.WriteString(name) - if e != nil { - return e - } - e = p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI32(seqId) - return e - } - return nil -} - -func (p *TBinaryProtocol) WriteMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteStructBegin(name string) error { - return nil -} - -func (p *TBinaryProtocol) WriteStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - e := p.WriteByte(int8(typeId)) - if e != nil { - return e - } - e = p.WriteI16(id) - return e -} - -func (p *TBinaryProtocol) WriteFieldEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteFieldStop() error { - e := p.WriteByte(STOP) - return e -} - -func (p *TBinaryProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - e := p.WriteByte(int8(keyType)) - if e != nil { - return e - } - e = p.WriteByte(int8(valueType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteListBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteListEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteSetBegin(elemType TType, size int) error { - e := p.WriteByte(int8(elemType)) - if e != nil { - return e - } - e = p.WriteI32(int32(size)) - return e -} - -func (p *TBinaryProtocol) WriteSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) WriteBool(value bool) error { - if value { - return p.WriteByte(1) - } - return p.WriteByte(0) -} - -func (p *TBinaryProtocol) WriteByte(value int8) error { - e := p.trans.WriteByte(byte(value)) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI16(value int16) error { - v := p.buffer[0:2] - 
binary.BigEndian.PutUint16(v, uint16(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI32(value int32) error { - v := p.buffer[0:4] - binary.BigEndian.PutUint32(v, uint32(value)) - _, e := p.writer.Write(v) - return NewTProtocolException(e) -} - -func (p *TBinaryProtocol) WriteI64(value int64) error { - v := p.buffer[0:8] - binary.BigEndian.PutUint64(v, uint64(value)) - _, err := p.writer.Write(v) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteDouble(value float64) error { - return p.WriteI64(int64(math.Float64bits(value))) -} - -func (p *TBinaryProtocol) WriteString(value string) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.trans.WriteString(value) - return NewTProtocolException(err) -} - -func (p *TBinaryProtocol) WriteBinary(value []byte) error { - e := p.WriteI32(int32(len(value))) - if e != nil { - return e - } - _, err := p.writer.Write(value) - return NewTProtocolException(err) -} - -/** - * Reading methods - */ - -func (p *TBinaryProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - size, e := p.ReadI32() - if e != nil { - return "", typeId, 0, NewTProtocolException(e) - } - if size < 0 { - typeId = TMessageType(size & 0x0ff) - version := int64(int64(size) & VERSION_MASK) - if version != VERSION_1 { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Bad version in ReadMessageBegin")) - } - name, e = p.ReadString() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - seqId, e = p.ReadI32() - if e != nil { - return name, typeId, seqId, NewTProtocolException(e) - } - return name, typeId, seqId, nil - } - if p.strictRead { - return name, typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, fmt.Errorf("Missing version in ReadMessageBegin")) - } - name, e2 := p.readStringBody(size) - if e2 != nil { - return name, typeId, seqId, e2 - } - 
b, e3 := p.ReadByte() - if e3 != nil { - return name, typeId, seqId, e3 - } - typeId = TMessageType(b) - seqId, e4 := p.ReadI32() - if e4 != nil { - return name, typeId, seqId, e4 - } - return name, typeId, seqId, nil -} - -func (p *TBinaryProtocol) ReadMessageEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadStructBegin() (name string, err error) { - return -} - -func (p *TBinaryProtocol) ReadStructEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadFieldBegin() (name string, typeId TType, seqId int16, err error) { - t, err := p.ReadByte() - typeId = TType(t) - if err != nil { - return name, typeId, seqId, err - } - if t != STOP { - seqId, err = p.ReadI16() - } - return name, typeId, seqId, err -} - -func (p *TBinaryProtocol) ReadFieldEnd() error { - return nil -} - -var invalidDataLength = NewTProtocolExceptionWithType(INVALID_DATA, errors.New("Invalid data length")) - -func (p *TBinaryProtocol) ReadMapBegin() (kType, vType TType, size int, err error) { - k, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - kType = TType(k) - v, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - vType = TType(v) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return kType, vType, size, nil -} - -func (p *TBinaryProtocol) ReadMapEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadListBegin() (elemType TType, size int, err error) { - b, e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - return -} - -func (p *TBinaryProtocol) ReadListEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadSetBegin() (elemType TType, size int, err error) { - b, 
e := p.ReadByte() - if e != nil { - err = NewTProtocolException(e) - return - } - elemType = TType(b) - size32, e := p.ReadI32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - return elemType, size, nil -} - -func (p *TBinaryProtocol) ReadSetEnd() error { - return nil -} - -func (p *TBinaryProtocol) ReadBool() (bool, error) { - b, e := p.ReadByte() - v := true - if b != 1 { - v = false - } - return v, e -} - -func (p *TBinaryProtocol) ReadByte() (int8, error) { - v, err := p.trans.ReadByte() - return int8(v), err -} - -func (p *TBinaryProtocol) ReadI16() (value int16, err error) { - buf := p.buffer[0:2] - err = p.readAll(buf) - value = int16(binary.BigEndian.Uint16(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI32() (value int32, err error) { - buf := p.buffer[0:4] - err = p.readAll(buf) - value = int32(binary.BigEndian.Uint32(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadI64() (value int64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = int64(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadDouble() (value float64, err error) { - buf := p.buffer[0:8] - err = p.readAll(buf) - value = math.Float64frombits(binary.BigEndian.Uint64(buf)) - return value, err -} - -func (p *TBinaryProtocol) ReadString() (value string, err error) { - size, e := p.ReadI32() - if e != nil { - return "", e - } - if size < 0 { - err = invalidDataLength - return - } - - return p.readStringBody(size) -} - -func (p *TBinaryProtocol) ReadBinary() ([]byte, error) { - size, e := p.ReadI32() - if e != nil { - return nil, e - } - if size < 0 { - return nil, invalidDataLength - } - if uint64(size) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - isize := int(size) - buf := make([]byte, isize) - _, err := io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(err) -} - -func (p 
*TBinaryProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TBinaryProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TBinaryProtocol) Transport() TTransport { - return p.origTransport -} - -func (p *TBinaryProtocol) readAll(buf []byte) error { - _, err := io.ReadFull(p.reader, buf) - return NewTProtocolException(err) -} - -const readLimit = 32768 - -func (p *TBinaryProtocol) readStringBody(size int32) (value string, err error) { - if size < 0 { - return "", nil - } - if uint64(size) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - var ( - buf bytes.Buffer - e error - b []byte - ) - - switch { - case int(size) <= len(p.buffer): - b = p.buffer[:size] // avoids allocation for small reads - case int(size) < readLimit: - b = make([]byte, size) - default: - b = make([]byte, readLimit) - } - - for size > 0 { - _, e = io.ReadFull(p.trans, b) - buf.Write(b) - if e != nil { - break - } - size -= readLimit - if size < readLimit && size > 0 { - b = b[:size] - } - } - return buf.String(), NewTProtocolException(e) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go deleted file mode 100644 index 96702061b..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/buffered_transport.go +++ /dev/null @@ -1,92 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "context" -) - -type TBufferedTransportFactory struct { - size int -} - -type TBufferedTransport struct { - bufio.ReadWriter - tp TTransport -} - -func (p *TBufferedTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return NewTBufferedTransport(trans, p.size), nil -} - -func NewTBufferedTransportFactory(bufferSize int) *TBufferedTransportFactory { - return &TBufferedTransportFactory{size: bufferSize} -} - -func NewTBufferedTransport(trans TTransport, bufferSize int) *TBufferedTransport { - return &TBufferedTransport{ - ReadWriter: bufio.ReadWriter{ - Reader: bufio.NewReaderSize(trans, bufferSize), - Writer: bufio.NewWriterSize(trans, bufferSize), - }, - tp: trans, - } -} - -func (p *TBufferedTransport) IsOpen() bool { - return p.tp.IsOpen() -} - -func (p *TBufferedTransport) Open() (err error) { - return p.tp.Open() -} - -func (p *TBufferedTransport) Close() (err error) { - return p.tp.Close() -} - -func (p *TBufferedTransport) Read(b []byte) (int, error) { - n, err := p.ReadWriter.Read(b) - if err != nil { - p.ReadWriter.Reader.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Write(b []byte) (int, error) { - n, err := p.ReadWriter.Write(b) - if err != nil { - p.ReadWriter.Writer.Reset(p.tp) - } - return n, err -} - -func (p *TBufferedTransport) Flush(ctx context.Context) error { - if err := p.ReadWriter.Flush(); err != nil { - p.ReadWriter.Writer.Reset(p.tp) - return err - } - return p.tp.Flush(ctx) -} - -func (p *TBufferedTransport) RemainingBytes() (num_bytes 
uint64) { - return p.tp.RemainingBytes() -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go deleted file mode 100644 index 28791ccd0..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/client.go +++ /dev/null @@ -1,85 +0,0 @@ -package thrift - -import ( - "context" - "fmt" -) - -type TClient interface { - Call(ctx context.Context, method string, args, result TStruct) error -} - -type TStandardClient struct { - seqId int32 - iprot, oprot TProtocol -} - -// TStandardClient implements TClient, and uses the standard message format for Thrift. -// It is not safe for concurrent use. -func NewTStandardClient(inputProtocol, outputProtocol TProtocol) *TStandardClient { - return &TStandardClient{ - iprot: inputProtocol, - oprot: outputProtocol, - } -} - -func (p *TStandardClient) Send(ctx context.Context, oprot TProtocol, seqId int32, method string, args TStruct) error { - if err := oprot.WriteMessageBegin(method, CALL, seqId); err != nil { - return err - } - if err := args.Write(oprot); err != nil { - return err - } - if err := oprot.WriteMessageEnd(); err != nil { - return err - } - return oprot.Flush(ctx) -} - -func (p *TStandardClient) Recv(iprot TProtocol, seqId int32, method string, result TStruct) error { - rMethod, rTypeId, rSeqId, err := iprot.ReadMessageBegin() - if err != nil { - return err - } - - if method != rMethod { - return NewTApplicationException(WRONG_METHOD_NAME, fmt.Sprintf("%s: wrong method name", method)) - } else if seqId != rSeqId { - return NewTApplicationException(BAD_SEQUENCE_ID, fmt.Sprintf("%s: out of order sequence response", method)) - } else if rTypeId == EXCEPTION { - var exception tApplicationException - if err := exception.Read(iprot); err != nil { - return err - } - - if err := iprot.ReadMessageEnd(); err != nil { - return err - } - - return &exception - } else if rTypeId != REPLY { - return 
NewTApplicationException(INVALID_MESSAGE_TYPE_EXCEPTION, fmt.Sprintf("%s: invalid message type", method)) - } - - if err := result.Read(iprot); err != nil { - return err - } - - return iprot.ReadMessageEnd() -} - -func (p *TStandardClient) Call(ctx context.Context, method string, args, result TStruct) error { - p.seqId++ - seqId := p.seqId - - if err := p.Send(ctx, p.oprot, seqId, method, args); err != nil { - return err - } - - // method is oneway - if result == nil { - return nil - } - - return p.Recv(p.iprot, seqId, method, result) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go deleted file mode 100644 index 66fbf5c33..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/compact_protocol.go +++ /dev/null @@ -1,816 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "encoding/binary" - "fmt" - "io" - "math" -) - -const ( - COMPACT_PROTOCOL_ID = 0x082 - COMPACT_VERSION = 1 - COMPACT_VERSION_MASK = 0x1f - COMPACT_TYPE_MASK = 0x0E0 - COMPACT_TYPE_BITS = 0x07 - COMPACT_TYPE_SHIFT_AMOUNT = 5 -) - -type tCompactType byte - -const ( - COMPACT_BOOLEAN_TRUE = 0x01 - COMPACT_BOOLEAN_FALSE = 0x02 - COMPACT_BYTE = 0x03 - COMPACT_I16 = 0x04 - COMPACT_I32 = 0x05 - COMPACT_I64 = 0x06 - COMPACT_DOUBLE = 0x07 - COMPACT_BINARY = 0x08 - COMPACT_LIST = 0x09 - COMPACT_SET = 0x0A - COMPACT_MAP = 0x0B - COMPACT_STRUCT = 0x0C -) - -var ( - ttypeToCompactType map[TType]tCompactType -) - -func init() { - ttypeToCompactType = map[TType]tCompactType{ - STOP: STOP, - BOOL: COMPACT_BOOLEAN_TRUE, - BYTE: COMPACT_BYTE, - I16: COMPACT_I16, - I32: COMPACT_I32, - I64: COMPACT_I64, - DOUBLE: COMPACT_DOUBLE, - STRING: COMPACT_BINARY, - LIST: COMPACT_LIST, - SET: COMPACT_SET, - MAP: COMPACT_MAP, - STRUCT: COMPACT_STRUCT, - } -} - -type TCompactProtocolFactory struct{} - -func NewTCompactProtocolFactory() *TCompactProtocolFactory { - return &TCompactProtocolFactory{} -} - -func (p *TCompactProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTCompactProtocol(trans) -} - -type TCompactProtocol struct { - trans TRichTransport - origTransport TTransport - - // Used to keep track of the last field for the current and previous structs, - // so we can do the delta stuff. - lastField []int - lastFieldId int - - // If we encounter a boolean field begin, save the TField here so it can - // have the value incorporated. - booleanFieldName string - booleanFieldId int16 - booleanFieldPending bool - - // If we read a field header, and it's a boolean field, save the boolean - // value here so that readBool can use it. 
- boolValue bool - boolValueIsNotNull bool - buffer [64]byte -} - -// Create a TCompactProtocol given a TTransport -func NewTCompactProtocol(trans TTransport) *TCompactProtocol { - p := &TCompactProtocol{origTransport: trans, lastField: []int{}} - if et, ok := trans.(TRichTransport); ok { - p.trans = et - } else { - p.trans = NewTRichTransport(trans) - } - - return p - -} - -// -// Public Writing methods. -// - -// Write a message header to the wire. Compact Protocol messages contain the -// protocol version so we can migrate forwards in the future if need be. -func (p *TCompactProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - err := p.writeByteDirect(COMPACT_PROTOCOL_ID) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect((COMPACT_VERSION & COMPACT_VERSION_MASK) | ((byte(typeId) << COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_MASK)) - if err != nil { - return NewTProtocolException(err) - } - _, err = p.writeVarint32(seqid) - if err != nil { - return NewTProtocolException(err) - } - e := p.WriteString(name) - return e - -} - -func (p *TCompactProtocol) WriteMessageEnd() error { return nil } - -// Write a struct begin. This doesn't actually put anything on the wire. We -// use it as an opportunity to put special placeholder markers on the field -// stack so we can get the field id deltas correct. -func (p *TCompactProtocol) WriteStructBegin(name string) error { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return nil -} - -// Write a struct end. This doesn't actually put anything on the wire. We use -// this as an opportunity to pop the last field from the current struct off -// of the field stack. 
-func (p *TCompactProtocol) WriteStructEnd() error { - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -func (p *TCompactProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if typeId == BOOL { - // we want to possibly include the value, so we'll wait. - p.booleanFieldName, p.booleanFieldId, p.booleanFieldPending = name, id, true - return nil - } - _, err := p.writeFieldBeginInternal(name, typeId, id, 0xFF) - return NewTProtocolException(err) -} - -// The workhorse of writeFieldBegin. It has the option of doing a -// 'type override' of the type header. This is used specifically in the -// boolean field case. -func (p *TCompactProtocol) writeFieldBeginInternal(name string, typeId TType, id int16, typeOverride byte) (int, error) { - // short lastField = lastField_.pop(); - - // if there's a type override, use that. - var typeToWrite byte - if typeOverride == 0xFF { - typeToWrite = byte(p.getCompactType(typeId)) - } else { - typeToWrite = typeOverride - } - // check if we can use delta encoding for the field id - fieldId := int(id) - written := 0 - if fieldId > p.lastFieldId && fieldId-p.lastFieldId <= 15 { - // write them together - err := p.writeByteDirect(byte((fieldId-p.lastFieldId)<<4) | typeToWrite) - if err != nil { - return 0, err - } - } else { - // write them separate - err := p.writeByteDirect(typeToWrite) - if err != nil { - return 0, err - } - err = p.WriteI16(id) - written = 1 + 2 - if err != nil { - return 0, err - } - } - - p.lastFieldId = fieldId - // p.lastField.Push(field.id); - return written, nil -} - -func (p *TCompactProtocol) WriteFieldEnd() error { return nil } - -func (p *TCompactProtocol) WriteFieldStop() error { - err := p.writeByteDirect(STOP) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if size == 0 { - err := p.writeByteDirect(0) - return 
NewTProtocolException(err) - } - _, err := p.writeVarint32(int32(size)) - if err != nil { - return NewTProtocolException(err) - } - err = p.writeByteDirect(byte(p.getCompactType(keyType))<<4 | byte(p.getCompactType(valueType))) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteMapEnd() error { return nil } - -// Write a list header. -func (p *TCompactProtocol) WriteListBegin(elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteListEnd() error { return nil } - -// Write a set header. -func (p *TCompactProtocol) WriteSetBegin(elemType TType, size int) error { - _, err := p.writeCollectionBegin(elemType, size) - return NewTProtocolException(err) -} - -func (p *TCompactProtocol) WriteSetEnd() error { return nil } - -func (p *TCompactProtocol) WriteBool(value bool) error { - v := byte(COMPACT_BOOLEAN_FALSE) - if value { - v = byte(COMPACT_BOOLEAN_TRUE) - } - if p.booleanFieldPending { - // we haven't written the field header yet - _, err := p.writeFieldBeginInternal(p.booleanFieldName, BOOL, p.booleanFieldId, v) - p.booleanFieldPending = false - return NewTProtocolException(err) - } - // we're not part of a field, so just write the value. - err := p.writeByteDirect(v) - return NewTProtocolException(err) -} - -// Write a byte. Nothing to see here! -func (p *TCompactProtocol) WriteByte(value int8) error { - err := p.writeByteDirect(byte(value)) - return NewTProtocolException(err) -} - -// Write an I16 as a zigzag varint. -func (p *TCompactProtocol) WriteI16(value int16) error { - _, err := p.writeVarint32(p.int32ToZigzag(int32(value))) - return NewTProtocolException(err) -} - -// Write an i32 as a zigzag varint. -func (p *TCompactProtocol) WriteI32(value int32) error { - _, err := p.writeVarint32(p.int32ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write an i64 as a zigzag varint. 
-func (p *TCompactProtocol) WriteI64(value int64) error { - _, err := p.writeVarint64(p.int64ToZigzag(value)) - return NewTProtocolException(err) -} - -// Write a double to the wire as 8 bytes. -func (p *TCompactProtocol) WriteDouble(value float64) error { - buf := p.buffer[0:8] - binary.LittleEndian.PutUint64(buf, math.Float64bits(value)) - _, err := p.trans.Write(buf) - return NewTProtocolException(err) -} - -// Write a string to the wire with a varint size preceding. -func (p *TCompactProtocol) WriteString(value string) error { - _, e := p.writeVarint32(int32(len(value))) - if e != nil { - return NewTProtocolException(e) - } - if len(value) > 0 { - } - _, e = p.trans.WriteString(value) - return e -} - -// Write a byte array, using a varint for the size. -func (p *TCompactProtocol) WriteBinary(bin []byte) error { - _, e := p.writeVarint32(int32(len(bin))) - if e != nil { - return NewTProtocolException(e) - } - if len(bin) > 0 { - _, e = p.trans.Write(bin) - return NewTProtocolException(e) - } - return nil -} - -// -// Reading methods. -// - -// Read a message header. 
-func (p *TCompactProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - - protocolId, err := p.readByteDirect() - if err != nil { - return - } - - if protocolId != COMPACT_PROTOCOL_ID { - e := fmt.Errorf("Expected protocol id %02x but got %02x", COMPACT_PROTOCOL_ID, protocolId) - return "", typeId, seqId, NewTProtocolExceptionWithType(BAD_VERSION, e) - } - - versionAndType, err := p.readByteDirect() - if err != nil { - return - } - - version := versionAndType & COMPACT_VERSION_MASK - typeId = TMessageType((versionAndType >> COMPACT_TYPE_SHIFT_AMOUNT) & COMPACT_TYPE_BITS) - if version != COMPACT_VERSION { - e := fmt.Errorf("Expected version %02x but got %02x", COMPACT_VERSION, version) - err = NewTProtocolExceptionWithType(BAD_VERSION, e) - return - } - seqId, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - name, err = p.ReadString() - return -} - -func (p *TCompactProtocol) ReadMessageEnd() error { return nil } - -// Read a struct begin. There's nothing on the wire for this, but it is our -// opportunity to push a new struct begin marker onto the field stack. -func (p *TCompactProtocol) ReadStructBegin() (name string, err error) { - p.lastField = append(p.lastField, p.lastFieldId) - p.lastFieldId = 0 - return -} - -// Doesn't actually consume any wire data, just removes the last field for -// this struct from the field stack. -func (p *TCompactProtocol) ReadStructEnd() error { - // consume the last field we read off the wire. - p.lastFieldId = p.lastField[len(p.lastField)-1] - p.lastField = p.lastField[:len(p.lastField)-1] - return nil -} - -// Read a field header off the wire. -func (p *TCompactProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { - t, err := p.readByteDirect() - if err != nil { - return - } - - // if it's a stop, then we can return immediately, as the struct is over. 
- if (t & 0x0f) == STOP { - return "", STOP, 0, nil - } - - // mask off the 4 MSB of the type header. it could contain a field id delta. - modifier := int16((t & 0xf0) >> 4) - if modifier == 0 { - // not a delta. look ahead for the zigzag varint field id. - id, err = p.ReadI16() - if err != nil { - return - } - } else { - // has a delta. add the delta to the last read field id. - id = int16(p.lastFieldId) + modifier - } - typeId, e := p.getTType(tCompactType(t & 0x0f)) - if e != nil { - err = NewTProtocolException(e) - return - } - - // if this happens to be a boolean field, the value is encoded in the type - if p.isBoolType(t) { - // save the boolean value in a special instance variable. - p.boolValue = (byte(t)&0x0f == COMPACT_BOOLEAN_TRUE) - p.boolValueIsNotNull = true - } - - // push the new field onto the field stack so we can keep the deltas going. - p.lastFieldId = int(id) - return -} - -func (p *TCompactProtocol) ReadFieldEnd() error { return nil } - -// Read a map header off the wire. If the size is zero, skip reading the key -// and value type. This means that 0-length maps will yield TMaps without the -// "correct" types. -func (p *TCompactProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - size32, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size32 < 0 { - err = invalidDataLength - return - } - size = int(size32) - - keyAndValueType := byte(STOP) - if size != 0 { - keyAndValueType, err = p.readByteDirect() - if err != nil { - return - } - } - keyType, _ = p.getTType(tCompactType(keyAndValueType >> 4)) - valueType, _ = p.getTType(tCompactType(keyAndValueType & 0xf)) - return -} - -func (p *TCompactProtocol) ReadMapEnd() error { return nil } - -// Read a list header off the wire. If the list size is 0-14, the size will -// be packed into the element type header. 
If it's a longer list, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadListBegin() (elemType TType, size int, err error) { - size_and_type, err := p.readByteDirect() - if err != nil { - return - } - size = int((size_and_type >> 4) & 0x0f) - if size == 15 { - size2, e := p.readVarint32() - if e != nil { - err = NewTProtocolException(e) - return - } - if size2 < 0 { - err = invalidDataLength - return - } - size = int(size2) - } - elemType, e := p.getTType(tCompactType(size_and_type)) - if e != nil { - err = NewTProtocolException(e) - return - } - return -} - -func (p *TCompactProtocol) ReadListEnd() error { return nil } - -// Read a set header off the wire. If the set size is 0-14, the size will -// be packed into the element type header. If it's a longer set, the 4 MSB -// of the element type header will be 0xF, and a varint will follow with the -// true size. -func (p *TCompactProtocol) ReadSetBegin() (elemType TType, size int, err error) { - return p.ReadListBegin() -} - -func (p *TCompactProtocol) ReadSetEnd() error { return nil } - -// Read a boolean off the wire. If this is a boolean field, the value should -// already have been read during readFieldBegin, so we'll just consume the -// pre-stored value. Otherwise, read a byte. -func (p *TCompactProtocol) ReadBool() (value bool, err error) { - if p.boolValueIsNotNull { - p.boolValueIsNotNull = false - return p.boolValue, nil - } - v, err := p.readByteDirect() - return v == COMPACT_BOOLEAN_TRUE, err -} - -// Read a single byte off the wire. Nothing interesting here. -func (p *TCompactProtocol) ReadByte() (int8, error) { - v, err := p.readByteDirect() - if err != nil { - return 0, NewTProtocolException(err) - } - return int8(v), err -} - -// Read an i16 from the wire as a zigzag varint. 
-func (p *TCompactProtocol) ReadI16() (value int16, err error) { - v, err := p.ReadI32() - return int16(v), err -} - -// Read an i32 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI32() (value int32, err error) { - v, e := p.readVarint32() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt32(v) - return value, nil -} - -// Read an i64 from the wire as a zigzag varint. -func (p *TCompactProtocol) ReadI64() (value int64, err error) { - v, e := p.readVarint64() - if e != nil { - return 0, NewTProtocolException(e) - } - value = p.zigzagToInt64(v) - return value, nil -} - -// No magic here - just read a double off the wire. -func (p *TCompactProtocol) ReadDouble() (value float64, err error) { - longBits := p.buffer[0:8] - _, e := io.ReadFull(p.trans, longBits) - if e != nil { - return 0.0, NewTProtocolException(e) - } - return math.Float64frombits(p.bytesToUint64(longBits)), nil -} - -// Reads a []byte (via readBinary), and then UTF-8 decodes it. -func (p *TCompactProtocol) ReadString() (value string, err error) { - length, e := p.readVarint32() - if e != nil { - return "", NewTProtocolException(e) - } - if length < 0 { - return "", invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return "", invalidDataLength - } - - if length == 0 { - return "", nil - } - var buf []byte - if length <= int32(len(p.buffer)) { - buf = p.buffer[0:length] - } else { - buf = make([]byte, length) - } - _, e = io.ReadFull(p.trans, buf) - return string(buf), NewTProtocolException(e) -} - -// Read a []byte from the wire. 
-func (p *TCompactProtocol) ReadBinary() (value []byte, err error) { - length, e := p.readVarint32() - if e != nil { - return nil, NewTProtocolException(e) - } - if length == 0 { - return []byte{}, nil - } - if length < 0 { - return nil, invalidDataLength - } - if uint64(length) > p.trans.RemainingBytes() { - return nil, invalidDataLength - } - - buf := make([]byte, length) - _, e = io.ReadFull(p.trans, buf) - return buf, NewTProtocolException(e) -} - -func (p *TCompactProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.trans.Flush(ctx)) -} - -func (p *TCompactProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TCompactProtocol) Transport() TTransport { - return p.origTransport -} - -// -// Internal writing methods -// - -// Abstract method for writing the start of lists and sets. List and sets on -// the wire differ only by the type indicator. -func (p *TCompactProtocol) writeCollectionBegin(elemType TType, size int) (int, error) { - if size <= 14 { - return 1, p.writeByteDirect(byte(int32(size<<4) | int32(p.getCompactType(elemType)))) - } - err := p.writeByteDirect(0xf0 | byte(p.getCompactType(elemType))) - if err != nil { - return 0, err - } - m, err := p.writeVarint32(int32(size)) - return 1 + m, err -} - -// Write an i32 as a varint. Results in 1-5 bytes on the wire. -// TODO(pomack): make a permanent buffer like writeVarint64? -func (p *TCompactProtocol) writeVarint32(n int32) (int, error) { - i32buf := p.buffer[0:5] - idx := 0 - for { - if (n & ^0x7F) == 0 { - i32buf[idx] = byte(n) - idx++ - // p.writeByteDirect(byte(n)); - break - // return; - } else { - i32buf[idx] = byte((n & 0x7F) | 0x80) - idx++ - // p.writeByteDirect(byte(((n & 0x7F) | 0x80))); - u := uint32(n) - n = int32(u >> 7) - } - } - return p.trans.Write(i32buf[0:idx]) -} - -// Write an i64 as a varint. Results in 1-10 bytes on the wire. 
-func (p *TCompactProtocol) writeVarint64(n int64) (int, error) { - varint64out := p.buffer[0:10] - idx := 0 - for { - if (n & ^0x7F) == 0 { - varint64out[idx] = byte(n) - idx++ - break - } else { - varint64out[idx] = byte((n & 0x7F) | 0x80) - idx++ - u := uint64(n) - n = int64(u >> 7) - } - } - return p.trans.Write(varint64out[0:idx]) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int64ToZigzag(l int64) int64 { - return (l << 1) ^ (l >> 63) -} - -// Convert l into a zigzag long. This allows negative numbers to be -// represented compactly as a varint. -func (p *TCompactProtocol) int32ToZigzag(n int32) int32 { - return (n << 1) ^ (n >> 31) -} - -func (p *TCompactProtocol) fixedUint64ToBytes(n uint64, buf []byte) { - binary.LittleEndian.PutUint64(buf, n) -} - -func (p *TCompactProtocol) fixedInt64ToBytes(n int64, buf []byte) { - binary.LittleEndian.PutUint64(buf, uint64(n)) -} - -// Writes a byte without any possibility of all that field header nonsense. -// Used internally by other writing methods that know they need to write a byte. -func (p *TCompactProtocol) writeByteDirect(b byte) error { - return p.trans.WriteByte(b) -} - -// Writes a byte without any possibility of all that field header nonsense. -func (p *TCompactProtocol) writeIntAsByteDirect(n int) (int, error) { - return 1, p.writeByteDirect(byte(n)) -} - -// -// Internal reading methods -// - -// Read an i32 from the wire as a varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 5 bytes. -func (p *TCompactProtocol) readVarint32() (int32, error) { - // if the wire contains the right stuff, this will just truncate the i64 we - // read and get us the right sign. - v, err := p.readVarint64() - return int32(v), err -} - -// Read an i64 from the wire as a proper varint. The MSB of each byte is set -// if there is another byte to follow. This can read up to 10 bytes. 
-func (p *TCompactProtocol) readVarint64() (int64, error) { - shift := uint(0) - result := int64(0) - for { - b, err := p.readByteDirect() - if err != nil { - return 0, err - } - result |= int64(b&0x7f) << shift - if (b & 0x80) != 0x80 { - break - } - shift += 7 - } - return result, nil -} - -// Read a byte, unlike ReadByte that reads Thrift-byte that is i8. -func (p *TCompactProtocol) readByteDirect() (byte, error) { - return p.trans.ReadByte() -} - -// -// encoding helpers -// - -// Convert from zigzag int to int. -func (p *TCompactProtocol) zigzagToInt32(n int32) int32 { - u := uint32(n) - return int32(u>>1) ^ -(n & 1) -} - -// Convert from zigzag long to long. -func (p *TCompactProtocol) zigzagToInt64(n int64) int64 { - u := uint64(n) - return int64(u>>1) ^ -(n & 1) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToInt64(b []byte) int64 { - return int64(binary.LittleEndian.Uint64(b)) -} - -// Note that it's important that the mask bytes are long literals, -// otherwise they'll default to ints, and when you shift an int left 56 bits, -// you just get a messed up int. -func (p *TCompactProtocol) bytesToUint64(b []byte) uint64 { - return binary.LittleEndian.Uint64(b) -} - -// -// type testing and converting -// - -func (p *TCompactProtocol) isBoolType(b byte) bool { - return (b&0x0f) == COMPACT_BOOLEAN_TRUE || (b&0x0f) == COMPACT_BOOLEAN_FALSE -} - -// Given a tCompactType constant, convert it to its corresponding -// TType value. 
-func (p *TCompactProtocol) getTType(t tCompactType) (TType, error) { - switch byte(t) & 0x0f { - case STOP: - return STOP, nil - case COMPACT_BOOLEAN_FALSE, COMPACT_BOOLEAN_TRUE: - return BOOL, nil - case COMPACT_BYTE: - return BYTE, nil - case COMPACT_I16: - return I16, nil - case COMPACT_I32: - return I32, nil - case COMPACT_I64: - return I64, nil - case COMPACT_DOUBLE: - return DOUBLE, nil - case COMPACT_BINARY: - return STRING, nil - case COMPACT_LIST: - return LIST, nil - case COMPACT_SET: - return SET, nil - case COMPACT_MAP: - return MAP, nil - case COMPACT_STRUCT: - return STRUCT, nil - } - return STOP, TException(fmt.Errorf("don't know what type: %v", t&0x0f)) -} - -// Given a TType value, find the appropriate TCompactProtocol.Types constant. -func (p *TCompactProtocol) getCompactType(t TType) tCompactType { - return ttypeToCompactType[t] -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go deleted file mode 100644 index d15c1bcf8..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/context.go +++ /dev/null @@ -1,24 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import "context" - -var defaultCtx = context.Background() diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go deleted file mode 100644 index 57943e0f3..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/debug_protocol.go +++ /dev/null @@ -1,270 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "context" - "log" -) - -type TDebugProtocol struct { - Delegate TProtocol - LogPrefix string -} - -type TDebugProtocolFactory struct { - Underlying TProtocolFactory - LogPrefix string -} - -func NewTDebugProtocolFactory(underlying TProtocolFactory, logPrefix string) *TDebugProtocolFactory { - return &TDebugProtocolFactory{ - Underlying: underlying, - LogPrefix: logPrefix, - } -} - -func (t *TDebugProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return &TDebugProtocol{ - Delegate: t.Underlying.GetProtocol(trans), - LogPrefix: t.LogPrefix, - } -} - -func (tdp *TDebugProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - err := tdp.Delegate.WriteMessageBegin(name, typeId, seqid) - log.Printf("%sWriteMessageBegin(name=%#v, typeId=%#v, seqid=%#v) => %#v", tdp.LogPrefix, name, typeId, seqid, err) - return err -} -func (tdp *TDebugProtocol) WriteMessageEnd() error { - err := tdp.Delegate.WriteMessageEnd() - log.Printf("%sWriteMessageEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteStructBegin(name string) error { - err := tdp.Delegate.WriteStructBegin(name) - log.Printf("%sWriteStructBegin(name=%#v) => %#v", tdp.LogPrefix, name, err) - return err -} -func (tdp *TDebugProtocol) WriteStructEnd() error { - err := tdp.Delegate.WriteStructEnd() - log.Printf("%sWriteStructEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - err := tdp.Delegate.WriteFieldBegin(name, typeId, id) - log.Printf("%sWriteFieldBegin(name=%#v, typeId=%#v, id%#v) => %#v", tdp.LogPrefix, name, typeId, id, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldEnd() error { - err := tdp.Delegate.WriteFieldEnd() - log.Printf("%sWriteFieldEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteFieldStop() error { - err := tdp.Delegate.WriteFieldStop() - 
log.Printf("%sWriteFieldStop() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - err := tdp.Delegate.WriteMapBegin(keyType, valueType, size) - log.Printf("%sWriteMapBegin(keyType=%#v, valueType=%#v, size=%#v) => %#v", tdp.LogPrefix, keyType, valueType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteMapEnd() error { - err := tdp.Delegate.WriteMapEnd() - log.Printf("%sWriteMapEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteListBegin(elemType TType, size int) error { - err := tdp.Delegate.WriteListBegin(elemType, size) - log.Printf("%sWriteListBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteListEnd() error { - err := tdp.Delegate.WriteListEnd() - log.Printf("%sWriteListEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteSetBegin(elemType TType, size int) error { - err := tdp.Delegate.WriteSetBegin(elemType, size) - log.Printf("%sWriteSetBegin(elemType=%#v, size=%#v) => %#v", tdp.LogPrefix, elemType, size, err) - return err -} -func (tdp *TDebugProtocol) WriteSetEnd() error { - err := tdp.Delegate.WriteSetEnd() - log.Printf("%sWriteSetEnd() => %#v", tdp.LogPrefix, err) - return err -} -func (tdp *TDebugProtocol) WriteBool(value bool) error { - err := tdp.Delegate.WriteBool(value) - log.Printf("%sWriteBool(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteByte(value int8) error { - err := tdp.Delegate.WriteByte(value) - log.Printf("%sWriteByte(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI16(value int16) error { - err := tdp.Delegate.WriteI16(value) - log.Printf("%sWriteI16(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI32(value int32) error { - err := tdp.Delegate.WriteI32(value) - 
log.Printf("%sWriteI32(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteI64(value int64) error { - err := tdp.Delegate.WriteI64(value) - log.Printf("%sWriteI64(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteDouble(value float64) error { - err := tdp.Delegate.WriteDouble(value) - log.Printf("%sWriteDouble(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteString(value string) error { - err := tdp.Delegate.WriteString(value) - log.Printf("%sWriteString(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} -func (tdp *TDebugProtocol) WriteBinary(value []byte) error { - err := tdp.Delegate.WriteBinary(value) - log.Printf("%sWriteBinary(value=%#v) => %#v", tdp.LogPrefix, value, err) - return err -} - -func (tdp *TDebugProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { - name, typeId, seqid, err = tdp.Delegate.ReadMessageBegin() - log.Printf("%sReadMessageBegin() (name=%#v, typeId=%#v, seqid=%#v, err=%#v)", tdp.LogPrefix, name, typeId, seqid, err) - return -} -func (tdp *TDebugProtocol) ReadMessageEnd() (err error) { - err = tdp.Delegate.ReadMessageEnd() - log.Printf("%sReadMessageEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadStructBegin() (name string, err error) { - name, err = tdp.Delegate.ReadStructBegin() - log.Printf("%sReadStructBegin() (name%#v, err=%#v)", tdp.LogPrefix, name, err) - return -} -func (tdp *TDebugProtocol) ReadStructEnd() (err error) { - err = tdp.Delegate.ReadStructEnd() - log.Printf("%sReadStructEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadFieldBegin() (name string, typeId TType, id int16, err error) { - name, typeId, id, err = tdp.Delegate.ReadFieldBegin() - log.Printf("%sReadFieldBegin() (name=%#v, typeId=%#v, id=%#v, err=%#v)", tdp.LogPrefix, name, typeId, id, err) - return -} -func (tdp 
*TDebugProtocol) ReadFieldEnd() (err error) { - err = tdp.Delegate.ReadFieldEnd() - log.Printf("%sReadFieldEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, err error) { - keyType, valueType, size, err = tdp.Delegate.ReadMapBegin() - log.Printf("%sReadMapBegin() (keyType=%#v, valueType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, keyType, valueType, size, err) - return -} -func (tdp *TDebugProtocol) ReadMapEnd() (err error) { - err = tdp.Delegate.ReadMapEnd() - log.Printf("%sReadMapEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadListBegin() (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadListBegin() - log.Printf("%sReadListBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *TDebugProtocol) ReadListEnd() (err error) { - err = tdp.Delegate.ReadListEnd() - log.Printf("%sReadListEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadSetBegin() (elemType TType, size int, err error) { - elemType, size, err = tdp.Delegate.ReadSetBegin() - log.Printf("%sReadSetBegin() (elemType=%#v, size=%#v, err=%#v)", tdp.LogPrefix, elemType, size, err) - return -} -func (tdp *TDebugProtocol) ReadSetEnd() (err error) { - err = tdp.Delegate.ReadSetEnd() - log.Printf("%sReadSetEnd() err=%#v", tdp.LogPrefix, err) - return -} -func (tdp *TDebugProtocol) ReadBool() (value bool, err error) { - value, err = tdp.Delegate.ReadBool() - log.Printf("%sReadBool() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadByte() (value int8, err error) { - value, err = tdp.Delegate.ReadByte() - log.Printf("%sReadByte() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI16() (value int16, err error) { - value, err = tdp.Delegate.ReadI16() - log.Printf("%sReadI16() (value=%#v, err=%#v)", tdp.LogPrefix, 
value, err) - return -} -func (tdp *TDebugProtocol) ReadI32() (value int32, err error) { - value, err = tdp.Delegate.ReadI32() - log.Printf("%sReadI32() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadI64() (value int64, err error) { - value, err = tdp.Delegate.ReadI64() - log.Printf("%sReadI64() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadDouble() (value float64, err error) { - value, err = tdp.Delegate.ReadDouble() - log.Printf("%sReadDouble() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadString() (value string, err error) { - value, err = tdp.Delegate.ReadString() - log.Printf("%sReadString() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) ReadBinary() (value []byte, err error) { - value, err = tdp.Delegate.ReadBinary() - log.Printf("%sReadBinary() (value=%#v, err=%#v)", tdp.LogPrefix, value, err) - return -} -func (tdp *TDebugProtocol) Skip(fieldType TType) (err error) { - err = tdp.Delegate.Skip(fieldType) - log.Printf("%sSkip(fieldType=%#v) (err=%#v)", tdp.LogPrefix, fieldType, err) - return -} -func (tdp *TDebugProtocol) Flush(ctx context.Context) (err error) { - err = tdp.Delegate.Flush(ctx) - log.Printf("%sFlush() (err=%#v)", tdp.LogPrefix, err) - return -} - -func (tdp *TDebugProtocol) Transport() TTransport { - return tdp.Delegate.Transport() -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go deleted file mode 100644 index 91a0983a4..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/deserializer.go +++ /dev/null @@ -1,58 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TDeserializer struct { - Transport TTransport - Protocol TProtocol -} - -func NewTDeserializer() *TDeserializer { - var transport TTransport - transport = NewTMemoryBufferLen(1024) - - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TDeserializer{ - transport, - protocol} -} - -func (t *TDeserializer) ReadString(msg TStruct, s string) (err error) { - err = nil - if _, err = t.Transport.Write([]byte(s)); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} - -func (t *TDeserializer) Read(msg TStruct, b []byte) (err error) { - err = nil - if _, err = t.Transport.Write(b); err != nil { - return - } - if err = msg.Read(t.Protocol); err != nil { - return - } - return -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go deleted file mode 100644 index ea8d6f661..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/exception.go +++ /dev/null @@ -1,44 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" -) - -// Generic Thrift exception -type TException interface { - error -} - -// Prepends additional information to an error without losing the Thrift exception interface -func PrependError(prepend string, err error) error { - if t, ok := err.(TTransportException); ok { - return NewTTransportException(t.TypeId(), prepend+t.Error()) - } - if t, ok := err.(TProtocolException); ok { - return NewTProtocolExceptionWithType(t.TypeId(), errors.New(prepend+err.Error())) - } - if t, ok := err.(TApplicationException); ok { - return NewTApplicationException(t.TypeId(), prepend+t.Error()) - } - - return errors.New(prepend + err.Error()) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go deleted file mode 100644 index 9d6652550..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/field.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Helper class that encapsulates field metadata. -type field struct { - name string - typeId TType - id int -} - -func newField(n string, t TType, i int) *field { - return &field{name: n, typeId: t, id: i} -} - -func (p *field) Name() string { - if p == nil { - return "" - } - return p.name -} - -func (p *field) TypeId() TType { - if p == nil { - return TType(VOID) - } - return p.typeId -} - -func (p *field) Id() int { - if p == nil { - return -1 - } - return p.id -} - -func (p *field) String() string { - if p == nil { - return "" - } - return "" -} - -var ANONYMOUS_FIELD *field - -type fieldSlice []field - -func (p fieldSlice) Len() int { - return len(p) -} - -func (p fieldSlice) Less(i, j int) bool { - return p[i].Id() < p[j].Id() -} - -func (p fieldSlice) Swap(i, j int) { - p[i], p[j] = p[j], p[i] -} - -func init() { - ANONYMOUS_FIELD = newField("", STOP, 0) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go deleted file mode 100644 index 81fa65aaa..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/framed_transport.go +++ /dev/null @@ -1,173 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/binary" - "fmt" - "io" -) - -const DEFAULT_MAX_LENGTH = 16384000 - -type TFramedTransport struct { - transport TTransport - buf bytes.Buffer - reader *bufio.Reader - frameSize uint32 //Current remaining size of the frame. if ==0 read next frame header - buffer [4]byte - maxLength uint32 -} - -type tFramedTransportFactory struct { - factory TTransportFactory - maxLength uint32 -} - -func NewTFramedTransportFactory(factory TTransportFactory) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportFactoryMaxLength(factory TTransportFactory, maxLength uint32) TTransportFactory { - return &tFramedTransportFactory{factory: factory, maxLength: maxLength} -} - -func (p *tFramedTransportFactory) GetTransport(base TTransport) (TTransport, error) { - tt, err := p.factory.GetTransport(base) - if err != nil { - return nil, err - } - return NewTFramedTransportMaxLength(tt, p.maxLength), nil -} - -func NewTFramedTransport(transport TTransport) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: bufio.NewReader(transport), maxLength: DEFAULT_MAX_LENGTH} -} - -func NewTFramedTransportMaxLength(transport TTransport, maxLength uint32) *TFramedTransport { - return &TFramedTransport{transport: transport, reader: 
bufio.NewReader(transport), maxLength: maxLength} -} - -func (p *TFramedTransport) Open() error { - return p.transport.Open() -} - -func (p *TFramedTransport) IsOpen() bool { - return p.transport.IsOpen() -} - -func (p *TFramedTransport) Close() error { - return p.transport.Close() -} - -func (p *TFramedTransport) Read(buf []byte) (l int, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < uint32(len(buf)) { - frameSize := p.frameSize - tmp := make([]byte, p.frameSize) - l, err = p.Read(tmp) - copy(buf, tmp) - if err == nil { - err = NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", frameSize, len(buf))) - return - } - } - got, err := p.reader.Read(buf) - p.frameSize = p.frameSize - uint32(got) - //sanity check - if p.frameSize < 0 { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "Negative frame size") - } - return got, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) ReadByte() (c byte, err error) { - if p.frameSize == 0 { - p.frameSize, err = p.readFrameHeader() - if err != nil { - return - } - } - if p.frameSize < 1 { - return 0, NewTTransportExceptionFromError(fmt.Errorf("Not enough frame size %d to read %d bytes", p.frameSize, 1)) - } - c, err = p.reader.ReadByte() - if err == nil { - p.frameSize-- - } - return -} - -func (p *TFramedTransport) Write(buf []byte) (int, error) { - n, err := p.buf.Write(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) WriteByte(c byte) error { - return p.buf.WriteByte(c) -} - -func (p *TFramedTransport) WriteString(s string) (n int, err error) { - return p.buf.WriteString(s) -} - -func (p *TFramedTransport) Flush(ctx context.Context) error { - size := p.buf.Len() - buf := p.buffer[:4] - binary.BigEndian.PutUint32(buf, uint32(size)) - _, err := p.transport.Write(buf) - if err != nil { - p.buf.Truncate(0) - return 
NewTTransportExceptionFromError(err) - } - if size > 0 { - if n, err := p.buf.WriteTo(p.transport); err != nil { - print("Error while flushing write buffer of size ", size, " to transport, only wrote ", n, " bytes: ", err.Error(), "\n") - p.buf.Truncate(0) - return NewTTransportExceptionFromError(err) - } - } - err = p.transport.Flush(ctx) - return NewTTransportExceptionFromError(err) -} - -func (p *TFramedTransport) readFrameHeader() (uint32, error) { - buf := p.buffer[:4] - if _, err := io.ReadFull(p.reader, buf); err != nil { - return 0, err - } - size := binary.BigEndian.Uint32(buf) - if size < 0 || size > p.maxLength { - return 0, NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, fmt.Sprintf("Incorrect frame size (%d)", size)) - } - return size, nil -} - -func (p *TFramedTransport) RemainingBytes() (num_bytes uint64) { - return uint64(p.frameSize) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go deleted file mode 100644 index 5c82bf538..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_client.go +++ /dev/null @@ -1,242 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bytes" - "context" - "io" - "io/ioutil" - "net/http" - "net/url" - "strconv" -) - -// Default to using the shared http client. Library users are -// free to change this global client or specify one through -// THttpClientOptions. -var DefaultHttpClient *http.Client = http.DefaultClient - -type THttpClient struct { - client *http.Client - response *http.Response - url *url.URL - requestBuffer *bytes.Buffer - header http.Header - nsecConnectTimeout int64 - nsecReadTimeout int64 -} - -type THttpClientTransportFactory struct { - options THttpClientOptions - url string -} - -func (p *THttpClientTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*THttpClient) - if ok && t.url != nil { - return NewTHttpClientWithOptions(t.url.String(), p.options) - } - } - return NewTHttpClientWithOptions(p.url, p.options) -} - -type THttpClientOptions struct { - // If nil, DefaultHttpClient is used - Client *http.Client -} - -func NewTHttpClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -func NewTHttpClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return &THttpClientTransportFactory{url: url, options: options} -} - -func NewTHttpClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - parsedURL, err := url.Parse(urlstr) - if err != nil { - return nil, err - } - buf := make([]byte, 0, 1024) - client := options.Client - if client == nil { - client = DefaultHttpClient - } - httpHeader := map[string][]string{"Content-Type": {"application/x-thrift"}} - return &THttpClient{client: client, url: parsedURL, requestBuffer: bytes.NewBuffer(buf), header: httpHeader}, nil -} - -func NewTHttpClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} - -// Set the HTTP 
Header for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.SetHeader("User-Agent","Thrift Client 1.0") -func (p *THttpClient) SetHeader(key string, value string) { - p.header.Add(key, value) -} - -// Get the HTTP Header represented by the supplied Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// hdrValue := httpTrans.GetHeader("User-Agent") -func (p *THttpClient) GetHeader(key string) string { - return p.header.Get(key) -} - -// Deletes the HTTP Header given a Header Key for this specific Thrift Transport -// It is important that you first assert the TTransport as a THttpClient type -// like so: -// -// httpTrans := trans.(THttpClient) -// httpTrans.DelHeader("User-Agent") -func (p *THttpClient) DelHeader(key string) { - p.header.Del(key) -} - -func (p *THttpClient) Open() error { - // do nothing - return nil -} - -func (p *THttpClient) IsOpen() bool { - return p.response != nil || p.requestBuffer != nil -} - -func (p *THttpClient) closeResponse() error { - var err error - if p.response != nil && p.response.Body != nil { - // The docs specify that if keepalive is enabled and the response body is not - // read to completion the connection will never be returned to the pool and - // reused. Errors are being ignored here because if the connection is invalid - // and this fails for some reason, the Close() method will do any remaining - // cleanup. 
- io.Copy(ioutil.Discard, p.response.Body) - - err = p.response.Body.Close() - } - - p.response = nil - return err -} - -func (p *THttpClient) Close() error { - if p.requestBuffer != nil { - p.requestBuffer.Reset() - p.requestBuffer = nil - } - return p.closeResponse() -} - -func (p *THttpClient) Read(buf []byte) (int, error) { - if p.response == nil { - return 0, NewTTransportException(NOT_OPEN, "Response buffer is empty, no request.") - } - n, err := p.response.Body.Read(buf) - if n > 0 && (err == nil || err == io.EOF) { - return n, nil - } - return n, NewTTransportExceptionFromError(err) -} - -func (p *THttpClient) ReadByte() (c byte, err error) { - return readByte(p.response.Body) -} - -func (p *THttpClient) Write(buf []byte) (int, error) { - n, err := p.requestBuffer.Write(buf) - return n, err -} - -func (p *THttpClient) WriteByte(c byte) error { - return p.requestBuffer.WriteByte(c) -} - -func (p *THttpClient) WriteString(s string) (n int, err error) { - return p.requestBuffer.WriteString(s) -} - -func (p *THttpClient) Flush(ctx context.Context) error { - // Close any previous response body to avoid leaking connections. - p.closeResponse() - - req, err := http.NewRequest("POST", p.url.String(), p.requestBuffer) - if err != nil { - return NewTTransportExceptionFromError(err) - } - req.Header = p.header - if ctx != nil { - req = req.WithContext(ctx) - } - response, err := p.client.Do(req) - if err != nil { - return NewTTransportExceptionFromError(err) - } - if response.StatusCode != http.StatusOK { - // Close the response to avoid leaking file descriptors. closeResponse does - // more than just call Close(), so temporarily assign it and reuse the logic. 
- p.response = response - p.closeResponse() - - // TODO(pomack) log bad response - return NewTTransportException(UNKNOWN_TRANSPORT_EXCEPTION, "HTTP Response code: "+strconv.Itoa(response.StatusCode)) - } - p.response = response - return nil -} - -func (p *THttpClient) RemainingBytes() (num_bytes uint64) { - len := p.response.ContentLength - if len >= 0 { - return uint64(len) - } - - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} - -// Deprecated: Use NewTHttpClientTransportFactory instead. -func NewTHttpPostClientTransportFactory(url string) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, THttpClientOptions{}) -} - -// Deprecated: Use NewTHttpClientTransportFactoryWithOptions instead. -func NewTHttpPostClientTransportFactoryWithOptions(url string, options THttpClientOptions) *THttpClientTransportFactory { - return NewTHttpClientTransportFactoryWithOptions(url, options) -} - -// Deprecated: Use NewTHttpClientWithOptions instead. -func NewTHttpPostClientWithOptions(urlstr string, options THttpClientOptions) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, options) -} - -// Deprecated: Use NewTHttpClient instead. -func NewTHttpPostClient(urlstr string) (TTransport, error) { - return NewTHttpClientWithOptions(urlstr, THttpClientOptions{}) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go deleted file mode 100644 index 66f0f388a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/http_transport.go +++ /dev/null @@ -1,63 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "compress/gzip" - "io" - "net/http" - "strings" -) - -// NewThriftHandlerFunc is a function that create a ready to use Apache Thrift Handler function -func NewThriftHandlerFunc(processor TProcessor, - inPfactory, outPfactory TProtocolFactory) func(w http.ResponseWriter, r *http.Request) { - - return gz(func(w http.ResponseWriter, r *http.Request) { - w.Header().Add("Content-Type", "application/x-thrift") - - transport := NewStreamTransport(r.Body, w) - processor.Process(r.Context(), inPfactory.GetProtocol(transport), outPfactory.GetProtocol(transport)) - }) -} - -// gz transparently compresses the HTTP response if the client supports it. 
-func gz(handler http.HandlerFunc) http.HandlerFunc { - return func(w http.ResponseWriter, r *http.Request) { - if !strings.Contains(r.Header.Get("Accept-Encoding"), "gzip") { - handler(w, r) - return - } - w.Header().Set("Content-Encoding", "gzip") - gz := gzip.NewWriter(w) - defer gz.Close() - gzw := gzipResponseWriter{Writer: gz, ResponseWriter: w} - handler(gzw, r) - } -} - -type gzipResponseWriter struct { - io.Writer - http.ResponseWriter -} - -func (w gzipResponseWriter) Write(b []byte) (int, error) { - return w.Writer.Write(b) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go deleted file mode 100644 index fea93bcef..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/iostream_transport.go +++ /dev/null @@ -1,214 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "context" - "io" -) - -// StreamTransport is a Transport made of an io.Reader and/or an io.Writer -type StreamTransport struct { - io.Reader - io.Writer - isReadWriter bool - closed bool -} - -type StreamTransportFactory struct { - Reader io.Reader - Writer io.Writer - isReadWriter bool -} - -func (p *StreamTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*StreamTransport) - if ok { - if t.isReadWriter { - return NewStreamTransportRW(t.Reader.(io.ReadWriter)), nil - } - if t.Reader != nil && t.Writer != nil { - return NewStreamTransport(t.Reader, t.Writer), nil - } - if t.Reader != nil && t.Writer == nil { - return NewStreamTransportR(t.Reader), nil - } - if t.Reader == nil && t.Writer != nil { - return NewStreamTransportW(t.Writer), nil - } - return &StreamTransport{}, nil - } - } - if p.isReadWriter { - return NewStreamTransportRW(p.Reader.(io.ReadWriter)), nil - } - if p.Reader != nil && p.Writer != nil { - return NewStreamTransport(p.Reader, p.Writer), nil - } - if p.Reader != nil && p.Writer == nil { - return NewStreamTransportR(p.Reader), nil - } - if p.Reader == nil && p.Writer != nil { - return NewStreamTransportW(p.Writer), nil - } - return &StreamTransport{}, nil -} - -func NewStreamTransportFactory(reader io.Reader, writer io.Writer, isReadWriter bool) *StreamTransportFactory { - return &StreamTransportFactory{Reader: reader, Writer: writer, isReadWriter: isReadWriter} -} - -func NewStreamTransport(r io.Reader, w io.Writer) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r), Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportR(r io.Reader) *StreamTransport { - return &StreamTransport{Reader: bufio.NewReader(r)} -} - -func NewStreamTransportW(w io.Writer) *StreamTransport { - return &StreamTransport{Writer: bufio.NewWriter(w)} -} - -func NewStreamTransportRW(rw io.ReadWriter) *StreamTransport { - bufrw := 
bufio.NewReadWriter(bufio.NewReader(rw), bufio.NewWriter(rw)) - return &StreamTransport{Reader: bufrw, Writer: bufrw, isReadWriter: true} -} - -func (p *StreamTransport) IsOpen() bool { - return !p.closed -} - -// implicitly opened on creation, can't be reopened once closed -func (p *StreamTransport) Open() error { - if !p.closed { - return NewTTransportException(ALREADY_OPEN, "StreamTransport already open.") - } else { - return NewTTransportException(NOT_OPEN, "cannot reopen StreamTransport.") - } -} - -// Closes both the input and output streams. -func (p *StreamTransport) Close() error { - if p.closed { - return NewTTransportException(NOT_OPEN, "StreamTransport already closed.") - } - p.closed = true - closedReader := false - if p.Reader != nil { - c, ok := p.Reader.(io.Closer) - if ok { - e := c.Close() - closedReader = true - if e != nil { - return e - } - } - p.Reader = nil - } - if p.Writer != nil && (!closedReader || !p.isReadWriter) { - c, ok := p.Writer.(io.Closer) - if ok { - e := c.Close() - if e != nil { - return e - } - } - p.Writer = nil - } - return nil -} - -// Flushes the underlying output stream if not null. 
-func (p *StreamTransport) Flush(ctx context.Context) error { - if p.Writer == nil { - return NewTTransportException(NOT_OPEN, "Cannot flush null outputStream") - } - f, ok := p.Writer.(Flusher) - if ok { - err := f.Flush() - if err != nil { - return NewTTransportExceptionFromError(err) - } - } - return nil -} - -func (p *StreamTransport) Read(c []byte) (n int, err error) { - n, err = p.Reader.Read(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) ReadByte() (c byte, err error) { - f, ok := p.Reader.(io.ByteReader) - if ok { - c, err = f.ReadByte() - } else { - c, err = readByte(p.Reader) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) Write(c []byte) (n int, err error) { - n, err = p.Writer.Write(c) - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteByte(c byte) (err error) { - f, ok := p.Writer.(io.ByteWriter) - if ok { - err = f.WriteByte(c) - } else { - err = writeByte(p.Writer, c) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) WriteString(s string) (n int, err error) { - f, ok := p.Writer.(stringWriter) - if ok { - n, err = f.WriteString(s) - } else { - n, err = p.Writer.Write([]byte(s)) - } - if err != nil { - err = NewTTransportExceptionFromError(err) - } - return -} - -func (p *StreamTransport) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go deleted file mode 100644 index 7be685d43..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/json_protocol.go +++ /dev/null @@ -1,584 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more 
contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "encoding/base64" - "fmt" -) - -const ( - THRIFT_JSON_PROTOCOL_VERSION = 1 -) - -// for references to _ParseContext see tsimplejson_protocol.go - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. 
-// -type TJSONProtocol struct { - *TSimpleJSONProtocol -} - -// Constructor -func NewTJSONProtocol(t TTransport) *TJSONProtocol { - v := &TJSONProtocol{TSimpleJSONProtocol: NewTSimpleJSONProtocol(t)} - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type TJSONProtocolFactory struct{} - -func (p *TJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTJSONProtocol(trans) -} - -func NewTJSONProtocolFactory() *TJSONProtocolFactory { - return &TJSONProtocolFactory{} -} - -func (p *TJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteI32(THRIFT_JSON_PROTOCOL_VERSION); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteI16(id); e != nil { - return e - } - if e := p.OutputObjectBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(typeId) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) WriteFieldEnd() error { - return p.OutputObjectEnd() -} - -func (p *TJSONProtocol) WriteFieldStop() error { return nil } - -func (p *TJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e 
!= nil { - return e - } - s, e1 := p.TypeIdToString(keyType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - s, e1 = p.TypeIdToString(valueType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return p.OutputObjectBegin() -} - -func (p *TJSONProtocol) WriteMapEnd() error { - if e := p.OutputObjectEnd(); e != nil { - return e - } - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteListBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteListEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteSetBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TJSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *TJSONProtocol) WriteBool(b bool) error { - if b { - return p.WriteI32(1) - } - return p.WriteI32(0) -} - -func (p *TJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) -} - -func (p *TJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *TJSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TJSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *TJSONProtocol) WriteString(v string) error { - return p.OutputString(v) -} - -func (p *TJSONProtocol) WriteBinary(v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - 
writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. -func (p *TJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - version, err := p.ReadI32() - if err != nil { - return name, typeId, seqId, err - } - if version != THRIFT_JSON_PROTOCOL_VERSION { - e := fmt.Errorf("Unknown Protocol version %d, expected version %d", version, THRIFT_JSON_PROTOCOL_VERSION) - return name, typeId, seqId, NewTProtocolExceptionWithType(INVALID_DATA, e) - - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TJSONProtocol) ReadMessageEnd() error { - err := p.ParseListEnd() - return err -} - -func (p *TJSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TJSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { - b, _ := p.reader.Peek(1) - if len(b) < 1 || b[0] == JSON_RBRACE[0] || b[0] == JSON_RBRACKET[0] { - return "", STOP, -1, nil - } - fieldId, err := p.ReadI16() - if err != nil { - return "", STOP, fieldId, err - } - if _, err = p.ParseObjectStart(); err != nil { - return "", STOP, fieldId, err - } - sType, err := p.ReadString() - 
if err != nil { - return "", STOP, fieldId, err - } - fType, err := p.StringToTypeId(sType) - return "", fType, fieldId, err -} - -func (p *TJSONProtocol) ReadFieldEnd() error { - return p.ParseObjectEnd() -} - -func (p *TJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - sKeyType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - keyType, e = p.StringToTypeId(sKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - sValueType, e := p.ReadString() - if e != nil { - return keyType, valueType, size, e - } - valueType, e = p.StringToTypeId(sValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, e := p.ReadI64() - if e != nil { - return keyType, valueType, size, e - } - size = int(iSize) - - _, e = p.ParseObjectStart() - return keyType, valueType, size, e -} - -func (p *TJSONProtocol) ReadMapEnd() error { - e := p.ParseObjectEnd() - if e != nil { - return e - } - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TJSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *TJSONProtocol) ReadBool() (bool, error) { - value, err := p.ReadI32() - return (value != 0), err -} - -func (p *TJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() - return int8(v), err -} - -func (p *TJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *TJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} - -func (p *TJSONProtocol) ReadI64() (int64, 
error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TJSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TJSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TJSONProtocol) Flush(ctx context.Context) (err error) 
{ - err = p.writer.Flush() - if err == nil { - err = p.trans.Flush(ctx) - } - return NewTProtocolException(err) -} - -func (p *TJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.WriteString(s); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) readElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - sElemType, err := p.ReadString() - if err != nil { - return VOID, size, err - } - elemType, err = p.StringToTypeId(sElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TJSONProtocol) writeElemListBegin(elemType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - s, e1 := p.TypeIdToString(elemType) - if e1 != nil { - return e1 - } - if e := p.OutputString(s); e != nil { - return e - } - if e := p.OutputI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TJSONProtocol) TypeIdToString(fieldType TType) (string, error) { - switch byte(fieldType) { - case BOOL: - return "tf", nil - case BYTE: - return "i8", 
nil - case I16: - return "i16", nil - case I32: - return "i32", nil - case I64: - return "i64", nil - case DOUBLE: - return "dbl", nil - case STRING: - return "str", nil - case STRUCT: - return "rec", nil - case MAP: - return "map", nil - case SET: - return "set", nil - case LIST: - return "lst", nil - } - - e := fmt.Errorf("Unknown fieldType: %d", int(fieldType)) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TJSONProtocol) StringToTypeId(fieldType string) (TType, error) { - switch fieldType { - case "tf": - return TType(BOOL), nil - case "i8": - return TType(BYTE), nil - case "i16": - return TType(I16), nil - case "i32": - return TType(I32), nil - case "i64": - return TType(I64), nil - case "dbl": - return TType(DOUBLE), nil - case "str": - return TType(STRING), nil - case "rec": - return TType(STRUCT), nil - case "map": - return TType(MAP), nil - case "set": - return TType(SET), nil - case "lst": - return TType(LIST), nil - } - - e := fmt.Errorf("Unknown type identifier: %s", fieldType) - return TType(STOP), NewTProtocolExceptionWithType(INVALID_DATA, e) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go deleted file mode 100644 index 5936d2730..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/memory_buffer.go +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "bytes" - "context" -) - -// Memory buffer-based implementation of the TTransport interface. -type TMemoryBuffer struct { - *bytes.Buffer - size int -} - -type TMemoryBufferTransportFactory struct { - size int -} - -func (p *TMemoryBufferTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if trans != nil { - t, ok := trans.(*TMemoryBuffer) - if ok && t.size > 0 { - return NewTMemoryBufferLen(t.size), nil - } - } - return NewTMemoryBufferLen(p.size), nil -} - -func NewTMemoryBufferTransportFactory(size int) *TMemoryBufferTransportFactory { - return &TMemoryBufferTransportFactory{size: size} -} - -func NewTMemoryBuffer() *TMemoryBuffer { - return &TMemoryBuffer{Buffer: &bytes.Buffer{}, size: 0} -} - -func NewTMemoryBufferLen(size int) *TMemoryBuffer { - buf := make([]byte, 0, size) - return &TMemoryBuffer{Buffer: bytes.NewBuffer(buf), size: size} -} - -func (p *TMemoryBuffer) IsOpen() bool { - return true -} - -func (p *TMemoryBuffer) Open() error { - return nil -} - -func (p *TMemoryBuffer) Close() error { - p.Buffer.Reset() - return nil -} - -// Flushing a memory buffer is a no-op -func (p *TMemoryBuffer) Flush(ctx context.Context) error { - return nil -} - -func (p *TMemoryBuffer) RemainingBytes() (num_bytes uint64) { - return uint64(p.Buffer.Len()) -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go deleted file mode 100644 index 25ab2e98a..000000000 --- 
a/vendor/git.apache.org/thrift.git/lib/go/thrift/messagetype.go +++ /dev/null @@ -1,31 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Message type constants in the Thrift protocol. -type TMessageType int32 - -const ( - INVALID_TMESSAGE_TYPE TMessageType = 0 - CALL TMessageType = 1 - REPLY TMessageType = 2 - EXCEPTION TMessageType = 3 - ONEWAY TMessageType = 4 -) diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go deleted file mode 100644 index d028a30b3..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/multiplexed_protocol.go +++ /dev/null @@ -1,170 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "fmt" - "strings" -) - -/* -TMultiplexedProtocol is a protocol-independent concrete decorator -that allows a Thrift client to communicate with a multiplexing Thrift server, -by prepending the service name to the function name during function calls. - -NOTE: THIS IS NOT USED BY SERVERS. On the server, use TMultiplexedProcessor to handle request -from a multiplexing client. - -This example uses a single socket transport to invoke two services: - -socket := thrift.NewTSocketFromAddrTimeout(addr, TIMEOUT) -transport := thrift.NewTFramedTransport(socket) -protocol := thrift.NewTBinaryProtocolTransport(transport) - -mp := thrift.NewTMultiplexedProtocol(protocol, "Calculator") -service := Calculator.NewCalculatorClient(mp) - -mp2 := thrift.NewTMultiplexedProtocol(protocol, "WeatherReport") -service2 := WeatherReport.NewWeatherReportClient(mp2) - -err := transport.Open() -if err != nil { - t.Fatal("Unable to open client socket", err) -} - -fmt.Println(service.Add(2,2)) -fmt.Println(service2.GetTemperature()) -*/ - -type TMultiplexedProtocol struct { - TProtocol - serviceName string -} - -const MULTIPLEXED_SEPARATOR = ":" - -func NewTMultiplexedProtocol(protocol TProtocol, serviceName string) *TMultiplexedProtocol { - return &TMultiplexedProtocol{ - TProtocol: protocol, - serviceName: serviceName, - } -} - -func (t *TMultiplexedProtocol) WriteMessageBegin(name string, typeId TMessageType, seqid int32) error { - if typeId == CALL || typeId == ONEWAY { - return 
t.TProtocol.WriteMessageBegin(t.serviceName+MULTIPLEXED_SEPARATOR+name, typeId, seqid) - } else { - return t.TProtocol.WriteMessageBegin(name, typeId, seqid) - } -} - -/* -TMultiplexedProcessor is a TProcessor allowing -a single TServer to provide multiple services. - -To do so, you instantiate the processor and then register additional -processors with it, as shown in the following example: - -var processor = thrift.NewTMultiplexedProcessor() - -firstProcessor := -processor.RegisterProcessor("FirstService", firstProcessor) - -processor.registerProcessor( - "Calculator", - Calculator.NewCalculatorProcessor(&CalculatorHandler{}), -) - -processor.registerProcessor( - "WeatherReport", - WeatherReport.NewWeatherReportProcessor(&WeatherReportHandler{}), -) - -serverTransport, err := thrift.NewTServerSocketTimeout(addr, TIMEOUT) -if err != nil { - t.Fatal("Unable to create server socket", err) -} -server := thrift.NewTSimpleServer2(processor, serverTransport) -server.Serve(); -*/ - -type TMultiplexedProcessor struct { - serviceProcessorMap map[string]TProcessor - DefaultProcessor TProcessor -} - -func NewTMultiplexedProcessor() *TMultiplexedProcessor { - return &TMultiplexedProcessor{ - serviceProcessorMap: make(map[string]TProcessor), - } -} - -func (t *TMultiplexedProcessor) RegisterDefault(processor TProcessor) { - t.DefaultProcessor = processor -} - -func (t *TMultiplexedProcessor) RegisterProcessor(name string, processor TProcessor) { - if t.serviceProcessorMap == nil { - t.serviceProcessorMap = make(map[string]TProcessor) - } - t.serviceProcessorMap[name] = processor -} - -func (t *TMultiplexedProcessor) Process(ctx context.Context, in, out TProtocol) (bool, TException) { - name, typeId, seqid, err := in.ReadMessageBegin() - if err != nil { - return false, err - } - if typeId != CALL && typeId != ONEWAY { - return false, fmt.Errorf("Unexpected message type %v", typeId) - } - //extract the service name - v := strings.SplitN(name, MULTIPLEXED_SEPARATOR, 2) - if 
len(v) != 2 { - if t.DefaultProcessor != nil { - smb := NewStoredMessageProtocol(in, name, typeId, seqid) - return t.DefaultProcessor.Process(ctx, smb, out) - } - return false, fmt.Errorf("Service name not found in message name: %s. Did you forget to use a TMultiplexProtocol in your client?", name) - } - actualProcessor, ok := t.serviceProcessorMap[v[0]] - if !ok { - return false, fmt.Errorf("Service name not found: %s. Did you forget to call registerProcessor()?", v[0]) - } - smb := NewStoredMessageProtocol(in, v[1], typeId, seqid) - return actualProcessor.Process(ctx, smb, out) -} - -//Protocol that use stored message for ReadMessageBegin -type storedMessageProtocol struct { - TProtocol - name string - typeId TMessageType - seqid int32 -} - -func NewStoredMessageProtocol(protocol TProtocol, name string, typeId TMessageType, seqid int32) *storedMessageProtocol { - return &storedMessageProtocol{protocol, name, typeId, seqid} -} - -func (s *storedMessageProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) { - return s.name, s.typeId, s.seqid, nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go deleted file mode 100644 index aa8daa9b5..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/numeric.go +++ /dev/null @@ -1,164 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "math" - "strconv" -) - -type Numeric interface { - Int64() int64 - Int32() int32 - Int16() int16 - Byte() byte - Int() int - Float64() float64 - Float32() float32 - String() string - isNull() bool -} - -type numeric struct { - iValue int64 - dValue float64 - sValue string - isNil bool -} - -var ( - INFINITY Numeric - NEGATIVE_INFINITY Numeric - NAN Numeric - ZERO Numeric - NUMERIC_NULL Numeric -) - -func NewNumericFromDouble(dValue float64) Numeric { - if math.IsInf(dValue, 1) { - return INFINITY - } - if math.IsInf(dValue, -1) { - return NEGATIVE_INFINITY - } - if math.IsNaN(dValue) { - return NAN - } - iValue := int64(dValue) - sValue := strconv.FormatFloat(dValue, 'g', 10, 64) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI64(iValue int64) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromI32(iValue int32) Numeric { - dValue := float64(iValue) - sValue := string(iValue) - isNil := false - return &numeric{iValue: int64(iValue), dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromString(sValue string) Numeric { - if sValue == INFINITY.String() { - return INFINITY - } - if sValue == NEGATIVE_INFINITY.String() { - return NEGATIVE_INFINITY - } - if sValue == NAN.String() { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - 
isNil := len(sValue) == 0 - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNil} -} - -func NewNumericFromJSONString(sValue string, isNull bool) Numeric { - if isNull { - return NewNullNumeric() - } - if sValue == JSON_INFINITY { - return INFINITY - } - if sValue == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY - } - if sValue == JSON_NAN { - return NAN - } - iValue, _ := strconv.ParseInt(sValue, 10, 64) - dValue, _ := strconv.ParseFloat(sValue, 64) - return &numeric{iValue: iValue, dValue: dValue, sValue: sValue, isNil: isNull} -} - -func NewNullNumeric() Numeric { - return &numeric{iValue: 0, dValue: 0.0, sValue: "", isNil: true} -} - -func (p *numeric) Int64() int64 { - return p.iValue -} - -func (p *numeric) Int32() int32 { - return int32(p.iValue) -} - -func (p *numeric) Int16() int16 { - return int16(p.iValue) -} - -func (p *numeric) Byte() byte { - return byte(p.iValue) -} - -func (p *numeric) Int() int { - return int(p.iValue) -} - -func (p *numeric) Float64() float64 { - return p.dValue -} - -func (p *numeric) Float32() float32 { - return float32(p.dValue) -} - -func (p *numeric) String() string { - return p.sValue -} - -func (p *numeric) isNull() bool { - return p.isNil -} - -func init() { - INFINITY = &numeric{iValue: 0, dValue: math.Inf(1), sValue: "Infinity", isNil: false} - NEGATIVE_INFINITY = &numeric{iValue: 0, dValue: math.Inf(-1), sValue: "-Infinity", isNil: false} - NAN = &numeric{iValue: 0, dValue: math.NaN(), sValue: "NaN", isNil: false} - ZERO = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: false} - NUMERIC_NULL = &numeric{iValue: 0, dValue: 0, sValue: "0", isNil: true} -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go deleted file mode 100644 index 8d6b2c215..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/pointerize.go +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to the Apache Software Foundation 
(ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -/////////////////////////////////////////////////////////////////////////////// -// This file is home to helpers that convert from various base types to -// respective pointer types. This is necessary because Go does not permit -// references to constants, nor can a pointer type to base type be allocated -// and initialized in a single expression. -// -// E.g., this is not allowed: -// -// var ip *int = &5 -// -// But this *is* allowed: -// -// func IntPtr(i int) *int { return &i } -// var ip *int = IntPtr(5) -// -// Since pointers to base types are commonplace as [optional] fields in -// exported thrift structs, we factor such helpers here. 
-/////////////////////////////////////////////////////////////////////////////// - -func Float32Ptr(v float32) *float32 { return &v } -func Float64Ptr(v float64) *float64 { return &v } -func IntPtr(v int) *int { return &v } -func Int32Ptr(v int32) *int32 { return &v } -func Int64Ptr(v int64) *int64 { return &v } -func StringPtr(v string) *string { return &v } -func Uint32Ptr(v uint32) *uint32 { return &v } -func Uint64Ptr(v uint64) *uint64 { return &v } -func BoolPtr(v bool) *bool { return &v } -func ByteSlicePtr(v []byte) *[]byte { return &v } diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go deleted file mode 100644 index e4b132b30..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/processor_factory.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import "context" - -// A processor is a generic object which operates upon an input stream and -// writes to some output stream. 
-type TProcessor interface { - Process(ctx context.Context, in, out TProtocol) (bool, TException) -} - -type TProcessorFunction interface { - Process(ctx context.Context, seqId int32, in, out TProtocol) (bool, TException) -} - -// The default processor factory just returns a singleton -// instance. -type TProcessorFactory interface { - GetProcessor(trans TTransport) TProcessor -} - -type tProcessorFactory struct { - processor TProcessor -} - -func NewTProcessorFactory(p TProcessor) TProcessorFactory { - return &tProcessorFactory{processor: p} -} - -func (p *tProcessorFactory) GetProcessor(trans TTransport) TProcessor { - return p.processor -} - -/** - * The default processor factory just returns a singleton - * instance. - */ -type TProcessorFunctionFactory interface { - GetProcessorFunction(trans TTransport) TProcessorFunction -} - -type tProcessorFunctionFactory struct { - processor TProcessorFunction -} - -func NewTProcessorFunctionFactory(p TProcessorFunction) TProcessorFunctionFactory { - return &tProcessorFunctionFactory{processor: p} -} - -func (p *tProcessorFunctionFactory) GetProcessorFunction(trans TTransport) TProcessorFunction { - return p.processor -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go deleted file mode 100644 index 615b7a4a8..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol.go +++ /dev/null @@ -1,179 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "fmt" -) - -const ( - VERSION_MASK = 0xffff0000 - VERSION_1 = 0x80010000 -) - -type TProtocol interface { - WriteMessageBegin(name string, typeId TMessageType, seqid int32) error - WriteMessageEnd() error - WriteStructBegin(name string) error - WriteStructEnd() error - WriteFieldBegin(name string, typeId TType, id int16) error - WriteFieldEnd() error - WriteFieldStop() error - WriteMapBegin(keyType TType, valueType TType, size int) error - WriteMapEnd() error - WriteListBegin(elemType TType, size int) error - WriteListEnd() error - WriteSetBegin(elemType TType, size int) error - WriteSetEnd() error - WriteBool(value bool) error - WriteByte(value int8) error - WriteI16(value int16) error - WriteI32(value int32) error - WriteI64(value int64) error - WriteDouble(value float64) error - WriteString(value string) error - WriteBinary(value []byte) error - - ReadMessageBegin() (name string, typeId TMessageType, seqid int32, err error) - ReadMessageEnd() error - ReadStructBegin() (name string, err error) - ReadStructEnd() error - ReadFieldBegin() (name string, typeId TType, id int16, err error) - ReadFieldEnd() error - ReadMapBegin() (keyType TType, valueType TType, size int, err error) - ReadMapEnd() error - ReadListBegin() (elemType TType, size int, err error) - ReadListEnd() error - ReadSetBegin() (elemType TType, size int, err error) - ReadSetEnd() error - ReadBool() (value bool, err error) - ReadByte() (value int8, err error) - ReadI16() (value int16, err error) - ReadI32() (value int32, err 
error) - ReadI64() (value int64, err error) - ReadDouble() (value float64, err error) - ReadString() (value string, err error) - ReadBinary() (value []byte, err error) - - Skip(fieldType TType) (err error) - Flush(ctx context.Context) (err error) - - Transport() TTransport -} - -// The maximum recursive depth the skip() function will traverse -const DEFAULT_RECURSION_DEPTH = 64 - -// Skips over the next data element from the provided input TProtocol object. -func SkipDefaultDepth(prot TProtocol, typeId TType) (err error) { - return Skip(prot, typeId, DEFAULT_RECURSION_DEPTH) -} - -// Skips over the next data element from the provided input TProtocol object. -func Skip(self TProtocol, fieldType TType, maxDepth int) (err error) { - - if maxDepth <= 0 { - return NewTProtocolExceptionWithType(DEPTH_LIMIT, errors.New("Depth limit exceeded")) - } - - switch fieldType { - case STOP: - return - case BOOL: - _, err = self.ReadBool() - return - case BYTE: - _, err = self.ReadByte() - return - case I16: - _, err = self.ReadI16() - return - case I32: - _, err = self.ReadI32() - return - case I64: - _, err = self.ReadI64() - return - case DOUBLE: - _, err = self.ReadDouble() - return - case STRING: - _, err = self.ReadString() - return - case STRUCT: - if _, err = self.ReadStructBegin(); err != nil { - return err - } - for { - _, typeId, _, _ := self.ReadFieldBegin() - if typeId == STOP { - break - } - err := Skip(self, typeId, maxDepth-1) - if err != nil { - return err - } - self.ReadFieldEnd() - } - return self.ReadStructEnd() - case MAP: - keyType, valueType, size, err := self.ReadMapBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, keyType, maxDepth-1) - if err != nil { - return err - } - self.Skip(valueType) - } - return self.ReadMapEnd() - case SET: - elemType, size, err := self.ReadSetBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err 
- } - } - return self.ReadSetEnd() - case LIST: - elemType, size, err := self.ReadListBegin() - if err != nil { - return err - } - for i := 0; i < size; i++ { - err := Skip(self, elemType, maxDepth-1) - if err != nil { - return err - } - } - return self.ReadListEnd() - default: - return NewTProtocolExceptionWithType(INVALID_DATA, errors.New(fmt.Sprintf("Unknown data type %d", fieldType))) - } - return nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go deleted file mode 100644 index 29ab75d92..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_exception.go +++ /dev/null @@ -1,77 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "encoding/base64" -) - -// Thrift Protocol exception -type TProtocolException interface { - TException - TypeId() int -} - -const ( - UNKNOWN_PROTOCOL_EXCEPTION = 0 - INVALID_DATA = 1 - NEGATIVE_SIZE = 2 - SIZE_LIMIT = 3 - BAD_VERSION = 4 - NOT_IMPLEMENTED = 5 - DEPTH_LIMIT = 6 -) - -type tProtocolException struct { - typeId int - message string -} - -func (p *tProtocolException) TypeId() int { - return p.typeId -} - -func (p *tProtocolException) String() string { - return p.message -} - -func (p *tProtocolException) Error() string { - return p.message -} - -func NewTProtocolException(err error) TProtocolException { - if err == nil { - return nil - } - if e, ok := err.(TProtocolException); ok { - return e - } - if _, ok := err.(base64.CorruptInputError); ok { - return &tProtocolException{INVALID_DATA, err.Error()} - } - return &tProtocolException{UNKNOWN_PROTOCOL_EXCEPTION, err.Error()} -} - -func NewTProtocolExceptionWithType(errType int, err error) TProtocolException { - if err == nil { - return nil - } - return &tProtocolException{errType, err.Error()} -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go deleted file mode 100644 index c40f796d8..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/protocol_factory.go +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory interface for constructing protocol instances. -type TProtocolFactory interface { - GetProtocol(trans TTransport) TProtocol -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go deleted file mode 100644 index 4025bebea..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/rich_transport.go +++ /dev/null @@ -1,68 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import "io" - -type RichTransport struct { - TTransport -} - -// Wraps Transport to provide TRichTransport interface -func NewTRichTransport(trans TTransport) *RichTransport { - return &RichTransport{trans} -} - -func (r *RichTransport) ReadByte() (c byte, err error) { - return readByte(r.TTransport) -} - -func (r *RichTransport) WriteByte(c byte) error { - return writeByte(r.TTransport, c) -} - -func (r *RichTransport) WriteString(s string) (n int, err error) { - return r.Write([]byte(s)) -} - -func (r *RichTransport) RemainingBytes() (num_bytes uint64) { - return r.TTransport.RemainingBytes() -} - -func readByte(r io.Reader) (c byte, err error) { - v := [1]byte{0} - n, err := r.Read(v[0:1]) - if n > 0 && (err == nil || err == io.EOF) { - return v[0], nil - } - if n > 0 && err != nil { - return v[0], err - } - if err != nil { - return 0, err - } - return v[0], nil -} - -func writeByte(w io.Writer, c byte) error { - v := [1]byte{c} - _, err := w.Write(v[0:1]) - return err -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go deleted file mode 100644 index 1ff4d3754..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/serializer.go +++ /dev/null @@ -1,79 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" -) - -type TSerializer struct { - Transport *TMemoryBuffer - Protocol TProtocol -} - -type TStruct interface { - Write(p TProtocol) error - Read(p TProtocol) error -} - -func NewTSerializer() *TSerializer { - transport := NewTMemoryBufferLen(1024) - protocol := NewTBinaryProtocolFactoryDefault().GetProtocol(transport) - - return &TSerializer{ - transport, - protocol} -} - -func (t *TSerializer) WriteString(ctx context.Context, msg TStruct) (s string, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - if err = t.Transport.Flush(ctx); err != nil { - return - } - - return t.Transport.String(), nil -} - -func (t *TSerializer) Write(ctx context.Context, msg TStruct) (b []byte, err error) { - t.Transport.Reset() - - if err = msg.Write(t.Protocol); err != nil { - return - } - - if err = t.Protocol.Flush(ctx); err != nil { - return - } - - if err = t.Transport.Flush(ctx); err != nil { - return - } - - b = append(b, t.Transport.Bytes()...) - return -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go deleted file mode 100644 index f813fa353..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/server.go +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -type TServer interface { - ProcessorFactory() TProcessorFactory - ServerTransport() TServerTransport - InputTransportFactory() TTransportFactory - OutputTransportFactory() TTransportFactory - InputProtocolFactory() TProtocolFactory - OutputProtocolFactory() TProtocolFactory - - // Starts the server - Serve() error - // Stops the server. This is optional on a per-implementation basis. Not - // all servers are required to be cleanly stoppable. - Stop() error -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go deleted file mode 100644 index 80313c4be..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_socket.go +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "net" - "sync" - "time" -) - -type TServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - - // Protects the interrupted value to make it thread safe. - mu sync.RWMutex - interrupted bool -} - -func NewTServerSocket(listenAddr string) (*TServerSocket, error) { - return NewTServerSocketTimeout(listenAddr, 0) -} - -func NewTServerSocketTimeout(listenAddr string, clientTimeout time.Duration) (*TServerSocket, error) { - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TServerSocket{addr: addr, clientTimeout: clientTimeout}, nil -} - -// Creates a TServerSocket from a net.Addr -func NewTServerSocketFromAddrTimeout(addr net.Addr, clientTimeout time.Duration) *TServerSocket { - return &TServerSocket{addr: addr, clientTimeout: clientTimeout} -} - -func (p *TServerSocket) Listen() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return nil - } - l, err := net.Listen(p.addr.Network(), p.addr.String()) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TServerSocket) Accept() (TTransport, error) { - p.mu.RLock() - interrupted := p.interrupted - p.mu.RUnlock() - - if interrupted { - return nil, errTransportInterrupted - } - - listener := p.listener - if listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - - conn, err := listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return 
NewTSocketFromConnTimeout(conn, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TServerSocket) Open() error { - p.mu.Lock() - defer p.mu.Unlock() - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := net.Listen(p.addr.Network(), p.addr.String()); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TServerSocket) Addr() net.Addr { - if p.listener != nil { - return p.listener.Addr() - } - return p.addr -} - -func (p *TServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TServerSocket) Interrupt() error { - p.mu.Lock() - defer p.mu.Unlock() - p.interrupted = true - p.Close() - - return nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go deleted file mode 100644 index 51c40b64a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/server_transport.go +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Server transport. Object which provides client transports. -type TServerTransport interface { - Listen() error - Accept() (TTransport, error) - Close() error - - // Optional method implementation. This signals to the server transport - // that it should break out of any accept() or listen() that it is currently - // blocked on. This method, if implemented, MUST be thread safe, as it may - // be called from a different thread context than the other TServerTransport - // methods. - Interrupt() error -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go deleted file mode 100644 index 2e8a71112..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_json_protocol.go +++ /dev/null @@ -1,1338 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "bufio" - "bytes" - "context" - "encoding/base64" - "encoding/json" - "fmt" - "io" - "math" - "strconv" -) - -type _ParseContext int - -const ( - _CONTEXT_IN_TOPLEVEL _ParseContext = 1 - _CONTEXT_IN_LIST_FIRST _ParseContext = 2 - _CONTEXT_IN_LIST _ParseContext = 3 - _CONTEXT_IN_OBJECT_FIRST _ParseContext = 4 - _CONTEXT_IN_OBJECT_NEXT_KEY _ParseContext = 5 - _CONTEXT_IN_OBJECT_NEXT_VALUE _ParseContext = 6 -) - -func (p _ParseContext) String() string { - switch p { - case _CONTEXT_IN_TOPLEVEL: - return "TOPLEVEL" - case _CONTEXT_IN_LIST_FIRST: - return "LIST-FIRST" - case _CONTEXT_IN_LIST: - return "LIST" - case _CONTEXT_IN_OBJECT_FIRST: - return "OBJECT-FIRST" - case _CONTEXT_IN_OBJECT_NEXT_KEY: - return "OBJECT-NEXT-KEY" - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - return "OBJECT-NEXT-VALUE" - } - return "UNKNOWN-PARSE-CONTEXT" -} - -// JSON protocol implementation for thrift. -// -// This protocol produces/consumes a simple output format -// suitable for parsing by scripting languages. It should not be -// confused with the full-featured TJSONProtocol. 
-// -type TSimpleJSONProtocol struct { - trans TTransport - - parseContextStack []int - dumpContext []int - - writer *bufio.Writer - reader *bufio.Reader -} - -// Constructor -func NewTSimpleJSONProtocol(t TTransport) *TSimpleJSONProtocol { - v := &TSimpleJSONProtocol{trans: t, - writer: bufio.NewWriter(t), - reader: bufio.NewReader(t), - } - v.parseContextStack = append(v.parseContextStack, int(_CONTEXT_IN_TOPLEVEL)) - v.dumpContext = append(v.dumpContext, int(_CONTEXT_IN_TOPLEVEL)) - return v -} - -// Factory -type TSimpleJSONProtocolFactory struct{} - -func (p *TSimpleJSONProtocolFactory) GetProtocol(trans TTransport) TProtocol { - return NewTSimpleJSONProtocol(trans) -} - -func NewTSimpleJSONProtocolFactory() *TSimpleJSONProtocolFactory { - return &TSimpleJSONProtocolFactory{} -} - -var ( - JSON_COMMA []byte - JSON_COLON []byte - JSON_LBRACE []byte - JSON_RBRACE []byte - JSON_LBRACKET []byte - JSON_RBRACKET []byte - JSON_QUOTE byte - JSON_QUOTE_BYTES []byte - JSON_NULL []byte - JSON_TRUE []byte - JSON_FALSE []byte - JSON_INFINITY string - JSON_NEGATIVE_INFINITY string - JSON_NAN string - JSON_INFINITY_BYTES []byte - JSON_NEGATIVE_INFINITY_BYTES []byte - JSON_NAN_BYTES []byte - json_nonbase_map_elem_bytes []byte -) - -func init() { - JSON_COMMA = []byte{','} - JSON_COLON = []byte{':'} - JSON_LBRACE = []byte{'{'} - JSON_RBRACE = []byte{'}'} - JSON_LBRACKET = []byte{'['} - JSON_RBRACKET = []byte{']'} - JSON_QUOTE = '"' - JSON_QUOTE_BYTES = []byte{'"'} - JSON_NULL = []byte{'n', 'u', 'l', 'l'} - JSON_TRUE = []byte{'t', 'r', 'u', 'e'} - JSON_FALSE = []byte{'f', 'a', 'l', 's', 'e'} - JSON_INFINITY = "Infinity" - JSON_NEGATIVE_INFINITY = "-Infinity" - JSON_NAN = "NaN" - JSON_INFINITY_BYTES = []byte{'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NEGATIVE_INFINITY_BYTES = []byte{'-', 'I', 'n', 'f', 'i', 'n', 'i', 't', 'y'} - JSON_NAN_BYTES = []byte{'N', 'a', 'N'} - json_nonbase_map_elem_bytes = []byte{']', ',', '['} -} - -func jsonQuote(s string) string { - b, _ := 
json.Marshal(s) - s1 := string(b) - return s1 -} - -func jsonUnquote(s string) (string, bool) { - s1 := new(string) - err := json.Unmarshal([]byte(s), s1) - return *s1, err == nil -} - -func mismatch(expected, actual string) error { - return fmt.Errorf("Expected '%s' but found '%s' while parsing JSON.", expected, actual) -} - -func (p *TSimpleJSONProtocol) WriteMessageBegin(name string, typeId TMessageType, seqId int32) error { - p.resetContextStack() // THRIFT-3735 - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteString(name); e != nil { - return e - } - if e := p.WriteByte(int8(typeId)); e != nil { - return e - } - if e := p.WriteI32(seqId); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteMessageEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteStructBegin(name string) error { - if e := p.OutputObjectBegin(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteStructEnd() error { - return p.OutputObjectEnd() -} - -func (p *TSimpleJSONProtocol) WriteFieldBegin(name string, typeId TType, id int16) error { - if e := p.WriteString(name); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldEnd() error { - //return p.OutputListEnd() - return nil -} - -func (p *TSimpleJSONProtocol) WriteFieldStop() error { return nil } - -func (p *TSimpleJSONProtocol) WriteMapBegin(keyType TType, valueType TType, size int) error { - if e := p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(int8(keyType)); e != nil { - return e - } - if e := p.WriteByte(int8(valueType)); e != nil { - return e - } - return p.WriteI32(int32(size)) -} - -func (p *TSimpleJSONProtocol) WriteMapEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteListBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteListEnd() error { - return 
p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteSetBegin(elemType TType, size int) error { - return p.OutputElemListBegin(elemType, size) -} - -func (p *TSimpleJSONProtocol) WriteSetEnd() error { - return p.OutputListEnd() -} - -func (p *TSimpleJSONProtocol) WriteBool(b bool) error { - return p.OutputBool(b) -} - -func (p *TSimpleJSONProtocol) WriteByte(b int8) error { - return p.WriteI32(int32(b)) -} - -func (p *TSimpleJSONProtocol) WriteI16(v int16) error { - return p.WriteI32(int32(v)) -} - -func (p *TSimpleJSONProtocol) WriteI32(v int32) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteI64(v int64) error { - return p.OutputI64(int64(v)) -} - -func (p *TSimpleJSONProtocol) WriteDouble(v float64) error { - return p.OutputF64(v) -} - -func (p *TSimpleJSONProtocol) WriteString(v string) error { - return p.OutputString(v) -} - -func (p *TSimpleJSONProtocol) WriteBinary(v []byte) error { - // JSON library only takes in a string, - // not an arbitrary byte array, to ensure bytes are transmitted - // efficiently we must convert this into a valid JSON string - // therefore we use base64 encoding to avoid excessive escaping/quoting - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - writer := base64.NewEncoder(base64.StdEncoding, p.writer) - if _, e := writer.Write(v); e != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - return NewTProtocolException(e) - } - if e := writer.Close(); e != nil { - return NewTProtocolException(e) - } - if _, e := p.write(JSON_QUOTE_BYTES); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -// Reading methods. 
-func (p *TSimpleJSONProtocol) ReadMessageBegin() (name string, typeId TMessageType, seqId int32, err error) { - p.resetContextStack() // THRIFT-3735 - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, typeId, seqId, err - } - if name, err = p.ReadString(); err != nil { - return name, typeId, seqId, err - } - bTypeId, err := p.ReadByte() - typeId = TMessageType(bTypeId) - if err != nil { - return name, typeId, seqId, err - } - if seqId, err = p.ReadI32(); err != nil { - return name, typeId, seqId, err - } - return name, typeId, seqId, nil -} - -func (p *TSimpleJSONProtocol) ReadMessageEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadStructBegin() (name string, err error) { - _, err = p.ParseObjectStart() - return "", err -} - -func (p *TSimpleJSONProtocol) ReadStructEnd() error { - return p.ParseObjectEnd() -} - -func (p *TSimpleJSONProtocol) ReadFieldBegin() (string, TType, int16, error) { - if err := p.ParsePreValue(); err != nil { - return "", STOP, 0, err - } - b, _ := p.reader.Peek(1) - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return "", STOP, 0, nil - case JSON_QUOTE: - p.reader.ReadByte() - name, err := p.ParseStringBody() - // simplejson is not meant to be read back into thrift - // - see http://wiki.apache.org/thrift/ThriftUsageJava - // - use JSON instead - if err != nil { - return name, STOP, 0, err - } - return name, STOP, -1, p.ParsePostValue() - /* - if err = p.ParsePostValue(); err != nil { - return name, STOP, 0, err - } - if isNull, err := p.ParseListBegin(); isNull || err != nil { - return name, STOP, 0, err - } - bType, err := p.ReadByte() - thetype := TType(bType) - if err != nil { - return name, thetype, 0, err - } - id, err := p.ReadI16() - return name, thetype, id, err - */ - } - e := fmt.Errorf("Expected \"}\" or '\"', but found: '%s'", string(b)) - return "", STOP, 0, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return "", STOP, 0, NewTProtocolException(io.EOF) 
-} - -func (p *TSimpleJSONProtocol) ReadFieldEnd() error { - return nil - //return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadMapBegin() (keyType TType, valueType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, VOID, 0, e - } - - // read keyType - bKeyType, e := p.ReadByte() - keyType = TType(bKeyType) - if e != nil { - return keyType, valueType, size, e - } - - // read valueType - bValueType, e := p.ReadByte() - valueType = TType(bValueType) - if e != nil { - return keyType, valueType, size, e - } - - // read size - iSize, err := p.ReadI64() - size = int(iSize) - return keyType, valueType, size, err -} - -func (p *TSimpleJSONProtocol) ReadMapEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadListBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadListEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadSetBegin() (elemType TType, size int, e error) { - return p.ParseElemListBegin() -} - -func (p *TSimpleJSONProtocol) ReadSetEnd() error { - return p.ParseListEnd() -} - -func (p *TSimpleJSONProtocol) ReadBool() (bool, error) { - var value bool - - if err := p.ParsePreValue(); err != nil { - return value, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 { - switch f[0] { - case JSON_TRUE[0]: - b := make([]byte, len(JSON_TRUE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_TRUE) { - value = true - } else { - e := fmt.Errorf("Expected \"true\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_FALSE[0]: - b := make([]byte, len(JSON_FALSE)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_FALSE) { - value = false - } else { - e := fmt.Errorf("Expected \"false\" but 
found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - break - case JSON_NULL[0]: - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return false, NewTProtocolException(err) - } - if string(b) == string(JSON_NULL) { - value = false - } else { - e := fmt.Errorf("Expected \"null\" but found: %s", string(b)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - default: - e := fmt.Errorf("Expected \"true\", \"false\", or \"null\" but found: %s", string(f)) - return value, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - return value, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadByte() (int8, error) { - v, err := p.ReadI64() - return int8(v), err -} - -func (p *TSimpleJSONProtocol) ReadI16() (int16, error) { - v, err := p.ReadI64() - return int16(v), err -} - -func (p *TSimpleJSONProtocol) ReadI32() (int32, error) { - v, err := p.ReadI64() - return int32(v), err -} - -func (p *TSimpleJSONProtocol) ReadI64() (int64, error) { - v, _, err := p.ParseI64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadDouble() (float64, error) { - v, _, err := p.ParseF64() - return v, err -} - -func (p *TSimpleJSONProtocol) ReadString() (string, error) { - var v string - if err := p.ParsePreValue(); err != nil { - return v, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseStringBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return 
v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ReadBinary() ([]byte, error) { - var v []byte - if err := p.ParsePreValue(); err != nil { - return nil, err - } - f, _ := p.reader.Peek(1) - if len(f) > 0 && f[0] == JSON_QUOTE { - p.reader.ReadByte() - value, err := p.ParseBase64EncodedBody() - v = value - if err != nil { - return v, err - } - } else if len(f) > 0 && f[0] == JSON_NULL[0] { - b := make([]byte, len(JSON_NULL)) - _, err := p.reader.Read(b) - if err != nil { - return v, NewTProtocolException(err) - } - if string(b) != string(JSON_NULL) { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(b)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } else { - e := fmt.Errorf("Expected a JSON string, found unquoted data started with %s", string(f)) - return v, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - - return v, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) Flush(ctx context.Context) (err error) { - return NewTProtocolException(p.writer.Flush()) -} - -func (p *TSimpleJSONProtocol) Skip(fieldType TType) (err error) { - return SkipDefaultDepth(p, fieldType) -} - -func (p *TSimpleJSONProtocol) Transport() TTransport { - return p.trans -} - -func (p *TSimpleJSONProtocol) OutputPreValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST, _CONTEXT_IN_OBJECT_NEXT_KEY: - if _, e := p.write(JSON_COMMA); e != nil { - return NewTProtocolException(e) - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if _, e := p.write(JSON_COLON); e != nil { - return NewTProtocolException(e) - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputPostValue() error { - cxt := _ParseContext(p.dumpContext[len(p.dumpContext)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, 
int(_CONTEXT_IN_LIST)) - break - case _CONTEXT_IN_OBJECT_FIRST: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputBool(value bool) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if value { - v = string(JSON_TRUE) - } else { - v = string(JSON_FALSE) - } - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputNull() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_NULL); e != nil { - return NewTProtocolException(e) - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputF64(value float64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - var v string - if math.IsNaN(value) { - v = string(JSON_QUOTE) + JSON_NAN + string(JSON_QUOTE) - } else if math.IsInf(value, 1) { - v = string(JSON_QUOTE) + JSON_INFINITY + string(JSON_QUOTE) - } else if math.IsInf(value, -1) { - v = string(JSON_QUOTE) + JSON_NEGATIVE_INFINITY + string(JSON_QUOTE) - } else { - v = strconv.FormatFloat(value, 'g', -1, 64) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = string(JSON_QUOTE) + v + string(JSON_QUOTE) - default: - } - } - if e := p.OutputStringData(v); e != nil { - return e - } - return 
p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputI64(value int64) error { - if e := p.OutputPreValue(); e != nil { - return e - } - v := strconv.FormatInt(value, 10) - switch _ParseContext(p.dumpContext[len(p.dumpContext)-1]) { - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - v = jsonQuote(v) - default: - } - if e := p.OutputStringData(v); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputString(s string) error { - if e := p.OutputPreValue(); e != nil { - return e - } - if e := p.OutputStringData(jsonQuote(s)); e != nil { - return e - } - return p.OutputPostValue() -} - -func (p *TSimpleJSONProtocol) OutputStringData(s string) error { - _, e := p.write([]byte(s)) - return NewTProtocolException(e) -} - -func (p *TSimpleJSONProtocol) OutputObjectBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_OBJECT_FIRST)) - return nil -} - -func (p *TSimpleJSONProtocol) OutputObjectEnd() error { - if _, e := p.write(JSON_RBRACE); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputListBegin() error { - if e := p.OutputPreValue(); e != nil { - return e - } - if _, e := p.write(JSON_LBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = append(p.dumpContext, int(_CONTEXT_IN_LIST_FIRST)) - return nil -} - -func (p *TSimpleJSONProtocol) OutputListEnd() error { - if _, e := p.write(JSON_RBRACKET); e != nil { - return NewTProtocolException(e) - } - p.dumpContext = p.dumpContext[:len(p.dumpContext)-1] - if e := p.OutputPostValue(); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) OutputElemListBegin(elemType TType, size int) error { - if e := 
p.OutputListBegin(); e != nil { - return e - } - if e := p.WriteByte(int8(elemType)); e != nil { - return e - } - if e := p.WriteI64(int64(size)); e != nil { - return e - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePreValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - b, _ := p.reader.Peek(1) - switch cxt { - case _CONTEXT_IN_LIST: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACKET[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"]\" or \",\" in list context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_KEY: - if len(b) > 0 { - switch b[0] { - case JSON_RBRACE[0]: - return nil - case JSON_COMMA[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \"}\" or \",\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - if len(b) > 0 { - switch b[0] { - case JSON_COLON[0]: - p.reader.ReadByte() - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - return nil - default: - e := fmt.Errorf("Expected \":\" in object context, but found \"%s\"", string(b)) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParsePostValue() error { - if e := p.readNonSignificantWhitespace(); e != nil { - return NewTProtocolException(e) - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - switch cxt { - case _CONTEXT_IN_LIST_FIRST: - 
p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST)) - break - case _CONTEXT_IN_OBJECT_FIRST, _CONTEXT_IN_OBJECT_NEXT_KEY: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_VALUE)) - break - case _CONTEXT_IN_OBJECT_NEXT_VALUE: - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_NEXT_KEY)) - break - } - return nil -} - -func (p *TSimpleJSONProtocol) readNonSignificantWhitespace() error { - for { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return nil - } - switch b[0] { - case ' ', '\r', '\n', '\t': - p.reader.ReadByte() - continue - default: - break - } - break - } - return nil -} - -func (p *TSimpleJSONProtocol) ParseStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - break - } - } - if i&0x01 == 1 { - v, ok := jsonUnquote(string(JSON_QUOTE) + line) - if !ok { - return "", NewTProtocolException(err) - } - return v, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - str := string(JSON_QUOTE) + line + s - v, ok := jsonUnquote(str) - if !ok { - e := fmt.Errorf("Unable to parse as JSON string %s", str) - return "", NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseQuotedStringBody() (string, error) { - line, err := p.reader.ReadString(JSON_QUOTE) - if err != nil { - return "", NewTProtocolException(err) - } - l := len(line) - // count number of escapes to see if we need to keep going - i := 1 - for ; i < l; i++ { - if line[l-i-1] != '\\' { - 
break - } - } - if i&0x01 == 1 { - return line, nil - } - s, err := p.ParseQuotedStringBody() - if err != nil { - return "", NewTProtocolException(err) - } - v := line + s - return v, nil -} - -func (p *TSimpleJSONProtocol) ParseBase64EncodedBody() ([]byte, error) { - line, err := p.reader.ReadBytes(JSON_QUOTE) - if err != nil { - return line, NewTProtocolException(err) - } - line2 := line[0 : len(line)-1] - l := len(line2) - if (l % 4) != 0 { - pad := 4 - (l % 4) - fill := [...]byte{'=', '=', '='} - line2 = append(line2, fill[:pad]...) - l = len(line2) - } - output := make([]byte, base64.StdEncoding.DecodedLen(l)) - n, err := base64.StdEncoding.Decode(output, line2) - return output[0:n], NewTProtocolException(err) -} - -func (p *TSimpleJSONProtocol) ParseI64() (int64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value int64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Int64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseF64() (float64, bool, error) { - if err := p.ParsePreValue(); err != nil { - return 0, false, err - } - var value float64 - var isnull bool - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - isnull = true - } else { - num, err := p.readNumeric() - isnull = (num == nil) - if !isnull { - value = num.Float64() - } - if err != nil { - return value, isnull, err - } - } - return value, isnull, p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseObjectStart() (bool, error) { - if err := p.ParsePreValue(); err != nil { - return false, err - } - var b []byte - b, err := p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) > 0 && b[0] == JSON_LBRACE[0] { - p.reader.ReadByte() - 
p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_OBJECT_FIRST)) - return false, nil - } else if p.safePeekContains(JSON_NULL) { - return true, nil - } - e := fmt.Errorf("Expected '{' or null, but found '%s'", string(b)) - return false, NewTProtocolExceptionWithType(INVALID_DATA, e) -} - -func (p *TSimpleJSONProtocol) ParseObjectEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if (cxt != _CONTEXT_IN_OBJECT_FIRST) && (cxt != _CONTEXT_IN_OBJECT_NEXT_KEY) { - e := fmt.Errorf("Expected to be in the Object Context, but not in Object Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACE[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of object \"}\", but found: \"%s\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', '}': - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) ParseListBegin() (isNull bool, err error) { - if e := p.ParsePreValue(); e != nil { - return false, e - } - var b []byte - b, err = p.reader.Peek(1) - if err != nil { - return false, err - } - if len(b) >= 1 && b[0] == JSON_LBRACKET[0] { - p.parseContextStack = append(p.parseContextStack, int(_CONTEXT_IN_LIST_FIRST)) - p.reader.ReadByte() - isNull = false - } else if p.safePeekContains(JSON_NULL) { - isNull = true - } else { - err = fmt.Errorf("Expected \"null\" or \"[\", received %q", b) - } - return isNull, NewTProtocolExceptionWithType(INVALID_DATA, err) -} - -func (p *TSimpleJSONProtocol) ParseElemListBegin() (elemType TType, size int, e error) { - if isNull, e := p.ParseListBegin(); isNull || e != nil { - return VOID, 0, e - } - bElemType, err := 
p.ReadByte() - elemType = TType(bElemType) - if err != nil { - return elemType, size, err - } - nSize, err2 := p.ReadI64() - size = int(nSize) - return elemType, size, err2 -} - -func (p *TSimpleJSONProtocol) ParseListEnd() error { - if isNull, err := p.readIfNull(); isNull || err != nil { - return err - } - cxt := _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) - if cxt != _CONTEXT_IN_LIST { - e := fmt.Errorf("Expected to be in the List Context, but not in List Context (%d)", cxt) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - } - line, err := p.reader.ReadString(JSON_RBRACKET[0]) - if err != nil { - return NewTProtocolException(err) - } - for _, char := range line { - switch char { - default: - e := fmt.Errorf("Expecting end of list \"]\", but found: \"%v\"", line) - return NewTProtocolExceptionWithType(INVALID_DATA, e) - case ' ', '\n', '\r', '\t', rune(JSON_RBRACKET[0]): - break - } - } - p.parseContextStack = p.parseContextStack[:len(p.parseContextStack)-1] - if _ParseContext(p.parseContextStack[len(p.parseContextStack)-1]) == _CONTEXT_IN_TOPLEVEL { - return nil - } - return p.ParsePostValue() -} - -func (p *TSimpleJSONProtocol) readSingleValue() (interface{}, TType, error) { - e := p.readNonSignificantWhitespace() - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - b, e := p.reader.Peek(1) - if len(b) > 0 { - c := b[0] - switch c { - case JSON_NULL[0]: - buf := make([]byte, len(JSON_NULL)) - _, e := p.reader.Read(buf) - if e != nil { - return nil, VOID, NewTProtocolException(e) - } - if string(JSON_NULL) != string(buf) { - e = mismatch(string(JSON_NULL), string(buf)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return nil, VOID, nil - case JSON_QUOTE: - p.reader.ReadByte() - v, e := p.ParseStringBody() - if e != nil { - return v, UTF8, NewTProtocolException(e) - } - if v == JSON_INFINITY { - return INFINITY, DOUBLE, nil - } else if v == JSON_NEGATIVE_INFINITY { - return NEGATIVE_INFINITY, 
DOUBLE, nil - } else if v == JSON_NAN { - return NAN, DOUBLE, nil - } - return v, UTF8, nil - case JSON_TRUE[0]: - buf := make([]byte, len(JSON_TRUE)) - _, e := p.reader.Read(buf) - if e != nil { - return true, BOOL, NewTProtocolException(e) - } - if string(JSON_TRUE) != string(buf) { - e := mismatch(string(JSON_TRUE), string(buf)) - return true, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return true, BOOL, nil - case JSON_FALSE[0]: - buf := make([]byte, len(JSON_FALSE)) - _, e := p.reader.Read(buf) - if e != nil { - return false, BOOL, NewTProtocolException(e) - } - if string(JSON_FALSE) != string(buf) { - e := mismatch(string(JSON_FALSE), string(buf)) - return false, BOOL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - return false, BOOL, nil - case JSON_LBRACKET[0]: - _, e := p.reader.ReadByte() - return make([]interface{}, 0), LIST, NewTProtocolException(e) - case JSON_LBRACE[0]: - _, e := p.reader.ReadByte() - return make(map[string]interface{}), STRUCT, NewTProtocolException(e) - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'e', 'E', '.', '+', '-', JSON_INFINITY[0], JSON_NAN[0]: - // assume numeric - v, e := p.readNumeric() - return v, DOUBLE, e - default: - e := fmt.Errorf("Expected element in list but found '%s' while parsing JSON.", string(c)) - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - e = fmt.Errorf("Cannot read a single element while parsing JSON.") - return nil, VOID, NewTProtocolExceptionWithType(INVALID_DATA, e) - -} - -func (p *TSimpleJSONProtocol) readIfNull() (bool, error) { - cont := true - for cont { - b, _ := p.reader.Peek(1) - if len(b) < 1 { - return false, nil - } - switch b[0] { - default: - return false, nil - case JSON_NULL[0]: - cont = false - break - case ' ', '\n', '\r', '\t': - p.reader.ReadByte() - break - } - } - if p.safePeekContains(JSON_NULL) { - p.reader.Read(make([]byte, len(JSON_NULL))) - return true, nil - } - return false, nil -} - -func (p 
*TSimpleJSONProtocol) readQuoteIfNext() { - b, _ := p.reader.Peek(1) - if len(b) > 0 && b[0] == JSON_QUOTE { - p.reader.ReadByte() - } -} - -func (p *TSimpleJSONProtocol) readNumeric() (Numeric, error) { - isNull, err := p.readIfNull() - if isNull || err != nil { - return NUMERIC_NULL, err - } - hasDecimalPoint := false - nextCanBeSign := true - hasE := false - MAX_LEN := 40 - buf := bytes.NewBuffer(make([]byte, 0, MAX_LEN)) - continueFor := true - inQuotes := false - for continueFor { - c, err := p.reader.ReadByte() - if err != nil { - if err == io.EOF { - break - } - return NUMERIC_NULL, NewTProtocolException(err) - } - switch c { - case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': - buf.WriteByte(c) - nextCanBeSign = false - case '.': - if hasDecimalPoint { - e := fmt.Errorf("Unable to parse number with multiple decimal points '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if hasE { - e := fmt.Errorf("Unable to parse number with decimal points in the exponent '%s.'", buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasDecimalPoint, nextCanBeSign = true, false - case 'e', 'E': - if hasE { - e := fmt.Errorf("Unable to parse number with multiple exponents '%s%c'", buf.String(), c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - hasE, nextCanBeSign = true, true - case '-', '+': - if !nextCanBeSign { - e := fmt.Errorf("Negative sign within number") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - buf.WriteByte(c) - nextCanBeSign = false - case ' ', 0, '\t', '\n', '\r', JSON_RBRACE[0], JSON_RBRACKET[0], JSON_COMMA[0], JSON_COLON[0]: - p.reader.UnreadByte() - continueFor = false - case JSON_NAN[0]: - if buf.Len() == 0 { - buffer := make([]byte, len(JSON_NAN)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if 
JSON_NAN != string(buffer) { - e := mismatch(JSON_NAN, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NAN, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_INFINITY[0]: - if buf.Len() == 0 || (buf.Len() == 1 && buf.Bytes()[0] == '+') { - buffer := make([]byte, len(JSON_INFINITY)) - buffer[0] = c - _, e := p.reader.Read(buffer[1:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_INFINITY != string(buffer) { - e := mismatch(JSON_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return INFINITY, nil - } else if buf.Len() == 1 && buf.Bytes()[0] == JSON_NEGATIVE_INFINITY[0] { - buffer := make([]byte, len(JSON_NEGATIVE_INFINITY)) - buffer[0] = JSON_NEGATIVE_INFINITY[0] - buffer[1] = c - _, e := p.reader.Read(buffer[2:]) - if e != nil { - return NUMERIC_NULL, NewTProtocolException(e) - } - if JSON_NEGATIVE_INFINITY != string(buffer) { - e := mismatch(JSON_NEGATIVE_INFINITY, string(buffer)) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - if inQuotes { - p.readQuoteIfNext() - } - return NEGATIVE_INFINITY, nil - } else { - e := fmt.Errorf("Unable to parse number starting with character '%c' due to existing buffer %s", c, buf.String()) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - case JSON_QUOTE: - if !inQuotes { - inQuotes = true - } else { - break - } - default: - e := fmt.Errorf("Unable to parse number starting with character '%c'", c) - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - } - if buf.Len() == 0 { - e := fmt.Errorf("Unable to parse number from empty string ''") - return NUMERIC_NULL, NewTProtocolExceptionWithType(INVALID_DATA, e) - } - 
return NewNumericFromJSONString(buf.String(), false), nil -} - -// Safely peeks into the buffer, reading only what is necessary -func (p *TSimpleJSONProtocol) safePeekContains(b []byte) bool { - for i := 0; i < len(b); i++ { - a, _ := p.reader.Peek(i + 1) - if len(a) == 0 || a[i] != b[i] { - return false - } - } - return true -} - -// Reset the context stack to its initial state. -func (p *TSimpleJSONProtocol) resetContextStack() { - p.parseContextStack = []int{int(_CONTEXT_IN_TOPLEVEL)} - p.dumpContext = []int{int(_CONTEXT_IN_TOPLEVEL)} -} - -func (p *TSimpleJSONProtocol) write(b []byte) (int, error) { - n, err := p.writer.Write(b) - if err != nil { - p.writer.Reset(p.trans) // THRIFT-3735 - } - return n, err -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go deleted file mode 100644 index 603580251..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/simple_server.go +++ /dev/null @@ -1,227 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "log" - "runtime/debug" - "sync" - "sync/atomic" -) - -/* - * This is not a typical TSimpleServer as it is not blocked after accept a socket. - * It is more like a TThreadedServer that can handle different connections in different goroutines. - * This will work if golang user implements a conn-pool like thing in client side. - */ -type TSimpleServer struct { - closed int32 - wg sync.WaitGroup - mu sync.Mutex - - processorFactory TProcessorFactory - serverTransport TServerTransport - inputTransportFactory TTransportFactory - outputTransportFactory TTransportFactory - inputProtocolFactory TProtocolFactory - outputProtocolFactory TProtocolFactory -} - -func NewTSimpleServer2(processor TProcessor, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory2(NewTProcessorFactory(processor), serverTransport) -} - -func NewTSimpleServer4(processor TProcessor, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory4(NewTProcessorFactory(processor), - serverTransport, - transportFactory, - protocolFactory, - ) -} - -func NewTSimpleServer6(processor TProcessor, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(NewTProcessorFactory(processor), - serverTransport, - inputTransportFactory, - outputTransportFactory, - inputProtocolFactory, - outputProtocolFactory, - ) -} - -func NewTSimpleServerFactory2(processorFactory TProcessorFactory, serverTransport TServerTransport) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - NewTTransportFactory(), - NewTTransportFactory(), - NewTBinaryProtocolFactoryDefault(), - NewTBinaryProtocolFactoryDefault(), - ) -} - -func 
NewTSimpleServerFactory4(processorFactory TProcessorFactory, serverTransport TServerTransport, transportFactory TTransportFactory, protocolFactory TProtocolFactory) *TSimpleServer { - return NewTSimpleServerFactory6(processorFactory, - serverTransport, - transportFactory, - transportFactory, - protocolFactory, - protocolFactory, - ) -} - -func NewTSimpleServerFactory6(processorFactory TProcessorFactory, serverTransport TServerTransport, inputTransportFactory TTransportFactory, outputTransportFactory TTransportFactory, inputProtocolFactory TProtocolFactory, outputProtocolFactory TProtocolFactory) *TSimpleServer { - return &TSimpleServer{ - processorFactory: processorFactory, - serverTransport: serverTransport, - inputTransportFactory: inputTransportFactory, - outputTransportFactory: outputTransportFactory, - inputProtocolFactory: inputProtocolFactory, - outputProtocolFactory: outputProtocolFactory, - } -} - -func (p *TSimpleServer) ProcessorFactory() TProcessorFactory { - return p.processorFactory -} - -func (p *TSimpleServer) ServerTransport() TServerTransport { - return p.serverTransport -} - -func (p *TSimpleServer) InputTransportFactory() TTransportFactory { - return p.inputTransportFactory -} - -func (p *TSimpleServer) OutputTransportFactory() TTransportFactory { - return p.outputTransportFactory -} - -func (p *TSimpleServer) InputProtocolFactory() TProtocolFactory { - return p.inputProtocolFactory -} - -func (p *TSimpleServer) OutputProtocolFactory() TProtocolFactory { - return p.outputProtocolFactory -} - -func (p *TSimpleServer) Listen() error { - return p.serverTransport.Listen() -} - -func (p *TSimpleServer) innerAccept() (int32, error) { - client, err := p.serverTransport.Accept() - p.mu.Lock() - defer p.mu.Unlock() - closed := atomic.LoadInt32(&p.closed) - if closed != 0 { - return closed, nil - } - if err != nil { - return 0, err - } - if client != nil { - p.wg.Add(1) - go func() { - defer p.wg.Done() - if err := p.processRequests(client); err != nil { 
- log.Println("error processing request:", err) - } - }() - } - return 0, nil -} - -func (p *TSimpleServer) AcceptLoop() error { - for { - closed, err := p.innerAccept() - if err != nil { - return err - } - if closed != 0 { - return nil - } - } -} - -func (p *TSimpleServer) Serve() error { - err := p.Listen() - if err != nil { - return err - } - p.AcceptLoop() - return nil -} - -func (p *TSimpleServer) Stop() error { - p.mu.Lock() - defer p.mu.Unlock() - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - atomic.StoreInt32(&p.closed, 1) - p.serverTransport.Interrupt() - p.wg.Wait() - return nil -} - -func (p *TSimpleServer) processRequests(client TTransport) error { - processor := p.processorFactory.GetProcessor(client) - inputTransport, err := p.inputTransportFactory.GetTransport(client) - if err != nil { - return err - } - outputTransport, err := p.outputTransportFactory.GetTransport(client) - if err != nil { - return err - } - inputProtocol := p.inputProtocolFactory.GetProtocol(inputTransport) - outputProtocol := p.outputProtocolFactory.GetProtocol(outputTransport) - defer func() { - if e := recover(); e != nil { - log.Printf("panic in processor: %s: %s", e, debug.Stack()) - } - }() - - if inputTransport != nil { - defer inputTransport.Close() - } - if outputTransport != nil { - defer outputTransport.Close() - } - for { - if atomic.LoadInt32(&p.closed) != 0 { - return nil - } - - ok, err := processor.Process(defaultCtx, inputProtocol, outputProtocol) - if err, ok := err.(TTransportException); ok && err.TypeId() == END_OF_FILE { - return nil - } else if err != nil { - return err - } - if err, ok := err.(TApplicationException); ok && err.TypeId() == UNKNOWN_METHOD { - continue - } - if !ok { - break - } - } - return nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go deleted file mode 100644 index 885427965..000000000 --- 
a/vendor/git.apache.org/thrift.git/lib/go/thrift/socket.go +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "net" - "time" -) - -type TSocket struct { - conn net.Conn - addr net.Addr - timeout time.Duration -} - -// NewTSocket creates a net.Conn-backed TTransport, given a host and port -// -// Example: -// trans, err := thrift.NewTSocket("localhost:9090") -func NewTSocket(hostPort string) (*TSocket, error) { - return NewTSocketTimeout(hostPort, 0) -} - -// NewTSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a timeout as a time.Duration -func NewTSocketTimeout(hostPort string, timeout time.Duration) (*TSocket, error) { - //conn, err := net.DialTimeout(network, address, timeout) - addr, err := net.ResolveTCPAddr("tcp", hostPort) - if err != nil { - return nil, err - } - return NewTSocketFromAddrTimeout(addr, timeout), nil -} - -// Creates a TSocket from a net.Addr -func NewTSocketFromAddrTimeout(addr net.Addr, timeout time.Duration) *TSocket { - return &TSocket{addr: addr, timeout: timeout} -} - -// Creates a TSocket from an existing net.Conn -func NewTSocketFromConnTimeout(conn net.Conn, 
timeout time.Duration) *TSocket { - return &TSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout} -} - -// Sets the socket timeout -func (p *TSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. -func (p *TSocket) Open() error { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - var err error - if p.conn, err = net.DialTimeout(p.addr.Network(), p.addr.String(), p.timeout); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. -func (p *TSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -//Returns the remote address of the socket. 
-func (p *TSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go deleted file mode 100644 index 907afca32..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_server_socket.go +++ /dev/null @@ -1,112 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package thrift - -import ( - "crypto/tls" - "net" - "time" -) - -type TSSLServerSocket struct { - listener net.Listener - addr net.Addr - clientTimeout time.Duration - interrupted bool - cfg *tls.Config -} - -func NewTSSLServerSocket(listenAddr string, cfg *tls.Config) (*TSSLServerSocket, error) { - return NewTSSLServerSocketTimeout(listenAddr, cfg, 0) -} - -func NewTSSLServerSocketTimeout(listenAddr string, cfg *tls.Config, clientTimeout time.Duration) (*TSSLServerSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - addr, err := net.ResolveTCPAddr("tcp", listenAddr) - if err != nil { - return nil, err - } - return &TSSLServerSocket{addr: addr, clientTimeout: clientTimeout, cfg: cfg}, nil -} - -func (p *TSSLServerSocket) Listen() error { - if p.IsListening() { - return nil - } - l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg) - if err != nil { - return err - } - p.listener = l - return nil -} - -func (p *TSSLServerSocket) Accept() (TTransport, error) { - if p.interrupted { - return nil, errTransportInterrupted - } - if p.listener == nil { - return nil, NewTTransportException(NOT_OPEN, "No underlying server socket") - } - conn, err := p.listener.Accept() - if err != nil { - return nil, NewTTransportExceptionFromError(err) - } - return NewTSSLSocketFromConnTimeout(conn, p.cfg, p.clientTimeout), nil -} - -// Checks whether the socket is listening. -func (p *TSSLServerSocket) IsListening() bool { - return p.listener != nil -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSSLServerSocket) Open() error { - if p.IsListening() { - return NewTTransportException(ALREADY_OPEN, "Server socket already open") - } - if l, err := tls.Listen(p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return err - } else { - p.listener = l - } - return nil -} - -func (p *TSSLServerSocket) Addr() net.Addr { - return p.addr -} - -func (p *TSSLServerSocket) Close() error { - defer func() { - p.listener = nil - }() - if p.IsListening() { - return p.listener.Close() - } - return nil -} - -func (p *TSSLServerSocket) Interrupt() error { - p.interrupted = true - return nil -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go deleted file mode 100644 index ba6337726..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/ssl_socket.go +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "crypto/tls" - "net" - "time" -) - -type TSSLSocket struct { - conn net.Conn - // hostPort contains host:port (e.g. "asdf.com:12345"). The field is - // only valid if addr is nil. 
- hostPort string - // addr is nil when hostPort is not "", and is only used when the - // TSSLSocket is constructed from a net.Addr. - addr net.Addr - timeout time.Duration - cfg *tls.Config -} - -// NewTSSLSocket creates a net.Conn-backed TTransport, given a host and port and tls Configuration -// -// Example: -// trans, err := thrift.NewTSSLSocket("localhost:9090", nil) -func NewTSSLSocket(hostPort string, cfg *tls.Config) (*TSSLSocket, error) { - return NewTSSLSocketTimeout(hostPort, cfg, 0) -} - -// NewTSSLSocketTimeout creates a net.Conn-backed TTransport, given a host and port -// it also accepts a tls Configuration and a timeout as a time.Duration -func NewTSSLSocketTimeout(hostPort string, cfg *tls.Config, timeout time.Duration) (*TSSLSocket, error) { - if cfg.MinVersion == 0 { - cfg.MinVersion = tls.VersionTLS10 - } - return &TSSLSocket{hostPort: hostPort, timeout: timeout, cfg: cfg}, nil -} - -// Creates a TSSLSocket from a net.Addr -func NewTSSLSocketFromAddrTimeout(addr net.Addr, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{addr: addr, timeout: timeout, cfg: cfg} -} - -// Creates a TSSLSocket from an existing net.Conn -func NewTSSLSocketFromConnTimeout(conn net.Conn, cfg *tls.Config, timeout time.Duration) *TSSLSocket { - return &TSSLSocket{conn: conn, addr: conn.RemoteAddr(), timeout: timeout, cfg: cfg} -} - -// Sets the socket timeout -func (p *TSSLSocket) SetTimeout(timeout time.Duration) error { - p.timeout = timeout - return nil -} - -func (p *TSSLSocket) pushDeadline(read, write bool) { - var t time.Time - if p.timeout > 0 { - t = time.Now().Add(time.Duration(p.timeout)) - } - if read && write { - p.conn.SetDeadline(t) - } else if read { - p.conn.SetReadDeadline(t) - } else if write { - p.conn.SetWriteDeadline(t) - } -} - -// Connects the socket, creating a new socket object if necessary. 
-func (p *TSSLSocket) Open() error { - var err error - // If we have a hostname, we need to pass the hostname to tls.Dial for - // certificate hostname checks. - if p.hostPort != "" { - if p.conn, err = tls.DialWithDialer(&net.Dialer{ - Timeout: p.timeout}, "tcp", p.hostPort, p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } else { - if p.IsOpen() { - return NewTTransportException(ALREADY_OPEN, "Socket already connected.") - } - if p.addr == nil { - return NewTTransportException(NOT_OPEN, "Cannot open nil address.") - } - if len(p.addr.Network()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad network name.") - } - if len(p.addr.String()) == 0 { - return NewTTransportException(NOT_OPEN, "Cannot open bad address.") - } - if p.conn, err = tls.DialWithDialer(&net.Dialer{ - Timeout: p.timeout}, p.addr.Network(), p.addr.String(), p.cfg); err != nil { - return NewTTransportException(NOT_OPEN, err.Error()) - } - } - return nil -} - -// Retrieve the underlying net.Conn -func (p *TSSLSocket) Conn() net.Conn { - return p.conn -} - -// Returns true if the connection is open -func (p *TSSLSocket) IsOpen() bool { - if p.conn == nil { - return false - } - return true -} - -// Closes the socket. 
-func (p *TSSLSocket) Close() error { - // Close the socket - if p.conn != nil { - err := p.conn.Close() - if err != nil { - return err - } - p.conn = nil - } - return nil -} - -func (p *TSSLSocket) Read(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(true, false) - n, err := p.conn.Read(buf) - return n, NewTTransportExceptionFromError(err) -} - -func (p *TSSLSocket) Write(buf []byte) (int, error) { - if !p.IsOpen() { - return 0, NewTTransportException(NOT_OPEN, "Connection not open") - } - p.pushDeadline(false, true) - return p.conn.Write(buf) -} - -func (p *TSSLSocket) Flush(ctx context.Context) error { - return nil -} - -func (p *TSSLSocket) Interrupt() error { - if !p.IsOpen() { - return nil - } - return p.conn.Close() -} - -func (p *TSSLSocket) RemainingBytes() (num_bytes uint64) { - const maxSize = ^uint64(0) - return maxSize // the thruth is, we just don't know unless framed is used -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go deleted file mode 100644 index ba2738a8d..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport.go +++ /dev/null @@ -1,70 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "context" - "errors" - "io" -) - -var errTransportInterrupted = errors.New("Transport Interrupted") - -type Flusher interface { - Flush() (err error) -} - -type ContextFlusher interface { - Flush(ctx context.Context) (err error) -} - -type ReadSizeProvider interface { - RemainingBytes() (num_bytes uint64) -} - -// Encapsulates the I/O layer -type TTransport interface { - io.ReadWriteCloser - ContextFlusher - ReadSizeProvider - - // Opens the transport for communication - Open() error - - // Returns true if the transport is open - IsOpen() bool -} - -type stringWriter interface { - WriteString(s string) (n int, err error) -} - -// This is "enchanced" transport with extra capabilities. You need to use one of these -// to construct protocol. -// Notably, TSocket does not implement this interface, and it is always a mistake to use -// TSocket directly in protocol. -type TRichTransport interface { - io.ReadWriter - io.ByteReader - io.ByteWriter - stringWriter - ContextFlusher - ReadSizeProvider -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_exception.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_exception.go deleted file mode 100644 index 9505b4461..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_exception.go +++ /dev/null @@ -1,90 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -import ( - "errors" - "io" -) - -type timeoutable interface { - Timeout() bool -} - -// Thrift Transport exception -type TTransportException interface { - TException - TypeId() int - Err() error -} - -const ( - UNKNOWN_TRANSPORT_EXCEPTION = 0 - NOT_OPEN = 1 - ALREADY_OPEN = 2 - TIMED_OUT = 3 - END_OF_FILE = 4 -) - -type tTransportException struct { - typeId int - err error -} - -func (p *tTransportException) TypeId() int { - return p.typeId -} - -func (p *tTransportException) Error() string { - return p.err.Error() -} - -func (p *tTransportException) Err() error { - return p.err -} - -func NewTTransportException(t int, e string) TTransportException { - return &tTransportException{typeId: t, err: errors.New(e)} -} - -func NewTTransportExceptionFromError(e error) TTransportException { - if e == nil { - return nil - } - - if t, ok := e.(TTransportException); ok { - return t - } - - switch v := e.(type) { - case TTransportException: - return v - case timeoutable: - if v.Timeout() { - return &tTransportException{typeId: TIMED_OUT, err: e} - } - } - - if e == io.EOF { - return &tTransportException{typeId: END_OF_FILE, err: e} - } - - return &tTransportException{typeId: UNKNOWN_TRANSPORT_EXCEPTION, err: e} -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go deleted file mode 100644 index c80580794..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/transport_factory.go +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Licensed 
to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Factory class used to create wrapped instance of Transports. -// This is used primarily in servers, which get Transports from -// a ServerTransport and then may want to mutate them (i.e. create -// a BufferedTransport from the underlying base transport) -type TTransportFactory interface { - GetTransport(trans TTransport) (TTransport, error) -} - -type tTransportFactory struct{} - -// Return a wrapped instance of the base Transport. -func (p *tTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - return trans, nil -} - -func NewTTransportFactory() TTransportFactory { - return &tTransportFactory{} -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go deleted file mode 100644 index 4292ffcad..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/type.go +++ /dev/null @@ -1,69 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package thrift - -// Type constants in the Thrift protocol -type TType byte - -const ( - STOP = 0 - VOID = 1 - BOOL = 2 - BYTE = 3 - I08 = 3 - DOUBLE = 4 - I16 = 6 - I32 = 8 - I64 = 10 - STRING = 11 - UTF7 = 11 - STRUCT = 12 - MAP = 13 - SET = 14 - LIST = 15 - UTF8 = 16 - UTF16 = 17 - //BINARY = 18 wrong and unusued -) - -var typeNames = map[int]string{ - STOP: "STOP", - VOID: "VOID", - BOOL: "BOOL", - BYTE: "BYTE", - DOUBLE: "DOUBLE", - I16: "I16", - I32: "I32", - I64: "I64", - STRING: "STRING", - STRUCT: "STRUCT", - MAP: "MAP", - SET: "SET", - LIST: "LIST", - UTF8: "UTF8", - UTF16: "UTF16", -} - -func (p TType) String() string { - if s, ok := typeNames[int(p)]; ok { - return s - } - return "Unknown" -} diff --git a/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go b/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go deleted file mode 100644 index f3d42673a..000000000 --- a/vendor/git.apache.org/thrift.git/lib/go/thrift/zlib_transport.go +++ /dev/null @@ -1,132 +0,0 @@ -/* -* Licensed to the Apache Software Foundation (ASF) under one -* or more contributor license agreements. See the NOTICE file -* distributed with this work for additional information -* regarding copyright ownership. The ASF licenses this file -* to you under the Apache License, Version 2.0 (the -* "License"); you may not use this file except in compliance -* with the License. 
You may obtain a copy of the License at -* -* http://www.apache.org/licenses/LICENSE-2.0 -* -* Unless required by applicable law or agreed to in writing, -* software distributed under the License is distributed on an -* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY -* KIND, either express or implied. See the License for the -* specific language governing permissions and limitations -* under the License. - */ - -package thrift - -import ( - "compress/zlib" - "context" - "io" - "log" -) - -// TZlibTransportFactory is a factory for TZlibTransport instances -type TZlibTransportFactory struct { - level int - factory TTransportFactory -} - -// TZlibTransport is a TTransport implementation that makes use of zlib compression. -type TZlibTransport struct { - reader io.ReadCloser - transport TTransport - writer *zlib.Writer -} - -// GetTransport constructs a new instance of NewTZlibTransport -func (p *TZlibTransportFactory) GetTransport(trans TTransport) (TTransport, error) { - if p.factory != nil { - // wrap other factory - var err error - trans, err = p.factory.GetTransport(trans) - if err != nil { - return nil, err - } - } - return NewTZlibTransport(trans, p.level) -} - -// NewTZlibTransportFactory constructs a new instance of NewTZlibTransportFactory -func NewTZlibTransportFactory(level int) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: nil} -} - -// NewTZlibTransportFactory constructs a new instance of TZlibTransportFactory -// as a wrapper over existing transport factory -func NewTZlibTransportFactoryWithFactory(level int, factory TTransportFactory) *TZlibTransportFactory { - return &TZlibTransportFactory{level: level, factory: factory} -} - -// NewTZlibTransport constructs a new instance of TZlibTransport -func NewTZlibTransport(trans TTransport, level int) (*TZlibTransport, error) { - w, err := zlib.NewWriterLevel(trans, level) - if err != nil { - log.Println(err) - return nil, err - } - - return &TZlibTransport{ - writer: 
w, - transport: trans, - }, nil -} - -// Close closes the reader and writer (flushing any unwritten data) and closes -// the underlying transport. -func (z *TZlibTransport) Close() error { - if z.reader != nil { - if err := z.reader.Close(); err != nil { - return err - } - } - if err := z.writer.Close(); err != nil { - return err - } - return z.transport.Close() -} - -// Flush flushes the writer and its underlying transport. -func (z *TZlibTransport) Flush(ctx context.Context) error { - if err := z.writer.Flush(); err != nil { - return err - } - return z.transport.Flush(ctx) -} - -// IsOpen returns true if the transport is open -func (z *TZlibTransport) IsOpen() bool { - return z.transport.IsOpen() -} - -// Open opens the transport for communication -func (z *TZlibTransport) Open() error { - return z.transport.Open() -} - -func (z *TZlibTransport) Read(p []byte) (int, error) { - if z.reader == nil { - r, err := zlib.NewReader(z.transport) - if err != nil { - return 0, NewTTransportExceptionFromError(err) - } - z.reader = r - } - - return z.reader.Read(p) -} - -// RemainingBytes returns the size in bytes of the data that is still to be -// read. -func (z *TZlibTransport) RemainingBytes() uint64 { - return z.transport.RemainingBytes() -} - -func (z *TZlibTransport) Write(p []byte) (int, error) { - return z.writer.Write(p) -} diff --git a/vendor/git.apache.org/thrift.git/lib/hs/LICENSE b/vendor/git.apache.org/thrift.git/lib/hs/LICENSE deleted file mode 100644 index d64569567..000000000 --- a/vendor/git.apache.org/thrift.git/lib/hs/LICENSE +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE b/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE deleted file mode 100644 index 3b6d7d74c..000000000 --- a/vendor/git.apache.org/thrift.git/tutorial/hs/LICENSE +++ /dev/null @@ -1,239 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. 
- - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. - --------------------------------------------------- -SOFTWARE DISTRIBUTED WITH THRIFT: - -The Apache Thrift software includes a number of subcomponents with -separate copyright notices and license terms. Your use of the source -code for the these subcomponents is subject to the terms and -conditions of the following licenses. - --------------------------------------------------- -Portions of the following files are licensed under the MIT License: - - lib/erl/src/Makefile.am - -Please see doc/otp-base-license.txt for the full terms of this license. 
- --------------------------------------------------- -For the aclocal/ax_boost_base.m4 and contrib/fb303/aclocal/ax_boost_base.m4 components: - -# Copyright (c) 2007 Thomas Porschberg -# -# Copying and distribution of this file, with or without -# modification, are permitted in any medium without royalty provided -# the copyright notice and this notice are preserved. - --------------------------------------------------- -For the lib/nodejs/lib/thrift/json_parse.js: - -/* - json_parse.js - 2015-05-02 - Public Domain. - NO WARRANTY EXPRESSED OR IMPLIED. USE AT YOUR OWN RISK. - -*/ -(By Douglas Crockford ) --------------------------------------------------- From 604812a238915a34d8783eb79cedeabd9aca0cc4 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Mon, 18 Mar 2019 14:33:39 -0700 Subject: [PATCH 138/212] Exemplar: revert wrong implementation. (#1067) * Exemplar: move to metricdata. * Remove AttachmentExtractor. * More cleanup. * Add a TODO for next PR. --- exemplar/exemplar.go | 79 ------------------ {trace => metric/metricdata}/exemplar.go | 36 +++----- metric/metricdata/point.go | 4 +- stats/record.go | 4 +- stats/view/aggregation_data.go | 67 ++++++--------- stats/view/aggregation_data_test.go | 40 ++------- stats/view/collector.go | 6 +- stats/view/view.go | 6 +- stats/view/view_test.go | 33 +++----- stats/view/worker_commands.go | 9 +- tag/context.go | 24 ------ tag/context_test.go | 44 ---------- trace/exemplar_test.go | 100 ----------------------- 13 files changed, 65 insertions(+), 387 deletions(-) delete mode 100644 exemplar/exemplar.go rename {trace => metric/metricdata}/exemplar.go (51%) delete mode 100644 tag/context_test.go delete mode 100644 trace/exemplar_test.go diff --git a/exemplar/exemplar.go b/exemplar/exemplar.go deleted file mode 100644 index acc225af9..000000000 --- a/exemplar/exemplar.go +++ /dev/null @@ -1,79 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not 
use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package exemplar implements support for exemplars. Exemplars are additional -// data associated with each measurement. -// -// Their purpose it to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -package exemplar - -import ( - "context" - "time" -) - -// Exemplars keys. -const ( - KeyTraceID = "trace_id" - KeySpanID = "span_id" - KeyPrefixTag = "tag:" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -// The map should only be mutated from AttachmentExtractor functions. -type Attachments map[string]string - -// AttachmentExtractor is a function capable of extracting exemplar attachments -// from the context used to record measurements. -// The map passed to the function should be mutated and returned. It will -// initially be nil: the first AttachmentExtractor that would like to add keys to the -// map is responsible for initializing it. -type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments - -var extractors []AttachmentExtractor - -// RegisterAttachmentExtractor registers the given extractor associated with the exemplar -// type name. 
-// -// Extractors will be used to attempt to extract exemplars from the context -// associated with each recorded measurement. -// -// Packages that support exemplars should register their extractor functions on -// initialization. -// -// RegisterAttachmentExtractor should not be called after any measurements have -// been recorded. -func RegisterAttachmentExtractor(e AttachmentExtractor) { - extractors = append(extractors, e) -} - -// AttachmentsFromContext extracts exemplars from the given context. -// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an -// unspecified order to add attachments to the exemplar. -func AttachmentsFromContext(ctx context.Context) Attachments { - var a Attachments - for _, extractor := range extractors { - a = extractor(ctx, a) - } - return a -} diff --git a/trace/exemplar.go b/metric/metricdata/exemplar.go similarity index 51% rename from trace/exemplar.go rename to metric/metricdata/exemplar.go index 416d80590..f51e33f73 100644 --- a/trace/exemplar.go +++ b/metric/metricdata/exemplar.go @@ -12,32 +12,22 @@ // See the License for the specific language governing permissions and // limitations under the License. -package trace +package metricdata import ( - "context" - "encoding/hex" - - "go.opencensus.io/exemplar" + "time" ) -func init() { - exemplar.RegisterAttachmentExtractor(attachSpanContext) +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Their purpose is to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. 
+type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) } -func attachSpanContext(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - span := FromContext(ctx) - if span == nil { - return a - } - sc := span.SpanContext() - if !sc.IsSampled() { - return a - } - if a == nil { - a = make(exemplar.Attachments) - } - a[exemplar.KeyTraceID] = hex.EncodeToString(sc.TraceID[:]) - a[exemplar.KeySpanID] = hex.EncodeToString(sc.SpanID[:]) - return a -} +// Attachments is a map of extra values associated with a recorded data point. +type Attachments map[string]string diff --git a/metric/metricdata/point.go b/metric/metricdata/point.go index 2c2a87156..7fe057b19 100644 --- a/metric/metricdata/point.go +++ b/metric/metricdata/point.go @@ -16,8 +16,6 @@ package metricdata import ( "time" - - "go.opencensus.io/exemplar" ) // Point is a single data point of a time series. @@ -144,7 +142,7 @@ type Bucket struct { // bucket_bounds. Count int64 // Exemplar associated with this bucket (if any). - Exemplar *exemplar.Exemplar + Exemplar *Exemplar } // Summary is a representation of percentiles. diff --git a/stats/record.go b/stats/record.go index 86f491e22..a0f0ec4be 100644 --- a/stats/record.go +++ b/stats/record.go @@ -18,7 +18,6 @@ package stats import ( "context" - "go.opencensus.io/exemplar" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -51,7 +50,8 @@ func Record(ctx context.Context, ms ...Measurement) { if !record { return } - recorder(tag.FromContext(ctx), ms, exemplar.AttachmentsFromContext(ctx)) + // TODO(songy23): fix attachments. + recorder(tag.FromContext(ctx), ms, map[string]string{}) } // RecordWithTags records one or multiple measurements at once. 
diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go index 960b94601..bf8015f3a 100644 --- a/stats/view/aggregation_data.go +++ b/stats/view/aggregation_data.go @@ -18,7 +18,7 @@ package view import ( "math" - "go.opencensus.io/exemplar" + "go.opencensus.io/metric/metricdata" ) // AggregationData represents an aggregated value from a collection. @@ -26,7 +26,7 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(e *exemplar.Exemplar) + addSample(v float64) clone() AggregationData equal(other AggregationData) bool } @@ -43,7 +43,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(_ *exemplar.Exemplar) { +func (a *CountData) addSample(_ float64) { a.Value = a.Value + 1 } @@ -70,8 +70,8 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(e *exemplar.Exemplar) { - a.Value += e.Value +func (a *SumData) addSample(v float64) { + a.Value += v } func (a *SumData) clone() AggregationData { @@ -101,8 +101,8 @@ type DistributionData struct { SumOfSquaredDev float64 // sum of the squared deviation from the mean CountPerBucket []int64 // number of occurrences per bucket // ExemplarsPerBucket is slice the same length as CountPerBucket containing - // an exemplar for the associated bucket, or nil. - ExemplarsPerBucket []*exemplar.Exemplar + // an metricdata for the associated bucket, or nil. 
+ ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values } @@ -110,7 +110,7 @@ func newDistributionData(bounds []float64) *DistributionData { bucketCount := len(bounds) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), - ExemplarsPerBucket: make([]*exemplar.Exemplar, bucketCount), + ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), bounds: bounds, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, @@ -129,64 +129,45 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } -func (a *DistributionData) addSample(e *exemplar.Exemplar) { - f := e.Value - if f < a.Min { - a.Min = f +// TODO(songy23): support exemplar attachments. +func (a *DistributionData) addSample(v float64) { + if v < a.Min { + a.Min = v } - if f > a.Max { - a.Max = f + if v > a.Max { + a.Max = v } a.Count++ - a.addToBucket(e) + a.addToBucket(v) if a.Count == 1 { - a.Mean = f + a.Mean = v return } oldMean := a.Mean - a.Mean = a.Mean + (f-a.Mean)/float64(a.Count) - a.SumOfSquaredDev = a.SumOfSquaredDev + (f-oldMean)*(f-a.Mean) + a.Mean = a.Mean + (v-a.Mean)/float64(a.Count) + a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } -func (a *DistributionData) addToBucket(e *exemplar.Exemplar) { +func (a *DistributionData) addToBucket(v float64) { var count *int64 - var ex **exemplar.Exemplar for i, b := range a.bounds { - if e.Value < b { + if v < b { count = &a.CountPerBucket[i] - ex = &a.ExemplarsPerBucket[i] break } } - if count == nil { + if count == nil { // Last bucket. 
count = &a.CountPerBucket[len(a.bounds)] - ex = &a.ExemplarsPerBucket[len(a.bounds)] } *count++ - *ex = maybeRetainExemplar(*ex, e) -} - -func maybeRetainExemplar(old, cur *exemplar.Exemplar) *exemplar.Exemplar { - if old == nil { - return cur - } - - // Heuristic to pick the "better" exemplar: first keep the one with a - // sampled trace attachment, if neither have a trace attachment, pick the - // one with more attachments. - _, haveTraceID := cur.Attachments[exemplar.KeyTraceID] - if haveTraceID || len(cur.Attachments) >= len(old.Attachments) { - return cur - } - return old } func (a *DistributionData) clone() AggregationData { c := *a c.CountPerBucket = append([]int64(nil), a.CountPerBucket...) - c.ExemplarsPerBucket = append([]*exemplar.Exemplar(nil), a.ExemplarsPerBucket...) + c.ExemplarsPerBucket = append([]*metricdata.Exemplar(nil), a.ExemplarsPerBucket...) return &c } @@ -218,8 +199,8 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(e *exemplar.Exemplar) { - l.Value = e.Value +func (l *LastValueData) addSample(v float64) { + l.Value = v } func (l *LastValueData) clone() AggregationData { diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go index dc209cbc5..caa624caa 100644 --- a/stats/view/aggregation_data_test.go +++ b/stats/view/aggregation_data_test.go @@ -18,11 +18,10 @@ package view import ( "reflect" "testing" - "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" - "go.opencensus.io/exemplar" + "go.opencensus.io/metric/metricdata" ) func TestDataClone(t *testing.T) { @@ -67,21 +66,12 @@ func TestDataClone(t *testing.T) { func TestDistributionData_addSample(t *testing.T) { dd := newDistributionData([]float64{1, 2}) - t1, _ := time.Parse("Mon Jan 2 15:04:05 -0700 MST 2006", "Mon Jan 2 15:04:05 -0700 MST 2006") - e1 := &exemplar.Exemplar{ - Attachments: exemplar.Attachments{ - "tag:X": "Y", - "tag:A": "B", - }, - Timestamp: t1, - Value: 0.5, 
- } - dd.addSample(e1) + dd.addSample(0.5) want := &DistributionData{ Count: 1, CountPerBucket: []int64{1, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{e1, nil, nil}, + ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, Max: 0.5, Min: 0.5, Mean: 0.5, @@ -91,21 +81,13 @@ func TestDistributionData_addSample(t *testing.T) { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } - t2 := t1.Add(time.Microsecond) - e2 := &exemplar.Exemplar{ - Attachments: exemplar.Attachments{ - "tag:X": "Y", - }, - Timestamp: t2, - Value: 0.7, - } - dd.addSample(e2) + dd.addSample(0.7) // Previous exemplar should be preserved, since it has more annotations. want = &DistributionData{ Count: 2, CountPerBucket: []int64{2, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{e1, nil, nil}, + ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, Max: 0.7, Min: 0.5, Mean: 0.6, @@ -115,21 +97,13 @@ func TestDistributionData_addSample(t *testing.T) { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } - t3 := t2.Add(time.Microsecond) - e3 := &exemplar.Exemplar{ - Attachments: exemplar.Attachments{ - exemplar.KeyTraceID: "abcd", - }, - Timestamp: t3, - Value: 0.2, - } - dd.addSample(e3) + dd.addSample(0.2) // Exemplar should be replaced since it has a trace_id. 
want = &DistributionData{ Count: 3, CountPerBucket: []int64{3, 0, 0}, - ExemplarsPerBucket: []*exemplar.Exemplar{e3, nil, nil}, + ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, Max: 0.7, Min: 0.2, Mean: 0.4666666666666667, diff --git a/stats/view/collector.go b/stats/view/collector.go index 32415d485..250395db2 100644 --- a/stats/view/collector.go +++ b/stats/view/collector.go @@ -18,8 +18,6 @@ package view import ( "sort" - "go.opencensus.io/exemplar" - "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" ) @@ -33,13 +31,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, e *exemplar.Exemplar) { +func (c *collector) addSample(s string, v float64) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(e) + aggregator.addSample(v) } // collectRows returns a snapshot of the collected Row values. diff --git a/stats/view/view.go b/stats/view/view.go index 7372f999f..633d3465a 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -24,8 +24,6 @@ import ( "sync/atomic" "time" - "go.opencensus.io/exemplar" - "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -152,12 +150,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, e *exemplar.Exemplar) { +func (v *viewInternal) addSample(m *tag.Map, val float64) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, e) + v.collector.addSample(sig, val) } // A Data is a set of rows about usage of the single measure associated diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 550110862..aef23521a 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -21,7 +21,7 @@ import ( "github.com/google/go-cmp/cmp" - "go.opencensus.io/exemplar" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" 
"go.opencensus.io/tag" @@ -69,7 +69,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, }, @@ -84,13 +84,13 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, }, @@ -108,25 +108,25 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1"}}, &DistributionData{ - Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 other"}}, &DistributionData{ - Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 1, Max: 
1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k2, Value: "v2"}}, &DistributionData{ - Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1"}, {Key: k2, Value: "v2"}}, &DistributionData{ - Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 5, Max: 5, Mean: 5, CountPerBucket: []int64{0, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, }, @@ -146,19 +146,19 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}}, &DistributionData{ - Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 2, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 8, CountPerBucket: []int64{1, 1}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is another very long value key"}}, &DistributionData{ - Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 1, Min: 1, Max: 1, Mean: 1, CountPerBucket: []int64{1, 0}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, { []tag.Tag{{Key: k1, Value: "v1 is a very long value key"}, {Key: k2, Value: "v2 is a very long value key"}}, &DistributionData{ - Count: 4, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 2.66666666666667 * 3, CountPerBucket: []int64{1, 3}, bounds: []float64{2}, 
ExemplarsPerBucket: []*exemplar.Exemplar{nil, nil}, + Count: 4, Min: 1, Max: 5, Mean: 3, SumOfSquaredDev: 2.66666666666667 * 3, CountPerBucket: []int64{1, 3}, bounds: []float64{2}, ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil}, }, }, }, @@ -177,11 +177,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tc.label, err) } - e := &exemplar.Exemplar{ - Value: r.f, - Attachments: exemplar.AttachmentsFromContext(ctx), - } - view.addSample(tag.FromContext(ctx), e) + view.addSample(tag.FromContext(ctx), r.f) } gotRows := view.collectedRows() @@ -297,10 +293,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tt.label, err) } - e := &exemplar.Exemplar{ - Value: r.f, - } - view.addSample(tag.FromContext(ctx), e) + view.addSample(tag.FromContext(ctx), r.f) } gotRows := view.collectedRows() diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index f71ec1eb0..33f23168b 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -21,8 +21,6 @@ import ( "strings" "time" - "go.opencensus.io/exemplar" - "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -161,12 +159,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - e := &exemplar.Exemplar{ - Value: m.Value(), - Timestamp: cmd.t, - Attachments: cmd.attachments, - } - v.addSample(cmd.tm, e) + v.addSample(cmd.tm, m.Value()) } } } diff --git a/tag/context.go b/tag/context.go index dcc13f498..b27d1b26b 100644 --- a/tag/context.go +++ b/tag/context.go @@ -17,8 +17,6 @@ package tag import ( "context" - - "go.opencensus.io/exemplar" ) // FromContext returns the tag map stored in the context. 
@@ -43,25 +41,3 @@ func NewContext(ctx context.Context, m *Map) context.Context { type ctxKey struct{} var mapCtxKey = ctxKey{} - -func init() { - exemplar.RegisterAttachmentExtractor(extractTagsAttachments) -} - -func extractTagsAttachments(ctx context.Context, a exemplar.Attachments) exemplar.Attachments { - m := FromContext(ctx) - if m == nil { - return a - } - if len(m.m) == 0 { - return a - } - if a == nil { - a = make(map[string]string) - } - - for k, v := range m.m { - a[exemplar.KeyPrefixTag+k.Name()] = v - } - return a -} diff --git a/tag/context_test.go b/tag/context_test.go deleted file mode 100644 index e85b1c40c..000000000 --- a/tag/context_test.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. -// - -package tag - -import ( - "context" - "testing" -) - -func TestExtractTagsAttachment(t *testing.T) { - // We can't depend on the stats of view package without creating a - // dependency cycle. 
- - var m map[string]string - ctx := context.Background() - - res := extractTagsAttachments(ctx, m) - if res != nil { - t.Fatalf("res = %v; want nil", res) - } - - k, _ := NewKey("test") - ctx, _ = New(ctx, Insert(k, "test123")) - res = extractTagsAttachments(ctx, m) - if res == nil { - t.Fatal("res = nil") - } - if got, want := res["tag:test"], "test123"; got != want { - t.Fatalf("res[Tags:test] = %v; want %v", got, want) - } -} diff --git a/trace/exemplar_test.go b/trace/exemplar_test.go deleted file mode 100644 index 5d6f62d5c..000000000 --- a/trace/exemplar_test.go +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package trace_test - -import ( - "context" - "testing" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/trace" -) - -func TestTraceExemplar(t *testing.T) { - m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) - v := &view.View{ - Measure: m, - Aggregation: view.Distribution(1, 2, 3), - } - view.Register(v) - ctx := context.Background() - ctx, span := trace.StartSpan(ctx, t.Name(), trace.WithSampler(trace.AlwaysSample())) - stats.Record(ctx, m.M(1.5)) - span.End() - - rows, err := view.RetrieveData(v.Name) - if err != nil { - t.Fatal(err) - } - if len(rows) == 0 { - t.Fatal("len(rows) = 0; want > 0") - } - dd := rows[0].Data.(*view.DistributionData) - if got := len(dd.ExemplarsPerBucket); got < 3 { - t.Fatalf("len(dd.ExemplarsPerBucket) = %d; want >= 2", got) - } - exemplar := dd.ExemplarsPerBucket[1] - if exemplar == nil { - t.Fatal("Expected exemplar") - } - if got, want := exemplar.Value, 1.5; got != want { - t.Fatalf("exemplar.Value = %v; got %v", got, want) - } - if _, ok := exemplar.Attachments["trace_id"]; !ok { - t.Fatalf("exemplar.Attachments = %v; want trace_id key", exemplar.Attachments) - } - if _, ok := exemplar.Attachments["span_id"]; !ok { - t.Fatalf("exemplar.Attachments = %v; want span_id key", exemplar.Attachments) - } -} - -func TestTraceExemplar_notSampled(t *testing.T) { - m := stats.Float64("measure."+t.Name(), "", stats.UnitDimensionless) - v := &view.View{ - Measure: m, - Aggregation: view.Distribution(1, 2, 3), - } - view.Register(v) - ctx := context.Background() - ctx, span := trace.StartSpan(ctx, t.Name(), trace.WithSampler(trace.NeverSample())) - stats.Record(ctx, m.M(1.5)) - span.End() - - rows, err := view.RetrieveData(v.Name) - if err != nil { - t.Fatal(err) - } - if len(rows) == 0 { - t.Fatal("len(rows) = 0; want > 0") - } - dd := rows[0].Data.(*view.DistributionData) - if got := len(dd.ExemplarsPerBucket); got < 3 { - t.Fatalf("len(buckets) = %d; want >= 2", got) - } - exemplar := 
dd.ExemplarsPerBucket[1] - if exemplar == nil { - t.Fatal("Expected exemplar") - } - if got, want := exemplar.Value, 1.5; got != want { - t.Fatalf("exemplar.Value = %v; got %v", got, want) - } - if _, ok := exemplar.Attachments["trace_id"]; ok { - t.Fatalf("exemplar.Attachments = %v; want no trace_id", exemplar.Attachments) - } - if _, ok := exemplar.Attachments["span_id"]; ok { - t.Fatalf("exemplar.Attachments = %v; want span_id key", exemplar.Attachments) - } -} From 6ddd4bcc9c808594ec82377ce4323c3f7913be6d Mon Sep 17 00:00:00 2001 From: Maxime Vercheval Date: Tue, 19 Mar 2019 13:55:39 +0000 Subject: [PATCH 139/212] ochttp plugin: makes Body a transparent wrapper (#1069) --- plugin/ochttp/client_stats.go | 2 +- plugin/ochttp/trace.go | 3 ++- plugin/ochttp/wrapped_body.go | 44 +++++++++++++++++++++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) create mode 100644 plugin/ochttp/wrapped_body.go diff --git a/plugin/ochttp/client_stats.go b/plugin/ochttp/client_stats.go index e258bcc2a..17142aabe 100644 --- a/plugin/ochttp/client_stats.go +++ b/plugin/ochttp/client_stats.go @@ -68,7 +68,7 @@ func (t statsTransport) RoundTrip(req *http.Request) (*http.Response, error) { track.end() } else { track.body = resp.Body - resp.Body = track + resp.Body = wrappedBody(track, resp.Body) } } return resp, err diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index ca312fcf4..fdf65fc80 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -93,7 +93,8 @@ func (t *traceTransport) RoundTrip(req *http.Request) (*http.Response, error) { // span.End() will be invoked after // a read from resp.Body returns io.EOF or when // resp.Body.Close() is invoked. 
- resp.Body = &bodyTracker{rc: resp.Body, span: span} + bt := &bodyTracker{rc: resp.Body, span: span} + resp.Body = wrappedBody(bt, resp.Body) return resp, err } diff --git a/plugin/ochttp/wrapped_body.go b/plugin/ochttp/wrapped_body.go new file mode 100644 index 000000000..7d75cae2b --- /dev/null +++ b/plugin/ochttp/wrapped_body.go @@ -0,0 +1,44 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package ochttp + +import ( + "io" +) + +// wrappedBody returns a wrapped version of the original +// Body and only implements the same combination of additional +// interfaces as the original. +func wrappedBody(wrapper io.ReadCloser, body io.ReadCloser) io.ReadCloser { + var ( + wr, i0 = body.(io.Writer) + ) + switch { + case !i0: + return struct { + io.ReadCloser + }{wrapper} + + case i0: + return struct { + io.ReadCloser + io.Writer + }{wrapper, wr} + default: + return struct { + io.ReadCloser + }{wrapper} + } +} From ebb7978abd3f971d98f7b2b2be19e7e374409b0b Mon Sep 17 00:00:00 2001 From: Yang Song Date: Tue, 19 Mar 2019 11:22:01 -0700 Subject: [PATCH 140/212] Exemplar: Restore previous package to avoid breaking downstream. 
(#1071) --- exemplar/exemplar.go | 83 ++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 83 insertions(+) create mode 100644 exemplar/exemplar.go diff --git a/exemplar/exemplar.go b/exemplar/exemplar.go new file mode 100644 index 000000000..5a4c4345e --- /dev/null +++ b/exemplar/exemplar.go @@ -0,0 +1,83 @@ +// Copyright 2018, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exemplar implements support for exemplars. Exemplars are additional +// data associated with each measurement. +// +// Their purpose it to provide an example of the kind of thing +// (request, RPC, trace span, etc.) that resulted in that measurement. +// +// Deprecated: Use go.opencensus.io/metric/metricdata instead. +package exemplar + +import ( + "context" + "time" +) + +// Exemplars keys. +const ( + KeyTraceID = "trace_id" + KeySpanID = "span_id" + KeyPrefixTag = "tag:" +) + +// Exemplar is an example data point associated with each bucket of a +// distribution type aggregation. +// +// Deprecated: Use go.opencensus.io/metric/metricdata/exemplar instead. +type Exemplar struct { + Value float64 // the value that was recorded + Timestamp time.Time // the time the value was recorded + Attachments Attachments // attachments (if any) +} + +// Attachments is a map of extra values associated with a recorded data point. +// The map should only be mutated from AttachmentExtractor functions. 
+type Attachments map[string]string + +// AttachmentExtractor is a function capable of extracting exemplar attachments +// from the context used to record measurements. +// The map passed to the function should be mutated and returned. It will +// initially be nil: the first AttachmentExtractor that would like to add keys to the +// map is responsible for initializing it. +type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments + +var extractors []AttachmentExtractor + +// RegisterAttachmentExtractor registers the given extractor associated with the exemplar +// type name. +// +// Extractors will be used to attempt to extract exemplars from the context +// associated with each recorded measurement. +// +// Packages that support exemplars should register their extractor functions on +// initialization. +// +// RegisterAttachmentExtractor should not be called after any measurements have +// been recorded. +func RegisterAttachmentExtractor(e AttachmentExtractor) { + extractors = append(extractors, e) +} + +// AttachmentsFromContext extracts exemplars from the given context. +// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an +// unspecified order to add attachments to the exemplar. +func AttachmentsFromContext(ctx context.Context) Attachments { + var a Attachments + for _, extractor := range extractors { + a = extractor(ctx, a) + } + return a +} From 8a36f74db452c3eb69935e0ce66aecc030cf5142 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Tue, 19 Mar 2019 19:41:37 -0700 Subject: [PATCH 141/212] Exemplar: Use generic interface for attachment values. 
(#1070) --- metric/metricdata/exemplar.go | 2 +- stats/internal/record.go | 2 +- stats/record.go | 2 +- stats/view/aggregation_data.go | 38 +++++++++++++++++++++-------- stats/view/aggregation_data_test.go | 34 +++++++++++++++++--------- stats/view/collector.go | 5 ++-- stats/view/view.go | 4 +-- stats/view/view_test.go | 5 ++-- stats/view/worker.go | 2 +- stats/view/worker_commands.go | 4 +-- 10 files changed, 64 insertions(+), 34 deletions(-) diff --git a/metric/metricdata/exemplar.go b/metric/metricdata/exemplar.go index f51e33f73..cdbeef058 100644 --- a/metric/metricdata/exemplar.go +++ b/metric/metricdata/exemplar.go @@ -30,4 +30,4 @@ type Exemplar struct { } // Attachments is a map of extra values associated with a recorded data point. -type Attachments map[string]string +type Attachments map[string]interface{} diff --git a/stats/internal/record.go b/stats/internal/record.go index ed5455205..36935e629 100644 --- a/stats/internal/record.go +++ b/stats/internal/record.go @@ -19,7 +19,7 @@ import ( ) // DefaultRecorder will be called for each Record call. -var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]string) +var DefaultRecorder func(tags *tag.Map, measurement interface{}, attachments map[string]interface{}) // SubscriptionReporter reports when a view subscribed with a measure. var SubscriptionReporter func(measure string) diff --git a/stats/record.go b/stats/record.go index a0f0ec4be..d2af0a60d 100644 --- a/stats/record.go +++ b/stats/record.go @@ -51,7 +51,7 @@ func Record(ctx context.Context, ms ...Measurement) { return } // TODO(songy23): fix attachments. - recorder(tag.FromContext(ctx), ms, map[string]string{}) + recorder(tag.FromContext(ctx), ms, map[string]interface{}{}) } // RecordWithTags records one or multiple measurements at once. 
diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go index bf8015f3a..8774a07f4 100644 --- a/stats/view/aggregation_data.go +++ b/stats/view/aggregation_data.go @@ -17,6 +17,7 @@ package view import ( "math" + "time" "go.opencensus.io/metric/metricdata" ) @@ -26,7 +27,7 @@ import ( // Mosts users won't directly access aggregration data. type AggregationData interface { isAggregationData() bool - addSample(v float64) + addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool } @@ -43,7 +44,7 @@ type CountData struct { func (a *CountData) isAggregationData() bool { return true } -func (a *CountData) addSample(_ float64) { +func (a *CountData) addSample(_ float64, _ map[string]interface{}, _ time.Time) { a.Value = a.Value + 1 } @@ -70,7 +71,7 @@ type SumData struct { func (a *SumData) isAggregationData() bool { return true } -func (a *SumData) addSample(v float64) { +func (a *SumData) addSample(v float64, _ map[string]interface{}, _ time.Time) { a.Value += v } @@ -101,7 +102,7 @@ type DistributionData struct { SumOfSquaredDev float64 // sum of the squared deviation from the mean CountPerBucket []int64 // number of occurrences per bucket // ExemplarsPerBucket is slice the same length as CountPerBucket containing - // an metricdata for the associated bucket, or nil. + // an exemplar for the associated bucket, or nil. ExemplarsPerBucket []*metricdata.Exemplar bounds []float64 // histogram distribution of the values } @@ -130,7 +131,7 @@ func (a *DistributionData) variance() float64 { func (a *DistributionData) isAggregationData() bool { return true } // TODO(songy23): support exemplar attachments. 
-func (a *DistributionData) addSample(v float64) { +func (a *DistributionData) addSample(v float64, attachments map[string]interface{}, t time.Time) { if v < a.Min { a.Min = v } @@ -138,7 +139,7 @@ func (a *DistributionData) addSample(v float64) { a.Max = v } a.Count++ - a.addToBucket(v) + a.addToBucket(v, attachments, t) if a.Count == 1 { a.Mean = v @@ -150,18 +151,35 @@ func (a *DistributionData) addSample(v float64) { a.SumOfSquaredDev = a.SumOfSquaredDev + (v-oldMean)*(v-a.Mean) } -func (a *DistributionData) addToBucket(v float64) { +func (a *DistributionData) addToBucket(v float64, attachments map[string]interface{}, t time.Time) { var count *int64 - for i, b := range a.bounds { + var i int + var b float64 + for i, b = range a.bounds { if v < b { count = &a.CountPerBucket[i] break } } if count == nil { // Last bucket. - count = &a.CountPerBucket[len(a.bounds)] + i = len(a.bounds) + count = &a.CountPerBucket[i] } *count++ + if exemplar := getExemplar(v, attachments, t); exemplar != nil { + a.ExemplarsPerBucket[i] = exemplar + } +} + +func getExemplar(v float64, attachments map[string]interface{}, t time.Time) *metricdata.Exemplar { + if len(attachments) == 0 { + return nil + } + return &metricdata.Exemplar{ + Value: v, + Timestamp: t, + Attachments: attachments, + } } func (a *DistributionData) clone() AggregationData { @@ -199,7 +217,7 @@ func (l *LastValueData) isAggregationData() bool { return true } -func (l *LastValueData) addSample(v float64) { +func (l *LastValueData) addSample(v float64, _ map[string]interface{}, _ time.Time) { l.Value = v } diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go index caa624caa..a7e056752 100644 --- a/stats/view/aggregation_data_test.go +++ b/stats/view/aggregation_data_test.go @@ -18,6 +18,7 @@ package view import ( "reflect" "testing" + "time" "github.com/google/go-cmp/cmp" "github.com/google/go-cmp/cmp/cmpopts" @@ -66,12 +67,15 @@ func TestDataClone(t *testing.T) { func 
TestDistributionData_addSample(t *testing.T) { dd := newDistributionData([]float64{1, 2}) - dd.addSample(0.5) + attachments1 := map[string]interface{}{"key1": "value1"} + t1 := time.Now() + dd.addSample(0.5, attachments1, t1) + e1 := &metricdata.Exemplar{Value: 0.5, Timestamp: t1, Attachments: attachments1} want := &DistributionData{ Count: 1, CountPerBucket: []int64{1, 0, 0}, - ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, + ExemplarsPerBucket: []*metricdata.Exemplar{e1, nil, nil}, Max: 0.5, Min: 0.5, Mean: 0.5, @@ -81,13 +85,16 @@ func TestDistributionData_addSample(t *testing.T) { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } - dd.addSample(0.7) + attachments2 := map[string]interface{}{"key2": "value2"} + t2 := t1.Add(time.Microsecond) + dd.addSample(0.7, attachments2, t2) - // Previous exemplar should be preserved, since it has more annotations. + // Previous exemplar should be overwritten. + e2 := &metricdata.Exemplar{Value: 0.7, Timestamp: t2, Attachments: attachments2} want = &DistributionData{ Count: 2, CountPerBucket: []int64{2, 0, 0}, - ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, + ExemplarsPerBucket: []*metricdata.Exemplar{e2, nil, nil}, Max: 0.7, Min: 0.5, Mean: 0.6, @@ -97,16 +104,19 @@ func TestDistributionData_addSample(t *testing.T) { t.Fatalf("Unexpected DistributionData -got +want: %s", diff) } - dd.addSample(0.2) + attachments3 := map[string]interface{}{"key3": "value3"} + t3 := t2.Add(time.Microsecond) + dd.addSample(1.2, attachments3, t3) - // Exemplar should be replaced since it has a trace_id. + // e3 is at another bucket. e2 should still be there. 
+ e3 := &metricdata.Exemplar{Value: 1.2, Timestamp: t3, Attachments: attachments3} want = &DistributionData{ Count: 3, - CountPerBucket: []int64{3, 0, 0}, - ExemplarsPerBucket: []*metricdata.Exemplar{nil, nil, nil}, - Max: 0.7, - Min: 0.2, - Mean: 0.4666666666666667, + CountPerBucket: []int64{2, 1, 0}, + ExemplarsPerBucket: []*metricdata.Exemplar{e2, e3, nil}, + Max: 1.2, + Min: 0.5, + Mean: 0.7999999999999999, SumOfSquaredDev: 0, } if diff := cmpDD(dd, want); diff != "" { diff --git a/stats/view/collector.go b/stats/view/collector.go index 250395db2..8a6a2c0fd 100644 --- a/stats/view/collector.go +++ b/stats/view/collector.go @@ -17,6 +17,7 @@ package view import ( "sort" + "time" "go.opencensus.io/internal/tagencoding" "go.opencensus.io/tag" @@ -31,13 +32,13 @@ type collector struct { a *Aggregation } -func (c *collector) addSample(s string, v float64) { +func (c *collector) addSample(s string, v float64, attachments map[string]interface{}, t time.Time) { aggregator, ok := c.signatures[s] if !ok { aggregator = c.a.newData() c.signatures[s] = aggregator } - aggregator.addSample(v) + aggregator.addSample(v, attachments, t) } // collectRows returns a snapshot of the collected Row values. 
diff --git a/stats/view/view.go b/stats/view/view.go index 633d3465a..95f01ad32 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -150,12 +150,12 @@ func (v *viewInternal) collectedRows() []*Row { return v.collector.collectedRows(v.view.TagKeys) } -func (v *viewInternal) addSample(m *tag.Map, val float64) { +func (v *viewInternal) addSample(m *tag.Map, val float64, attachments map[string]interface{}, t time.Time) { if !v.isSubscribed() { return } sig := string(encodeWithKeys(m, v.view.TagKeys)) - v.collector.addSample(sig, val) + v.collector.addSample(sig, val, attachments, t) } // A Data is a set of rows about usage of the single measure associated diff --git a/stats/view/view_test.go b/stats/view/view_test.go index aef23521a..7d2bed9c4 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -18,6 +18,7 @@ package view import ( "context" "testing" + "time" "github.com/google/go-cmp/cmp" @@ -177,7 +178,7 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tc.label, err) } - view.addSample(tag.FromContext(ctx), r.f) + view.addSample(tag.FromContext(ctx), r.f, nil, time.Now()) } gotRows := view.collectedRows() @@ -293,7 +294,7 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { if err != nil { t.Errorf("%v: New = %v", tt.label, err) } - view.addSample(tag.FromContext(ctx), r.f) + view.addSample(tag.FromContext(ctx), r.f, nil, time.Now()) } gotRows := view.collectedRows() diff --git a/stats/view/worker.go b/stats/view/worker.go index 0069e4bc1..d29dbae92 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -102,7 +102,7 @@ func RetrieveData(viewName string) ([]*Row, error) { return resp.rows, resp.err } -func record(tags *tag.Map, ms interface{}, attachments map[string]string) { +func record(tags *tag.Map, ms interface{}, attachments map[string]interface{}) { req := &recordReq{ tm: tags, ms: ms.([]stats.Measurement), diff --git a/stats/view/worker_commands.go 
b/stats/view/worker_commands.go index 33f23168b..e27f29401 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -148,7 +148,7 @@ func (cmd *retrieveDataReq) handleCommand(w *worker) { type recordReq struct { tm *tag.Map ms []stats.Measurement - attachments map[string]string + attachments map[string]interface{} t time.Time } @@ -159,7 +159,7 @@ func (cmd *recordReq) handleCommand(w *worker) { } ref := w.getMeasureRef(m.Measure().Name()) for v := range ref.views { - v.addSample(cmd.tm, m.Value()) + v.addSample(cmd.tm, m.Value(), cmd.attachments, time.Now()) } } } From 7396103d6a6f92b9b0dda7164a8abd2bc6e9e5ef Mon Sep 17 00:00:00 2001 From: Sergey Kanzhelev Date: Thu, 21 Mar 2019 06:53:53 -0700 Subject: [PATCH 142/212] http out test cases (#928) * http spec test example * support for local-server based spec tests * read test cases from file: * the same test cases file as for C# * moved test cases file to testdata * updated to the lateat spec * attributes * fixed linter errors * fix sync issue * use already defined map instead --- plugin/ochttp/testdata/download-test-cases.sh | 5 + .../ochttp/testdata/http-out-test-cases.json | 274 ++++++++++++++++++ plugin/ochttp/trace.go | 14 +- plugin/ochttp/trace_test.go | 149 +++++++++- 4 files changed, 437 insertions(+), 5 deletions(-) create mode 100755 plugin/ochttp/testdata/download-test-cases.sh create mode 100644 plugin/ochttp/testdata/http-out-test-cases.json diff --git a/plugin/ochttp/testdata/download-test-cases.sh b/plugin/ochttp/testdata/download-test-cases.sh new file mode 100755 index 000000000..3baa11438 --- /dev/null +++ b/plugin/ochttp/testdata/download-test-cases.sh @@ -0,0 +1,5 @@ +# This script downloads latest test cases from specs + +# TODO: change the link to when test cases are merged to specs repo + +curl https://raw.githubusercontent.com/census-instrumentation/opencensus-specs/master/trace/http-out-test-cases.json -o http-out-test-cases.json \ No newline at end of file diff 
--git a/plugin/ochttp/testdata/http-out-test-cases.json b/plugin/ochttp/testdata/http-out-test-cases.json new file mode 100644 index 000000000..039a73eb5 --- /dev/null +++ b/plugin/ochttp/testdata/http-out-test-cases.json @@ -0,0 +1,274 @@ +[ + { + "name": "Successful GET call to https://example.com", + "method": "GET", + "url": "https://example.com/", + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "example.com", + "http.status_code": "200", + "http.url": "https://example.com/" + } + }, + { + "name": "Successfully POST call to https://example.com", + "method": "POST", + "url": "https://example.com/", + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "POST", + "http.host": "example.com", + "http.status_code": "200", + "http.url": "https://example.com/" + } + }, + { + "name": "Name is populated as a path", + "method": "GET", + "url": "http://{host}:{port}/path/to/resource/", + "responseCode": 200, + "spanName": "/path/to/resource/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/path/to/resource/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "200", + "http.url": "http://{host}:{port}/path/to/resource/" + } + }, + { + "name": "Call that cannot resolve DNS will be reported as error span", + "method": "GET", + "url": "https://sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com/", + "spanName": "/", + "spanStatus": "UNKNOWN", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com", + "http.url": "https://sdlfaldfjalkdfjlkajdflkajlsdjf.sdlkjafsdjfalfadslkf.com/" + } + }, + { + "name": "Response code: 199. This test case is not possible to implement on some platforms as they don't allow to return this status code. 
Keeping this test case for visibility, but it actually simply a fallback into 200 test case", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 200, + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "200", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 200", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 200, + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "200", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 399", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 399, + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "399", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 400", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 400, + "spanName": "/", + "spanStatus": "INVALID_ARGUMENT", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "400", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 401", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 401, + "spanName": "/", + "spanStatus": "UNAUTHENTICATED", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "401", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 403", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 403, + "spanName": "/", + "spanStatus": 
"PERMISSION_DENIED", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "403", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 404", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 404, + "spanName": "/", + "spanStatus": "NOT_FOUND", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "404", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 429", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 429, + "spanName": "/", + "spanStatus": "RESOURCE_EXHAUSTED", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "429", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 501", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 501, + "spanName": "/", + "spanStatus": "UNIMPLEMENTED", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "501", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 503", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 503, + "spanName": "/", + "spanStatus": "UNAVAILABLE", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "503", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "Response code: 504", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 504, + "spanName": "/", + "spanStatus": "DEADLINE_EXCEEDED", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "504", + "http.url": 
"http://{host}:{port}/" + } + }, + { + "name": "Response code: 600", + "method": "GET", + "url": "http://{host}:{port}/", + "responseCode": 600, + "spanName": "/", + "spanStatus": "UNKNOWN", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "600", + "http.url": "http://{host}:{port}/" + } + }, + { + "name": "User agent attribute populated", + "method": "GET", + "url": "http://{host}:{port}/", + "headers": { + "User-Agent": "test-user-agent" + }, + "responseCode": 200, + "spanName": "/", + "spanStatus": "OK", + "spanKind": "Client", + "spanAttributes": { + "http.path": "/", + "http.method": "GET", + "http.host": "{host}:{port}", + "http.status_code": "200", + "http.user_agent": "test-user-agent", + "http.url": "http://{host}:{port}/" + } + } + ] \ No newline at end of file diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index fdf65fc80..c23b97fb1 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -34,6 +34,7 @@ const ( HostAttribute = "http.host" MethodAttribute = "http.method" PathAttribute = "http.path" + URLAttribute = "http.url" UserAgentAttribute = "http.user_agent" StatusCodeAttribute = "http.status_code" ) @@ -150,12 +151,21 @@ func spanNameFromURL(req *http.Request) string { } func requestAttrs(r *http.Request) []trace.Attribute { - return []trace.Attribute{ + userAgent := r.UserAgent() + + attrs := make([]trace.Attribute, 0, 5) + attrs = append(attrs, trace.StringAttribute(PathAttribute, r.URL.Path), + trace.StringAttribute(URLAttribute, r.URL.String()), trace.StringAttribute(HostAttribute, r.Host), trace.StringAttribute(MethodAttribute, r.Method), - trace.StringAttribute(UserAgentAttribute, r.UserAgent()), + ) + + if userAgent != "" { + attrs = append(attrs, trace.StringAttribute(UserAgentAttribute, userAgent)) } + + return attrs } func responseAttrs(resp *http.Response) []trace.Attribute { diff --git a/plugin/ochttp/trace_test.go 
b/plugin/ochttp/trace_test.go index 33df4d730..13ef30cab 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -18,13 +18,16 @@ import ( "bytes" "context" "encoding/hex" + "encoding/json" "errors" "fmt" "io" "io/ioutil" "log" + "net" "net/http" "net/http/httptest" + "net/url" "reflect" "strings" "testing" @@ -244,7 +247,7 @@ func TestEndToEnd(t *testing.T) { serverDone := make(chan struct{}) serverReturn := make(chan time.Time) tt.handler.StartOptions.Sampler = trace.AlwaysSample() - url := serveHTTP(tt.handler, serverDone, serverReturn) + url := serveHTTP(tt.handler, serverDone, serverReturn, 200) ctx := context.Background() // Make the request. @@ -342,9 +345,9 @@ func TestEndToEnd(t *testing.T) { } } -func serveHTTP(handler *Handler, done chan struct{}, wait chan time.Time) string { +func serveHTTP(handler *Handler, done chan struct{}, wait chan time.Time, statusCode int) string { handler.Handler = http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - w.WriteHeader(200) + w.WriteHeader(statusCode) w.(http.Flusher).Flush() // Simulate a slow-responding server. 
@@ -467,12 +470,14 @@ func TestRequestAttributes(t *testing.T) { }, wantAttrs: []trace.Attribute{ trace.StringAttribute("http.path", "/hello"), + trace.StringAttribute("http.url", "http://example.com:779/hello"), trace.StringAttribute("http.host", "example.com:779"), trace.StringAttribute("http.method", "GET"), trace.StringAttribute("http.user_agent", "ua"), }, }, } + for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { req := tt.makeReq() @@ -516,6 +521,144 @@ func TestResponseAttributes(t *testing.T) { } } +type TestCase struct { + Name string + Method string + URL string + Headers map[string]string + ResponseCode int + SpanName string + SpanStatus string + SpanKind string + SpanAttributes map[string]string +} + +func TestAgainstSpecs(t *testing.T) { + + fmt.Println("start") + + dat, err := ioutil.ReadFile("testdata/http-out-test-cases.json") + if err != nil { + t.Fatalf("error reading file: %v", err) + } + + tests := make([]TestCase, 0) + err = json.Unmarshal(dat, &tests) + if err != nil { + t.Fatalf("error parsing json: %v", err) + } + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) + + for _, tt := range tests { + t.Run(tt.Name, func(t *testing.T) { + var spans collector + trace.RegisterExporter(&spans) + defer trace.UnregisterExporter(&spans) + + handler := &Handler{} + transport := &Transport{} + + serverDone := make(chan struct{}) + serverReturn := make(chan time.Time) + host := "" + port := "" + serverRequired := strings.Contains(tt.URL, "{") + if serverRequired { + // Start the server. + localServerURL := serveHTTP(handler, serverDone, serverReturn, tt.ResponseCode) + u, _ := url.Parse(localServerURL) + host, port, _ = net.SplitHostPort(u.Host) + + tt.URL = strings.Replace(tt.URL, "{host}", host, 1) + tt.URL = strings.Replace(tt.URL, "{port}", port, 1) + } + + // Start a root Span in the client. + ctx, _ := trace.StartSpan( + context.Background(), + "top-level") + // Make the request. 
+ req, err := http.NewRequest( + tt.Method, + tt.URL, + nil) + for headerName, headerValue := range tt.Headers { + req.Header.Add(headerName, headerValue) + } + if err != nil { + t.Fatal(err) + } + req = req.WithContext(ctx) + resp, err := transport.RoundTrip(req) + if err != nil { + // do not fail. We want to validate DNS issues + //t.Fatal(err) + } + + if serverRequired { + // Tell the server to return from request handling. + serverReturn <- time.Now().Add(time.Millisecond) + } + + if resp != nil { + // If it simply closes body without reading + // synchronization problem may happen for spans slice. + // Server span and client span will write themselves + // at the same time + ioutil.ReadAll(resp.Body) + resp.Body.Close() + if serverRequired { + <-serverDone + } + } + trace.UnregisterExporter(&spans) + + var client *trace.SpanData + for _, sp := range spans { + if sp.SpanKind == trace.SpanKindClient { + client = sp + } + } + + if client.Name != tt.SpanName { + t.Errorf("span names don't match: expected: %s, actual: %s", tt.SpanName, client.Name) + } + + spanKindToStr := map[int]string{ + trace.SpanKindClient: "Client", + trace.SpanKindServer: "Server", + } + + if !strings.EqualFold(codeToStr[client.Status.Code], tt.SpanStatus) { + t.Errorf("span status don't match: expected: %s, actual: %d (%s)", tt.SpanStatus, client.Status.Code, codeToStr[client.Status.Code]) + } + + if !strings.EqualFold(spanKindToStr[client.SpanKind], tt.SpanKind) { + t.Errorf("span kind don't match: expected: %s, actual: %d (%s)", tt.SpanKind, client.SpanKind, spanKindToStr[client.SpanKind]) + } + + normalizedActualAttributes := map[string]string{} + for k, v := range client.Attributes { + normalizedActualAttributes[k] = fmt.Sprintf("%v", v) + } + + normalizedExpectedAttributes := map[string]string{} + for k, v := range tt.SpanAttributes { + normalizedValue := v + normalizedValue = strings.Replace(normalizedValue, "{host}", host, 1) + normalizedValue = strings.Replace(normalizedValue, 
"{port}", port, 1) + + normalizedExpectedAttributes[k] = normalizedValue + } + + if got, want := normalizedActualAttributes, normalizedExpectedAttributes; !reflect.DeepEqual(got, want) { + t.Errorf("Request attributes = %#v; want %#v", got, want) + } + }) + } +} + func TestStatusUnitTest(t *testing.T) { tests := []struct { in int From 5ae9166f90ff989c557e45fcb9592ac59afa0fa0 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 22 Mar 2019 09:22:20 -0700 Subject: [PATCH 143/212] Add support for reader. (#1049) * Add support for reader. * handle multiple call to reader.Stop and test for producer while reader is stopped. * fix typo. * fix review comment and refactor reader to metricexport package. * change Reader to IntervalReader and add Reader. Also include ctx in ExportMetric api. * modify export interface to return error and make it plural. * remove option and provide Start function. * move exporter.go to metricexport package. * added option for reader. * make constants private. --- metric/metricexport/export.go | 26 +++ metric/metricexport/reader.go | 187 +++++++++++++++++++++ metric/metricexport/reader_test.go | 260 +++++++++++++++++++++++++++++ 3 files changed, 473 insertions(+) create mode 100644 metric/metricexport/export.go create mode 100644 metric/metricexport/reader.go create mode 100644 metric/metricexport/reader_test.go diff --git a/metric/metricexport/export.go b/metric/metricexport/export.go new file mode 100644 index 000000000..23f4a864a --- /dev/null +++ b/metric/metricexport/export.go @@ -0,0 +1,26 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metricexport + +import ( + "context" + + "go.opencensus.io/metric/metricdata" +) + +// Exporter is an interface that exporters implement to export the metric data. +type Exporter interface { + ExportMetrics(ctx context.Context, data []*metricdata.Metric) error +} diff --git a/metric/metricexport/reader.go b/metric/metricexport/reader.go new file mode 100644 index 000000000..44ace7008 --- /dev/null +++ b/metric/metricexport/reader.go @@ -0,0 +1,187 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package metricexport + +import ( + "fmt" + "time" + + "context" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/trace" + "sync" +) + +var ( + defaultSampler = trace.ProbabilitySampler(0.0001) + errReportingIntervalTooLow = fmt.Errorf("reporting interval less than %d", minimumReportingDuration) + errAlreadyStarted = fmt.Errorf("already started") + errIntervalReaderNil = fmt.Errorf("interval reader is nil") + errExporterNil = fmt.Errorf("exporter is nil") + errReaderNil = fmt.Errorf("reader is nil") +) + +const ( + defaultReportingDuration = 60 * time.Second + minimumReportingDuration = 1 * time.Second + defaultSpanName = "ExportMetrics" +) + +// ReaderOptions contains options pertaining to metrics reader. +type ReaderOptions struct { + // SpanName is the name used for span created to export metrics. + SpanName string +} + +// Reader reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. +type Reader struct { + sampler trace.Sampler + + spanName string +} + +// IntervalReader periodically reads metrics from all producers registered +// with producer manager and exports those metrics using provided +// exporter. Call Reader.Stop() to stop the reader. +type IntervalReader struct { + // ReportingInterval is the time duration between two consecutive + // metrics reporting. defaultReportingDuration is used if it is not set. + // It cannot be set lower than minimumReportingDuration. + ReportingInterval time.Duration + + exporter Exporter + timer *time.Ticker + quit, done chan bool + mu sync.RWMutex + reader *Reader +} + +// ReaderOption applies changes to ReaderOptions. +type ReaderOption func(*ReaderOptions) + +// WithSpanName makes the new reader use the given span name when exporting metrics. 
+func WithSpanName(spanName string) ReaderOption { + return func(o *ReaderOptions) { + o.SpanName = spanName + } +} + +// NewReader returns a reader configured with specified options. +func NewReader(o ...ReaderOption) *Reader { + var opts ReaderOptions + for _, op := range o { + op(&opts) + } + reader := &Reader{defaultSampler, defaultSpanName} + if opts.SpanName != "" { + reader.spanName = opts.SpanName + } + return reader +} + +// NewIntervalReader creates a reader. Once started it periodically +// reads metrics from all producers and exports them using provided exporter. +func NewIntervalReader(reader *Reader, exporter Exporter) (*IntervalReader, error) { + if exporter == nil { + return nil, errExporterNil + } + if reader == nil { + return nil, errReaderNil + } + + r := &IntervalReader{ + exporter: exporter, + reader: reader, + } + return r, nil +} + +// Start starts the IntervalReader which periodically reads metrics from all +// producers registered with global producer manager. If the reporting interval +// is not set prior to calling this function then default reporting interval +// is used. +func (ir *IntervalReader) Start() error { + if ir == nil { + return errIntervalReaderNil + } + ir.mu.Lock() + defer ir.mu.Unlock() + var reportingInterval = defaultReportingDuration + if ir.ReportingInterval != 0 { + if ir.ReportingInterval < minimumReportingDuration { + return errReportingIntervalTooLow + } + reportingInterval = ir.ReportingInterval + } + + if ir.done != nil { + return errAlreadyStarted + } + ir.timer = time.NewTicker(reportingInterval) + ir.quit = make(chan bool) + ir.done = make(chan bool) + + go ir.startInternal() + return nil +} + +func (ir *IntervalReader) startInternal() { + for { + select { + case <-ir.timer.C: + ir.reader.ReadAndExport(ir.exporter) + case <-ir.quit: + ir.timer.Stop() + ir.done <- true + return + } + } +} + +// Stop stops the reader from reading and exporting metrics. +// Additional call to Stop are no-ops. 
+func (ir *IntervalReader) Stop() { + if ir == nil { + return + } + ir.mu.Lock() + defer ir.mu.Unlock() + if ir.quit == nil { + return + } + ir.quit <- true + <-ir.done + close(ir.quit) + close(ir.done) + ir.quit = nil +} + +// ReadAndExport reads metrics from all producers registered with +// producer manager and then exports them using provided exporter. +func (r *Reader) ReadAndExport(exporter Exporter) { + ctx, span := trace.StartSpan(context.Background(), r.spanName, trace.WithSampler(r.sampler)) + defer span.End() + producers := metricproducer.GlobalManager().GetAll() + data := []*metricdata.Metric{} + for _, producer := range producers { + data = append(data, producer.Read()...) + } + // TODO: [rghetia] add metrics for errors. + exporter.ExportMetrics(ctx, data) +} diff --git a/metric/metricexport/reader_test.go b/metric/metricexport/reader_test.go new file mode 100644 index 000000000..a043530e8 --- /dev/null +++ b/metric/metricexport/reader_test.go @@ -0,0 +1,260 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package metricexport + +import ( + "context" + "sync" + "testing" + "time" + + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" +) + +var ( + ir1 *IntervalReader + ir2 *IntervalReader + reader1 = NewReader(WithSpanName("test-export-span")) + exporter1 = &metricExporter{} + exporter2 = &metricExporter{} + gaugeEntry *metric.Int64GaugeEntry + duration1 = time.Duration(1000 * time.Millisecond) + duration2 = time.Duration(2000 * time.Millisecond) +) + +type metricExporter struct { + sync.Mutex + metrics []*metricdata.Metric +} + +func (e *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + e.Lock() + defer e.Unlock() + + e.metrics = append(e.metrics, metrics...) + return nil +} + +func init() { + r := metric.NewRegistry() + metricproducer.GlobalManager().AddProducer(r) + g, _ := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") + gaugeEntry, _ = g.GetEntry(metricdata.NewLabelValue("foo")) +} + +func TestNewReaderWitDefaultOptions(t *testing.T) { + r := NewReader() + + if r.spanName != defaultSpanName { + t.Errorf("span name: got %v, want %v\n", r.spanName, defaultSpanName) + } +} + +func TestNewReaderWitSpanName(t *testing.T) { + spanName := "test-span" + r := NewReader(WithSpanName(spanName)) + + if r.spanName != spanName { + t.Errorf("span name: got %+v, want %v\n", r.spanName, spanName) + } +} + +func TestNewReader(t *testing.T) { + r := NewReader() + + gaugeEntry.Add(1) + + r.ReadAndExport(exporter1) + + checkExportedCount(exporter1, 1, t) + checkExportedMetricDesc(exporter1, "active_request", t) + resetExporter(exporter1) +} + +func TestNewIntervalReader(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + + gaugeEntry.Add(1) + + time.Sleep(1500 * time.Millisecond) + checkExportedCount(exporter1, 1, t) + checkExportedMetricDesc(exporter1, "active_request", t) + ir1.Stop() + 
resetExporter(exporter1) +} + +func TestManualReadForIntervalReader(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + + gaugeEntry.Set(1) + reader1.ReadAndExport(exporter1) + gaugeEntry.Set(4) + + time.Sleep(1500 * time.Millisecond) + + checkExportedCount(exporter1, 2, t) + checkExportedValues(exporter1, []int64{1, 4}, t) // one for manual read other for time based. + checkExportedMetricDesc(exporter1, "active_request", t) + ir1.Stop() + resetExporter(exporter1) +} + +func TestProducerWithIntervalReaderStop(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + ir1.Stop() + + gaugeEntry.Add(1) + + time.Sleep(1500 * time.Millisecond) + + checkExportedCount(exporter1, 0, t) + checkExportedMetricDesc(exporter1, "active_request", t) + resetExporter(exporter1) +} + +func TestProducerWithMultipleIntervalReaders(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + ir2 = createAndStart(exporter2, duration2, t) + + gaugeEntry.Add(1) + + time.Sleep(2500 * time.Millisecond) + + checkExportedCount(exporter1, 2, t) + checkExportedMetricDesc(exporter1, "active_request", t) + checkExportedCount(exporter2, 1, t) + checkExportedMetricDesc(exporter2, "active_request", t) + ir1.Stop() + ir2.Stop() + resetExporter(exporter1) + resetExporter(exporter1) +} + +func TestIntervalReaderMultipleStop(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + stop := make(chan bool, 1) + go func() { + ir1.Stop() + ir1.Stop() + stop <- true + }() + + select { + case _ = <-stop: + case <-time.After(1 * time.Second): + t.Fatalf("ir1 stop got blocked") + } +} + +func TestIntervalReaderMultipleStart(t *testing.T) { + ir1 = createAndStart(exporter1, duration1, t) + ir1.Start() + + gaugeEntry.Add(1) + + time.Sleep(1500 * time.Millisecond) + + checkExportedCount(exporter1, 1, t) + checkExportedMetricDesc(exporter1, "active_request", t) + ir1.Stop() + resetExporter(exporter1) +} + +func TestNewIntervalReaderWithNilReader(t *testing.T) { + _, err := 
NewIntervalReader(nil, exporter1) + if err == nil { + t.Fatalf("expected error but got nil\n") + } +} + +func TestNewIntervalReaderWithNilExporter(t *testing.T) { + _, err := NewIntervalReader(reader1, nil) + if err == nil { + t.Fatalf("expected error but got nil\n") + } +} + +func TestNewIntervalReaderStartWithInvalidInterval(t *testing.T) { + ir, err := NewIntervalReader(reader1, exporter1) + ir.ReportingInterval = time.Duration(500 * time.Millisecond) + err = ir.Start() + if err == nil { + t.Fatalf("expected error but got nil\n") + } +} + +func checkExportedCount(exporter *metricExporter, wantCount int, t *testing.T) { + exporter.Lock() + defer exporter.Unlock() + gotCount := len(exporter.metrics) + if gotCount != wantCount { + t.Fatalf("exported metric count: got %d, want %d\n", gotCount, wantCount) + } +} + +func checkExportedValues(exporter *metricExporter, wantValues []int64, t *testing.T) { + exporter.Lock() + defer exporter.Unlock() + gotCount := len(exporter.metrics) + wantCount := len(wantValues) + if gotCount != wantCount { + t.Errorf("exported metric count: got %d, want %d\n", gotCount, wantCount) + return + } + for i, wantValue := range wantValues { + var gotValue int64 + switch v := exporter.metrics[i].TimeSeries[0].Points[0].Value.(type) { + case int64: + gotValue = v + default: + t.Errorf("expected float64 value but found other %T", exporter.metrics[i].TimeSeries[0].Points[0].Value) + } + if gotValue != wantValue { + t.Errorf("values idx %d, got: %v, want %v", i, gotValue, wantValue) + } + } +} + +func checkExportedMetricDesc(exporter *metricExporter, wantMdName string, t *testing.T) { + exporter.Lock() + defer exporter.Unlock() + for _, metric := range exporter.metrics { + gotMdName := metric.Descriptor.Name + if gotMdName != wantMdName { + t.Errorf("got %s, want %s\n", gotMdName, wantMdName) + } + } + exporter.metrics = nil +} + +func resetExporter(exporter *metricExporter) { + exporter.Lock() + defer exporter.Unlock() + exporter.metrics = nil +} 
+ +// createAndStart stops the current processors and creates a new one. +func createAndStart(exporter *metricExporter, d time.Duration, t *testing.T) *IntervalReader { + ir, _ := NewIntervalReader(reader1, exporter) + ir.ReportingInterval = d + err := ir.Start() + if err != nil { + t.Fatalf("error creating reader %v\n", err) + } + return ir +} From ec71c97329c63957a27562a5ae6833305e9c3519 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 27 Mar 2019 07:16:50 -0700 Subject: [PATCH 144/212] stats worker as metric producer. (#1078) * stats worker as metric producer. * fixed the conversion based on measure type in addition to aggregation type. fixed test. Specifically removed json comparision. * fixed review comments related to count float64. * add check for metricType in toPoint func for distribution Also replaced reflect.DeepEqual with cmp.Equal --- stats/view/aggregation_data.go | 59 +++++ stats/view/view.go | 13 +- stats/view/view_to_metric.go | 131 +++++++++ stats/view/view_to_metric_test.go | 424 ++++++++++++++++++++++++++++++ stats/view/worker.go | 55 ++++ stats/view/worker_commands.go | 2 +- 6 files changed, 678 insertions(+), 6 deletions(-) create mode 100644 stats/view/view_to_metric.go create mode 100644 stats/view/view_to_metric_test.go diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go index 8774a07f4..d500e67f7 100644 --- a/stats/view/aggregation_data.go +++ b/stats/view/aggregation_data.go @@ -30,6 +30,7 @@ type AggregationData interface { addSample(v float64, attachments map[string]interface{}, t time.Time) clone() AggregationData equal(other AggregationData) bool + toPoint(t metricdata.Type, time time.Time) metricdata.Point } const epsilon = 1e-9 @@ -61,6 +62,15 @@ func (a *CountData) equal(other AggregationData) bool { return a.Value == a2.Value } +func (a *CountData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return 
metricdata.NewInt64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // SumData is the aggregated data for the Sum aggregation. // A sum aggregation processes data and sums up the recordings. // @@ -87,6 +97,17 @@ func (a *SumData) equal(other AggregationData) bool { return math.Pow(a.Value-a2.Value, 2) < epsilon } +func (a *SumData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeInt64: + return metricdata.NewInt64Point(t, int64(a.Value)) + case metricdata.TypeCumulativeFloat64: + return metricdata.NewFloat64Point(t, a.Value) + default: + panic("unsupported metricdata.Type") + } +} + // DistributionData is the aggregated data for the // Distribution aggregation. // @@ -208,6 +229,33 @@ func (a *DistributionData) equal(other AggregationData) bool { return a.Count == a2.Count && a.Min == a2.Min && a.Max == a2.Max && math.Pow(a.Mean-a2.Mean, 2) < epsilon && math.Pow(a.variance()-a2.variance(), 2) < epsilon } +func (a *DistributionData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeCumulativeDistribution: + buckets := []metricdata.Bucket{} + for i := 0; i < len(a.CountPerBucket); i++ { + buckets = append(buckets, metricdata.Bucket{ + Count: a.CountPerBucket[i], + Exemplar: a.ExemplarsPerBucket[i], + }) + } + bucketOptions := &metricdata.BucketOptions{Bounds: a.bounds} + + val := &metricdata.Distribution{ + Count: a.Count, + Sum: a.Sum(), + SumOfSquaredDeviation: a.SumOfSquaredDev, + BucketOptions: bucketOptions, + Buckets: buckets, + } + return metricdata.NewDistributionPoint(t, val) + + default: + // TODO: [rghetia] when we have a use case for TypeGaugeDistribution. + panic("unsupported metricdata.Type") + } +} + // LastValueData returns the last value recorded for LastValue aggregation. 
type LastValueData struct { Value float64 @@ -232,3 +280,14 @@ func (l *LastValueData) equal(other AggregationData) bool { } return l.Value == a2.Value } + +func (l *LastValueData) toPoint(metricType metricdata.Type, t time.Time) metricdata.Point { + switch metricType { + case metricdata.TypeGaugeInt64: + return metricdata.NewInt64Point(t, int64(l.Value)) + case metricdata.TypeGaugeFloat64: + return metricdata.NewFloat64Point(t, l.Value) + default: + panic("unsupported metricdata.Type") + } +} diff --git a/stats/view/view.go b/stats/view/view.go index 95f01ad32..37f88e1d9 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -24,6 +24,7 @@ import ( "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -116,15 +117,17 @@ func dropZeroBounds(bounds ...float64) []float64 { // viewInternal is the internal representation of a View. type viewInternal struct { - view *View // view is the canonicalized View definition associated with this view. - subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access - collector *collector + view *View // view is the canonicalized View definition associated with this view. 
+ subscribed uint32 // 1 if someone is subscribed and data need to be exported, use atomic to access + collector *collector + metricDescriptor *metricdata.Descriptor } func newViewInternal(v *View) (*viewInternal, error) { return &viewInternal{ - view: v, - collector: &collector{make(map[string]AggregationData), v.Aggregation}, + view: v, + collector: &collector{make(map[string]AggregationData), v.Aggregation}, + metricDescriptor: viewToMetricDescriptor(v), }, nil } diff --git a/stats/view/view_to_metric.go b/stats/view/view_to_metric.go new file mode 100644 index 000000000..284299faf --- /dev/null +++ b/stats/view/view_to_metric.go @@ -0,0 +1,131 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" +) + +func getUnit(unit string) metricdata.Unit { + switch unit { + case "1": + return metricdata.UnitDimensionless + case "ms": + return metricdata.UnitMilliseconds + case "By": + return metricdata.UnitBytes + } + return metricdata.UnitDimensionless +} + +func getType(v *View) metricdata.Type { + m := v.Measure + agg := v.Aggregation + + switch agg.Type { + case AggTypeSum: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeDistribution: + return metricdata.TypeCumulativeDistribution + case AggTypeLastValue: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeGaugeInt64 + case *stats.Float64Measure: + return metricdata.TypeGaugeFloat64 + default: + panic("unexpected measure type") + } + case AggTypeCount: + switch m.(type) { + case *stats.Int64Measure: + return metricdata.TypeCumulativeInt64 + case *stats.Float64Measure: + return metricdata.TypeCumulativeInt64 + default: + panic("unexpected measure type") + } + default: + panic("unexpected aggregation type") + } +} + +func getLableKeys(v *View) []string { + labelKeys := []string{} + for _, k := range v.TagKeys { + labelKeys = append(labelKeys, k.Name()) + } + return labelKeys +} + +func viewToMetricDescriptor(v *View) *metricdata.Descriptor { + return &metricdata.Descriptor{ + Name: v.Name, + Description: v.Description, + Unit: getUnit(v.Measure.Unit()), + Type: getType(v), + LabelKeys: getLableKeys(v), + } +} + +func toLabelValues(row *Row) []metricdata.LabelValue { + labelValues := []metricdata.LabelValue{} + for _, tag := range row.Tags { + labelValues = append(labelValues, metricdata.NewLabelValue(tag.Value)) + } + return labelValues +} + +func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime 
time.Time) *metricdata.TimeSeries { + return &metricdata.TimeSeries{ + Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, + LabelValues: toLabelValues(row), + StartTime: startTime, + } +} + +func viewToMetric(v *viewInternal, now time.Time, startTime time.Time) *metricdata.Metric { + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } + + rows := v.collectedRows() + if len(rows) == 0 { + return nil + } + + ts := []*metricdata.TimeSeries{} + for _, row := range rows { + ts = append(ts, rowToTimeseries(v, row, now, startTime)) + } + + m := &metricdata.Metric{ + Descriptor: *v.metricDescriptor, + TimeSeries: ts, + } + return m +} diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go new file mode 100644 index 000000000..d9a5a3de0 --- /dev/null +++ b/stats/view/view_to_metric_test.go @@ -0,0 +1,424 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+// + +package view + +import ( + "context" + "testing" + "time" + + "encoding/json" + "github.com/google/go-cmp/cmp" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/tag" +) + +type recordValWithTag struct { + tags []tag.Tag + value interface{} +} +type testToMetrics struct { + vi *viewInternal + view *View + recordValue []recordValWithTag + wantMetric *metricdata.Metric +} + +var ( + // tag objects. + tk1 tag.Key + tk2 tag.Key + tk3 tag.Key + tk1v1 tag.Tag + tk1v2 tag.Tag + tk2v2 tag.Tag + tk3v3 tag.Tag + tags []tag.Tag + labelValues []metricdata.LabelValue + labelKeys []string + + recordsInt64 []recordValWithTag + recordsFloat64 []recordValWithTag + + // distribution objects. + aggDist *Aggregation + aggCnt *Aggregation + aggS *Aggregation + aggL *Aggregation + buckOpt *metricdata.BucketOptions + + // exemplar objects. + attachments metricdata.Attachments + + // views and descriptors + viewTypeFloat64Distribution *View + viewTypeInt64Distribution *View + viewTypeInt64Count *View + viewTypeFloat64Count *View + viewTypeFloat64Sum *View + viewTypeInt64Sum *View + viewTypeFloat64LastValue *View + viewTypeInt64LastValue *View + mdTypeFloat64CumulativeDistribution metricdata.Descriptor + mdTypeInt64CumulativeDistribution metricdata.Descriptor + mdTypeInt64CumulativeCount metricdata.Descriptor + mdTypeFloat64CumulativeCount metricdata.Descriptor + mdTypeInt64CumulativeSum metricdata.Descriptor + mdTypeFloat64CumulativeSum metricdata.Descriptor + mdTypeInt64CumulativeLastValue metricdata.Descriptor + mdTypeFloat64CumulativeLastValue metricdata.Descriptor +) + +const ( + nameInt64DistM1 = "viewToMetricTest_Int64_Distribution/m1" + nameFloat64DistM1 = "viewToMetricTest_Float64_Distribution/m1" + nameInt64CountM1 = "viewToMetricTest_Int64_Count/m1" + nameFloat64CountM1 = "viewToMetricTest_Float64_Count/m1" + nameInt64SumM1 = "viewToMetricTest_Int64_Sum/m1" + nameFloat64SumM1 = "viewToMetricTest_Float64_Sum/m1" + nameInt64LastValueM1 = 
"viewToMetricTest_Int64_LastValue/m1" + nameFloat64LastValueM1 = "viewToMetricTest_Float64_LastValue/m1" + v1 = "v1" + v2 = "v2" + v3 = "v3" +) + +func init() { + initTags() + initAgg() + initViews() + initMetricDescriptors() + +} + +func initTags() { + tk1, _ = tag.NewKey("k1") + tk2, _ = tag.NewKey("k2") + tk3, _ = tag.NewKey("k3") + tk1v1 = tag.Tag{Key: tk1, Value: v1} + tk1v2 = tag.Tag{Key: tk1, Value: v2} + tk2v2 = tag.Tag{Key: tk2, Value: v2} + tk3v3 = tag.Tag{Key: tk3, Value: v3} + + tags = []tag.Tag{tk1v1, tk2v2} + labelValues = []metricdata.LabelValue{ + {Value: v1, Present: true}, + {Value: v2, Present: true}, + } + labelKeys = []string{tk1.Name(), tk2.Name()} + + recordsInt64 = []recordValWithTag{ + {tags: tags, value: int64(2)}, + {tags: tags, value: int64(4)}, + } + recordsFloat64 = []recordValWithTag{ + {tags: tags, value: float64(1.5)}, + {tags: tags, value: float64(5.4)}, + } +} + +func initAgg() { + aggDist = Distribution(2.0) + aggCnt = Count() + aggS = Sum() + aggL = LastValue() + buckOpt = &metricdata.BucketOptions{Bounds: []float64{2.0}} +} + +func initViews() { + // View objects + viewTypeInt64Distribution = &View{ + Name: nameInt64DistM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Int64(nameInt64DistM1, "", stats.UnitDimensionless), + Aggregation: aggDist, + } + viewTypeFloat64Distribution = &View{ + Name: nameFloat64DistM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Float64(nameFloat64DistM1, "", stats.UnitDimensionless), + Aggregation: aggDist, + } + viewTypeInt64Count = &View{ + Name: nameInt64CountM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Int64(nameInt64CountM1, "", stats.UnitDimensionless), + Aggregation: aggCnt, + } + viewTypeFloat64Count = &View{ + Name: nameFloat64CountM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Float64(nameFloat64CountM1, "", stats.UnitDimensionless), + Aggregation: aggCnt, + } + viewTypeInt64Sum = &View{ + Name: nameInt64SumM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: 
stats.Int64(nameInt64SumM1, "", stats.UnitBytes), + Aggregation: aggS, + } + viewTypeFloat64Sum = &View{ + Name: nameFloat64SumM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Float64(nameFloat64SumM1, "", stats.UnitMilliseconds), + Aggregation: aggS, + } + viewTypeInt64LastValue = &View{ + Name: nameInt64LastValueM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Int64(nameInt64LastValueM1, "", stats.UnitDimensionless), + Aggregation: aggL, + } + viewTypeFloat64LastValue = &View{ + Name: nameFloat64LastValueM1, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Float64(nameFloat64LastValueM1, "", stats.UnitDimensionless), + Aggregation: aggL, + } +} + +func initMetricDescriptors() { + // Metric objects + mdTypeFloat64CumulativeDistribution = metricdata.Descriptor{ + Name: nameFloat64DistM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, LabelKeys: labelKeys, + } + mdTypeInt64CumulativeDistribution = metricdata.Descriptor{ + Name: nameInt64DistM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, LabelKeys: labelKeys, + } + mdTypeInt64CumulativeCount = metricdata.Descriptor{ + Name: nameInt64CountM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, + } + mdTypeFloat64CumulativeCount = metricdata.Descriptor{ + Name: nameFloat64CountM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, + } + mdTypeInt64CumulativeSum = metricdata.Descriptor{ + Name: nameInt64SumM1, Description: "", Unit: metricdata.UnitBytes, + Type: metricdata.TypeCumulativeInt64, LabelKeys: labelKeys, + } + mdTypeFloat64CumulativeSum = metricdata.Descriptor{ + Name: nameFloat64SumM1, Description: "", Unit: metricdata.UnitMilliseconds, + Type: metricdata.TypeCumulativeFloat64, LabelKeys: labelKeys, + } + mdTypeInt64CumulativeLastValue = metricdata.Descriptor{ + Name: 
nameInt64LastValueM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeGaugeInt64, LabelKeys: labelKeys, + } + mdTypeFloat64CumulativeLastValue = metricdata.Descriptor{ + Name: nameFloat64LastValueM1, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeGaugeFloat64, LabelKeys: labelKeys, + } +} + +func Test_ViewToMetric(t *testing.T) { + startTime := time.Now().Add(-time.Duration(60 * time.Second)) + now := time.Now() + tests := []*testToMetrics{ + { + view: viewTypeInt64Distribution, + recordValue: recordsInt64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeInt64CumulativeDistribution, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + {Value: &metricdata.Distribution{ + Count: 2, + Sum: 6.0, + SumOfSquaredDeviation: 2, + BucketOptions: buckOpt, + Buckets: []metricdata.Bucket{ + {Count: 0, Exemplar: nil}, + {Count: 2, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeFloat64Distribution, + recordValue: recordsFloat64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeFloat64CumulativeDistribution, + TimeSeries: []*metricdata.TimeSeries{ + { + Points: []metricdata.Point{ + { + Value: &metricdata.Distribution{ + Count: 2, + Sum: 6.9, + SumOfSquaredDeviation: 7.605000000000001, + BucketOptions: buckOpt, + Buckets: []metricdata.Bucket{ + {Count: 1, Exemplar: nil}, // TODO: [rghetia] add exemplar test. 
+ {Count: 1, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeInt64Count, + recordValue: recordsInt64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeInt64CumulativeCount, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewInt64Point(now, 2), + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeFloat64Count, + recordValue: recordsFloat64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeFloat64CumulativeCount, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewInt64Point(now, 2), + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeInt64Sum, + recordValue: recordsInt64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeInt64CumulativeSum, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewInt64Point(now, 6), + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeFloat64Sum, + recordValue: recordsFloat64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeFloat64CumulativeSum, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewFloat64Point(now, 6.9), + }, + LabelValues: labelValues, + StartTime: startTime, + }, + }, + }, + }, + { + view: viewTypeInt64LastValue, + recordValue: recordsInt64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeInt64CumulativeLastValue, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewInt64Point(now, 4), + }, + LabelValues: labelValues, + StartTime: time.Time{}, + }, + }, + }, + }, + { + view: viewTypeFloat64LastValue, + recordValue: recordsFloat64, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeFloat64CumulativeLastValue, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + 
metricdata.NewFloat64Point(now, 5.4), + }, + LabelValues: labelValues, + StartTime: time.Time{}, + }, + }, + }, + }, + } + + wantMetrics := []*metricdata.Metric{} + for _, tc := range tests { + tc.vi, _ = defaultWorker.tryRegisterView(tc.view) + tc.vi.clearRows() + tc.vi.subscribe() + wantMetrics = append(wantMetrics, tc.wantMetric) + } + + for i, tc := range tests { + for _, r := range tc.recordValue { + mods := []tag.Mutator{} + for _, tg := range r.tags { + mods = append(mods, tag.Insert(tg.Key, tg.Value)) + } + ctx, err := tag.New(context.Background(), mods...) + if err != nil { + t.Errorf("%v: New = %v", tc.view.Name, err) + } + var v float64 + switch i := r.value.(type) { + case float64: + v = float64(i) + case int64: + v = float64(i) + default: + t.Errorf("unexpected value type %v", r.tags) + } + tc.vi.addSample(tag.FromContext(ctx), v, nil, now) + } + + gotMetric := viewToMetric(tc.vi, now, startTime) + if !cmp.Equal(gotMetric, tc.wantMetric) { + // JSON format is strictly for checking the content when test fails. 
Do not use JSON + // format to determine if the two values are same as it doesn't differentiate between + // int64(2) and float64(2.0) + t.Errorf("#%d: Unmatched \nGot:\n\t%v\nWant:\n\t%v\nGot Serialized:%s\nWant Serialized:%s\n", + i, gotMetric, tc.wantMetric, serializeAsJSON(gotMetric), serializeAsJSON(tc.wantMetric)) + } + } +} + +func serializeAsJSON(v interface{}) string { + blob, _ := json.MarshalIndent(v, "", " ") + return string(blob) +} diff --git a/stats/view/worker.go b/stats/view/worker.go index d29dbae92..37279b39e 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -17,8 +17,11 @@ package view import ( "fmt" + "sync" "time" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" "go.opencensus.io/stats" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" @@ -43,6 +46,7 @@ type worker struct { timer *time.Ticker c chan command quit, done chan bool + mu sync.RWMutex } var defaultWorker *worker @@ -143,6 +147,9 @@ func newWorker() *worker { } func (w *worker) start() { + prodMgr := metricproducer.GlobalManager() + prodMgr.AddProducer(w) + for { select { case cmd := <-w.c: @@ -159,6 +166,9 @@ func (w *worker) start() { } func (w *worker) stop() { + prodMgr := metricproducer.GlobalManager() + prodMgr.DeleteProducer(w) + w.quit <- true <-w.done } @@ -176,6 +186,8 @@ func (w *worker) getMeasureRef(name string) *measureRef { } func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { + w.mu.Lock() + defer w.mu.Unlock() vi, err := newViewInternal(v) if err != nil { return nil, err @@ -195,6 +207,12 @@ func (w *worker) tryRegisterView(v *View) (*viewInternal, error) { return vi, nil } +func (w *worker) unregisterView(viewName string) { + w.mu.Lock() + defer w.mu.Unlock() + delete(w.views, viewName) +} + func (w *worker) reportView(v *viewInternal, now time.Time) { if !v.isSubscribed() { return @@ -222,3 +240,40 @@ func (w *worker) reportUsage(now time.Time) { w.reportView(v, now) } } + +func (w *worker) 
toMetric(v *viewInternal, now time.Time) *metricdata.Metric { + if !v.isSubscribed() { + return nil + } + + _, ok := w.startTimes[v] + if !ok { + w.startTimes[v] = now + } + + var startTime time.Time + if v.metricDescriptor.Type == metricdata.TypeGaugeInt64 || + v.metricDescriptor.Type == metricdata.TypeGaugeFloat64 { + startTime = time.Time{} + } else { + startTime = w.startTimes[v] + } + + return viewToMetric(v, now, startTime) +} + +// Read reads all view data and returns them as metrics. +// It is typically invoked by metric reader to export stats in metric format. +func (w *worker) Read() []*metricdata.Metric { + w.mu.Lock() + defer w.mu.Unlock() + now := time.Now() + metrics := make([]*metricdata.Metric, 0, len(w.views)) + for _, v := range w.views { + metric := w.toMetric(v, now) + if metric != nil { + metrics = append(metrics, metric) + } + } + return metrics +} diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index e27f29401..ba6203a50 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -103,7 +103,7 @@ func (cmd *unregisterFromViewReq) handleCommand(w *worker) { // The collected data can be cleared. vi.clearRows() } - delete(w.views, name) + w.unregisterView(name) } cmd.done <- struct{}{} } From 41e54b832491efe97a2aafe696f6e7d812f136bb Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 27 Mar 2019 10:06:59 -0700 Subject: [PATCH 145/212] metric type is not set for gauges. 
(#1082) --- metric/gauge_test.go | 21 +++++++++++++++++++++ metric/registry.go | 16 ++++++++++++++++ 2 files changed, 37 insertions(+) diff --git a/metric/gauge_test.go b/metric/gauge_test.go index 905e7f296..b451f5714 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -41,6 +41,7 @@ func TestGauge(t *testing.T) { Descriptor: metricdata.Descriptor{ Name: "TestGauge", LabelKeys: []string{"k1", "k2"}, + Type: metricdata.TypeGaugeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { @@ -79,6 +80,26 @@ func TestGauge(t *testing.T) { } } +func TestGaugeMetricDescriptor(t *testing.T) { + unit := metricdata.UnitDimensionless + r := NewRegistry() + + gf, _ := r.AddFloat64Gauge("float64_gauge", "", unit) + compareType(gf.g.desc.Type, metricdata.TypeGaugeFloat64, t) + gi, _ := r.AddInt64Gauge("int64_gauge", "", unit) + compareType(gi.g.desc.Type, metricdata.TypeGaugeInt64, t) + dgf, _ := r.AddFloat64DerivedGauge("derived_float64_gauge", "", unit) + compareType(dgf.g.desc.Type, metricdata.TypeGaugeFloat64, t) + dgi, _ := r.AddInt64DerivedGauge("derived_int64_gauge", "", unit) + compareType(dgi.g.desc.Type, metricdata.TypeGaugeInt64, t) +} + +func compareType(got, want metricdata.Type, t *testing.T) { + if got != want { + t.Errorf("metricdata type: got %v, want %v\n", got, want) + } +} + func TestFloat64Entry_Add(t *testing.T) { r := NewRegistry() g, _ := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) diff --git a/metric/registry.go b/metric/registry.go index 19cc8874a..6181f45f3 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -102,6 +102,21 @@ func (r *Registry) AddFloat64DerivedGauge(name, description string, unit metricd return f, nil } +func gTypeToMetricType(g *gauge) metricdata.Type { + switch g.gType { + case derivedGaugeFloat64: + return metricdata.TypeGaugeFloat64 + case derivedGaugeInt64: + return metricdata.TypeGaugeInt64 + case gaugeFloat64: + return metricdata.TypeGaugeFloat64 + case gaugeInt64: + return metricdata.TypeGaugeInt64 + 
default: + panic("unsupported gauge type") + } +} + func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) (*gauge, error) { val, ok := r.gauges.Load(name) if ok { @@ -117,6 +132,7 @@ func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, descript Description: description, Unit: unit, LabelKeys: labelKeys, + Type: gTypeToMetricType(g), } r.gauges.Store(name, g) return g, nil From 948b0cbb528c09f0595e797ff72992d8a9f46be0 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 1 Apr 2019 10:08:52 -0700 Subject: [PATCH 146/212] Refactor gauge api with options. (#1086) * Refactor gauge api with options. * fixed review comments. --- metric/examples_test.go | 5 +- metric/gauge_test.go | 108 ++++++++++++++++++++++------- metric/metricexport/reader_test.go | 5 +- metric/registry.go | 66 ++++++++++++++---- 4 files changed, 145 insertions(+), 39 deletions(-) diff --git a/metric/examples_test.go b/metric/examples_test.go index cc39571ba..c4b2e0aa9 100644 --- a/metric/examples_test.go +++ b/metric/examples_test.go @@ -25,7 +25,10 @@ func ExampleRegistry_AddInt64Gauge() { r := metric.NewRegistry() // TODO: allow exporting from a registry - g, _ := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") + g, _ := r.AddInt64Gauge("active_request", + metric.WithDescription("Number of active requests, per method."), + metric.WithUnit(metricdata.UnitDimensionless), + metric.WithLabelKeys("method")) http.HandleFunc("/", func(writer http.ResponseWriter, request *http.Request) { e, _ := g.GetEntry(metricdata.NewLabelValue(request.Method)) diff --git a/metric/gauge_test.go b/metric/gauge_test.go index b451f5714..8ed358b1e 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -26,7 +26,9 @@ import ( func TestGauge(t *testing.T) { r := NewRegistry() - f, _ := r.AddFloat64Gauge("TestGauge", "", "", "k1", "k2") + + f, _ := r.AddFloat64Gauge("TestGauge", 
+ WithLabelKeys("k1", "k2")) e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) e.Set(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) @@ -81,16 +83,15 @@ func TestGauge(t *testing.T) { } func TestGaugeMetricDescriptor(t *testing.T) { - unit := metricdata.UnitDimensionless r := NewRegistry() - gf, _ := r.AddFloat64Gauge("float64_gauge", "", unit) + gf, _ := r.AddFloat64Gauge("float64_gauge") compareType(gf.g.desc.Type, metricdata.TypeGaugeFloat64, t) - gi, _ := r.AddInt64Gauge("int64_gauge", "", unit) + gi, _ := r.AddInt64Gauge("int64_gauge") compareType(gi.g.desc.Type, metricdata.TypeGaugeInt64, t) - dgf, _ := r.AddFloat64DerivedGauge("derived_float64_gauge", "", unit) + dgf, _ := r.AddFloat64DerivedGauge("derived_float64_gauge") compareType(dgf.g.desc.Type, metricdata.TypeGaugeFloat64, t) - dgi, _ := r.AddInt64DerivedGauge("derived_int64_gauge", "", unit) + dgi, _ := r.AddInt64DerivedGauge("derived_int64_gauge") compareType(dgi.g.desc.Type, metricdata.TypeGaugeInt64, t) } @@ -100,9 +101,68 @@ func compareType(got, want metricdata.Type, t *testing.T) { } } +func TestGaugeMetricOptionDesc(t *testing.T) { + r := NewRegistry() + name := "testOptDesc" + gf, _ := r.AddFloat64Gauge(name, WithDescription("test")) + want := metricdata.Descriptor{ + Name: name, + Description: "test", + Type: metricdata.TypeGaugeFloat64, + } + got := gf.g.desc + if !cmp.Equal(got, want) { + t.Errorf("metric option description: got %v, want %v\n", got, want) + } +} + +func TestGaugeMetricOptionUnit(t *testing.T) { + r := NewRegistry() + name := "testOptUnit" + gf, _ := r.AddFloat64Gauge(name, WithUnit(metricdata.UnitMilliseconds)) + want := metricdata.Descriptor{ + Name: name, + Unit: metricdata.UnitMilliseconds, + Type: metricdata.TypeGaugeFloat64, + } + got := gf.g.desc + if !cmp.Equal(got, want) { + t.Errorf("metric descriptor: got %v, want %v\n", got, want) + } +} + +func TestGaugeMetricOptionLabelKeys(t *testing.T) { + r := NewRegistry() 
+ name := "testOptUnit" + gf, _ := r.AddFloat64Gauge(name, WithLabelKeys("k1", "k3")) + want := metricdata.Descriptor{ + Name: name, + LabelKeys: []string{"k1", "k3"}, + Type: metricdata.TypeGaugeFloat64, + } + got := gf.g.desc + if !cmp.Equal(got, want) { + t.Errorf("metric descriptor: got %v, want %v\n", got, want) + } +} + +func TestGaugeMetricOptionDefault(t *testing.T) { + r := NewRegistry() + name := "testOptUnit" + gf, _ := r.AddFloat64Gauge(name) + want := metricdata.Descriptor{ + Name: name, + Type: metricdata.TypeGaugeFloat64, + } + got := gf.g.desc + if !cmp.Equal(got, want) { + t.Errorf("metric descriptor: got %v, want %v\n", got, want) + } +} + func TestFloat64Entry_Add(t *testing.T) { r := NewRegistry() - g, _ := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + g, _ := r.AddFloat64Gauge("g") e, _ := g.GetEntry() e.Add(0) ms := r.Read() @@ -125,7 +185,7 @@ func TestFloat64Entry_Add(t *testing.T) { func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() - g, _ := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + g, _ := r.AddFloat64Gauge("g") e, _ := g.GetEntry() e.Add(-1.0) ms := r.Read() @@ -136,7 +196,7 @@ func TestFloat64Gauge_Add_NegativeTotals(t *testing.T) { func TestInt64GaugeEntry_Add(t *testing.T) { r := NewRegistry() - g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + g, _ := r.AddInt64Gauge("g") e, _ := g.GetEntry() e.Add(0) ms := r.Read() @@ -153,7 +213,7 @@ func TestInt64GaugeEntry_Add(t *testing.T) { func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { r := NewRegistry() - g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) + g, _ := r.AddInt64Gauge("g") e, _ := g.GetEntry() e.Add(-1) ms := r.Read() @@ -164,16 +224,16 @@ func TestInt64Gauge_Add_NegativeTotals(t *testing.T) { func TestGaugeWithSameNameDiffType(t *testing.T) { r := NewRegistry() - r.AddInt64Gauge("g", "", metricdata.UnitDimensionless) - _, gotErr := r.AddFloat64Gauge("g", "", metricdata.UnitDimensionless) + 
r.AddInt64Gauge("g") + _, gotErr := r.AddFloat64Gauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) } - _, gotErr = r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless) + _, gotErr = r.AddInt64DerivedGauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) } - _, gotErr = r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless) + _, gotErr = r.AddFloat64DerivedGauge("g") if gotErr == nil { t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) } @@ -181,7 +241,7 @@ func TestGaugeWithSameNameDiffType(t *testing.T) { func TestGaugeWithLabelMismatch(t *testing.T) { r := NewRegistry() - g, _ := r.AddInt64Gauge("g", "", metricdata.UnitDimensionless, "k1") + g, _ := r.AddInt64Gauge("g", WithLabelKeys("k1")) _, gotErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) if gotErr == nil { t.Errorf("got: nil, want error: %v", errKeyValueMismatch) @@ -222,7 +282,7 @@ func TestRaceCondition(t *testing.T) { for i := 0; i < 5; i++ { go func(k int) { for j := 0; j < 5; j++ { - g, _ := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j), "", metricdata.UnitDimensionless) + g, _ := r.AddInt64Gauge(fmt.Sprintf("g%d%d", k, j)) e, _ := g.GetEntry() e.Add(1) } @@ -272,7 +332,7 @@ func (q *queueInt64) ToInt64() int64 { func TestInt64DerivedGaugeEntry_Add(t *testing.T) { r := NewRegistry() q := &queueInt64{3} - g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) @@ -290,7 +350,7 @@ func TestInt64DerivedGaugeEntry_Add(t *testing.T) { func TestInt64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { r := NewRegistry() - g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := 
r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") @@ -300,7 +360,7 @@ func TestInt64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { func TestInt64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &queueInt64{3} - g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") @@ -311,7 +371,7 @@ func TestInt64DerivedGaugeEntry_Update(t *testing.T) { r := NewRegistry() q := &queueInt64{3} q2 := &queueInt64{5} - g, _ := r.AddInt64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddInt64DerivedGauge("g", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { @@ -334,7 +394,7 @@ func (q *queueFloat64) ToFloat64() float64 { func TestFloat64DerivedGaugeEntry_Add(t *testing.T) { r := NewRegistry() q := &queueFloat64{5.0} - g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) err := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if err != nil { t.Errorf("want: nil, got: %v", err) @@ -352,7 +412,7 @@ func TestFloat64DerivedGaugeEntry_Add(t *testing.T) { func TestFloat64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { r := NewRegistry() - g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), 
metricdata.LabelValue{}) if gotErr == nil { t.Errorf("expected error but got nil") @@ -362,7 +422,7 @@ func TestFloat64DerivedGaugeEntry_AddWithNilObj(t *testing.T) { func TestFloat64DerivedGaugeEntry_AddWithInvalidLabels(t *testing.T) { r := NewRegistry() q := &queueFloat64{3} - g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) gotErr := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1")) if gotErr == nil { t.Errorf("expected error but got nil") @@ -373,7 +433,7 @@ func TestFloat64DerivedGaugeEntry_Update(t *testing.T) { r := NewRegistry() q := &queueFloat64{3.0} q2 := &queueFloat64{5.0} - g, _ := r.AddFloat64DerivedGauge("g", "", metricdata.UnitDimensionless, "k1", "k2") + g, _ := r.AddFloat64DerivedGauge("g", WithLabelKeys("k1", "k2")) g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) gotErr := g.UpsertEntry(q2.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) if gotErr != nil { diff --git a/metric/metricexport/reader_test.go b/metric/metricexport/reader_test.go index a043530e8..756792486 100644 --- a/metric/metricexport/reader_test.go +++ b/metric/metricexport/reader_test.go @@ -53,7 +53,10 @@ func (e *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdat func init() { r := metric.NewRegistry() metricproducer.GlobalManager().AddProducer(r) - g, _ := r.AddInt64Gauge("active_request", "Number of active requests, per method.", metricdata.UnitDimensionless, "method") + g, _ := r.AddInt64Gauge("active_request", + metric.WithDescription("Number of active requests, per method."), + metric.WithUnit(metricdata.UnitDimensionless), + metric.WithLabelKeys("method")) gaugeEntry, _ = g.GetEntry(metricdata.NewLabelValue("foo")) } diff --git a/metric/registry.go b/metric/registry.go index 6181f45f3..80df54229 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -37,19 +37,50 @@ const ( 
derivedGaugeFloat64 ) +//TODO: [rghetia] add constant labels. +type metricOptions struct { + unit metricdata.Unit + labelkeys []string + desc string +} + +// Options apply changes to metricOptions. +type Options func(*metricOptions) + +// WithDescription applies provided description. +func WithDescription(desc string) Options { + return func(mo *metricOptions) { + mo.desc = desc + } +} + +// WithUnit applies provided unit. +func WithUnit(unit metricdata.Unit) Options { + return func(mo *metricOptions) { + mo.unit = unit + } +} + +// WithLabelKeys applies provided label. +func WithLabelKeys(labelKeys ...string) Options { + return func(mo *metricOptions) { + mo.labelkeys = labelKeys + } +} + // NewRegistry initializes a new Registry. func NewRegistry() *Registry { return &Registry{} } // AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. -func (r *Registry) AddFloat64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Float64Gauge, error) { +func (r *Registry) AddFloat64Gauge(name string, mos ...Options) (*Float64Gauge, error) { f := &Float64Gauge{ g: gauge{ gType: gaugeFloat64, }, } - _, err := r.initGauge(&f.g, labelKeys, name, description, unit) + _, err := r.initGauge(&f.g, name, mos...) if err != nil { return nil, err } @@ -57,13 +88,13 @@ func (r *Registry) AddFloat64Gauge(name, description string, unit metricdata.Uni } // AddInt64Gauge creates and adds a new int64-valued gauge to this registry. -func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Int64Gauge, error) { +func (r *Registry) AddInt64Gauge(name string, mos ...Options) (*Int64Gauge, error) { i := &Int64Gauge{ g: gauge{ gType: gaugeInt64, }, } - _, err := r.initGauge(&i.g, labelKeys, name, description, unit) + _, err := r.initGauge(&i.g, name, mos...) 
if err != nil { return nil, err } @@ -73,13 +104,13 @@ func (r *Registry) AddInt64Gauge(name, description string, unit metricdata.Unit, // AddInt64DerivedGauge creates and adds a new derived int64-valued gauge to this registry. // A derived gauge is convenient form of gauge where the object associated with the gauge // provides its value by implementing func() int64. -func (r *Registry) AddInt64DerivedGauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Int64DerivedGauge, error) { +func (r *Registry) AddInt64DerivedGauge(name string, mos ...Options) (*Int64DerivedGauge, error) { i := &Int64DerivedGauge{ g: gauge{ gType: derivedGaugeInt64, }, } - _, err := r.initGauge(&i.g, labelKeys, name, description, unit) + _, err := r.initGauge(&i.g, name, mos...) if err != nil { return nil, err } @@ -89,13 +120,13 @@ func (r *Registry) AddInt64DerivedGauge(name, description string, unit metricdat // AddFloat64DerivedGauge creates and adds a new derived float64-valued gauge to this registry. // A derived gauge is convenient form of gauge where the object associated with the gauge // provides its value by implementing func() float64. -func (r *Registry) AddFloat64DerivedGauge(name, description string, unit metricdata.Unit, labelKeys ...string) (*Float64DerivedGauge, error) { +func (r *Registry) AddFloat64DerivedGauge(name string, mos ...Options) (*Float64DerivedGauge, error) { f := &Float64DerivedGauge{ g: gauge{ gType: derivedGaugeFloat64, }, } - _, err := r.initGauge(&f.g, labelKeys, name, description, unit) + _, err := r.initGauge(&f.g, name, mos...) 
if err != nil { return nil, err } @@ -117,7 +148,15 @@ func gTypeToMetricType(g *gauge) metricdata.Type { } } -func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, description string, unit metricdata.Unit) (*gauge, error) { +func createMetricOption(mos ...Options) *metricOptions { + o := &metricOptions{} + for _, mo := range mos { + mo(o) + } + return o +} + +func (r *Registry) initGauge(g *gauge, name string, mos ...Options) (*gauge, error) { val, ok := r.gauges.Load(name) if ok { existing := val.(*gauge) @@ -125,13 +164,14 @@ func (r *Registry) initGauge(g *gauge, labelKeys []string, name string, descript return nil, errGaugeExistsWithDiffType } } - g.keys = labelKeys g.start = time.Now() + o := createMetricOption(mos...) + g.keys = o.labelkeys g.desc = metricdata.Descriptor{ Name: name, - Description: description, - Unit: unit, - LabelKeys: labelKeys, + Description: o.desc, + Unit: o.unit, + LabelKeys: o.labelkeys, Type: gTypeToMetricType(g), } r.gauges.Store(name, g) From 46618d076d80f4eab85adbcb3be9a370be1dc4e0 Mon Sep 17 00:00:00 2001 From: Aya Igarashi Date: Tue, 2 Apr 2019 02:24:35 +0900 Subject: [PATCH 147/212] Replace deprecated Endpoint of Jaeger example (#1087) --- exporter/jaeger/example/main.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/exporter/jaeger/example/main.go b/exporter/jaeger/example/main.go index 303bc6ea1..12e7a9052 100644 --- a/exporter/jaeger/example/main.go +++ b/exporter/jaeger/example/main.go @@ -30,7 +30,7 @@ func main() { // Register the Jaeger exporter to be able to retrieve // the collected spans. 
exporter, err := jaeger.NewExporter(jaeger.Options{ - Endpoint: "http://localhost:14268", + CollectorEndpoint: "http://localhost:14268/api/traces", Process: jaeger.Process{ ServiceName: "trace-demo", }, From d0e431e6d06ef7615294bf1e2e29fc509c0ab03e Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 1 Apr 2019 16:28:54 -0700 Subject: [PATCH 148/212] Bump up the version to 0.21.0 (#1088) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index a52dcd8c6..d2565f1e2 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.20.0" + return "0.21.0" } From 60399d2142e0691c0ebdca89e466a5f555730ffa Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 8 Apr 2019 12:19:34 -0700 Subject: [PATCH 149/212] Fix version dependencies with google.golang.org (#1094) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index b59bf6c13..cf0fbec6c 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,6 @@ require ( github.com/openzipkin/zipkin-go v0.1.6 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/api v0.2.0 + google.golang.org/api v0.3.1+alpha google.golang.org/grpc v1.19.0 ) From eec2a5dd95e7f60a4a4ea640843638c7775a1b75 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 8 Apr 2019 12:28:36 -0700 Subject: [PATCH 150/212] replace +alpha with -alpha. 
(#1095) --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index cf0fbec6c..7f6e45121 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,6 @@ require ( github.com/openzipkin/zipkin-go v0.1.6 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/api v0.3.1+alpha + google.golang.org/api v0.3.1-alpha google.golang.org/grpc v1.19.0 ) From 8930459677fde1e11e3e1b50bbed1acc850b5665 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 8 Apr 2019 12:42:29 -0700 Subject: [PATCH 151/212] change google.golang.org/api dependency to v0.3.1 (#1096) step 12/15. --- go.mod | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 7f6e45121..cc9febc02 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,6 @@ require ( github.com/openzipkin/zipkin-go v0.1.6 github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/api v0.3.1-alpha + google.golang.org/api v0.3.1 google.golang.org/grpc v1.19.0 ) From a901c1e4e21a3f29577cff385816f219d24aa0c5 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 8 Apr 2019 14:35:30 -0700 Subject: [PATCH 152/212] fix version script to ignore pre-release tags. 
(#1098) --- internal/check/version.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/internal/check/version.go b/internal/check/version.go index ab57ae73d..1e2ed0be6 100644 --- a/internal/check/version.go +++ b/internal/check/version.go @@ -42,6 +42,10 @@ func main() { if len(vStr) == 0 { continue } + // ignore pre-release versions + if isPreRelease(vStr) { + continue + } versions = append(versions, parseVersion(vStr)) } sort.Slice(versions, func(i, j int) bool { @@ -67,6 +71,11 @@ func versionLess(v1, v2 version) bool { return false } +func isPreRelease(vStr string) bool { + split := strings.Split(vStr[1:], ".") + return strings.Contains(split[2], "-") +} + func parseVersion(vStr string) version { split := strings.Split(vStr[1:], ".") var ( From 75c0cca22312e51bfd4fafdbe9197ae399e18b38 Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 8 Apr 2019 15:24:43 -0700 Subject: [PATCH 153/212] run go mod tidy after fixing dependency on google.golang.org/api (#1097) --- go.sum | 29 +++-------------------------- 1 file changed, 3 insertions(+), 26 deletions(-) diff --git a/go.sum b/go.sum index 300602f17..954fadf79 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,5 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -git.apache.org/thrift.git v0.12.0/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= @@ -16,7 +15,6 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= github.com/eapache/queue v1.1.0/go.mod 
h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= @@ -24,9 +22,7 @@ github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7a github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= @@ -36,7 +32,6 @@ github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8 github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/grpc-ecosystem/grpc-gateway v1.6.2/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/hashicorp/golang-lru v0.5.0 
h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= @@ -50,8 +45,6 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.3 h1:36hTtUTQR/vPX7YVJo2PYexSbHdAJiAkDrjuXw/YlYQ= -github.com/openzipkin/zipkin-go v0.1.3/go.mod h1:NtoC/o8u3JlF1lSlyPNswIbeQH9bJTmOf0Erfk+hxe8= github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo= github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= @@ -73,19 +66,16 @@ github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqn github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.19.1/go.mod h1:gug0GbSHa8Pafr0d2urOSgoXHZ6x/RUlaiT0d9pqb4A= +go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= -golang.org/x/lint v0.0.0-20181217174547-8f45f776aaf1/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= @@ -94,7 +84,6 @@ golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73r golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181203162652-d668ce993890/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync 
v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -109,42 +98,30 @@ golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6 h1:MXtOG7w2ND9qNCUZSDBGll/SpVIq7ftozR9I8/JGBHY= -golang.org/x/sys v0.0.0-20181218192612-074acd46bca6/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181219222714-6e267b5cc78e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.0.0-20181220000619-583d854617af h1:iQMS7JKv/0w/iiWf1M49Cg3dmOkBoBZT5KheqPDpaac= -google.golang.org/api v0.0.0-20181220000619-583d854617af/go.mod h1:4mhQ8q/RsB7i+udVvVy5NUi08OU8ZlA0gRVgrF7VFY0= -google.golang.org/api v0.2.0 h1:B5VXkdjt7K2Gm6fGBC9C9a1OAKJDT95cTqwet+2zib0= -google.golang.org/api v0.2.0/go.mod 
h1:IfRCZScioGtypHNTlz3gFk67J8uePVW7uDTBzXuIkhU= +google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= +google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb h1:dQshZyyJ5W/Xk8myF4GKBak1pZW6EywJuQ8+44EQhGA= -google.golang.org/genproto v0.0.0-20181219182458-5a97ab628bfb/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod 
h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 01ba74404d0b299402513bfc98f354a71f1d1a1e Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 3 Apr 2019 10:05:34 -0700 Subject: [PATCH 154/212] Refactor gauge and registry to accommodate cumulative. (#1089) * Refactor gauge and registry to accomodate cummulative. - use common baseMetric type to manage gauge and cumulative. * fix copyright and renamed couple of func. --- metric/common.go | 126 ++++++++++++++++++++++++++++++++++++++++++ metric/doc.go | 2 +- metric/error_const.go | 6 +- metric/gauge.go | 108 +++--------------------------------- metric/gauge_test.go | 28 +++++----- metric/registry.go | 73 +++++++++++------------- 6 files changed, 184 insertions(+), 159 deletions(-) create mode 100644 metric/common.go diff --git a/metric/common.go b/metric/common.go new file mode 100644 index 000000000..3dcaf1971 --- /dev/null +++ b/metric/common.go @@ -0,0 +1,126 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "sync" + "time" + + "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/metric/metricdata" +) + +// baseMetric is common representation for gauge and cumulative metrics. +// +// baseMetric maintains a value for each combination of of label values passed to +// Set, Add, or Inc method. +// +// baseMetric should not be used directly, use metric specific type such as +// Float64Gauge or Int64Gauge. +type baseMetric struct { + vals sync.Map + desc metricdata.Descriptor + start time.Time + keys []string + bmType baseMetricType +} + +type baseMetricType int + +const ( + gaugeInt64 baseMetricType = iota + gaugeFloat64 + derivedGaugeInt64 + derivedGaugeFloat64 + cumulativeInt64 + cumulativeFloat64 + derivedCumulativeInt64 + derivedCumulativeFloat64 +) + +type baseEntry interface { + read(t time.Time) metricdata.Point +} + +// Read returns the current values of the baseMetric as a metric for export. +func (bm *baseMetric) read() *metricdata.Metric { + now := time.Now() + m := &metricdata.Metric{ + Descriptor: bm.desc, + } + bm.vals.Range(func(k, v interface{}) bool { + entry := v.(baseEntry) + key := k.(string) + labelVals := bm.decodeLabelVals(key) + m.TimeSeries = append(m.TimeSeries, &metricdata.TimeSeries{ + StartTime: now, // Gauge value is instantaneous. + LabelValues: labelVals, + Points: []metricdata.Point{ + entry.read(now), + }, + }) + return true + }) + return m +} + +func (bm *baseMetric) encodeLabelVals(labelVals []metricdata.LabelValue) string { + vb := &tagencoding.Values{} + for _, v := range labelVals { + b := make([]byte, 1, len(v.Value)+1) + if v.Present { + b[0] = 1 + b = append(b, []byte(v.Value)...) 
+ } + vb.WriteValue(b) + } + return string(vb.Bytes()) +} + +func (bm *baseMetric) decodeLabelVals(s string) []metricdata.LabelValue { + vals := make([]metricdata.LabelValue, 0, len(bm.keys)) + vb := &tagencoding.Values{Buffer: []byte(s)} + for range bm.keys { + v := vb.ReadValue() + if v[0] == 0 { + vals = append(vals, metricdata.LabelValue{}) + } else { + vals = append(vals, metricdata.NewLabelValue(string(v[1:]))) + } + } + return vals +} + +func (bm *baseMetric) entryForValues(labelVals []metricdata.LabelValue, newEntry func() baseEntry) (interface{}, error) { + if len(labelVals) != len(bm.keys) { + return nil, errKeyValueMismatch + } + mapKey := bm.encodeLabelVals(labelVals) + if entry, ok := bm.vals.Load(mapKey); ok { + return entry, nil + } + entry, _ := bm.vals.LoadOrStore(mapKey, newEntry()) + return entry, nil +} + +func (bm *baseMetric) upsertEntry(labelVals []metricdata.LabelValue, newEntry func() baseEntry) error { + if len(labelVals) != len(bm.keys) { + return errKeyValueMismatch + } + mapKey := bm.encodeLabelVals(labelVals) + bm.vals.Delete(mapKey) + bm.vals.Store(mapKey, newEntry()) + return nil +} diff --git a/metric/doc.go b/metric/doc.go index 485ee8f58..4b69f9aaa 100644 --- a/metric/doc.go +++ b/metric/doc.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package metric support for gauge metrics. +// Package metric support for gauge and cumulative metrics. // // This is an EXPERIMENTAL package, and may change in arbitrary ways without // notice. 
diff --git a/metric/error_const.go b/metric/error_const.go index c2bdf2b59..da1589419 100644 --- a/metric/error_const.go +++ b/metric/error_const.go @@ -17,7 +17,7 @@ package metric import "errors" var ( - errInvalidParam = errors.New("invalid parameter") - errGaugeExistsWithDiffType = errors.New("gauge with same name exists with a different type") - errKeyValueMismatch = errors.New("must supply the same number of label values as keys used to construct this gauge") + errInvalidParam = errors.New("invalid parameter") + errMetricExistsWithDiffType = errors.New("metric with same name exists with a different type") + errKeyValueMismatch = errors.New("must supply the same number of label values as keys used to construct this metric") ) diff --git a/metric/gauge.go b/metric/gauge.go index 0f5dcba14..d8104844f 100644 --- a/metric/gauge.go +++ b/metric/gauge.go @@ -16,110 +16,18 @@ package metric import ( "math" - "sync" "sync/atomic" "time" - "go.opencensus.io/internal/tagencoding" "go.opencensus.io/metric/metricdata" ) -// gauge represents a quantity that can go up an down, for example queue depth -// or number of outstanding requests. -// -// gauge maintains a value for each combination of of label values passed to -// the Set or Add methods. -// -// gauge should not be used directly, use Float64Gauge or Int64Gauge. -type gauge struct { - vals sync.Map - desc metricdata.Descriptor - start time.Time - keys []string - gType gaugeType -} - -type gaugeEntry interface { - read(t time.Time) metricdata.Point -} - -// Read returns the current values of the gauge as a metric for export. -func (g *gauge) read() *metricdata.Metric { - now := time.Now() - m := &metricdata.Metric{ - Descriptor: g.desc, - } - g.vals.Range(func(k, v interface{}) bool { - entry := v.(gaugeEntry) - key := k.(string) - labelVals := g.labelValues(key) - m.TimeSeries = append(m.TimeSeries, &metricdata.TimeSeries{ - StartTime: now, // Gauge value is instantaneous. 
- LabelValues: labelVals, - Points: []metricdata.Point{ - entry.read(now), - }, - }) - return true - }) - return m -} - -func (g *gauge) mapKey(labelVals []metricdata.LabelValue) string { - vb := &tagencoding.Values{} - for _, v := range labelVals { - b := make([]byte, 1, len(v.Value)+1) - if v.Present { - b[0] = 1 - b = append(b, []byte(v.Value)...) - } - vb.WriteValue(b) - } - return string(vb.Bytes()) -} - -func (g *gauge) labelValues(s string) []metricdata.LabelValue { - vals := make([]metricdata.LabelValue, 0, len(g.keys)) - vb := &tagencoding.Values{Buffer: []byte(s)} - for range g.keys { - v := vb.ReadValue() - if v[0] == 0 { - vals = append(vals, metricdata.LabelValue{}) - } else { - vals = append(vals, metricdata.NewLabelValue(string(v[1:]))) - } - } - return vals -} - -func (g *gauge) entryForValues(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) (interface{}, error) { - if len(labelVals) != len(g.keys) { - return nil, errKeyValueMismatch - } - mapKey := g.mapKey(labelVals) - if entry, ok := g.vals.Load(mapKey); ok { - return entry, nil - } - entry, _ := g.vals.LoadOrStore(mapKey, newEntry()) - return entry, nil -} - -func (g *gauge) upsertEntry(labelVals []metricdata.LabelValue, newEntry func() gaugeEntry) error { - if len(labelVals) != len(g.keys) { - return errKeyValueMismatch - } - mapKey := g.mapKey(labelVals) - g.vals.Delete(mapKey) - g.vals.Store(mapKey, newEntry()) - return nil -} - // Float64Gauge represents a float64 value that can go up and down. // // Float64Gauge maintains a float64 value for each combination of of label values // passed to the Set or Add methods. type Float64Gauge struct { - g gauge + bm baseMetric } // Float64Entry represents a single value of the gauge corresponding to a set @@ -142,7 +50,7 @@ func (e *Float64Entry) read(t time.Time) metricdata.Point { // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. 
func (g *Float64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Float64Entry, error) { - entry, err := g.g.entryForValues(labelVals, func() gaugeEntry { + entry, err := g.bm.entryForValues(labelVals, func() baseEntry { return &Float64Entry{} }) if err != nil { @@ -171,7 +79,7 @@ func (e *Float64Entry) Add(val float64) { // Int64Gauge maintains an int64 value for each combination of label values passed to the // Set or Add methods. type Int64Gauge struct { - g gauge + bm baseMetric } // Int64GaugeEntry represents a single value of the gauge corresponding to a set @@ -194,7 +102,7 @@ func (e *Int64GaugeEntry) read(t time.Time) metricdata.Point { // The number of label values supplied must be exactly the same as the number // of keys supplied when this gauge was created. func (g *Int64Gauge) GetEntry(labelVals ...metricdata.LabelValue) (*Int64GaugeEntry, error) { - entry, err := g.g.entryForValues(labelVals, func() gaugeEntry { + entry, err := g.bm.entryForValues(labelVals, func() baseEntry { return &Int64GaugeEntry{} }) if err != nil { @@ -219,7 +127,7 @@ func (e *Int64GaugeEntry) Add(val int64) { // These objects implement Int64DerivedGaugeInterface to read instantaneous value // representing the object. type Int64DerivedGauge struct { - g gauge + bm baseMetric } type int64DerivedGaugeEntry struct { @@ -241,7 +149,7 @@ func (g *Int64DerivedGauge) UpsertEntry(fn func() int64, labelVals ...metricdata if fn == nil { return errInvalidParam } - return g.g.upsertEntry(labelVals, func() gaugeEntry { + return g.bm.upsertEntry(labelVals, func() baseEntry { return &int64DerivedGaugeEntry{fn} }) } @@ -252,7 +160,7 @@ func (g *Int64DerivedGauge) UpsertEntry(fn func() int64, labelVals ...metricdata // These objects implement Float64DerivedGaugeInterface to read instantaneous value // representing the object. 
type Float64DerivedGauge struct { - g gauge + bm baseMetric } type float64DerivedGaugeEntry struct { @@ -274,7 +182,7 @@ func (g *Float64DerivedGauge) UpsertEntry(fn func() float64, labelVals ...metric if fn == nil { return errInvalidParam } - return g.g.upsertEntry(labelVals, func() gaugeEntry { + return g.bm.upsertEntry(labelVals, func() baseEntry { return &float64DerivedGaugeEntry{fn} }) } diff --git a/metric/gauge_test.go b/metric/gauge_test.go index 8ed358b1e..b8c41d14d 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -86,13 +86,13 @@ func TestGaugeMetricDescriptor(t *testing.T) { r := NewRegistry() gf, _ := r.AddFloat64Gauge("float64_gauge") - compareType(gf.g.desc.Type, metricdata.TypeGaugeFloat64, t) + compareType(gf.bm.desc.Type, metricdata.TypeGaugeFloat64, t) gi, _ := r.AddInt64Gauge("int64_gauge") - compareType(gi.g.desc.Type, metricdata.TypeGaugeInt64, t) + compareType(gi.bm.desc.Type, metricdata.TypeGaugeInt64, t) dgf, _ := r.AddFloat64DerivedGauge("derived_float64_gauge") - compareType(dgf.g.desc.Type, metricdata.TypeGaugeFloat64, t) + compareType(dgf.bm.desc.Type, metricdata.TypeGaugeFloat64, t) dgi, _ := r.AddInt64DerivedGauge("derived_int64_gauge") - compareType(dgi.g.desc.Type, metricdata.TypeGaugeInt64, t) + compareType(dgi.bm.desc.Type, metricdata.TypeGaugeInt64, t) } func compareType(got, want metricdata.Type, t *testing.T) { @@ -110,7 +110,7 @@ func TestGaugeMetricOptionDesc(t *testing.T) { Description: "test", Type: metricdata.TypeGaugeFloat64, } - got := gf.g.desc + got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric option description: got %v, want %v\n", got, want) } @@ -125,7 +125,7 @@ func TestGaugeMetricOptionUnit(t *testing.T) { Unit: metricdata.UnitMilliseconds, Type: metricdata.TypeGaugeFloat64, } - got := gf.g.desc + got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } @@ -140,7 +140,7 @@ func TestGaugeMetricOptionLabelKeys(t *testing.T) { LabelKeys: 
[]string{"k1", "k3"}, Type: metricdata.TypeGaugeFloat64, } - got := gf.g.desc + got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } @@ -154,7 +154,7 @@ func TestGaugeMetricOptionDefault(t *testing.T) { Name: name, Type: metricdata.TypeGaugeFloat64, } - got := gf.g.desc + got := gf.bm.desc if !cmp.Equal(got, want) { t.Errorf("metric descriptor: got %v, want %v\n", got, want) } @@ -227,15 +227,15 @@ func TestGaugeWithSameNameDiffType(t *testing.T) { r.AddInt64Gauge("g") _, gotErr := r.AddFloat64Gauge("g") if gotErr == nil { - t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddInt64DerivedGauge("g") if gotErr == nil { - t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } _, gotErr = r.AddFloat64DerivedGauge("g") if gotErr == nil { - t.Errorf("got: nil, want error: %v", errGaugeExistsWithDiffType) + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) } } @@ -262,11 +262,11 @@ func TestMapKey(t *testing.T) { } for i, tc := range cases { t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { - g := &gauge{ + g := &baseMetric{ keys: make([]string, len(tc)), } - mk := g.mapKey(tc) - vals := g.labelValues(mk) + mk := g.encodeLabelVals(tc) + vals := g.decodeLabelVals(mk) if diff := cmp.Diff(vals, tc); diff != "" { t.Errorf("values differ after serialization -got +want: %s", diff) } diff --git a/metric/registry.go b/metric/registry.go index 80df54229..6b1ff323f 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -21,22 +21,13 @@ import ( "go.opencensus.io/metric/metricdata" ) -// Registry creates and manages a set of gauges. -// External synchronization is required if you want to add gauges to the same +// Registry creates and manages a set of gauges and cumulative. 
+// External synchronization is required if you want to add gauges and cumulative to the same // registry from multiple goroutines. type Registry struct { - gauges sync.Map + baseMetrics sync.Map } -type gaugeType int - -const ( - gaugeInt64 gaugeType = iota - gaugeFloat64 - derivedGaugeInt64 - derivedGaugeFloat64 -) - //TODO: [rghetia] add constant labels. type metricOptions struct { unit metricdata.Unit @@ -76,11 +67,11 @@ func NewRegistry() *Registry { // AddFloat64Gauge creates and adds a new float64-valued gauge to this registry. func (r *Registry) AddFloat64Gauge(name string, mos ...Options) (*Float64Gauge, error) { f := &Float64Gauge{ - g: gauge{ - gType: gaugeFloat64, + bm: baseMetric{ + bmType: gaugeFloat64, }, } - _, err := r.initGauge(&f.g, name, mos...) + _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } @@ -90,11 +81,11 @@ func (r *Registry) AddFloat64Gauge(name string, mos ...Options) (*Float64Gauge, // AddInt64Gauge creates and adds a new int64-valued gauge to this registry. func (r *Registry) AddInt64Gauge(name string, mos ...Options) (*Int64Gauge, error) { i := &Int64Gauge{ - g: gauge{ - gType: gaugeInt64, + bm: baseMetric{ + bmType: gaugeInt64, }, } - _, err := r.initGauge(&i.g, name, mos...) + _, err := r.initBaseMetric(&i.bm, name, mos...) if err != nil { return nil, err } @@ -106,11 +97,11 @@ func (r *Registry) AddInt64Gauge(name string, mos ...Options) (*Int64Gauge, erro // provides its value by implementing func() int64. func (r *Registry) AddInt64DerivedGauge(name string, mos ...Options) (*Int64DerivedGauge, error) { i := &Int64DerivedGauge{ - g: gauge{ - gType: derivedGaugeInt64, + bm: baseMetric{ + bmType: derivedGaugeInt64, }, } - _, err := r.initGauge(&i.g, name, mos...) + _, err := r.initBaseMetric(&i.bm, name, mos...) 
if err != nil { return nil, err } @@ -122,19 +113,19 @@ func (r *Registry) AddInt64DerivedGauge(name string, mos ...Options) (*Int64Deri // provides its value by implementing func() float64. func (r *Registry) AddFloat64DerivedGauge(name string, mos ...Options) (*Float64DerivedGauge, error) { f := &Float64DerivedGauge{ - g: gauge{ - gType: derivedGaugeFloat64, + bm: baseMetric{ + bmType: derivedGaugeFloat64, }, } - _, err := r.initGauge(&f.g, name, mos...) + _, err := r.initBaseMetric(&f.bm, name, mos...) if err != nil { return nil, err } return f, nil } -func gTypeToMetricType(g *gauge) metricdata.Type { - switch g.gType { +func bmTypeToMetricType(bm *baseMetric) metricdata.Type { + switch bm.bmType { case derivedGaugeFloat64: return metricdata.TypeGaugeFloat64 case derivedGaugeInt64: @@ -144,7 +135,7 @@ func gTypeToMetricType(g *gauge) metricdata.Type { case gaugeInt64: return metricdata.TypeGaugeInt64 default: - panic("unsupported gauge type") + panic("unsupported metric type") } } @@ -156,34 +147,34 @@ func createMetricOption(mos ...Options) *metricOptions { return o } -func (r *Registry) initGauge(g *gauge, name string, mos ...Options) (*gauge, error) { - val, ok := r.gauges.Load(name) +func (r *Registry) initBaseMetric(bm *baseMetric, name string, mos ...Options) (*baseMetric, error) { + val, ok := r.baseMetrics.Load(name) if ok { - existing := val.(*gauge) - if existing.gType != g.gType { - return nil, errGaugeExistsWithDiffType + existing := val.(*baseMetric) + if existing.bmType != bm.bmType { + return nil, errMetricExistsWithDiffType } } - g.start = time.Now() + bm.start = time.Now() o := createMetricOption(mos...) 
- g.keys = o.labelkeys - g.desc = metricdata.Descriptor{ + bm.keys = o.labelkeys + bm.desc = metricdata.Descriptor{ Name: name, Description: o.desc, Unit: o.unit, LabelKeys: o.labelkeys, - Type: gTypeToMetricType(g), + Type: bmTypeToMetricType(bm), } - r.gauges.Store(name, g) - return g, nil + r.baseMetrics.Store(name, bm) + return bm, nil } // Read reads all gauges in this registry and returns their values as metrics. func (r *Registry) Read() []*metricdata.Metric { ms := []*metricdata.Metric{} - r.gauges.Range(func(k, v interface{}) bool { - g := v.(*gauge) - ms = append(ms, g.read()) + r.baseMetrics.Range(func(k, v interface{}) bool { + bm := v.(*baseMetric) + ms = append(ms, bm.read()) return true }) return ms From df5e77171e18725e4a690c45f8f7021e5aa64332 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 3 Apr 2019 20:49:04 -0700 Subject: [PATCH 155/212] Add support for cumulative. (#1090) * Add support for cumulative. * fix review comments. --- metric/common.go | 18 ++- metric/cumulative.go | 224 +++++++++++++++++++++++++++++ metric/cumulative_test.go | 290 ++++++++++++++++++++++++++++++++++++++ metric/registry.go | 70 ++++++++- 4 files changed, 599 insertions(+), 3 deletions(-) create mode 100644 metric/cumulative.go create mode 100644 metric/cumulative_test.go diff --git a/metric/common.go b/metric/common.go index 3dcaf1971..c370f7b52 100644 --- a/metric/common.go +++ b/metric/common.go @@ -24,7 +24,7 @@ import ( // baseMetric is common representation for gauge and cumulative metrics. // -// baseMetric maintains a value for each combination of of label values passed to +// baseMetric maintains a value for each combination of label values passed to // Set, Add, or Inc method. 
// // baseMetric should not be used directly, use metric specific type such as @@ -54,9 +54,23 @@ type baseEntry interface { read(t time.Time) metricdata.Point } +func (bm *baseMetric) startTime() *time.Time { + switch bm.bmType { + case cumulativeInt64, cumulativeFloat64, derivedCumulativeInt64, derivedCumulativeFloat64: + return &bm.start + default: + // gauges don't have start time. + return nil + } +} + // Read returns the current values of the baseMetric as a metric for export. func (bm *baseMetric) read() *metricdata.Metric { now := time.Now() + startTime := bm.startTime() + if startTime == nil { + startTime = &now + } m := &metricdata.Metric{ Descriptor: bm.desc, } @@ -65,7 +79,7 @@ func (bm *baseMetric) read() *metricdata.Metric { key := k.(string) labelVals := bm.decodeLabelVals(key) m.TimeSeries = append(m.TimeSeries, &metricdata.TimeSeries{ - StartTime: now, // Gauge value is instantaneous. + StartTime: *startTime, LabelValues: labelVals, Points: []metricdata.Point{ entry.read(now), diff --git a/metric/cumulative.go b/metric/cumulative.go new file mode 100644 index 000000000..6d3be3f88 --- /dev/null +++ b/metric/cumulative.go @@ -0,0 +1,224 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric + +import ( + "math" + "sync/atomic" + "time" + + "go.opencensus.io/metric/metricdata" +) + +// Float64Cumulative represents a float64 value that can only go up. 
+// +// Float64Cumulative maintains a float64 value for each combination of label values +// passed to the Set or Inc methods. +type Float64Cumulative struct { + bm baseMetric +} + +// Float64CumulativeEntry represents a single value of the cumulative corresponding to a set +// of label values. +type Float64CumulativeEntry struct { + val uint64 // needs to be uint64 for atomic access, interpret with math.Float64frombits +} + +func (e *Float64CumulativeEntry) read(t time.Time) metricdata.Point { + v := math.Float64frombits(atomic.LoadUint64(&e.val)) + if v < 0 { + v = 0 + } + return metricdata.NewFloat64Point(t, v) +} + +// GetEntry returns a cumulative entry where each key for this cumulative has the value +// given. +// +// The number of label values supplied must be exactly the same as the number +// of keys supplied when this cumulative was created. +func (c *Float64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Float64CumulativeEntry, error) { + entry, err := c.bm.entryForValues(labelVals, func() baseEntry { + return &Float64CumulativeEntry{} + }) + if err != nil { + return nil, err + } + return entry.(*Float64CumulativeEntry), nil +} + +// Set sets the cumulative entry value to provided val. It returns without updating if the value is +// negative or lower than previously stored value. +func (e *Float64CumulativeEntry) Set(val float64) { + var swapped, equalOrLess bool + if val <= 0.0 { + return + } + for !swapped && !equalOrLess { + oldBits := atomic.LoadUint64(&e.val) + oldVal := math.Float64frombits(oldBits) + if val > oldVal { + valBits := math.Float64bits(val) + swapped = atomic.CompareAndSwapUint64(&e.val, oldBits, valBits) + } else { + equalOrLess = true + } + } +} + +// Inc increments the cumulative entry value by val. It returns without incrementing if the val +// is negative. 
+func (e *Float64CumulativeEntry) Inc(val float64) { + var swapped bool + if val <= 0.0 { + return + } + for !swapped { + oldVal := atomic.LoadUint64(&e.val) + newVal := math.Float64bits(math.Float64frombits(oldVal) + val) + swapped = atomic.CompareAndSwapUint64(&e.val, oldVal, newVal) + } +} + +// Int64Cumulative represents a int64 cumulative value that can only go up. +// +// Int64Cumulative maintains an int64 value for each combination of label values passed to the +// Set or Inc methods. +type Int64Cumulative struct { + bm baseMetric +} + +// Int64CumulativeEntry represents a single value of the cumulative corresponding to a set +// of label values. +type Int64CumulativeEntry struct { + val int64 +} + +func (e *Int64CumulativeEntry) read(t time.Time) metricdata.Point { + v := atomic.LoadInt64(&e.val) + if v < 0 { + v = 0.0 + } + return metricdata.NewInt64Point(t, v) +} + +// GetEntry returns a cumulative entry where each key for this cumulative has the value +// given. +// +// The number of label values supplied must be exactly the same as the number +// of keys supplied when this cumulative was created. +func (c *Int64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Int64CumulativeEntry, error) { + entry, err := c.bm.entryForValues(labelVals, func() baseEntry { + return &Int64CumulativeEntry{} + }) + if err != nil { + return nil, err + } + return entry.(*Int64CumulativeEntry), nil +} + +// Set sets the value of the cumulative entry to the provided value. It returns without updating +// if the val is negative or if the val is lower than previously stored value. +func (e *Int64CumulativeEntry) Set(val int64) { + var swapped, equalOrLess bool + if val <= 0 { + return + } + for !swapped && !equalOrLess { + old := atomic.LoadInt64(&e.val) + if val > old { + swapped = atomic.CompareAndSwapInt64(&e.val, old, val) + } else { + equalOrLess = true + } + } +} + +// Inc increments the current cumulative entry value by val. 
It returns without incrementing if +// the val is negative. +func (e *Int64CumulativeEntry) Inc(val int64) { + if val <= 0 { + return + } + atomic.AddInt64(&e.val, int64(val)) +} + +// Int64DerivedCumulative represents int64 cumulative value that is derived from an object. +// +// Int64DerivedCumulative maintains objects for each combination of label values. +// These objects implement Int64DerivedCumulativeInterface to read instantaneous value +// representing the object. +type Int64DerivedCumulative struct { + bm baseMetric +} + +type int64DerivedCumulativeEntry struct { + fn func() int64 +} + +func (e *int64DerivedCumulativeEntry) read(t time.Time) metricdata.Point { + // TODO: [rghetia] handle a condition where new value return by fn is lower than previous call. + // It requires that we maintain the old values. + return metricdata.NewInt64Point(t, e.fn()) +} + +// UpsertEntry inserts or updates a derived cumulative entry for the given set of label values. +// The object for which this cumulative entry is inserted or updated, must implement func() int64 +// +// It returns an error if +// 1. The number of label values supplied are not the same as the number +// of keys supplied when this cumulative was created. +// 2. fn func() int64 is nil. +func (c *Int64DerivedCumulative) UpsertEntry(fn func() int64, labelVals ...metricdata.LabelValue) error { + if fn == nil { + return errInvalidParam + } + return c.bm.upsertEntry(labelVals, func() baseEntry { + return &int64DerivedCumulativeEntry{fn} + }) +} + +// Float64DerivedCumulative represents float64 cumulative value that is derived from an object. +// +// Float64DerivedCumulative maintains objects for each combination of label values. +// These objects implement Float64DerivedCumulativeInterface to read instantaneous value +// representing the object. 
+type Float64DerivedCumulative struct { + bm baseMetric +} + +type float64DerivedCumulativeEntry struct { + fn func() float64 +} + +func (e *float64DerivedCumulativeEntry) read(t time.Time) metricdata.Point { + return metricdata.NewFloat64Point(t, e.fn()) +} + +// UpsertEntry inserts or updates a derived cumulative entry for the given set of label values. +// The object for which this cumulative entry is inserted or updated, must implement func() float64 +// +// It returns an error if +// 1. The number of label values supplied are not the same as the number +// of keys supplied when this cumulative was created. +// 2. fn func() float64 is nil. +func (c *Float64DerivedCumulative) UpsertEntry(fn func() float64, labelVals ...metricdata.LabelValue) error { + if fn == nil { + return errInvalidParam + } + return c.bm.upsertEntry(labelVals, func() baseEntry { + return &float64DerivedCumulativeEntry{fn} + }) +} diff --git a/metric/cumulative_test.go b/metric/cumulative_test.go new file mode 100644 index 000000000..2e0d3c249 --- /dev/null +++ b/metric/cumulative_test.go @@ -0,0 +1,290 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package metric + +import ( + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "go.opencensus.io/metric/metricdata" +) + +func TestCumulative(t *testing.T) { + r := NewRegistry() + + f, _ := r.AddFloat64Cumulative("TestCumulative", + WithLabelKeys("k1", "k2")) + e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) + e.Set(5) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + e.Inc(1) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + e.Inc(1) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) + e.Inc(1) + m := r.Read() + want := []*metricdata.Metric{ + { + Descriptor: metricdata.Descriptor{ + Name: "TestCumulative", + LabelKeys: []string{"k1", "k2"}, + Type: metricdata.TypeCumulativeFloat64, + }, + TimeSeries: []*metricdata.TimeSeries{ + { + LabelValues: []metricdata.LabelValue{ + {}, {}, + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 5), + }, + }, + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("k1v1"), + {}, + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 2), + }, + }, + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("k1v2"), + metricdata.NewLabelValue("k2v2"), + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 1), + }, + }, + }, + }, + } + canonicalize(m) + canonicalize(want) + if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { + t.Errorf("-got +want: %s", diff) + } +} + +func TestCumulativeMetricDescriptor(t *testing.T) { + r := NewRegistry() + + gf, _ := r.AddFloat64Cumulative("float64_gauge") + compareType(gf.bm.desc.Type, metricdata.TypeCumulativeFloat64, t) + gi, _ := r.AddInt64Cumulative("int64_gauge") + compareType(gi.bm.desc.Type, metricdata.TypeCumulativeInt64, t) + dgf, _ := r.AddFloat64DerivedCumulative("derived_float64_gauge") + compareType(dgf.bm.desc.Type, 
metricdata.TypeCumulativeFloat64, t) + dgi, _ := r.AddInt64DerivedCumulative("derived_int64_gauge") + compareType(dgi.bm.desc.Type, metricdata.TypeCumulativeInt64, t) +} + +func readAndCompareInt64Val(testname string, r *Registry, want int64, t *testing.T) { + ms := r.Read() + if got := ms[0].TimeSeries[0].Points[0].Value.(int64); got != want { + t.Errorf("testname: %s, got = %v, want %v\n", testname, got, want) + } +} + +func TestInt64CumulativeEntry_IncAndSet(t *testing.T) { + r := NewRegistry() + g, _ := r.AddInt64Cumulative("bm") + e, _ := g.GetEntry() + e.Inc(5) + readAndCompareInt64Val("inc", r, 5, t) + e.Inc(-2) + readAndCompareInt64Val("inc negative", r, 5, t) + e.Set(-2) + readAndCompareInt64Val("set negative", r, 5, t) + e.Set(4) + readAndCompareInt64Val("set lower", r, 5, t) + e.Set(9) + readAndCompareInt64Val("set higher", r, 9, t) +} + +func readAndCompareFloat64Val(testname string, r *Registry, want float64, t *testing.T) { + ms := r.Read() + if got := ms[0].TimeSeries[0].Points[0].Value.(float64); got != want { + t.Errorf("testname: %s, got = %v, want %v\n", testname, got, want) + } +} + +func TestFloat64CumulativeEntry_IncAndSet(t *testing.T) { + r := NewRegistry() + g, _ := r.AddFloat64Cumulative("bm") + e, _ := g.GetEntry() + e.Inc(5.0) + readAndCompareFloat64Val("inc", r, 5.0, t) + e.Inc(-2.0) + readAndCompareFloat64Val("inc negative", r, 5.0, t) + e.Set(-2.0) + readAndCompareFloat64Val("set negative", r, 5.0, t) + e.Set(4.0) + readAndCompareFloat64Val("set lower", r, 5.0, t) + e.Set(9.9) + readAndCompareFloat64Val("set higher", r, 9.9, t) +} + +func TestCumulativeWithSameNameDiffType(t *testing.T) { + r := NewRegistry() + r.AddInt64Cumulative("bm") + _, gotErr := r.AddFloat64Cumulative("bm") + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) + } + _, gotErr = r.AddInt64DerivedCumulative("bm") + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) + } + _, gotErr = 
r.AddFloat64DerivedCumulative("bm") + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errMetricExistsWithDiffType) + } +} + +func TestCumulativeWithLabelMismatch(t *testing.T) { + r := NewRegistry() + g, _ := r.AddInt64Cumulative("bm", WithLabelKeys("k1")) + _, gotErr := g.GetEntry(metricdata.NewLabelValue("k1v2"), metricdata.NewLabelValue("k2v2")) + if gotErr == nil { + t.Errorf("got: nil, want error: %v", errKeyValueMismatch) + } +} + +type sysUpTimeInNanoSecs struct { + size int64 +} + +func (q *sysUpTimeInNanoSecs) ToInt64() int64 { + return q.size +} + +func TestInt64DerivedCumulativeEntry_Inc(t *testing.T) { + r := NewRegistry() + q := &sysUpTimeInNanoSecs{3} + g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + err := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(3); got != want { + t.Errorf("value = %v, want %v", got, want) + } + q.size = 5 + ms = r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestInt64DerivedCumulativeEntry_IncWithNilObj(t *testing.T) { + r := NewRegistry() + g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestInt64DerivedCumulativeEntry_IncWithInvalidLabels(t *testing.T) { + r := NewRegistry() + q := &sysUpTimeInNanoSecs{3} + g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + gotErr := g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1")) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestInt64DerivedCumulativeEntry_Update(t *testing.T) { + r := NewRegistry() + q := 
&sysUpTimeInNanoSecs{3} + q2 := &sysUpTimeInNanoSecs{5} + g, _ := r.AddInt64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + g.UpsertEntry(q.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + gotErr := g.UpsertEntry(q2.ToInt64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr != nil { + t.Errorf("got: %v, want: nil", gotErr) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(int64), int64(5); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +type sysUpTimeInSeconds struct { + size float64 +} + +func (q *sysUpTimeInSeconds) ToFloat64() float64 { + return q.size +} + +func TestFloat64DerivedCumulativeEntry_Inc(t *testing.T) { + r := NewRegistry() + q := &sysUpTimeInSeconds{5.0} + g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + err := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } + q.size = 7 + ms = r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(7.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} + +func TestFloat64DerivedCumulativeEntry_IncWithNilObj(t *testing.T) { + r := NewRegistry() + g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + gotErr := g.UpsertEntry(nil, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr == nil { + t.Errorf("expected error but got nil") + } +} + +func TestFloat64DerivedCumulativeEntry_IncWithInvalidLabels(t *testing.T) { + r := NewRegistry() + q := &sysUpTimeInSeconds{3} + g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + gotErr := g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1")) + if gotErr == nil { + t.Errorf("expected error but got nil") + 
} +} + +func TestFloat64DerivedCumulativeEntry_Update(t *testing.T) { + r := NewRegistry() + q := &sysUpTimeInSeconds{3.0} + q2 := &sysUpTimeInSeconds{5.0} + g, _ := r.AddFloat64DerivedCumulative("bm", WithLabelKeys("k1", "k2")) + g.UpsertEntry(q.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + gotErr := g.UpsertEntry(q2.ToFloat64, metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) + if gotErr != nil { + t.Errorf("got: %v, want: nil", gotErr) + } + ms := r.Read() + if got, want := ms[0].TimeSeries[0].Points[0].Value.(float64), float64(5.0); got != want { + t.Errorf("value = %v, want %v", got, want) + } +} diff --git a/metric/registry.go b/metric/registry.go index 6b1ff323f..5dc41631d 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -134,11 +134,79 @@ func bmTypeToMetricType(bm *baseMetric) metricdata.Type { return metricdata.TypeGaugeFloat64 case gaugeInt64: return metricdata.TypeGaugeInt64 + case derivedCumulativeFloat64: + return metricdata.TypeCumulativeFloat64 + case derivedCumulativeInt64: + return metricdata.TypeCumulativeInt64 + case cumulativeFloat64: + return metricdata.TypeCumulativeFloat64 + case cumulativeInt64: + return metricdata.TypeCumulativeInt64 default: panic("unsupported metric type") } } +// AddFloat64Cumulative creates and adds a new float64-valued cumulative to this registry. +func (r *Registry) AddFloat64Cumulative(name string, mos ...Options) (*Float64Cumulative, error) { + f := &Float64Cumulative{ + bm: baseMetric{ + bmType: cumulativeFloat64, + }, + } + _, err := r.initBaseMetric(&f.bm, name, mos...) + if err != nil { + return nil, err + } + return f, nil +} + +// AddInt64Cumulative creates and adds a new int64-valued cumulative to this registry. +func (r *Registry) AddInt64Cumulative(name string, mos ...Options) (*Int64Cumulative, error) { + i := &Int64Cumulative{ + bm: baseMetric{ + bmType: cumulativeInt64, + }, + } + _, err := r.initBaseMetric(&i.bm, name, mos...) 
+ if err != nil { + return nil, err + } + return i, nil +} + +// AddInt64DerivedCumulative creates and adds a new derived int64-valued cumulative to this registry. +// A derived cumulative is a convenient form of cumulative where the object associated with the cumulative +// provides its value by implementing func() int64. +func (r *Registry) AddInt64DerivedCumulative(name string, mos ...Options) (*Int64DerivedCumulative, error) { + i := &Int64DerivedCumulative{ + bm: baseMetric{ + bmType: derivedCumulativeInt64, + }, + } + _, err := r.initBaseMetric(&i.bm, name, mos...) + if err != nil { + return nil, err + } + return i, nil +} + +// AddFloat64DerivedCumulative creates and adds a new derived float64-valued cumulative to this registry. +// A derived cumulative is a convenient form of cumulative where the object associated with the cumulative +// provides its value by implementing func() float64. +func (r *Registry) AddFloat64DerivedCumulative(name string, mos ...Options) (*Float64DerivedCumulative, error) { + f := &Float64DerivedCumulative{ + bm: baseMetric{ + bmType: derivedCumulativeFloat64, + }, + } + _, err := r.initBaseMetric(&f.bm, name, mos...) + if err != nil { + return nil, err + } + return f, nil +} + func createMetricOption(mos ...Options) *metricOptions { o := &metricOptions{} for _, mo := range mos { @@ -169,7 +237,7 @@ func (r *Registry) initBaseMetric(bm *baseMetric, name string, mos ...Options) ( return bm, nil } -// Read reads all gauges in this registry and returns their values as metrics. +// Read reads all gauges and cumulatives in this registry and returns their values as metrics. func (r *Registry) Read() []*metricdata.Metric { ms := []*metricdata.Metric{} r.baseMetrics.Range(func(k, v interface{}) bool { From bf1b28d373689ebd1638037342c8ba677c05d576 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 11 Apr 2019 13:34:35 -0700 Subject: [PATCH 156/212] replace missing tag with empty values during metric export. 
(#1102) * replace missing tag with empty values during metric export. * use map. --- stats/view/view_to_metric.go | 15 +++++-- stats/view/view_to_metric_test.go | 65 ++++++++++++++++++++++++------- 2 files changed, 62 insertions(+), 18 deletions(-) diff --git a/stats/view/view_to_metric.go b/stats/view/view_to_metric.go index 284299faf..557c19085 100644 --- a/stats/view/view_to_metric.go +++ b/stats/view/view_to_metric.go @@ -91,10 +91,19 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { } } -func toLabelValues(row *Row) []metricdata.LabelValue { +func toLabelValues(row *Row, expectedKeys []string) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} + tagMap := make(map[string]string) for _, tag := range row.Tags { - labelValues = append(labelValues, metricdata.NewLabelValue(tag.Value)) + tagMap[tag.Key.Name()] = tag.Value + } + + for _, key := range expectedKeys { + if val, ok := tagMap[key]; ok { + labelValues = append(labelValues, metricdata.NewLabelValue(val)) + } else { + labelValues = append(labelValues, metricdata.LabelValue{}) + } } return labelValues } @@ -102,7 +111,7 @@ func toLabelValues(row *Row) []metricdata.LabelValue { func rowToTimeseries(v *viewInternal, row *Row, now time.Time, startTime time.Time) *metricdata.TimeSeries { return &metricdata.TimeSeries{ Points: []metricdata.Point{row.Data.toPoint(v.metricDescriptor.Type, now)}, - LabelValues: toLabelValues(row), + LabelValues: toLabelValues(row, v.metricDescriptor.LabelKeys), StartTime: startTime, } } diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index d9a5a3de0..2f2c72372 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -40,19 +40,21 @@ type testToMetrics struct { var ( // tag objects. 
- tk1 tag.Key - tk2 tag.Key - tk3 tag.Key - tk1v1 tag.Tag - tk1v2 tag.Tag - tk2v2 tag.Tag - tk3v3 tag.Tag - tags []tag.Tag - labelValues []metricdata.LabelValue - labelKeys []string + tk1 tag.Key + tk2 tag.Key + tk3 tag.Key + tk1v1 tag.Tag + tk2v2 tag.Tag + tags []tag.Tag + + labelValues []metricdata.LabelValue + emptyLabelValues []metricdata.LabelValue + + labelKeys []string - recordsInt64 []recordValWithTag - recordsFloat64 []recordValWithTag + recordsInt64 []recordValWithTag + recordsFloat64 []recordValWithTag + recordsFloat64WoTag []recordValWithTag // distribution objects. aggDist *Aggregation @@ -73,6 +75,7 @@ var ( viewTypeInt64Sum *View viewTypeFloat64LastValue *View viewTypeInt64LastValue *View + viewRecordWithoutLabel *View mdTypeFloat64CumulativeDistribution metricdata.Descriptor mdTypeInt64CumulativeDistribution metricdata.Descriptor mdTypeInt64CumulativeCount metricdata.Descriptor @@ -81,6 +84,7 @@ var ( mdTypeFloat64CumulativeSum metricdata.Descriptor mdTypeInt64CumulativeLastValue metricdata.Descriptor mdTypeFloat64CumulativeLastValue metricdata.Descriptor + mdTypeRecordWithoutLabel metricdata.Descriptor ) const ( @@ -92,9 +96,9 @@ const ( nameFloat64SumM1 = "viewToMetricTest_Float64_Sum/m1" nameInt64LastValueM1 = "viewToMetricTest_Int64_LastValue/m1" nameFloat64LastValueM1 = "viewToMetricTest_Float64_LastValue/m1" + nameRecordWithoutLabel = "viewToMetricTest_RecordWithoutLabel/m1" v1 = "v1" v2 = "v2" - v3 = "v3" ) func init() { @@ -110,15 +114,17 @@ func initTags() { tk2, _ = tag.NewKey("k2") tk3, _ = tag.NewKey("k3") tk1v1 = tag.Tag{Key: tk1, Value: v1} - tk1v2 = tag.Tag{Key: tk1, Value: v2} tk2v2 = tag.Tag{Key: tk2, Value: v2} - tk3v3 = tag.Tag{Key: tk3, Value: v3} tags = []tag.Tag{tk1v1, tk2v2} labelValues = []metricdata.LabelValue{ {Value: v1, Present: true}, {Value: v2, Present: true}, } + emptyLabelValues = []metricdata.LabelValue{ + {Value: "", Present: false}, + {Value: "", Present: false}, + } labelKeys = []string{tk1.Name(), tk2.Name()} 
recordsInt64 = []recordValWithTag{ @@ -129,6 +135,10 @@ func initTags() { {tags: tags, value: float64(1.5)}, {tags: tags, value: float64(5.4)}, } + recordsFloat64WoTag = []recordValWithTag{ + {value: float64(1.5)}, + {value: float64(5.4)}, + } } func initAgg() { @@ -189,6 +199,12 @@ func initViews() { Measure: stats.Float64(nameFloat64LastValueM1, "", stats.UnitDimensionless), Aggregation: aggL, } + viewRecordWithoutLabel = &View{ + Name: nameRecordWithoutLabel, + TagKeys: []tag.Key{tk1, tk2}, + Measure: stats.Float64(nameRecordWithoutLabel, "", stats.UnitDimensionless), + Aggregation: aggL, + } } func initMetricDescriptors() { @@ -225,6 +241,10 @@ func initMetricDescriptors() { Name: nameFloat64LastValueM1, Description: "", Unit: metricdata.UnitDimensionless, Type: metricdata.TypeGaugeFloat64, LabelKeys: labelKeys, } + mdTypeRecordWithoutLabel = metricdata.Descriptor{ + Name: nameRecordWithoutLabel, Description: "", Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeGaugeFloat64, LabelKeys: labelKeys, + } } func Test_ViewToMetric(t *testing.T) { @@ -375,6 +395,21 @@ func Test_ViewToMetric(t *testing.T) { }, }, }, + { + view: viewRecordWithoutLabel, + recordValue: recordsFloat64WoTag, + wantMetric: &metricdata.Metric{ + Descriptor: mdTypeRecordWithoutLabel, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + metricdata.NewFloat64Point(now, 5.4), + }, + LabelValues: emptyLabelValues, + StartTime: time.Time{}, + }, + }, + }, + }, } wantMetrics := []*metricdata.Metric{} From ef4afeb0d0cf4fc08868683603cd6e474be9be18 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 11 Apr 2019 21:06:30 -0700 Subject: [PATCH 157/212] fix race condition in reading record and updating record. 
(#1104) --- stats/view/worker.go | 2 + stats/view/worker_commands.go | 4 ++ stats/view/worker_test.go | 87 +++++++++++++++++++++++++++++++++++ 3 files changed, 93 insertions(+) diff --git a/stats/view/worker.go b/stats/view/worker.go index 37279b39e..2f3c018af 100644 --- a/stats/view/worker.go +++ b/stats/view/worker.go @@ -236,6 +236,8 @@ func (w *worker) reportView(v *viewInternal, now time.Time) { } func (w *worker) reportUsage(now time.Time) { + w.mu.Lock() + defer w.mu.Unlock() for _, v := range w.views { w.reportView(v, now) } diff --git a/stats/view/worker_commands.go b/stats/view/worker_commands.go index ba6203a50..0267e179a 100644 --- a/stats/view/worker_commands.go +++ b/stats/view/worker_commands.go @@ -121,6 +121,8 @@ type retrieveDataResp struct { } func (cmd *retrieveDataReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() vi, ok := w.views[cmd.v] if !ok { cmd.c <- &retrieveDataResp{ @@ -153,6 +155,8 @@ type recordReq struct { } func (cmd *recordReq) handleCommand(w *worker) { + w.mu.Lock() + defer w.mu.Unlock() for _, m := range cmd.ms { if (m == stats.Measurement{}) { // not registered continue diff --git a/stats/view/worker_test.go b/stats/view/worker_test.go index d43014648..8d4546ea4 100644 --- a/stats/view/worker_test.go +++ b/stats/view/worker_test.go @@ -22,6 +22,8 @@ import ( "testing" "time" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -397,6 +399,91 @@ func TestUnregisterReportsUsage(t *testing.T) { } } +func TestWorkerRace(t *testing.T) { + restart() + ctx := context.Background() + + m1 := stats.Int64("measure", "desc", "unit") + view1 := &View{Name: "count", Measure: m1, Aggregation: Count()} + m2 := stats.Int64("measure2", "desc", "unit") + view2 := &View{Name: "count2", Measure: m2, Aggregation: Count()} + + // 1. This will export every microsecond. 
+ SetReportingPeriod(time.Microsecond) + + if err := Register(view1, view2); err != nil { + t.Fatalf("cannot register: %v", err) + } + + e := &countExporter{} + RegisterExporter(e) + + // Synchronize and make sure every goroutine has terminated before we exit + var waiter sync.WaitGroup + waiter.Add(3) + defer waiter.Wait() + + doneCh := make(chan bool) + // 2. Record write routine at 700ns + go func() { + defer waiter.Done() + tick := time.NewTicker(700 * time.Nanosecond) + defer tick.Stop() + + defer func() { + close(doneCh) + }() + + for i := 0; i < 1e3; i++ { + stats.Record(ctx, m1.M(1)) + stats.Record(ctx, m2.M(1)) + stats.Record(ctx, m2.M(1)) + <-tick.C + } + }() + + // 3. Simulating RetrieveData 900ns + go func() { + defer waiter.Done() + tick := time.NewTicker(900 * time.Nanosecond) + defer tick.Stop() + + for { + select { + case <-doneCh: + return + case <-tick.C: + RetrieveData(view1.Name) + } + } + }() + + // 4. Export via Reader routine at 800ns + go func() { + defer waiter.Done() + tick := time.NewTicker(800 * time.Nanosecond) + defer tick.Stop() + + reader := metricexport.Reader{} + for { + select { + case <-doneCh: + return + case <-tick.C: + // Perform some collection here + reader.ReadAndExport(&testExporter{}) + } + } + }() +} + +type testExporter struct { +} + +func (te *testExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + return nil +} + type countExporter struct { sync.Mutex count int64 From aca3e8a2679285ba3a7401f60cf559754ed6a88d Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 15 Apr 2019 10:19:48 -0700 Subject: [PATCH 158/212] Add support for metrics in prometheus exporter (#1105) * Add prometheus support. * remove view related code and refactor metric specific code. * fix review comments. * remove dead code and fix comments. * fix error message. 
--- exporter/prometheus/example_test.go | 2 - exporter/prometheus/prometheus.go | 281 ++++++++++++------------- exporter/prometheus/prometheus_test.go | 151 +------------ 3 files changed, 136 insertions(+), 298 deletions(-) diff --git a/exporter/prometheus/example_test.go b/exporter/prometheus/example_test.go index 073a8bdd5..182ad2003 100644 --- a/exporter/prometheus/example_test.go +++ b/exporter/prometheus/example_test.go @@ -19,7 +19,6 @@ import ( "net/http" "go.opencensus.io/exporter/prometheus" - "go.opencensus.io/stats/view" ) func Example() { @@ -27,7 +26,6 @@ func Example() { if err != nil { log.Fatal(err) } - view.RegisterExporter(exporter) // Serve the scrape endpoint on port 9999. http.Handle("/metrics", exporter) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 203bd38ad..53ff6ba6e 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -17,18 +17,18 @@ package prometheus // import "go.opencensus.io/exporter/prometheus" import ( - "bytes" "fmt" "log" "net/http" "sync" - "go.opencensus.io/internal" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - + "context" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promhttp" + "go.opencensus.io/internal" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats/view" ) // Exporter exports stats to Prometheus, users need @@ -61,39 +61,12 @@ func NewExporter(o Options) (*Exporter, error) { c: collector, handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), } + collector.ensureRegisteredOnce() + return e, nil } var _ http.Handler = (*Exporter)(nil) -var _ view.Exporter = (*Exporter)(nil) - -func (c *collector) registerViews(views ...*view.View) { - count := 0 - for _, view := range views { - sig := viewSignature(c.opts.Namespace, view) - c.registeredViewsMu.Lock() - _, ok := c.registeredViews[sig] - 
c.registeredViewsMu.Unlock() - - if !ok { - desc := prometheus.NewDesc( - viewName(c.opts.Namespace, view), - view.Description, - tagKeysToLabels(view.TagKeys), - c.opts.ConstLabels, - ) - c.registeredViewsMu.Lock() - c.registeredViews[sig] = desc - c.registeredViewsMu.Unlock() - count++ - } - } - if count == 0 { - return - } - - c.ensureRegisteredOnce() -} // ensureRegisteredOnce invokes reg.Register on the collector itself // exactly once to ensure that we don't get errors such as @@ -123,11 +96,8 @@ func (o *Options) onError(err error) { // corresponding Prometheus Metric: SumData will be converted // to Untyped Metric, CountData will be a Counter Metric, // DistributionData will be a Histogram Metric. +// Deprecated in lieu of metricexport.Reader interface. func (e *Exporter) ExportView(vd *view.Data) { - if len(vd.Rows) == 0 { - return - } - e.c.addViewData(vd) } // ServeHTTP serves the Prometheus endpoint. @@ -145,151 +115,164 @@ type collector struct { // reg helps collector register views dynamically. reg *prometheus.Registry - // viewData are accumulated and atomically - // appended to on every Export invocation, from - // stats. These views are cleared out when - // Collect is invoked and the cycle is repeated. - viewData map[string]*view.Data - - registeredViewsMu sync.Mutex - // registeredViews maps a view to a prometheus desc. - registeredViews map[string]*prometheus.Desc -} - -func (c *collector) addViewData(vd *view.Data) { - c.registerViews(vd.View) - sig := viewSignature(c.opts.Namespace, vd.View) - - c.mu.Lock() - c.viewData[sig] = vd - c.mu.Unlock() + // reader reads metrics from all registered producers. 
+ reader *metricexport.Reader } func (c *collector) Describe(ch chan<- *prometheus.Desc) { - c.registeredViewsMu.Lock() - registered := make(map[string]*prometheus.Desc) - for k, desc := range c.registeredViews { - registered[k] = desc - } - c.registeredViewsMu.Unlock() - - for _, desc := range registered { - ch <- desc - } + de := &descExporter{c: c, descCh: ch} + c.reader.ReadAndExport(de) } // Collect fetches the statistics from OpenCensus // and delivers them as Prometheus Metrics. -// Collect is invoked everytime a prometheus.Gatherer is run +// Collect is invoked every time a prometheus.Gatherer is run // for example when the HTTP endpoint is invoked by Prometheus. func (c *collector) Collect(ch chan<- prometheus.Metric) { - // We need a copy of all the view data up until this point. - viewData := c.cloneViewData() - - for _, vd := range viewData { - sig := viewSignature(c.opts.Namespace, vd.View) - c.registeredViewsMu.Lock() - desc := c.registeredViews[sig] - c.registeredViewsMu.Unlock() - - for _, row := range vd.Rows { - metric, err := c.toMetric(desc, vd.View, row) - if err != nil { - c.opts.onError(err) - } else { - ch <- metric - } - } - } - + me := &metricExporter{c: c, metricCh: ch} + c.reader.ReadAndExport(me) } -func (c *collector) toMetric(desc *prometheus.Desc, v *view.View, row *view.Row) (prometheus.Metric, error) { - switch data := row.Data.(type) { - case *view.CountData: - return prometheus.NewConstMetric(desc, prometheus.CounterValue, float64(data.Value), tagValues(row.Tags, v.TagKeys)...) - - case *view.DistributionData: - points := make(map[float64]uint64) - // Histograms are cumulative in Prometheus. - // Get cumulative bucket counts. - cumCount := uint64(0) - for i, b := range v.Aggregation.Buckets { - cumCount += uint64(data.CountPerBucket[i]) - points[b] = cumCount - } - return prometheus.NewConstHistogram(desc, uint64(data.Count), data.Sum(), points, tagValues(row.Tags, v.TagKeys)...) 
+func newCollector(opts Options, registrar *prometheus.Registry) *collector { + return &collector{ + reg: registrar, + opts: opts, + reader: metricexport.NewReader()} +} - case *view.SumData: - return prometheus.NewConstMetric(desc, prometheus.UntypedValue, data.Value, tagValues(row.Tags, v.TagKeys)...) +func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { + return prometheus.NewDesc( + metricName(c.opts.Namespace, metric), + metric.Descriptor.Description, + toPromLabels(metric.Descriptor.LabelKeys), + c.opts.ConstLabels) +} - case *view.LastValueData: - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, data.Value, tagValues(row.Tags, v.TagKeys)...) +type metricExporter struct { + c *collector + metricCh chan<- prometheus.Metric +} - default: - return nil, fmt.Errorf("aggregation %T is not yet supported", v.Aggregation) +// ExportMetrics exports to the Prometheus. +// Each OpenCensus Metric will be converted to +// corresponding Prometheus Metric: +// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric, +// TypeCumulativeDistribution will be a Histogram Metric. 
+// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric +func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + for _, ts := range metric.TimeSeries { + tvs := toLabelValues(ts.LabelValues) + for _, point := range ts.Points { + metric, err := toPromMetric(desc, metric, point, tvs) + if err != nil { + me.c.opts.onError(err) + } else if metric != nil { + me.metricCh <- metric + } + } + } } + return nil } -func tagKeysToLabels(keys []tag.Key) (labels []string) { - for _, key := range keys { - labels = append(labels, internal.Sanitize(key.Name())) - } - return labels +type descExporter struct { + c *collector + descCh chan<- *prometheus.Desc } -func newCollector(opts Options, registrar *prometheus.Registry) *collector { - return &collector{ - reg: registrar, - opts: opts, - registeredViews: make(map[string]*prometheus.Desc), - viewData: make(map[string]*view.Data), +// ExportMetrics exports descriptor to the Prometheus. +// It is invoked when request to scrape descriptors is received. +func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + desc := me.c.toDesc(metric) + me.descCh <- desc } + return nil } -func tagValues(t []tag.Tag, expectedKeys []tag.Key) []string { - var values []string - // Add empty string for all missing keys in the tags map. 
- idx := 0 - for _, t := range t { - for t.Key != expectedKeys[idx] { - idx++ - values = append(values, "") - } - values = append(values, t.Value) - idx++ - } - for idx < len(expectedKeys) { - idx++ - values = append(values, "") +func toPromLabels(mls []string) (labels []string) { + for _, ml := range mls { + labels = append(labels, internal.Sanitize(ml)) } - return values + return labels } -func viewName(namespace string, v *view.View) string { +func metricName(namespace string, m *metricdata.Metric) string { var name string if namespace != "" { name = namespace + "_" } - return name + internal.Sanitize(v.Name) + return name + internal.Sanitize(m.Descriptor.Name) +} + +func toPromMetric( + desc *prometheus.Desc, + metric *metricdata.Metric, + point metricdata.Point, + labelValues []string) (prometheus.Metric, error) { + switch metric.Descriptor.Type { + case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...) + + case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64: + pv, err := toPromValue(point) + if err != nil { + return nil, err + } + return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...) + + case metricdata.TypeCumulativeDistribution: + switch v := point.Value.(type) { + case *metricdata.Distribution: + points := make(map[float64]uint64) + // Histograms are cumulative in Prometheus. + // Get cumulative bucket counts. + cumCount := uint64(0) + for i, b := range v.BucketOptions.Bounds { + cumCount += uint64(v.Buckets[i].Count) + points[b] = cumCount + } + return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...) + default: + return nil, typeMismatchError(point) + } + case metricdata.TypeSummary: + // TODO: [rghetia] add support for TypeSummary. 
+ return nil, nil + default: + return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type) + } } -func viewSignature(namespace string, v *view.View) string { - var buf bytes.Buffer - buf.WriteString(viewName(namespace, v)) - for _, k := range v.TagKeys { - buf.WriteString("-" + k.Name()) +func toLabelValues(labelValues []metricdata.LabelValue) (values []string) { + for _, lv := range labelValues { + if lv.Present { + values = append(values, lv.Value) + } else { + values = append(values, "") + } } - return buf.String() + return values } -func (c *collector) cloneViewData() map[string]*view.Data { - c.mu.Lock() - defer c.mu.Unlock() +func typeMismatchError(point metricdata.Point) error { + return fmt.Errorf("point type %T does not match metric type", point) - viewDataCopy := make(map[string]*view.Data) - for sig, viewData := range c.viewData { - viewDataCopy[sig] = viewData +} + +func toPromValue(point metricdata.Point) (float64, error) { + switch v := point.Value.(type) { + case float64: + return v, nil + case int64: + return float64(v), nil + default: + return 0.0, typeMismatchError(point) } - return viewDataCopy } diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go index 4042e68b3..83fc90abb 100644 --- a/exporter/prometheus/prometheus_test.go +++ b/exporter/prometheus/prometheus_test.go @@ -16,12 +16,10 @@ package prometheus import ( "context" - "fmt" "io/ioutil" "net/http" "net/http/httptest" "strings" - "sync" "testing" "time" @@ -32,136 +30,6 @@ import ( "github.com/prometheus/client_golang/prometheus" ) -func newView(measureName string, agg *view.Aggregation) *view.View { - m := stats.Int64(measureName, "bytes", stats.UnitBytes) - return &view.View{ - Name: "foo", - Description: "bar", - Measure: m, - Aggregation: agg, - } -} - -func TestOnlyCumulativeWindowSupported(t *testing.T) { - // See Issue https://github.com/census-instrumentation/opencensus-go/issues/214. 
- count1 := &view.CountData{Value: 1} - lastValue1 := &view.LastValueData{Value: 56.7} - tests := []struct { - vds *view.Data - want int - }{ - 0: { - vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m1", view.Count()), - }, - want: 0, // no rows present - }, - 1: { - vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m2", view.Count()), - Rows: []*view.Row{ - {Data: count1}, - }, - }, - want: 1, - }, - 2: { - vds: &view.Data{ - View: newView("TestOnlyCumulativeWindowSupported/m3", view.LastValue()), - Rows: []*view.Row{ - {Data: lastValue1}, - }, - }, - want: 1, - }, - } - - for i, tt := range tests { - reg := prometheus.NewRegistry() - collector := newCollector(Options{}, reg) - collector.addViewData(tt.vds) - mm, err := reg.Gather() - if err != nil { - t.Errorf("#%d: Gather err: %v", i, err) - } - reg.Unregister(collector) - if got, want := len(mm), tt.want; got != want { - t.Errorf("#%d: got nil %v want nil %v", i, got, want) - } - } -} - -func TestCollectNonRacy(t *testing.T) { - // Despite enforcing the singleton, for this case we - // need an exporter hence won't be using NewExporter. - exp, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("NewExporter: %v", err) - } - collector := exp.c - - // Synchronize and make sure every goroutine has terminated before we exit - var waiter sync.WaitGroup - waiter.Add(3) - defer waiter.Wait() - - doneCh := make(chan bool) - // 1. Viewdata write routine at 700ns - go func() { - defer waiter.Done() - tick := time.NewTicker(700 * time.Nanosecond) - defer tick.Stop() - - defer func() { - close(doneCh) - }() - - for i := 0; i < 1e3; i++ { - count1 := &view.CountData{Value: 1} - vds := []*view.Data{ - {View: newView(fmt.Sprintf("TestCollectNonRacy/m2-%d", i), view.Count()), Rows: []*view.Row{{Data: count1}}}, - } - for _, v := range vds { - exp.ExportView(v) - } - <-tick.C - } - }() - - inMetricsChan := make(chan prometheus.Metric, 1000) - // 2. 
Simulating the Prometheus metrics consumption routine running at 900ns - go func() { - defer waiter.Done() - tick := time.NewTicker(900 * time.Nanosecond) - defer tick.Stop() - - for { - select { - case <-doneCh: - return - case <-inMetricsChan: - } - } - }() - - // 3. Collect/Read routine at 800ns - go func() { - defer waiter.Done() - tick := time.NewTicker(800 * time.Nanosecond) - defer tick.Stop() - - for { - select { - case <-doneCh: - return - case <-tick.C: - // Perform some collection here - collector.Collect(inMetricsChan) - } - } - }() -} - type mSlice []*stats.Int64Measure func (measures *mSlice) createAndAppend(name, desc, unit string) { @@ -187,7 +55,6 @@ func TestMetricsEndpointOutput(t *testing.T) { if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } - view.RegisterExporter(exporter) names := []string{"foo", "bar", "baz"} @@ -261,9 +128,6 @@ func TestCumulativenessFromHistograms(t *testing.T) { if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } - view.RegisterExporter(exporter) - reportPeriod := time.Millisecond - view.SetReportingPeriod(reportPeriod) m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless) v := &view.View{ @@ -282,7 +146,7 @@ func TestCumulativenessFromHistograms(t *testing.T) { defer view.Unregister(v) // Give the reporter ample time to process registration - <-time.After(10 * reportPeriod) + //<-time.After(10 * reportPeriod) values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12} // We want the results that look like this: @@ -315,7 +179,7 @@ func TestCumulativenessFromHistograms(t *testing.T) { stats.Record(ctx, ms...) 
// Give the recorder ample time to process recording - <-time.After(10 * reportPeriod) + //<-time.After(10 * reportPeriod) cst := httptest.NewServer(exporter) defer cst.Close() @@ -348,9 +212,6 @@ func TestHistogramUnorderedBucketBounds(t *testing.T) { if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } - view.RegisterExporter(exporter) - reportPeriod := time.Millisecond - view.SetReportingPeriod(reportPeriod) m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless) v := &view.View{ @@ -369,7 +230,7 @@ func TestHistogramUnorderedBucketBounds(t *testing.T) { defer view.Unregister(v) // Give the reporter ample time to process registration - <-time.After(10 * reportPeriod) + //<-time.After(10 * reportPeriod) values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12} // We want the results that look like this: @@ -402,7 +263,7 @@ func TestHistogramUnorderedBucketBounds(t *testing.T) { stats.Record(ctx, ms...) // Give the recorder ample time to process recording - <-time.After(10 * reportPeriod) + //<-time.After(10 * reportPeriod) cst := httptest.NewServer(exporter) defer cst.Close() @@ -442,8 +303,6 @@ func TestConstLabelsIncluded(t *testing.T) { if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) names := []string{"foo", "bar", "baz"} @@ -526,8 +385,6 @@ func TestViewMeasureWithoutTag(t *testing.T) { if err != nil { t.Fatalf("failed to create prometheus exporter: %v", err) } - view.RegisterExporter(exporter) - defer view.UnregisterExporter(exporter) m := stats.Int64("tests/foo", "foo", stats.UnitDimensionless) k1, _ := tag.NewKey("key/1") k2, _ := tag.NewKey("key/2") From 5d1b8afbde42a61198d10d147afec11355dbee8d Mon Sep 17 00:00:00 2001 From: rghetia Date: Mon, 15 Apr 2019 15:24:00 -0700 Subject: [PATCH 159/212] Add gauges example. (#1107) * Add gauges example. 
* use _ instead of / --- Makefile | 3 +- examples/gauges/README.md | 289 ++++++++++++++++++++++++++++++++++++++ examples/gauges/gauge.go | 199 ++++++++++++++++++++++++++ 3 files changed, 490 insertions(+), 1 deletion(-) create mode 100644 examples/gauges/README.md create mode 100644 examples/gauges/gauge.go diff --git a/Makefile b/Makefile index e2f2ed59e..457866cb1 100644 --- a/Makefile +++ b/Makefile @@ -15,6 +15,7 @@ EMBEDMD=embedmd # TODO decide if we need to change these names. TRACE_ID_LINT_EXCEPTION="type name will be used as trace.TraceID by other packages" TRACE_OPTION_LINT_EXCEPTION="type name will be used as trace.TraceOptions by other packages" +README_FILES := $(shell find . -name '*README.md' | sort | tr '\n' ' ') .DEFAULT_GOAL := fmt-lint-vet-embedmd-test @@ -79,7 +80,7 @@ vet: .PHONY: embedmd embedmd: - @EMBEDMDOUT=`$(EMBEDMD) -d README.md 2>&1`; \ + @EMBEDMDOUT=`$(EMBEDMD) -d $(README_FILES) 2>&1`; \ if [ "$$EMBEDMDOUT" ]; then \ echo "$(EMBEDMD) FAILED => embedmd the following files:\n"; \ echo "$$EMBEDMDOUT\n"; \ diff --git a/examples/gauges/README.md b/examples/gauges/README.md new file mode 100644 index 000000000..c96c97959 --- /dev/null +++ b/examples/gauges/README.md @@ -0,0 +1,289 @@ +# Gauges Example + + +Table of Contents +================= + +* [Gauges](#gauges) + * [Run the example](#run-the-example) + * [How to use gauges?](#how-to-use-gauge?) + * [Initialize metric registry](#initialize-metric-registry) + * [Create gauge metric](#create-gauge-metric) + * [Create gauge entry](#create-gauge-entry) + * [Set gauge value](#set-gauge-values) + * [Complete example](#complete-example) + +# Summary +[top](#Table-of-Contents) + +This example shows how to use gauge metrics. The program records two gauges. + +1. **process_heap_alloc (int64)**: Total bytes used by objects allocated in the heap. It includes objects currently used and objects that are freed but not garbage collected. +1. 
**process_heap_idle_to_alloc_ratio (float64)**: It is the ratio of Idle bytes to allocated bytes in the heap. + +It periodically runs a function that retrieves the memory stats and updates the above two metrics. These metrics are then exported using prometheus exporter. +Metrics can be viewed at [http://localhost:9090/metrcs](http://localhost:9090/metrcs) once the program is running. +The program lets you choose the amount of memory (in MB) to consume. Choose different values and query the metrics to see the change in metrics. + +## Run the example. + +``` +$ go get go.opencensus.io/examples/gauges/... +``` + +then: + +``` +$ go run $(go env GOPATH)/src/go.opencensus.io/examples/gauges/gauge.go +``` + +## How to use gauge? + +### Initialize Metric Registry +Create a new metric registry for all your metrics. +This step is a general step for any kind of metrics and not specific to gauges. +Register newly created registry with global producer manager. + +[embedmd]:# (gauge.go reg) +```go +r := metric.NewRegistry() +metricproducer.GlobalManager().AddProducer(r) +``` + + +### Create gauge metric +Create a gauge metric. In this example we have two metrics. + +**process_heap_alloc** + +[embedmd]:# (gauge.go alloc) +```go +allocGauge, err := r.AddInt64Gauge( + "process_heap_alloc", + metric.WithDescription("Process heap allocation"), + metric.WithUnit(metricdata.UnitBytes)) +if err != nil { + log.Fatalf("error creating heap allocation gauge, error%v\n", err) +} +``` + +**process_heap_idle_to_alloc_ratio** + +[embedmd]:# (gauge.go idle) +```go +ratioGauge, err := r.AddFloat64Gauge( + "process_heap_idle_to_alloc_ratio", + metric.WithDescription("process heap idle to allocate ratio"), + metric.WithUnit(metricdata.UnitDimensionless)) +if err != nil { + log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) +} +``` + +### Create gauge entry +Now, create or get a unique entry (equivalent of a row in a table) for a given set of tags. 
Since we are not using any tags in this example we only have one entry for each gauge metric. + +**entry for process_heap_alloc** + +[embedmd]:# (gauge.go entryAlloc) +```go +allocEntry, err = allocGauge.GetEntry() +if err != nil { + log.Fatalf("error getting heap allocation gauge entry, error%v\n", err) +} +``` + +**entry for process_heap_idle_to_alloc_ratio** + +[embedmd]:# (gauge.go entryIdle) +```go +ratioEntry, err = ratioGauge.GetEntry() +if err != nil { + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) +} +``` + + +### Set gauge values +Use `Set` or `Add` function to update the value of gauge entries. You can call these methods anytime based on your metric and your application. In this example, `Set` is called periodically. + +[embedmd]:# (gauge.go record) +```go + allocEntry.Set(int64(getAlloc())) // int64 gauge + ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge +``` + +### Complete Example + +[embedmd]:# (gauge.go entire) +```go +package main + +import ( + "bufio" + "fmt" + "log" + "os" + "runtime" + "strconv" + "strings" + "time" + + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "net/http" +) + +var ( + mem = &runtime.MemStats{} +) + +type memObj struct { + size int + b []byte +} + +func newMemObj(size int) *memObj { + n := &memObj{size: size, b: make([]byte, size)} + for i := 0; i < n.size; i++ { + n.b[i] = byte(i) + } + return n +} + +var allocEntry *metric.Int64GaugeEntry +var ratioEntry *metric.Float64Entry +var arr []*memObj + +func getAlloc() uint64 { + runtime.ReadMemStats(mem) + return mem.HeapAlloc +} + +func getIdleToAllocRatio() float64 { + runtime.ReadMemStats(mem) + return float64(mem.HeapIdle) / float64(mem.HeapAlloc) +} + +func consumeMem(sizeMB int) { + arr = make([]*memObj, sizeMB) + for i := 0; i < sizeMB; i++ { + arr = append(arr, newMemObj(1000000)) + } +} + +func doSomeWork(sizeMB 
int) { + // do some work + consumeMem(sizeMB) +} + +func recordMetrics(delay int, done chan int) { + tick := time.NewTicker(time.Duration(delay) * time.Second) + for { + select { + case <-done: + return + case <-tick.C: + // record heap allocation and idle to allocation ratio. + allocEntry.Set(int64(getAlloc())) // int64 gauge + ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge + } + } +} + +func getInput() int { + reader := bufio.NewReader(os.Stdin) + limit := 50 + for { + fmt.Printf("Enter memory (in MB between 1-%d): ", limit) + text, _ := reader.ReadString('\n') + sizeMB, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) + if err == nil { + if sizeMB < 1 || sizeMB > limit { + fmt.Printf("invalid value %s\n", text) + continue + } + fmt.Printf("consuming %dMB\n", sizeMB) + return sizeMB + } + fmt.Printf("error %v\n", err) + } +} + +func work() { + fmt.Printf("Program periodically records following gauge metrics\n") + fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") + fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") + fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metris.\n") + + // Do some work and record gauge metrics. + for { + sizeMB := getInput() + doSomeWork(sizeMB) + fmt.Printf("press CTRL+C to terminate the program\n") + } +} + +func createAndStartExporter() { + // Create Prometheus metrics exporter to verify gauge metrics in this example. + exporter, err := prometheus.NewExporter(prometheus.Options{}) + if err != nil { + log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + } + http.Handle("/metrics", exporter) + go func() { + log.Fatal(http.ListenAndServe(":9090", nil)) + + }() +} + +func main() { + createAndStartExporter() + + // Create metric registry and register it with global producer manager. 
+ r := metric.NewRegistry() + metricproducer.GlobalManager().AddProducer(r) + + // Create Int64Gauge to report memory usage of a process. + allocGauge, err := r.AddInt64Gauge( + "process_heap_alloc", + metric.WithDescription("Process heap allocation"), + metric.WithUnit(metricdata.UnitBytes)) + if err != nil { + log.Fatalf("error creating heap allocation gauge, error%v\n", err) + } + + allocEntry, err = allocGauge.GetEntry() + if err != nil { + log.Fatalf("error getting heap allocation gauge entry, error%v\n", err) + } + + // Create Float64Gauge to report fractional cpu consumed by Garbage Collection. + ratioGauge, err := r.AddFloat64Gauge( + "process_heap_idle_to_alloc_ratio", + metric.WithDescription("process heap idle to allocate ratio"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) + } + + ratioEntry, err = ratioGauge.GetEntry() + if err != nil { + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) + } + + // record gauge metrics every 5 seconds. This example records the gauges periodically. However, + // depending on the application it can be non-periodic and can be recorded at any time. + done := make(chan int) + defer close(done) + go recordMetrics(1, done) + + // do your work. + work() + +} + +``` diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go new file mode 100644 index 000000000..cb2b31974 --- /dev/null +++ b/examples/gauges/gauge.go @@ -0,0 +1,199 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Command stats implements the stats Quick Start example from: +// https://opencensus.io/quickstart/go/metrics/ +// START entire +package main + +import ( + "bufio" + "fmt" + "log" + "os" + "runtime" + "strconv" + "strings" + "time" + + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "net/http" +) + +var ( + mem = &runtime.MemStats{} +) + +type memObj struct { + size int + b []byte +} + +func newMemObj(size int) *memObj { + n := &memObj{size: size, b: make([]byte, size)} + for i := 0; i < n.size; i++ { + n.b[i] = byte(i) + } + return n +} + +var allocEntry *metric.Int64GaugeEntry +var ratioEntry *metric.Float64Entry +var arr []*memObj + +func getAlloc() uint64 { + runtime.ReadMemStats(mem) + return mem.HeapAlloc +} + +func getIdleToAllocRatio() float64 { + runtime.ReadMemStats(mem) + return float64(mem.HeapIdle) / float64(mem.HeapAlloc) +} + +func consumeMem(sizeMB int) { + arr = make([]*memObj, sizeMB) + for i := 0; i < sizeMB; i++ { + arr = append(arr, newMemObj(1000000)) + } +} + +func doSomeWork(sizeMB int) { + // do some work + consumeMem(sizeMB) +} + +func recordMetrics(delay int, done chan int) { + tick := time.NewTicker(time.Duration(delay) * time.Second) + for { + select { + case <-done: + return + case <-tick.C: + // record heap allocation and idle to allocation ratio. 
+ // START record + allocEntry.Set(int64(getAlloc())) // int64 gauge + ratioEntry.Set(getIdleToAllocRatio()) // float64 gauge + // END record + } + } +} + +func getInput() int { + reader := bufio.NewReader(os.Stdin) + limit := 50 + for { + fmt.Printf("Enter memory (in MB between 1-%d): ", limit) + text, _ := reader.ReadString('\n') + sizeMB, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) + if err == nil { + if sizeMB < 1 || sizeMB > limit { + fmt.Printf("invalid value %s\n", text) + continue + } + fmt.Printf("consuming %dMB\n", sizeMB) + return sizeMB + } + fmt.Printf("error %v\n", err) + } +} + +func work() { + fmt.Printf("Program periodically records following gauge metrics\n") + fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") + fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") + fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metris.\n") + + // Do some work and record gauge metrics. + for { + sizeMB := getInput() + doSomeWork(sizeMB) + fmt.Printf("press CTRL+C to terminate the program\n") + } +} + +func createAndStartExporter() { + // Create Prometheus metrics exporter to verify gauge metrics in this example. + exporter, err := prometheus.NewExporter(prometheus.Options{}) + if err != nil { + log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + } + http.Handle("/metrics", exporter) + go func() { + log.Fatal(http.ListenAndServe(":9090", nil)) + + }() +} + +func main() { + createAndStartExporter() + + // Create metric registry and register it with global producer manager. + // START reg + r := metric.NewRegistry() + metricproducer.GlobalManager().AddProducer(r) + // END reg + + // Create Int64Gauge to report memory usage of a process. 
+ // START alloc + allocGauge, err := r.AddInt64Gauge( + "process_heap_alloc", + metric.WithDescription("Process heap allocation"), + metric.WithUnit(metricdata.UnitBytes)) + if err != nil { + log.Fatalf("error creating heap allocation gauge, error%v\n", err) + } + // END alloc + + // START entryAlloc + allocEntry, err = allocGauge.GetEntry() + if err != nil { + log.Fatalf("error getting heap allocation gauge entry, error%v\n", err) + } + // END entryAlloc + + // Create Float64Gauge to report fractional cpu consumed by Garbage Collection. + // START idle + ratioGauge, err := r.AddFloat64Gauge( + "process_heap_idle_to_alloc_ratio", + metric.WithDescription("process heap idle to allocate ratio"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) + } + // END idle + + // START entryIdle + ratioEntry, err = ratioGauge.GetEntry() + if err != nil { + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) + } + // END entryIdle + + // record gauge metrics every 5 seconds. This example records the gauges periodically. However, + // depending on the application it can be non-periodic and can be recorded at any time. + done := make(chan int) + defer close(done) + go recordMetrics(1, done) + + // do your work. + work() + +} + +// END entire From a209e5872e89ea675c5e1f46fe3e7a94876c82e3 Mon Sep 17 00:00:00 2001 From: Anand Desai Date: Mon, 15 Apr 2019 16:18:41 -0700 Subject: [PATCH 160/212] Remove extraneous err check. (#1109) Found using nogo static checker. 
--- tag/map_codec.go | 3 --- 1 file changed, 3 deletions(-) diff --git a/tag/map_codec.go b/tag/map_codec.go index e88e72777..c14c7f6db 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -229,9 +229,6 @@ func DecodeEach(bytes []byte, fn func(key Key, val string)) error { return errInvalidValue } fn(key, val) - if err != nil { - return err - } } return nil } From 1c036dfa995bd1204e960d294380056a040b3ddf Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 16 Apr 2019 10:47:04 -0700 Subject: [PATCH 161/212] Fix TOC and some typos. (#1111) --- examples/gauges/README.md | 28 ++++++++++++++-------------- examples/gauges/gauge.go | 4 ++-- 2 files changed, 16 insertions(+), 16 deletions(-) diff --git a/examples/gauges/README.md b/examples/gauges/README.md index c96c97959..03650f885 100644 --- a/examples/gauges/README.md +++ b/examples/gauges/README.md @@ -4,16 +4,16 @@ Table of Contents ================= -* [Gauges](#gauges) - * [Run the example](#run-the-example) - * [How to use gauges?](#how-to-use-gauge?) - * [Initialize metric registry](#initialize-metric-registry) - * [Create gauge metric](#create-gauge-metric) - * [Create gauge entry](#create-gauge-entry) - * [Set gauge value](#set-gauge-values) - * [Complete example](#complete-example) - -# Summary +- [Summary](#summary) +- [Run the example](#run-the-example) +- [How to use gauges?](#how-to-use-gauges-) + * [Initialize Metric Registry](#initialize-metric-registry) + * [Create gauge metric](#create-gauge-metric) + * [Create gauge entry](#create-gauge-entry) + * [Set gauge values](#set-gauge-values) + * [Complete Example](#complete-example) + +## Summary [top](#Table-of-Contents) This example shows how to use gauge metrics. The program records two gauges. @@ -25,7 +25,7 @@ It periodically runs a function that retrieves the memory stats and updates the Metrics can be viewed at [http://localhost:9090/metrcs](http://localhost:9090/metrcs) once the program is running. 
The program lets you choose the amount of memory (in MB) to consume. Choose different values and query the metrics to see the change in metrics. -## Run the example. +## Run the example ``` $ go get go.opencensus.io/examples/gauges/... @@ -37,7 +37,7 @@ then: $ go run $(go env GOPATH)/src/go.opencensus.io/examples/gauges/gauge.go ``` -## How to use gauge? +## How to use gauges? ### Initialize Metric Registry Create a new metric registry for all your metrics. @@ -213,11 +213,11 @@ func getInput() int { } func work() { - fmt.Printf("Program periodically records following gauge metrics\n") + fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") - fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metris.\n") + fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. for { diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go index cb2b31974..99ae9090e 100644 --- a/examples/gauges/gauge.go +++ b/examples/gauges/gauge.go @@ -113,11 +113,11 @@ func getInput() int { } func work() { - fmt.Printf("Program periodically records following gauge metrics\n") + fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. 
process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") - fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metris.\n") + fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. for { From a7c47d30fa4a933d83c39d7a5c66c9d2f712c96b Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 16 Apr 2019 13:32:33 -0700 Subject: [PATCH 162/212] Add derived gauge example. (#1110) * Add derived gauge example. * fix fmt error and unreachable code error. * fix typos. --- examples/derived_gauges/README.md | 348 +++++++++++++++++++++++ examples/derived_gauges/derived_gauge.go | 237 +++++++++++++++ 2 files changed, 585 insertions(+) create mode 100644 examples/derived_gauges/README.md create mode 100644 examples/derived_gauges/derived_gauge.go diff --git a/examples/derived_gauges/README.md b/examples/derived_gauges/README.md new file mode 100644 index 000000000..3315d3847 --- /dev/null +++ b/examples/derived_gauges/README.md @@ -0,0 +1,348 @@ +# Derived Gauge Example + +Table of Contents +================= +- [Summary](#summary) +- [Run the example](#run-the-example) +- [How to use derived gauges?](#how-to-use-derived-gauges-) + * [Initialize Metric Registry](#initialize-metric-registry) + * [Create derived gauge metric](#create-derived-gauge-metric) + * [Create derived gauge entry](#create-derived-gauge-entry) + * [Implement derived gauge interface](#implement-derived-gauge-interface) + * [Complete Example](#complete-example) + + + +## Summary +[top](#Table-of-Contents) + +This example demonstrates the use of derived gauges. It is a simple interactive program of consumer +and producer. User can input number of items to produce. Producer produces specified number of +items. Consumer consumes randomly consumes 1-5 items in each attempt. 
It then sleeps randomly +between 1-10 seconds before the next attempt. + +There are two metrics collected to monitor the queue. +1. **queue_size**: It is an instantaneous queue size represented using derived gauge int64. +1. **queue_seconds_since_processed_last**: It is the time elaspsed in seconds since the last time + when the queue was consumed. It is represented using derived gauge float64. +This example shows how to use gauge metrics. The program records two gauges. + +These metrics are read when exporter scrapes them. In this example prometheus exporter is used to +scrape the data. Metrics can be viewed at [http://localhost:9090/metrics](http://localhost:9090/metrics) once the program is running. + +Enter different value for number of items to queue and fetch the metrics using above url to see the variation in the metrics. + +## Run the example + +``` +$ go get go.opencensus.io/examples/derived_gauges/... +``` + +then: + +``` +$ go run $(go env GOPATH)/src/go.opencensus.io/examples/derived_gauges/derived_gauge.go +``` + +## How to use derived gauges? + +### Initialize Metric Registry +Create a new metric registry for all your metrics. +This step is a general step for any kind of metrics and not specific to gauges. +Register newly created registry with global producer manager. + +[embedmd]:# (derived_gauge.go reg) +```go +r := metric.NewRegistry() +metricproducer.GlobalManager().AddProducer(r) +``` + + +### Create derived gauge metric +Create a gauge metric. In this example we have two metrics. 
+ +**queue_size** + +[embedmd]:# (derived_gauge.go size) +```go +queueSizeGauge, err := r.AddInt64DerivedGauge( + "queue_size", + metric.WithDescription("Instantaneous queue size"), + metric.WithUnit(metricdata.UnitDimensionless)) +if err != nil { + log.Fatalf("error creating queue size derived gauge, error %v\n", err) +} +``` + +**queue_seconds_since_processed_last** + +[embedmd]:# (derived_gauge.go elapsed) +```go +elapsedSeconds, err := r.AddFloat64DerivedGauge( + "queue_seconds_since_processed_last", + metric.WithDescription("time elapsed since last time the queue was processed"), + metric.WithUnit(metricdata.UnitDimensionless)) +if err != nil { + log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) +} +``` + +### Create derived gauge entry +Now, create or insert a unique entry an interface `ToInt64` for a given set of tags. Since we are not using any tags in this example we only insert one entry for each derived gauge metric. + +**insert interface for queue_size** + +[embedmd]:# (derived_gauge.go entrySize) +```go +err = queueSizeGauge.UpsertEntry(q.Size) +if err != nil { + log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) +} +``` + +**insert interface for queue_seconds_since_processed_lasto** + +[embedmd]:# (derived_gauge.go entryElapsed) +```go +err = elapsedSeconds.UpsertEntry(q.Elapsed) +if err != nil { + log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) +} +``` + + +### Implement derived gauge interface +In order for metrics reader to read the value of your dervied gauge it must +implement ToFloat64 or ToInt64 + +[embedmd]:# (derived_gauge.go toint64) +```go +func (q *queue) Size() int64 { + q.mu.Lock() + defer q.mu.Unlock() + return int64(q.size) +} + +``` + +[embedmd]:# (derived_gauge.go tofloat64) +```go +func (q *queue) Elapsed() float64 { + q.mu.Lock() + defer q.mu.Unlock() + return time.Now().Sub(q.lastConsumed).Seconds() +} + +``` + 
+ +### Complete Example + +[embedmd]:# (derived_gauge.go entire) +```go +package main + +import ( + "fmt" + "log" + "math/rand" + "sync" + "time" + + "bufio" + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "net/http" + "os" + "strconv" + "strings" +) + +// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer +// and producer. User can input number of items to produce. Producer produces specified number of +// items. Consumer consumes randomly consumes 1-5 items in each attempt. It then sleeps randomly +// between 1-10 seconds before the next attempt. +// +// There are two metrics collected to monitor the queue. +// 1. queue_size: It is an instantaneous queue size represented using derived gauge int64. +// 2. queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time +// when the queue was consumed. It is represented using derived gauge float64. 
+type queue struct { + size int + q []int + lastConsumed time.Time + mu sync.Mutex +} + +var q = &queue{} + +const ( + maxItemsToConsumePerAttempt = 25 +) + +func init() { + q.q = make([]int, 100) +} + +// consume randomly dequeues upto 5 items from the queue +func (q *queue) consume() { + q.mu.Lock() + defer q.mu.Unlock() + + consumeCount := rand.Int() % maxItemsToConsumePerAttempt + i := 0 + for i = 0; i < consumeCount; i++ { + if q.size > 0 { + q.q = q.q[1:] + q.size-- + } else { + break + } + } + if i > 0 { + q.lastConsumed = time.Now() + } +} + +// produce randomly enqueues upto 5 items from the queue +func (q *queue) produce(count int) { + q.mu.Lock() + defer q.mu.Unlock() + + for i := 0; i < count; i++ { + v := rand.Int() % 100 + q.q = append(q.q, v) + q.size++ + } + fmt.Printf("queued %d items, queue size is %d\n", count, q.size) +} + +func (q *queue) runConsumer(interval int, cQuit chan bool) { + t := time.NewTicker(time.Duration(interval) * time.Second) + for { + select { + case <-t.C: + q.consume() + case <-cQuit: + t.Stop() + return + } + } +} + +// Size reports instantaneous queue size. +// This is the interface supplied while creating an entry for derived gauge int64. +func (q *queue) Size() int64 { + q.mu.Lock() + defer q.mu.Unlock() + return int64(q.size) +} + + +// Elapsed reports time elapsed since the last time an item was consumed from the queue. +// This is the interface supplied while creating an entry for derived gauge float64. +func (q *queue) Elapsed() float64 { + q.mu.Lock() + defer q.mu.Unlock() + return time.Now().Sub(q.lastConsumed).Seconds() +} + + +func getInput() int { + reader := bufio.NewReader(os.Stdin) + limit := 100 + for { + fmt.Printf("Enter number of items to put in consumer queue? 
[1-%d]: ", limit) + text, _ := reader.ReadString('\n') + count, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) + if err == nil { + if count < 1 || count > limit { + fmt.Printf("invalid value %s\n", text) + continue + } + return count + } + fmt.Printf("error %v\n", err) + } +} + +func doWork() { + fmt.Printf("Program monitors queue using two derived gauge metrics.\n") + fmt.Printf(" 1. queue_size = the instantaneous size of the queue.\n") + fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") + fmt.Printf("Go to http://localhost:9090/metrics to see the metrics.\n\n\n") + + // Take a number of items to queue as an input from the user + // and enqueue the same number of items on to the consumer queue. + for { + count := getInput() + q.produce(count) + fmt.Printf("press CTRL+C to terminate the program\n") + } +} + +func createAndStartExporter() { + // Create Prometheus metrics exporter to verify derived gauge metrics in this example. + exporter, err := prometheus.NewExporter(prometheus.Options{}) + if err != nil { + log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + } + http.Handle("/metrics", exporter) + go func() { + log.Fatal(http.ListenAndServe(":9090", nil)) + + }() +} + +func main() { + createAndStartExporter() + + // Create metric registry and register it with global producer manager. 
+ r := metric.NewRegistry() + metricproducer.GlobalManager().AddProducer(r) + + // Create Int64DerviedGauge + queueSizeGauge, err := r.AddInt64DerivedGauge( + "queue_size", + metric.WithDescription("Instantaneous queue size"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating queue size derived gauge, error %v\n", err) + } + + err = queueSizeGauge.UpsertEntry(q.Size) + if err != nil { + log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) + } + + // Create Float64DerviedGauge + elapsedSeconds, err := r.AddFloat64DerivedGauge( + "queue_seconds_since_processed_last", + metric.WithDescription("time elapsed since last time the queue was processed"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) + } + + err = elapsedSeconds.UpsertEntry(q.Elapsed) + if err != nil { + log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) + } + + cQuit := make(chan bool) + defer func() { + cQuit <- true + close(cQuit) + }() + + // Run consumer and producer + go q.runConsumer(5, cQuit) + + for { + doWork() + } +} + +``` diff --git a/examples/derived_gauges/derived_gauge.go b/examples/derived_gauges/derived_gauge.go new file mode 100644 index 000000000..ea3b81e89 --- /dev/null +++ b/examples/derived_gauges/derived_gauge.go @@ -0,0 +1,237 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Command stats implements the stats Quick Start example from: +// https://opencensus.io/quickstart/go/metrics/ +// START entire +package main + +import ( + "fmt" + "log" + "math/rand" + "sync" + "time" + + "bufio" + "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "net/http" + "os" + "strconv" + "strings" +) + +// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer +// and producer. User can input number of items to produce. Producer produces specified number of +// items. Consumer consumes randomly consumes 1-5 items in each attempt. It then sleeps randomly +// between 1-10 seconds before the next attempt. +// +// There are two metrics collected to monitor the queue. +// 1. queue_size: It is an instantaneous queue size represented using derived gauge int64. +// 2. queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time +// when the queue was consumed. It is represented using derived gauge float64. 
+type queue struct { + size int + q []int + lastConsumed time.Time + mu sync.Mutex +} + +var q = &queue{} + +const ( + maxItemsToConsumePerAttempt = 25 +) + +func init() { + q.q = make([]int, 100) +} + +// consume randomly dequeues upto 5 items from the queue +func (q *queue) consume() { + q.mu.Lock() + defer q.mu.Unlock() + + consumeCount := rand.Int() % maxItemsToConsumePerAttempt + i := 0 + for i = 0; i < consumeCount; i++ { + if q.size > 0 { + q.q = q.q[1:] + q.size-- + } else { + break + } + } + if i > 0 { + q.lastConsumed = time.Now() + } +} + +// produce randomly enqueues upto 5 items from the queue +func (q *queue) produce(count int) { + q.mu.Lock() + defer q.mu.Unlock() + + for i := 0; i < count; i++ { + v := rand.Int() % 100 + q.q = append(q.q, v) + q.size++ + } + fmt.Printf("queued %d items, queue size is %d\n", count, q.size) +} + +func (q *queue) runConsumer(interval int, cQuit chan bool) { + t := time.NewTicker(time.Duration(interval) * time.Second) + for { + select { + case <-t.C: + q.consume() + case <-cQuit: + t.Stop() + return + } + } +} + +// Size reports instantaneous queue size. +// This is the interface supplied while creating an entry for derived gauge int64. +// START toint64 +func (q *queue) Size() int64 { + q.mu.Lock() + defer q.mu.Unlock() + return int64(q.size) +} + +// END toint64 + +// Elapsed reports time elapsed since the last time an item was consumed from the queue. +// This is the interface supplied while creating an entry for derived gauge float64. +// START tofloat64 +func (q *queue) Elapsed() float64 { + q.mu.Lock() + defer q.mu.Unlock() + return time.Now().Sub(q.lastConsumed).Seconds() +} + +// END tofloat64 + +func getInput() int { + reader := bufio.NewReader(os.Stdin) + limit := 100 + for { + fmt.Printf("Enter number of items to put in consumer queue? 
[1-%d]: ", limit) + text, _ := reader.ReadString('\n') + count, err := strconv.Atoi(strings.TrimSuffix(text, "\n")) + if err == nil { + if count < 1 || count > limit { + fmt.Printf("invalid value %s\n", text) + continue + } + return count + } + fmt.Printf("error %v\n", err) + } +} + +func doWork() { + fmt.Printf("Program monitors queue using two derived gauge metrics.\n") + fmt.Printf(" 1. queue_size = the instantaneous size of the queue.\n") + fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") + fmt.Printf("Go to http://localhost:9090/metrics to see the metrics.\n\n\n") + + // Take a number of items to queue as an input from the user + // and enqueue the same number of items on to the consumer queue. + for { + count := getInput() + q.produce(count) + fmt.Printf("press CTRL+C to terminate the program\n") + } +} + +func createAndStartExporter() { + // Create Prometheus metrics exporter to verify derived gauge metrics in this example. + exporter, err := prometheus.NewExporter(prometheus.Options{}) + if err != nil { + log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + } + http.Handle("/metrics", exporter) + go func() { + log.Fatal(http.ListenAndServe(":9090", nil)) + + }() +} + +func main() { + createAndStartExporter() + + // Create metric registry and register it with global producer manager. 
+ // START reg + r := metric.NewRegistry() + metricproducer.GlobalManager().AddProducer(r) + // END reg + + // Create Int64DerviedGauge + // START size + queueSizeGauge, err := r.AddInt64DerivedGauge( + "queue_size", + metric.WithDescription("Instantaneous queue size"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating queue size derived gauge, error %v\n", err) + } + // END size + + // START entrySize + err = queueSizeGauge.UpsertEntry(q.Size) + if err != nil { + log.Fatalf("error getting queue size derived gauge entry, error %v\n", err) + } + // END entrySize + + // Create Float64DerviedGauge + // START elapsed + elapsedSeconds, err := r.AddFloat64DerivedGauge( + "queue_seconds_since_processed_last", + metric.WithDescription("time elapsed since last time the queue was processed"), + metric.WithUnit(metricdata.UnitDimensionless)) + if err != nil { + log.Fatalf("error creating queue_seconds_since_processed_last derived gauge, error %v\n", err) + } + // END elapsed + + // START entryElapsed + err = elapsedSeconds.UpsertEntry(q.Elapsed) + if err != nil { + log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) + } + // END entryElapsed + + cQuit := make(chan bool) + defer func() { + cQuit <- true + close(cQuit) + }() + + // Run consumer and producer + go q.runConsumer(5, cQuit) + + for { + doWork() + } +} + +// END entire From 6bebf5675638c2b62e7e1085de9d939440a00b3c Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 17 Apr 2019 20:22:21 -0700 Subject: [PATCH 163/212] Fix comment received for #1110 after it was merged. 
(#1113) --- examples/derived_gauges/README.md | 47 +++++++++++----------- examples/derived_gauges/derived_gauge.go | 50 ++++++++++++------------ examples/gauges/README.md | 18 ++++++++- examples/gauges/gauge.go | 20 ++++++++-- 4 files changed, 85 insertions(+), 50 deletions(-) diff --git a/examples/derived_gauges/README.md b/examples/derived_gauges/README.md index 3315d3847..9dda6809c 100644 --- a/examples/derived_gauges/README.md +++ b/examples/derived_gauges/README.md @@ -140,40 +140,44 @@ func (q *queue) Elapsed() float64 { [embedmd]:# (derived_gauge.go entire) ```go + +// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer +// and producer. User can input number of items to produce. Producer produces specified number of +// items. Consumer randomly consumes 1-5 items in each attempt. It then sleeps randomly +// between 1-10 seconds before the next attempt. Two metrics collected to monitor the queue. +// +// Metrics +// +// * queue_size: It is an instantaneous queue size represented using derived gauge int64. +// +// * queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time +// when the queue was consumed. It is represented using derived gauge float64. package main import ( + "bufio" "fmt" "log" "math/rand" + "net/http" + "os" + "strconv" + "strings" "sync" "time" - "bufio" "go.opencensus.io/exporter/prometheus" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" - "net/http" - "os" - "strconv" - "strings" ) -// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer -// and producer. User can input number of items to produce. Producer produces specified number of -// items. Consumer consumes randomly consumes 1-5 items in each attempt. It then sleeps randomly -// between 1-10 seconds before the next attempt. -// -// There are two metrics collected to monitor the queue. -// 1. 
queue_size: It is an instantaneous queue size represented using derived gauge int64. -// 2. queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time -// when the queue was consumed. It is represented using derived gauge float64. type queue struct { size int - q []int lastConsumed time.Time - mu sync.Mutex + + mu sync.Mutex + q []int } var q = &queue{} @@ -219,8 +223,8 @@ func (q *queue) produce(count int) { fmt.Printf("queued %d items, queue size is %d\n", count, q.size) } -func (q *queue) runConsumer(interval int, cQuit chan bool) { - t := time.NewTicker(time.Duration(interval) * time.Second) +func (q *queue) runConsumer(interval time.Duration, cQuit chan bool) { + t := time.NewTicker(interval) for { select { case <-t.C: @@ -331,14 +335,13 @@ func main() { log.Fatalf("error getting queue_seconds_since_processed_last derived gauge entry, error %v\n", err) } - cQuit := make(chan bool) + quit := make(chan bool) defer func() { - cQuit <- true - close(cQuit) + close(quit) }() // Run consumer and producer - go q.runConsumer(5, cQuit) + go q.runConsumer(5*time.Second, quit) for { doWork() diff --git a/examples/derived_gauges/derived_gauge.go b/examples/derived_gauges/derived_gauge.go index ea3b81e89..97f8de474 100644 --- a/examples/derived_gauges/derived_gauge.go +++ b/examples/derived_gauges/derived_gauge.go @@ -11,44 +11,47 @@ // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. +// -// Command stats implements the stats Quick Start example from: -// https://opencensus.io/quickstart/go/metrics/ // START entire + +// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer +// and producer. User can input number of items to produce. Producer produces specified number of +// items. Consumer randomly consumes 1-5 items in each attempt. 
It then sleeps randomly +// between 1-10 seconds before the next attempt. Two metrics collected to monitor the queue. +// +// Metrics +// +// * queue_size: It is an instantaneous queue size represented using derived gauge int64. +// +// * queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time +// when the queue was consumed. It is represented using derived gauge float64. package main import ( + "bufio" "fmt" "log" "math/rand" + "net/http" + "os" + "strconv" + "strings" "sync" "time" - "bufio" "go.opencensus.io/exporter/prometheus" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" - "net/http" - "os" - "strconv" - "strings" ) -// This example demonstrates the use of derived gauges. It is a simple interactive program of consumer -// and producer. User can input number of items to produce. Producer produces specified number of -// items. Consumer consumes randomly consumes 1-5 items in each attempt. It then sleeps randomly -// between 1-10 seconds before the next attempt. -// -// There are two metrics collected to monitor the queue. -// 1. queue_size: It is an instantaneous queue size represented using derived gauge int64. -// 2. queue_seconds_since_processed_last: It is the time elaspsed in seconds since the last time -// when the queue was consumed. It is represented using derived gauge float64. 
type queue struct { size int - q []int lastConsumed time.Time - mu sync.Mutex + + mu sync.Mutex + q []int } var q = &queue{} @@ -94,8 +97,8 @@ func (q *queue) produce(count int) { fmt.Printf("queued %d items, queue size is %d\n", count, q.size) } -func (q *queue) runConsumer(interval int, cQuit chan bool) { - t := time.NewTicker(time.Duration(interval) * time.Second) +func (q *queue) runConsumer(interval time.Duration, cQuit chan bool) { + t := time.NewTicker(interval) for { select { case <-t.C: @@ -220,14 +223,13 @@ func main() { } // END entryElapsed - cQuit := make(chan bool) + quit := make(chan bool) defer func() { - cQuit <- true - close(cQuit) + close(quit) }() // Run consumer and producer - go q.runConsumer(5, cQuit) + go q.runConsumer(5*time.Second, quit) for { doWork() diff --git a/examples/gauges/README.md b/examples/gauges/README.md index 03650f885..5ae3d7fc1 100644 --- a/examples/gauges/README.md +++ b/examples/gauges/README.md @@ -117,12 +117,29 @@ Use `Set` or `Add` function to update the value of gauge entries. You can call t [embedmd]:# (gauge.go entire) ```go + +// This example shows how to use gauge metrics. The program records two gauges, one to demonstrate +// a gauge with int64 value and the other to demonstrate a gauge with float64 value. +// +// Metrics +// +// 1. process_heap_alloc (int64): Total bytes used by objects allocated in the heap. +// It includes objects currently used and objects that are freed but not garbage collected. +// +// 2. process_heap_idle_to_alloc_ratio (float64): It is the ratio of Idle bytes to allocated +// bytes in the heap. +// +// It periodically runs a function that retrieves the memory stats and updates the above two +// metrics. These metrics are then exported using prometheus exporter. +// The program lets you choose the amount of memory (in MB) to consume. Choose different values +// and query the metrics to see the change in metrics. 
package main import ( "bufio" "fmt" "log" + "net/http" "os" "runtime" "strconv" @@ -133,7 +150,6 @@ import ( "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" - "net/http" ) var ( diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go index 99ae9090e..287d0b202 100644 --- a/examples/gauges/gauge.go +++ b/examples/gauges/gauge.go @@ -12,15 +12,30 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Command stats implements the stats Quick Start example from: -// https://opencensus.io/quickstart/go/metrics/ // START entire + +// This example shows how to use gauge metrics. The program records two gauges, one to demonstrate +// a gauge with int64 value and the other to demonstrate a gauge with float64 value. +// +// Metrics +// +// 1. process_heap_alloc (int64): Total bytes used by objects allocated in the heap. +// It includes objects currently used and objects that are freed but not garbage collected. +// +// 2. process_heap_idle_to_alloc_ratio (float64): It is the ratio of Idle bytes to allocated +// bytes in the heap. +// +// It periodically runs a function that retrieves the memory stats and updates the above two +// metrics. These metrics are then exported using prometheus exporter. +// The program lets you choose the amount of memory (in MB) to consume. Choose different values +// and query the metrics to see the change in metrics. package main import ( "bufio" "fmt" "log" + "net/http" "os" "runtime" "strconv" @@ -31,7 +46,6 @@ import ( "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" - "net/http" ) var ( From c82de269c502c4fba8e9d85d313cf900185551a9 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 18 Apr 2019 14:37:29 -0700 Subject: [PATCH 164/212] add space between error and %v in gauge example. 
(#1116) --- examples/gauges/README.md | 16 ++++++++-------- examples/gauges/gauge.go | 8 ++++---- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/examples/gauges/README.md b/examples/gauges/README.md index 5ae3d7fc1..80ed0396b 100644 --- a/examples/gauges/README.md +++ b/examples/gauges/README.md @@ -63,7 +63,7 @@ allocGauge, err := r.AddInt64Gauge( metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { - log.Fatalf("error creating heap allocation gauge, error%v\n", err) + log.Fatalf("error creating heap allocation gauge, error %v\n", err) } ``` @@ -76,7 +76,7 @@ ratioGauge, err := r.AddFloat64Gauge( metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { - log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) + log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } ``` @@ -89,7 +89,7 @@ Now, create or get a unique entry (equivalent of a row in a table) for a given s ```go allocEntry, err = allocGauge.GetEntry() if err != nil { - log.Fatalf("error getting heap allocation gauge entry, error%v\n", err) + log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } ``` @@ -99,7 +99,7 @@ if err != nil { ```go ratioEntry, err = ratioGauge.GetEntry() if err != nil { - log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } ``` @@ -269,12 +269,12 @@ func main() { metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { - log.Fatalf("error creating heap allocation gauge, error%v\n", err) + log.Fatalf("error creating heap allocation gauge, error %v\n", err) } allocEntry, err = allocGauge.GetEntry() if err != nil { - log.Fatalf("error getting heap allocation gauge entry, 
error%v\n", err) + log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } // Create Float64Gauge to report fractional cpu consumed by Garbage Collection. @@ -283,12 +283,12 @@ func main() { metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { - log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) + log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } ratioEntry, err = ratioGauge.GetEntry() if err != nil { - log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } // record gauge metrics every 5 seconds. This example records the gauges periodically. However, diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go index 287d0b202..2b744e79d 100644 --- a/examples/gauges/gauge.go +++ b/examples/gauges/gauge.go @@ -170,14 +170,14 @@ func main() { metric.WithDescription("Process heap allocation"), metric.WithUnit(metricdata.UnitBytes)) if err != nil { - log.Fatalf("error creating heap allocation gauge, error%v\n", err) + log.Fatalf("error creating heap allocation gauge, error %v\n", err) } // END alloc // START entryAlloc allocEntry, err = allocGauge.GetEntry() if err != nil { - log.Fatalf("error getting heap allocation gauge entry, error%v\n", err) + log.Fatalf("error getting heap allocation gauge entry, error %v\n", err) } // END entryAlloc @@ -188,14 +188,14 @@ func main() { metric.WithDescription("process heap idle to allocate ratio"), metric.WithUnit(metricdata.UnitDimensionless)) if err != nil { - log.Fatalf("error creating process heap idle to allocate ratio gauge, error%v\n", err) + log.Fatalf("error creating process heap idle to allocate ratio gauge, error %v\n", err) } // END idle // START entryIdle ratioEntry, err = ratioGauge.GetEntry() if err != nil { - 
log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error%v\n", err) + log.Fatalf("error getting process heap idle to allocate ratio gauge entry, error %v\n", err) } // END entryIdle From 4651f24ee1b45bd729a51adb0512c6c6ab8abe24 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 18 Apr 2019 14:38:05 -0700 Subject: [PATCH 165/212] Add description field to LabelKey (#1114) * Add description field to LabelKey * keep description as "" when not specified. --- exporter/prometheus/prometheus.go | 4 +-- metric/common.go | 2 +- metric/cumulative_test.go | 9 ++++--- metric/gauge_test.go | 41 +++++++++++++++++++++++++------ metric/metricdata/label.go | 7 ++++++ metric/metricdata/metric.go | 10 ++++---- metric/registry.go | 15 +++++++++-- stats/view/view_to_metric.go | 10 ++++---- stats/view/view_to_metric_test.go | 7 ++++-- 9 files changed, 78 insertions(+), 27 deletions(-) diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go index 53ff6ba6e..9fbffb306 100644 --- a/exporter/prometheus/prometheus.go +++ b/exporter/prometheus/prometheus.go @@ -192,9 +192,9 @@ func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata return nil } -func toPromLabels(mls []string) (labels []string) { +func toPromLabels(mls []metricdata.LabelKey) (labels []string) { for _, ml := range mls { - labels = append(labels, internal.Sanitize(ml)) + labels = append(labels, internal.Sanitize(ml.Key)) } return labels } diff --git a/metric/common.go b/metric/common.go index c370f7b52..f5716c9f5 100644 --- a/metric/common.go +++ b/metric/common.go @@ -33,7 +33,7 @@ type baseMetric struct { vals sync.Map desc metricdata.Descriptor start time.Time - keys []string + keys []metricdata.LabelKey bmType baseMetricType } diff --git a/metric/cumulative_test.go b/metric/cumulative_test.go index 2e0d3c249..538320c93 100644 --- a/metric/cumulative_test.go +++ b/metric/cumulative_test.go @@ -39,9 +39,12 @@ func TestCumulative(t *testing.T) { want := 
[]*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ - Name: "TestCumulative", - LabelKeys: []string{"k1", "k2"}, - Type: metricdata.TypeCumulativeFloat64, + Name: "TestCumulative", + LabelKeys: []metricdata.LabelKey{ + {Key: "k1"}, + {Key: "k2"}, + }, + Type: metricdata.TypeCumulativeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { diff --git a/metric/gauge_test.go b/metric/gauge_test.go index b8c41d14d..a352d2d7c 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -41,9 +41,12 @@ func TestGauge(t *testing.T) { want := []*metricdata.Metric{ { Descriptor: metricdata.Descriptor{ - Name: "TestGauge", - LabelKeys: []string{"k1", "k2"}, - Type: metricdata.TypeGaugeFloat64, + Name: "TestGauge", + LabelKeys: []metricdata.LabelKey{ + {Key: "k1"}, + {Key: "k2"}, + }, + Type: metricdata.TypeGaugeFloat64, }, TimeSeries: []*metricdata.TimeSeries{ { @@ -136,9 +139,33 @@ func TestGaugeMetricOptionLabelKeys(t *testing.T) { name := "testOptUnit" gf, _ := r.AddFloat64Gauge(name, WithLabelKeys("k1", "k3")) want := metricdata.Descriptor{ - Name: name, - LabelKeys: []string{"k1", "k3"}, - Type: metricdata.TypeGaugeFloat64, + Name: name, + LabelKeys: []metricdata.LabelKey{ + {Key: "k1"}, + {Key: "k3"}, + }, + Type: metricdata.TypeGaugeFloat64, + } + got := gf.bm.desc + if !cmp.Equal(got, want) { + t.Errorf("metric descriptor: got %v, want %v\n", got, want) + } +} + +func TestGaugeMetricOptionLabelKeysAndDesc(t *testing.T) { + r := NewRegistry() + name := "testOptUnit" + lks := []metricdata.LabelKey{} + lks = append(lks, metricdata.LabelKey{Key: "k1", Description: "desc k1"}, + metricdata.LabelKey{Key: "k3", Description: "desc k3"}) + gf, _ := r.AddFloat64Gauge(name, WithLabelKeysAndDescription(lks...)) + want := metricdata.Descriptor{ + Name: name, + LabelKeys: []metricdata.LabelKey{ + {Key: "k1", Description: "desc k1"}, + {Key: "k3", Description: "desc k3"}, + }, + Type: metricdata.TypeGaugeFloat64, } got := gf.bm.desc if !cmp.Equal(got, want) { @@ -263,7 +290,7 
@@ func TestMapKey(t *testing.T) { for i, tc := range cases { t.Run(fmt.Sprintf("case %d", i), func(t *testing.T) { g := &baseMetric{ - keys: make([]string, len(tc)), + keys: make([]metricdata.LabelKey, len(tc)), } mk := g.encodeLabelVals(tc) vals := g.decodeLabelVals(mk) diff --git a/metric/metricdata/label.go b/metric/metricdata/label.go index 87c55b9c8..aadae41e6 100644 --- a/metric/metricdata/label.go +++ b/metric/metricdata/label.go @@ -14,6 +14,13 @@ package metricdata +// LabelKey represents key of a label. It has optional +// description attribute. +type LabelKey struct { + Key string + Description string +} + // LabelValue represents the value of a label. // The zero value represents a missing label value, which may be treated // differently to an empty string value by some back ends. diff --git a/metric/metricdata/metric.go b/metric/metricdata/metric.go index 6ccdec583..8293712c7 100644 --- a/metric/metricdata/metric.go +++ b/metric/metricdata/metric.go @@ -22,11 +22,11 @@ import ( // Descriptor holds metadata about a metric. type Descriptor struct { - Name string // full name of the metric - Description string // human-readable description - Unit Unit // units for the measure - Type Type // type of measure - LabelKeys []string // label keys + Name string // full name of the metric + Description string // human-readable description + Unit Unit // units for the measure + Type Type // type of measure + LabelKeys []LabelKey // label keys } // Metric represents a quantity measured against a resource with different diff --git a/metric/registry.go b/metric/registry.go index 5dc41631d..ceea7e91d 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -31,7 +31,7 @@ type Registry struct { //TODO: [rghetia] add constant labels. type metricOptions struct { unit metricdata.Unit - labelkeys []string + labelkeys []metricdata.LabelKey desc string } @@ -53,7 +53,18 @@ func WithUnit(unit metricdata.Unit) Options { } // WithLabelKeys applies provided label. 
-func WithLabelKeys(labelKeys ...string) Options { +func WithLabelKeys(keys ...string) Options { + return func(mo *metricOptions) { + labelKeys := make([]metricdata.LabelKey, 0) + for _, key := range keys { + labelKeys = append(labelKeys, metricdata.LabelKey{Key: key}) + } + mo.labelkeys = labelKeys + } +} + +// WithLabelKeysAndDescription applies provided label. +func WithLabelKeysAndDescription(labelKeys ...metricdata.LabelKey) Options { return func(mo *metricOptions) { mo.labelkeys = labelKeys } diff --git a/stats/view/view_to_metric.go b/stats/view/view_to_metric.go index 557c19085..010f81bab 100644 --- a/stats/view/view_to_metric.go +++ b/stats/view/view_to_metric.go @@ -73,10 +73,10 @@ func getType(v *View) metricdata.Type { } } -func getLableKeys(v *View) []string { - labelKeys := []string{} +func getLableKeys(v *View) []metricdata.LabelKey { + labelKeys := []metricdata.LabelKey{} for _, k := range v.TagKeys { - labelKeys = append(labelKeys, k.Name()) + labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) } return labelKeys } @@ -91,7 +91,7 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { } } -func toLabelValues(row *Row, expectedKeys []string) []metricdata.LabelValue { +func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} tagMap := make(map[string]string) for _, tag := range row.Tags { @@ -99,7 +99,7 @@ func toLabelValues(row *Row, expectedKeys []string) []metricdata.LabelValue { } for _, key := range expectedKeys { - if val, ok := tagMap[key]; ok { + if val, ok := tagMap[key.Key]; ok { labelValues = append(labelValues, metricdata.NewLabelValue(val)) } else { labelValues = append(labelValues, metricdata.LabelValue{}) diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index 2f2c72372..c73d9a248 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -50,7 +50,7 @@ var ( labelValues 
[]metricdata.LabelValue emptyLabelValues []metricdata.LabelValue - labelKeys []string + labelKeys []metricdata.LabelKey recordsInt64 []recordValWithTag recordsFloat64 []recordValWithTag @@ -125,7 +125,10 @@ func initTags() { {Value: "", Present: false}, {Value: "", Present: false}, } - labelKeys = []string{tk1.Name(), tk2.Name()} + labelKeys = []metricdata.LabelKey{ + {Key: tk1.Name()}, + {Key: tk2.Name()}, + } recordsInt64 = []recordValWithTag{ {tags: tags, value: int64(2)}, From cc78dac756f754b43d8d1c7df0f4d736679511cd Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 18 Apr 2019 21:28:10 -0700 Subject: [PATCH 166/212] update issue template. (#1117) * update issue template. * replace Node with Go. --- .github/ISSUE_TEMPLATE/bug_report.md | 29 +++++++++++++---------- .github/ISSUE_TEMPLATE/feature_request.md | 3 ++- 2 files changed, 19 insertions(+), 13 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 19947e34c..9d1067b51 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,21 +1,26 @@ --- name: Bug report about: Create a report to help us improve - +labels: bug, triage-me --- -**Describe the bug** -A clear and concise description of what the bug is. +Please answer these questions before submitting a bug report. + +### What version of OpenCensus are you using? + + +### What version of Go are you using? + + +### What did you do? +If possible, provide a recipe for reproducing the error. + + +### What did you expect to see? + -**To Reproduce** -Steps to reproduce the behavior: -1. Go to '...' -2. Click on '....' -3. Scroll down to '....' -4. See error +### What did you see instead? -**Expected behavior** -A clear and concise description of what you expected to happen. -**Additional context** +### Additional context Add any other context about the problem here. 
diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index fc4444384..8c774e2fb 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,12 +1,13 @@ --- name: Feature request about: Suggest an idea for this project - +labels: feature-request, triage-me --- **NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in other the OpenCensus libraries in other languages. If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. + **Is your feature request related to a problem? Please describe.** A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] From 9a306f3a1b7db2a11844afd293db163e2a6c6098 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 19 Apr 2019 12:10:53 -0700 Subject: [PATCH 167/212] fix typo in feature request template. (#1119) --- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 8c774e2fb..64f3d4678 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -4,7 +4,7 @@ about: Suggest an idea for this project labels: feature-request, triage-me --- -**NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in other the OpenCensus libraries in other languages. If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. +**NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in OpenCensus libraries in other languages. 
If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. From 18733e46898ab8de9e6ab77c5831bb4d60b184bf Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 19 Apr 2019 14:54:36 -0700 Subject: [PATCH 168/212] remove Set method for cumulatives. (#1120) --- metric/cumulative.go | 36 ------------------------------------ metric/cumulative_test.go | 18 +++--------------- 2 files changed, 3 insertions(+), 51 deletions(-) diff --git a/metric/cumulative.go b/metric/cumulative.go index 6d3be3f88..549d09199 100644 --- a/metric/cumulative.go +++ b/metric/cumulative.go @@ -59,25 +59,6 @@ func (c *Float64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Float return entry.(*Float64CumulativeEntry), nil } -// Set sets the cumulative entry value to provided val. It returns without updating if the value is -// negative or lower than previously stored value. -func (e *Float64CumulativeEntry) Set(val float64) { - var swapped, equalOrLess bool - if val <= 0.0 { - return - } - for !swapped && !equalOrLess { - oldBits := atomic.LoadUint64(&e.val) - oldVal := math.Float64frombits(oldBits) - if val > oldVal { - valBits := math.Float64bits(val) - swapped = atomic.CompareAndSwapUint64(&e.val, oldBits, valBits) - } else { - equalOrLess = true - } - } -} - // Inc increments the cumulative entry value by val. It returns without incrementing if the val // is negative. func (e *Float64CumulativeEntry) Inc(val float64) { @@ -129,23 +110,6 @@ func (c *Int64Cumulative) GetEntry(labelVals ...metricdata.LabelValue) (*Int64Cu return entry.(*Int64CumulativeEntry), nil } -// Set sets the value of the cumulative entry to the provided value. It returns without updating -// if the val is negative or if the val is lower than previously stored value. 
-func (e *Int64CumulativeEntry) Set(val int64) { - var swapped, equalOrLess bool - if val <= 0 { - return - } - for !swapped && !equalOrLess { - old := atomic.LoadInt64(&e.val) - if val > old { - swapped = atomic.CompareAndSwapInt64(&e.val, old, val) - } else { - equalOrLess = true - } - } -} - // Inc increments the current cumulative entry value by val. It returns without incrementing if // the val is negative. func (e *Int64CumulativeEntry) Inc(val int64) { diff --git a/metric/cumulative_test.go b/metric/cumulative_test.go index 538320c93..6b2110d6c 100644 --- a/metric/cumulative_test.go +++ b/metric/cumulative_test.go @@ -28,7 +28,7 @@ func TestCumulative(t *testing.T) { f, _ := r.AddFloat64Cumulative("TestCumulative", WithLabelKeys("k1", "k2")) e, _ := f.GetEntry(metricdata.LabelValue{}, metricdata.LabelValue{}) - e.Set(5) + e.Inc(5) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) e.Inc(1) e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1"), metricdata.LabelValue{}) @@ -103,7 +103,7 @@ func readAndCompareInt64Val(testname string, r *Registry, want int64, t *testing } } -func TestInt64CumulativeEntry_IncAndSet(t *testing.T) { +func TestInt64CumulativeEntry_IncNegative(t *testing.T) { r := NewRegistry() g, _ := r.AddInt64Cumulative("bm") e, _ := g.GetEntry() @@ -111,12 +111,6 @@ func TestInt64CumulativeEntry_IncAndSet(t *testing.T) { readAndCompareInt64Val("inc", r, 5, t) e.Inc(-2) readAndCompareInt64Val("inc negative", r, 5, t) - e.Set(-2) - readAndCompareInt64Val("set negative", r, 5, t) - e.Set(4) - readAndCompareInt64Val("set lower", r, 5, t) - e.Set(9) - readAndCompareInt64Val("set higher", r, 9, t) } func readAndCompareFloat64Val(testname string, r *Registry, want float64, t *testing.T) { @@ -126,7 +120,7 @@ func readAndCompareFloat64Val(testname string, r *Registry, want float64, t *tes } } -func TestFloat64CumulativeEntry_IncAndSet(t *testing.T) { +func TestFloat64CumulativeEntry_IncNegative(t *testing.T) { r := NewRegistry() 
g, _ := r.AddFloat64Cumulative("bm") e, _ := g.GetEntry() @@ -134,12 +128,6 @@ func TestFloat64CumulativeEntry_IncAndSet(t *testing.T) { readAndCompareFloat64Val("inc", r, 5.0, t) e.Inc(-2.0) readAndCompareFloat64Val("inc negative", r, 5.0, t) - e.Set(-2.0) - readAndCompareFloat64Val("set negative", r, 5.0, t) - e.Set(4.0) - readAndCompareFloat64Val("set lower", r, 5.0, t) - e.Set(9.9) - readAndCompareFloat64Val("set higher", r, 9.9, t) } func TestCumulativeWithSameNameDiffType(t *testing.T) { From 0ac3701b0da6461885aa3ae14c555221d967c2c8 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Tue, 23 Apr 2019 09:49:24 -0700 Subject: [PATCH 169/212] Exemplar: Add new record APIs that take exemplar attachments and SpanContext key. (#1123) * Exemplar: Add SpanContext Attachment key. * Exemplar: Add new record APIs that take exemplar attachments. * Use RetrieveData instead of fake exporter to fix race. * Change map[string]interface to metricdata.Attachments * Use nil instead of empty map. * Update to use options for recording attachments. --- metric/metricdata/exemplar.go | 5 ++ stats/record.go | 88 ++++++++++++++++++++++++-------- stats/record_test.go | 95 +++++++++++++++++++++++++++++++++++ 3 files changed, 168 insertions(+), 20 deletions(-) create mode 100644 stats/record_test.go diff --git a/metric/metricdata/exemplar.go b/metric/metricdata/exemplar.go index cdbeef058..12695ce2d 100644 --- a/metric/metricdata/exemplar.go +++ b/metric/metricdata/exemplar.go @@ -18,6 +18,11 @@ import ( "time" ) +// Exemplars keys. +const ( + AttachmentKeySpanContext = "SpanContext" +) + // Exemplar is an example data point associated with each bucket of a // distribution type aggregation. 
// diff --git a/stats/record.go b/stats/record.go index d2af0a60d..ad4691184 100644 --- a/stats/record.go +++ b/stats/record.go @@ -18,6 +18,7 @@ package stats import ( "context" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/internal" "go.opencensus.io/tag" ) @@ -30,40 +31,87 @@ func init() { } } +type recordOptions struct { + attachments metricdata.Attachments + mutators []tag.Mutator + measurements []Measurement +} + +// WithAttachments applies provided exemplar attachments. +func WithAttachments(attachments metricdata.Attachments) Options { + return func(ro *recordOptions) { + ro.attachments = attachments + } +} + +// WithTags applies provided tag mutators. +func WithTags(mutators ...tag.Mutator) Options { + return func(ro *recordOptions) { + ro.mutators = mutators + } +} + +// WithMeasurements applies provided measurements. +func WithMeasurements(measurements ...Measurement) Options { + return func(ro *recordOptions) { + ro.measurements = measurements + } +} + +// Options apply changes to recordOptions. +type Options func(*recordOptions) + +func createRecordOption(ros ...Options) *recordOptions { + o := &recordOptions{} + for _, ro := range ros { + ro(o) + } + return o +} + // Record records one or multiple measurements with the same context at once. // If there are any tags in the context, measurements will be tagged with them. func Record(ctx context.Context, ms ...Measurement) { + RecordWithOptions(ctx, WithMeasurements(ms...)) +} + +// RecordWithTags records one or multiple measurements at once. +// +// Measurements will be tagged with the tags in the context mutated by the mutators. +// RecordWithTags is useful if you want to record with tag mutations but don't want +// to propagate the mutations in the context. 
+func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { + return RecordWithOptions(ctx, WithTags(mutators...), WithMeasurements(ms...)) +} + +// RecordWithOptions records measurements from the given options (if any) against context +// and tags and attachments in the options (if any). +// If there are any tags in the context, measurements will be tagged with them. +func RecordWithOptions(ctx context.Context, ros ...Options) error { + o := createRecordOption(ros...) + if len(o.measurements) == 0 { + return nil + } recorder := internal.DefaultRecorder if recorder == nil { - return - } - if len(ms) == 0 { - return + return nil } record := false - for _, m := range ms { + for _, m := range o.measurements { if m.desc.subscribed() { record = true break } } if !record { - return + return nil } - // TODO(songy23): fix attachments. - recorder(tag.FromContext(ctx), ms, map[string]interface{}{}) -} - -// RecordWithTags records one or multiple measurements at once. -// -// Measurements will be tagged with the tags in the context mutated by the mutators. -// RecordWithTags is useful if you want to record with tag mutations but don't want -// to propagate the mutations in the context. -func RecordWithTags(ctx context.Context, mutators []tag.Mutator, ms ...Measurement) error { - ctx, err := tag.New(ctx, mutators...) - if err != nil { - return err + if len(o.mutators) > 0 { + var err error + if ctx, err = tag.New(ctx, o.mutators...); err != nil { + return err + } } - Record(ctx, ms...) + recorder(tag.FromContext(ctx), o.measurements, o.attachments) return nil } diff --git a/stats/record_test.go b/stats/record_test.go new file mode 100644 index 000000000..ca46ed540 --- /dev/null +++ b/stats/record_test.go @@ -0,0 +1,95 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stats_test + +import ( + "context" + "log" + "reflect" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" + "go.opencensus.io/trace" +) + +var ( + tid = trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 4, 8, 16, 32, 64, 128} + sid = trace.SpanID{1, 2, 4, 8, 16, 32, 64, 128} + spanCtx = trace.SpanContext{ + TraceID: tid, + SpanID: sid, + TraceOptions: 1, + } +) + +func TestRecordWithAttachments(t *testing.T) { + k1, _ := tag.NewKey("k1") + k2, _ := tag.NewKey("k2") + distribution := view.Distribution(5, 10) + m := stats.Int64("TestRecordWithAttachments/m1", "", stats.UnitDimensionless) + v := &view.View{ + Name: "test_view", + TagKeys: []tag.Key{k1, k2}, + Measure: m, + Aggregation: distribution, + } + view.SetReportingPeriod(100 * time.Millisecond) + if err := view.Register(v); err != nil { + log.Fatalf("Failed to register views: %v", err) + } + + attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: spanCtx} + stats.RecordWithOptions(context.Background(), stats.WithAttachments(attachments), stats.WithMeasurements(m.M(12))) + rows, err := view.RetrieveData("test_view") + if err != nil { + t.Errorf("Failed to retrieve data %v", err) + } + if len(rows) == 0 { + t.Errorf("No data was recorded.") + } + data := rows[0].Data + dis, ok := data.(*view.DistributionData) + if !ok { + t.Errorf("want DistributionData, got %+v", data) + } + wantBuckets := []int64{0, 
0, 1} + if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { + t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) + } + for i, e := range dis.ExemplarsPerBucket { + // Exemplar slice should be [nil, nil, exemplar] + if i != 2 && e != nil { + t.Errorf("want nil exemplar, got %v", e) + } + if i == 2 { + wantExemplar := &metricdata.Exemplar{Value: 12, Attachments: attachments} + if diff := cmpExemplar(e, wantExemplar); diff != "" { + t.Fatalf("Unexpected Exemplar -got +want: %s", diff) + } + } + } +} + +// Compare exemplars while ignoring exemplar timestamp, since timestamp is non-deterministic. +func cmpExemplar(got, want *metricdata.Exemplar) string { + return cmp.Diff(got, want, cmpopts.IgnoreFields(metricdata.Exemplar{}, "Timestamp"), cmpopts.IgnoreUnexported(metricdata.Exemplar{})) +} From 295a4b8b79f868eed42df5700d1f63fd4d295ae0 Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 23 Apr 2019 11:44:16 -0700 Subject: [PATCH 170/212] Add log exporter. (#1126) * Add log exporter. * close log files when program terminates. * split stats into multiple lines. * fix review comments. * one more comment. --- examples/exporter/logexporter.go | 199 +++++++++++++++++++++++++++++++ 1 file changed, 199 insertions(+) create mode 100644 examples/exporter/logexporter.go diff --git a/examples/exporter/logexporter.go b/examples/exporter/logexporter.go new file mode 100644 index 000000000..d868b5a65 --- /dev/null +++ b/examples/exporter/logexporter.go @@ -0,0 +1,199 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package exporter contains a log exporter that supports exporting +// OpenCensus metrics and spans to a logging framework. +package exporter // import "go.opencensus.io/examples/exporter" + +import ( + "context" + "encoding/hex" + "fmt" + "log" + "os" + "sync" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/trace" +) + +// LogExporter exports metrics and span to log file +type LogExporter struct { + reader *metricexport.Reader + ir *metricexport.IntervalReader + initReaderOnce sync.Once + o Options + tFile *os.File + mFile *os.File + tLogger *log.Logger + mLogger *log.Logger +} + +// Options provides options for LogExporter +type Options struct { + // ReportingInterval is a time interval between two successive metrics + // export. + ReportingInterval time.Duration + + // MetricsLogFile is path where exported metrics are logged. + // If it is nil then the metrics are logged on console + MetricsLogFile string + + // TracesLogFile is path where exported span data are logged. + // If it is nil then the span data are logged on console + TracesLogFile string +} + +func getLogger(filepath string) (*log.Logger, *os.File, error) { + if filepath == "" { + return log.New(os.Stdout, "", 0), nil, nil + } + f, err := os.OpenFile(filepath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, nil, err + } + return log.New(f, "", 0), f, nil +} + +// NewLogExporter creates new log exporter. 
+func NewLogExporter(options Options) (*LogExporter, error) { + e := &LogExporter{reader: metricexport.NewReader(), + o: options} + var err error + e.tLogger, e.tFile, err = getLogger(options.TracesLogFile) + if err != nil { + return nil, err + } + e.mLogger, e.mFile, err = getLogger(options.MetricsLogFile) + if err != nil { + return nil, err + } + return e, nil +} + +func printMetricDescriptor(metric *metricdata.Metric) string { + d := metric.Descriptor + return fmt.Sprintf("name: %s, type: %s, unit: %s ", + d.Name, d.Type, d.Unit) +} + +func printLabels(metric *metricdata.Metric, values []metricdata.LabelValue) string { + d := metric.Descriptor + kv := []string{} + for i, k := range d.LabelKeys { + kv = append(kv, fmt.Sprintf("%s=%v", k, values[i])) + } + return fmt.Sprintf("%v", kv) +} + +func printPoint(point metricdata.Point) string { + switch v := point.Value.(type) { + case *metricdata.Distribution: + dv := v + return fmt.Sprintf("count=%v sum=%v sum_sq_dev=%v, buckets=%v", dv.Count, + dv.Sum, dv.SumOfSquaredDeviation, dv.Buckets) + default: + return fmt.Sprintf("value=%v", point.Value) + } +} + +// Start starts the metric and span data exporter. +func (e *LogExporter) Start() error { + trace.RegisterExporter(e) + e.initReaderOnce.Do(func() { + e.ir, _ = metricexport.NewIntervalReader(&metricexport.Reader{}, e) + }) + e.ir.ReportingInterval = e.o.ReportingInterval + return e.ir.Start() +} + +// Stop stops the metric and span data exporter. +func (e *LogExporter) Stop() { + trace.UnregisterExporter(e) + e.ir.Stop() +} + +// Close closes any files that were opened for logging. +func (e *LogExporter) Close() { + if e.tFile != nil { + e.tFile.Close() + e.tFile = nil + } + if e.mFile != nil { + e.mFile.Close() + e.mFile = nil + } +} + +// ExportMetrics exports to log. 
+func (e *LogExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + for _, metric := range metrics { + for _, ts := range metric.TimeSeries { + for _, point := range ts.Points { + e.mLogger.Println("#----------------------------------------------") + e.mLogger.Println() + e.mLogger.Printf("Metric: %s\n Labels: %s\n Value : %s\n", + printMetricDescriptor(metric), + printLabels(metric, ts.LabelValues), + printPoint(point)) + e.mLogger.Println() + } + } + } + return nil +} + +// ExportSpan exports a SpanData to log +func (e *LogExporter) ExportSpan(sd *trace.SpanData) { + var ( + traceID = hex.EncodeToString(sd.SpanContext.TraceID[:]) + spanID = hex.EncodeToString(sd.SpanContext.SpanID[:]) + parentSpanID = hex.EncodeToString(sd.ParentSpanID[:]) + ) + e.tLogger.Println() + e.tLogger.Println("#----------------------------------------------") + e.tLogger.Println() + e.tLogger.Println("TraceID: ", traceID) + e.tLogger.Println("SpanID: ", spanID) + if !reZero.MatchString(parentSpanID) { + e.tLogger.Println("ParentSpanID:", parentSpanID) + } + + e.tLogger.Println() + e.tLogger.Printf("Span: %v\n", sd.Name) + e.tLogger.Printf("Status: %v [%v]\n", sd.Status.Message, sd.Status.Code) + e.tLogger.Printf("Elapsed: %v\n", sd.EndTime.Sub(sd.StartTime).Round(time.Millisecond)) + + if len(sd.Annotations) > 0 { + e.tLogger.Println() + e.tLogger.Println("Annotations:") + for _, item := range sd.Annotations { + e.tLogger.Print(indent, item.Message) + for k, v := range item.Attributes { + e.tLogger.Printf(" %v=%v", k, v) + } + e.tLogger.Println() + } + } + + if len(sd.Attributes) > 0 { + e.tLogger.Println() + e.tLogger.Println("Attributes:") + for k, v := range sd.Attributes { + e.tLogger.Printf("%v- %v=%v\n", indent, k, v) + } + } +} From 3e65bcba0dcec645795cc8621180839401c18a0b Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 23 Apr 2019 13:07:32 -0700 Subject: [PATCH 171/212] update example to use log exporter. 
(#1128) --- examples/derived_gauges/README.md | 38 +++++++++++---------- examples/derived_gauges/derived_gauge.go | 33 ++++++++++--------- examples/gauges/README.md | 42 +++++++++++++----------- examples/gauges/gauge.go | 35 ++++++++++---------- examples/helloworld/main.go | 15 ++++++--- examples/quickstart/stats.go | 25 +++++++------- 6 files changed, 102 insertions(+), 86 deletions(-) diff --git a/examples/derived_gauges/README.md b/examples/derived_gauges/README.md index 9dda6809c..8d6f0db1a 100644 --- a/examples/derived_gauges/README.md +++ b/examples/derived_gauges/README.md @@ -27,8 +27,9 @@ There are two metrics collected to monitor the queue. when the queue was consumed. It is represented using derived gauge float64. This example shows how to use gauge metrics. The program records two gauges. -These metrics are read when exporter scrapes them. In this example prometheus exporter is used to -scrape the data. Metrics can be viewed at [http://localhost:9090/metrics](http://localhost:9090/metrics) once the program is running. +These metrics are read when exporter scrapes them. In this example log exporter is used to +log the data into a file. Metrics can be viewed at [file:///tmp/metrics.log](file:///tmp/metrics.log) +once the program is running. Alternatively you could do `tail -f /tmp/metrics.log` on Linux/OSx. Enter different value for number of items to queue and fetch the metrics using above url to see the variation in the metrics. @@ -159,19 +160,22 @@ import ( "fmt" "log" "math/rand" - "net/http" "os" "strconv" "strings" "sync" "time" - "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) +const ( + metricsLogFile = "/tmp/metrics.log" +) + type queue struct { size int lastConsumed time.Time @@ -276,7 +280,8 @@ func doWork() { fmt.Printf("Program monitors queue using two derived gauge metrics.\n") fmt.Printf(" 1. 
queue_size = the instantaneous size of the queue.\n") fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") - fmt.Printf("Go to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", + metricsLogFile, metricsLogFile) // Take a number of items to queue as an input from the user // and enqueue the same number of items on to the consumer queue. @@ -287,21 +292,18 @@ func doWork() { } } -func createAndStartExporter() { - // Create Prometheus metrics exporter to verify derived gauge metrics in this example. - exporter, err := prometheus.NewExporter(prometheus.Options{}) +func main() { + // Using logexporter but you can choose any supported exporter. + exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + }) if err != nil { - log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + log.Fatalf("Error creating log exporter: %v", err) } - http.Handle("/metrics", exporter) - go func() { - log.Fatal(http.ListenAndServe(":9090", nil)) - - }() -} - -func main() { - createAndStartExporter() + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Create metric registry and register it with global producer manager. 
r := metric.NewRegistry() diff --git a/examples/derived_gauges/derived_gauge.go b/examples/derived_gauges/derived_gauge.go index 97f8de474..721742de5 100644 --- a/examples/derived_gauges/derived_gauge.go +++ b/examples/derived_gauges/derived_gauge.go @@ -33,19 +33,22 @@ import ( "fmt" "log" "math/rand" - "net/http" "os" "strconv" "strings" "sync" "time" - "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) +const ( + metricsLogFile = "/tmp/metrics.log" +) + type queue struct { size int lastConsumed time.Time @@ -154,7 +157,8 @@ func doWork() { fmt.Printf("Program monitors queue using two derived gauge metrics.\n") fmt.Printf(" 1. queue_size = the instantaneous size of the queue.\n") fmt.Printf(" 2. queue_seconds_since_processed_last = the number of seconds elapsed since last time the queue was processed.\n") - fmt.Printf("Go to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", + metricsLogFile, metricsLogFile) // Take a number of items to queue as an input from the user // and enqueue the same number of items on to the consumer queue. @@ -165,21 +169,18 @@ func doWork() { } } -func createAndStartExporter() { - // Create Prometheus metrics exporter to verify derived gauge metrics in this example. - exporter, err := prometheus.NewExporter(prometheus.Options{}) +func main() { + // Using logexporter but you can choose any supported exporter. 
+ exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + }) if err != nil { - log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + log.Fatalf("Error creating log exporter: %v", err) } - http.Handle("/metrics", exporter) - go func() { - log.Fatal(http.ListenAndServe(":9090", nil)) - - }() -} - -func main() { - createAndStartExporter() + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Create metric registry and register it with global producer manager. // START reg diff --git a/examples/gauges/README.md b/examples/gauges/README.md index 80ed0396b..c1853b173 100644 --- a/examples/gauges/README.md +++ b/examples/gauges/README.md @@ -21,8 +21,11 @@ This example shows how to use gauge metrics. The program records two gauges. 1. **process_heap_alloc (int64)**: Total bytes used by objects allocated in the heap. It includes objects currently used and objects that are freed but not garbage collected. 1. **process_heap_idle_to_alloc_ratio (float64)**: It is the ratio of Idle bytes to allocated bytes in the heap. -It periodically runs a function that retrieves the memory stats and updates the above two metrics. These metrics are then exported using prometheus exporter. -Metrics can be viewed at [http://localhost:9090/metrcs](http://localhost:9090/metrcs) once the program is running. +It periodically runs a function that retrieves the memory stats and updates the above two metrics. +These metrics are then exported using log exporter. Metrics can be viewed at +[file:///tmp/metrics.log](file:///tmp/metrics.log) +once the program is running. Alternatively you could do `tail -f /tmp/metrics.log` on Linux/OSx. + The program lets you choose the amount of memory (in MB) to consume. Choose different values and query the metrics to see the change in metrics. 
## Run the example @@ -130,7 +133,7 @@ Use `Set` or `Add` function to update the value of gauge entries. You can call t // bytes in the heap. // // It periodically runs a function that retrieves the memory stats and updates the above two -// metrics. These metrics are then exported using prometheus exporter. +// metrics. These metrics are then exported using log exporter. // The program lets you choose the amount of memory (in MB) to consume. Choose different values // and query the metrics to see the change in metrics. package main @@ -139,19 +142,22 @@ import ( "bufio" "fmt" "log" - "net/http" "os" "runtime" "strconv" "strings" "time" - "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) +const ( + metricsLogFile = "/tmp/metrics.log" +) + var ( mem = &runtime.MemStats{} ) @@ -232,7 +238,8 @@ func work() { fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") - fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", + metricsLogFile, metricsLogFile) fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. @@ -243,21 +250,18 @@ func work() { } } -func createAndStartExporter() { - // Create Prometheus metrics exporter to verify gauge metrics in this example. - exporter, err := prometheus.NewExporter(prometheus.Options{}) +func main() { + // Using log exporter to export metrics but you can choose any supported exporter. 
+ exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + }) if err != nil { - log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + log.Fatalf("Error creating log exporter: %v", err) } - http.Handle("/metrics", exporter) - go func() { - log.Fatal(http.ListenAndServe(":9090", nil)) - - }() -} - -func main() { - createAndStartExporter() + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Create metric registry and register it with global producer manager. r := metric.NewRegistry() diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go index 2b744e79d..896effe95 100644 --- a/examples/gauges/gauge.go +++ b/examples/gauges/gauge.go @@ -26,7 +26,7 @@ // bytes in the heap. // // It periodically runs a function that retrieves the memory stats and updates the above two -// metrics. These metrics are then exported using prometheus exporter. +// metrics. These metrics are then exported using log exporter. // The program lets you choose the amount of memory (in MB) to consume. Choose different values // and query the metrics to see the change in metrics. package main @@ -35,19 +35,22 @@ import ( "bufio" "fmt" "log" - "net/http" "os" "runtime" "strconv" "strings" "time" - "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/examples/exporter" "go.opencensus.io/metric" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" ) +const ( + metricsLogFile = "/tmp/metrics.log" +) + var ( mem = &runtime.MemStats{} ) @@ -130,7 +133,8 @@ func work() { fmt.Printf("Program periodically records following gauge metrics.\n") fmt.Printf(" 1. process_heap_alloc = the heap allocation (used + freed but not garbage collected)\n") fmt.Printf(" 2. 
process_idle_to_alloc_ratio = heap idle (unused) /allocation ratio\n") - fmt.Printf("\nGo to http://localhost:9090/metrics to see the metrics.\n\n\n") + fmt.Printf("\nGo to file://%s to see the metrics. OR do `tail -f %s` in another terminal\n\n\n", + metricsLogFile, metricsLogFile) fmt.Printf("Enter memory you would like to allocate in MB to change the value of above metrics.\n") // Do some work and record gauge metrics. @@ -141,21 +145,18 @@ func work() { } } -func createAndStartExporter() { - // Create Prometheus metrics exporter to verify gauge metrics in this example. - exporter, err := prometheus.NewExporter(prometheus.Options{}) +func main() { + // Using log exporter to export metrics but you can choose any supported exporter. + exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + }) if err != nil { - log.Fatalf("Failed to create the prometheus metrics exporter: %v", err) + log.Fatalf("Error creating log exporter: %v", err) } - http.Handle("/metrics", exporter) - go func() { - log.Fatal(http.ListenAndServe(":9090", nil)) - - }() -} - -func main() { - createAndStartExporter() + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Create metric registry and register it with global producer manager. // START reg diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index 0f26636dc..5428d6e2a 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -44,11 +44,16 @@ func main() { // Register an exporter to be able to retrieve // the data from the subscribed views. 
- e := &exporter.PrintExporter{} - view.RegisterExporter(e) - trace.RegisterExporter(e) + e, err := exporter.NewLogExporter(exporter.Options{ReportingInterval: time.Duration(time.Second)}) + if err != nil { + log.Fatal(err) + } + e.Start() + defer e.Stop() + defer e.Close() + + trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - var err error frontendKey, err = tag.NewKey("example.com/keys/frontend") if err != nil { log.Fatal(err) @@ -75,7 +80,7 @@ func main() { // Wait for a duration longer than reporting duration to ensure the stats // library reports the collected data. fmt.Println("Wait longer than the reporting duration...") - time.Sleep(2 * time.Second) + time.Sleep(4 * time.Second) } // process processes the video and instruments the processing diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 8811ba17d..35208774d 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -28,13 +28,17 @@ import ( "net/http" - "go.opencensus.io/exporter/prometheus" + "go.opencensus.io/examples/exporter" "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" "go.opencensus.io/zpages" ) +const ( + metricsLogFile = "/tmp/metrics.log" +) + // Measures for the stats quickstart. var ( // The latency in milliseconds @@ -94,24 +98,23 @@ func main() { zpages.Handle(nil, "/debug") go http.ListenAndServe("localhost:8080", nil) - // Create that Stackdriver stats exporter - exporter, err := prometheus.NewExporter(prometheus.Options{}) + // Using log exporter here to export metrics but you can choose any supported exporter. 
+ exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + }) if err != nil { - log.Fatalf("Failed to create the Stackdriver stats exporter: %v", err) + log.Fatalf("Error creating log exporter: %v", err) } - http.Handle("/metrics", exporter) - - // Register the stats exporter - view.RegisterExporter(exporter) + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Register the views if err := view.Register(latencyView, lineCountView, errorCountView, lineLengthView); err != nil { log.Fatalf("Failed to register views: %v", err) } - // But also we can change the metrics reporting period to 2 seconds - //view.SetReportingPeriod(2 * time.Second) - // In a REPL: // 1. Read input // 2. process input From c31d2681e2c7549cefb261aa836cbcb0bb579938 Mon Sep 17 00:00:00 2001 From: Gustavo Silva Paiva Date: Tue, 23 Apr 2019 17:14:05 -0300 Subject: [PATCH 172/212] add constant labels to gauges and cumulative metrics (#1122) * Remove unused GetEntry. * adds support for constant labels on Gauge and CumulativeMetric * fixing format on tests. * remove unused getentry --- metric/common.go | 13 +++--- metric/cumulative_test.go | 56 ++++++++++++++++++++++++++ metric/gauge_test.go | 83 ++++++++++++++++++++++++++++++++------- metric/registry.go | 36 ++++++++++++++--- 4 files changed, 163 insertions(+), 25 deletions(-) diff --git a/metric/common.go b/metric/common.go index f5716c9f5..bd6e7719e 100644 --- a/metric/common.go +++ b/metric/common.go @@ -19,6 +19,7 @@ import ( "time" "go.opencensus.io/internal/tagencoding" + "go.opencensus.io/metric/metricdata" ) @@ -30,11 +31,12 @@ import ( // baseMetric should not be used directly, use metric specific type such as // Float64Gauge or Int64Gauge. 
type baseMetric struct { - vals sync.Map - desc metricdata.Descriptor - start time.Time - keys []metricdata.LabelKey - bmType baseMetricType + vals sync.Map + desc metricdata.Descriptor + start time.Time + keys []metricdata.LabelKey + constLabelValues []metricdata.LabelValue + bmType baseMetricType } type baseMetricType int @@ -118,6 +120,7 @@ func (bm *baseMetric) decodeLabelVals(s string) []metricdata.LabelValue { } func (bm *baseMetric) entryForValues(labelVals []metricdata.LabelValue, newEntry func() baseEntry) (interface{}, error) { + labelVals = append(bm.constLabelValues, labelVals...) if len(labelVals) != len(bm.keys) { return nil, errKeyValueMismatch } diff --git a/metric/cumulative_test.go b/metric/cumulative_test.go index 6b2110d6c..98bda94f7 100644 --- a/metric/cumulative_test.go +++ b/metric/cumulative_test.go @@ -19,6 +19,7 @@ import ( "time" "github.com/google/go-cmp/cmp" + "go.opencensus.io/metric/metricdata" ) @@ -83,6 +84,61 @@ func TestCumulative(t *testing.T) { } } +func TestCumulativeConstLabel(t *testing.T) { + r := NewRegistry() + + f, _ := r.AddFloat64Cumulative("TestCumulativeWithConstLabel", + WithLabelKeys("k1"), + WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ + {Key: "const"}: metricdata.NewLabelValue("same"), + {Key: "const2"}: metricdata.NewLabelValue("same2"), + })) + + e, _ := f.GetEntry(metricdata.LabelValue{}) + e.Inc(5) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1")) + e.Inc(1) + m := r.Read() + want := []*metricdata.Metric{ + { + Descriptor: metricdata.Descriptor{ + Name: "TestCumulativeWithConstLabel", + LabelKeys: []metricdata.LabelKey{ + {Key: "const"}, + {Key: "const2"}, + {Key: "k1"}}, + Type: metricdata.TypeCumulativeFloat64, + }, + TimeSeries: []*metricdata.TimeSeries{ + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("same"), + metricdata.NewLabelValue("same2"), + {}}, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 5), + }, + }, + { + LabelValues: 
[]metricdata.LabelValue{ + metricdata.NewLabelValue("same"), + metricdata.NewLabelValue("same2"), + metricdata.NewLabelValue("k1v1"), + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 1), + }, + }, + }, + }, + } + canonicalize(m) + canonicalize(want) + if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { + t.Errorf("-got +want: %s", diff) + } +} + func TestCumulativeMetricDescriptor(t *testing.T) { r := NewRegistry() diff --git a/metric/gauge_test.go b/metric/gauge_test.go index a352d2d7c..9c4f269ca 100644 --- a/metric/gauge_test.go +++ b/metric/gauge_test.go @@ -16,12 +16,13 @@ package metric import ( "fmt" - "go.opencensus.io/metric/metricdata" "sort" "testing" "time" "github.com/google/go-cmp/cmp" + + "go.opencensus.io/metric/metricdata" ) func TestGauge(t *testing.T) { @@ -85,6 +86,62 @@ func TestGauge(t *testing.T) { } } +func TestGaugeConstLabel(t *testing.T) { + r := NewRegistry() + + f, _ := r.AddFloat64Gauge("TestGaugeWithConstLabel", + WithLabelKeys("k1"), + WithConstLabel(map[metricdata.LabelKey]metricdata.LabelValue{ + {Key: "const"}: metricdata.NewLabelValue("same"), + {Key: "const2"}: metricdata.NewLabelValue("same2"), + })) + + e, _ := f.GetEntry(metricdata.LabelValue{}) + e.Set(5) + e, _ = f.GetEntry(metricdata.NewLabelValue("k1v1")) + e.Add(1) + m := r.Read() + want := []*metricdata.Metric{ + { + Descriptor: metricdata.Descriptor{ + Name: "TestGaugeWithConstLabel", + LabelKeys: []metricdata.LabelKey{ + {Key: "const"}, + {Key: "const2"}, + {Key: "k1"}}, + Type: metricdata.TypeGaugeFloat64, + }, + TimeSeries: []*metricdata.TimeSeries{ + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("same"), + metricdata.NewLabelValue("same2"), + {}, + }, + Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 5), + }, + }, + { + LabelValues: []metricdata.LabelValue{ + metricdata.NewLabelValue("same"), + metricdata.NewLabelValue("same2"), + metricdata.NewLabelValue("k1v1"), + }, + 
Points: []metricdata.Point{ + metricdata.NewFloat64Point(time.Time{}, 1), + }, + }, + }, + }, + } + canonicalize(m) + canonicalize(want) + if diff := cmp.Diff(m, want, cmp.Comparer(ignoreTimes)); diff != "" { + t.Errorf("-got +want: %s", diff) + } +} + func TestGaugeMetricDescriptor(t *testing.T) { r := NewRegistry() @@ -330,20 +387,18 @@ func canonicalize(ms []*metricdata.Metric) { for _, m := range ms { sort.Slice(m.TimeSeries, func(i, j int) bool { // sort time series by their label values - iLabels := m.TimeSeries[i].LabelValues - jLabels := m.TimeSeries[j].LabelValues - for k := 0; k < len(iLabels); k++ { - if !iLabels[k].Present { - if jLabels[k].Present { - return true - } - } else if !jLabels[k].Present { - return false - } else { - return iLabels[k].Value < jLabels[k].Value - } + iStr := "" + + for _, label := range m.TimeSeries[i].LabelValues { + iStr += fmt.Sprintf("%+v", label) } - panic("should have returned") + + jStr := "" + for _, label := range m.TimeSeries[j].LabelValues { + jStr += fmt.Sprintf("%+v", label) + } + + return iStr < jStr }) } } diff --git a/metric/registry.go b/metric/registry.go index ceea7e91d..6c58ff9a6 100644 --- a/metric/registry.go +++ b/metric/registry.go @@ -15,6 +15,7 @@ package metric import ( + "sort" "sync" "time" @@ -28,11 +29,11 @@ type Registry struct { baseMetrics sync.Map } -//TODO: [rghetia] add constant labels. type metricOptions struct { - unit metricdata.Unit - labelkeys []metricdata.LabelKey - desc string + unit metricdata.Unit + labelkeys []metricdata.LabelKey + constLabels map[metricdata.LabelKey]metricdata.LabelValue + desc string } // Options apply changes to metricOptions. @@ -70,6 +71,13 @@ func WithLabelKeysAndDescription(labelKeys ...metricdata.LabelKey) Options { } } +// WithConstLabel applies provided constant label. 
+func WithConstLabel(constLabels map[metricdata.LabelKey]metricdata.LabelValue) Options { + return func(mo *metricOptions) { + mo.constLabels = constLabels + } +} + // NewRegistry initializes a new Registry. func NewRegistry() *Registry { return &Registry{} @@ -236,12 +244,28 @@ func (r *Registry) initBaseMetric(bm *baseMetric, name string, mos ...Options) ( } bm.start = time.Now() o := createMetricOption(mos...) - bm.keys = o.labelkeys + + var constLabelKeys []metricdata.LabelKey + for k := range o.constLabels { + constLabelKeys = append(constLabelKeys, k) + } + sort.Slice(constLabelKeys, func(i, j int) bool { + return constLabelKeys[i].Key < constLabelKeys[j].Key + }) + + var constLabelValues []metricdata.LabelValue + for _, k := range constLabelKeys { + constLabelValues = append(constLabelValues, o.constLabels[k]) + } + + bm.keys = append(constLabelKeys, o.labelkeys...) + bm.constLabelValues = constLabelValues + bm.desc = metricdata.Descriptor{ Name: name, Description: o.desc, Unit: o.unit, - LabelKeys: o.labelkeys, + LabelKeys: bm.keys, Type: bmTypeToMetricType(bm), } r.baseMetrics.Store(name, bm) From ed3a3f0bf00d34af1ca7056123dae29672ca3b1a Mon Sep 17 00:00:00 2001 From: Yang Song Date: Tue, 23 Apr 2019 13:26:13 -0700 Subject: [PATCH 173/212] Move exporters out from core. 
(#1118) --- exporter/jaeger/agent.go | 89 - exporter/jaeger/example/main.go | 60 - exporter/jaeger/example_test.go | 74 - exporter/jaeger/internal/gen-go/README | 2 - .../gen-go/jaeger/GoUnusedProtection__.go | 6 - .../jaeger/internal/gen-go/jaeger/agent.go | 244 -- .../collector-remote/collector-remote.go | 155 -- .../internal/gen-go/jaeger/jaeger-consts.go | 23 - .../jaeger/internal/gen-go/jaeger/jaeger.go | 2443 ----------------- exporter/jaeger/jaeger.go | 369 --- exporter/jaeger/jaeger_test.go | 148 - exporter/prometheus/example/main.go | 83 - exporter/prometheus/example_test.go | 33 - exporter/prometheus/prometheus.go | 278 -- exporter/prometheus/prometheus_test.go | 450 --- exporter/zipkin/example/main.go | 77 - exporter/zipkin/example_test.go | 40 - exporter/zipkin/zipkin.go | 196 -- exporter/zipkin/zipkin_test.go | 258 -- go.mod | 4 +- go.sum | 18 +- 21 files changed, 7 insertions(+), 5043 deletions(-) delete mode 100644 exporter/jaeger/agent.go delete mode 100644 exporter/jaeger/example/main.go delete mode 100644 exporter/jaeger/example_test.go delete mode 100644 exporter/jaeger/internal/gen-go/README delete mode 100644 exporter/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go delete mode 100644 exporter/jaeger/internal/gen-go/jaeger/agent.go delete mode 100755 exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go delete mode 100644 exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go delete mode 100644 exporter/jaeger/internal/gen-go/jaeger/jaeger.go delete mode 100644 exporter/jaeger/jaeger.go delete mode 100644 exporter/jaeger/jaeger_test.go delete mode 100644 exporter/prometheus/example/main.go delete mode 100644 exporter/prometheus/example_test.go delete mode 100644 exporter/prometheus/prometheus.go delete mode 100644 exporter/prometheus/prometheus_test.go delete mode 100644 exporter/zipkin/example/main.go delete mode 100644 exporter/zipkin/example_test.go delete mode 100644 exporter/zipkin/zipkin.go delete mode 100644 
exporter/zipkin/zipkin_test.go diff --git a/exporter/jaeger/agent.go b/exporter/jaeger/agent.go deleted file mode 100644 index 4c8cd989f..000000000 --- a/exporter/jaeger/agent.go +++ /dev/null @@ -1,89 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - "io" - "net" - - "github.com/apache/thrift/lib/go/thrift" - gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" -) - -// udpPacketMaxLength is the max size of UDP packet we want to send, synced with jaeger-agent -const udpPacketMaxLength = 65000 - -// agentClientUDP is a UDP client to Jaeger agent that implements gen.Agent interface. -type agentClientUDP struct { - gen.Agent - io.Closer - - connUDP *net.UDPConn - client *gen.AgentClient - maxPacketSize int // max size of datagram in bytes - thriftBuffer *thrift.TMemoryBuffer // buffer used to calculate byte size of a span -} - -// newAgentClientUDP creates a client that sends spans to Jaeger Agent over UDP. 
-func newAgentClientUDP(hostPort string, maxPacketSize int) (*agentClientUDP, error) { - if maxPacketSize == 0 { - maxPacketSize = udpPacketMaxLength - } - - thriftBuffer := thrift.NewTMemoryBufferLen(maxPacketSize) - protocolFactory := thrift.NewTCompactProtocolFactory() - client := gen.NewAgentClientFactory(thriftBuffer, protocolFactory) - - destAddr, err := net.ResolveUDPAddr("udp", hostPort) - if err != nil { - return nil, err - } - - connUDP, err := net.DialUDP(destAddr.Network(), nil, destAddr) - if err != nil { - return nil, err - } - if err := connUDP.SetWriteBuffer(maxPacketSize); err != nil { - return nil, err - } - - clientUDP := &agentClientUDP{ - connUDP: connUDP, - client: client, - maxPacketSize: maxPacketSize, - thriftBuffer: thriftBuffer} - return clientUDP, nil -} - -// EmitBatch implements EmitBatch() of Agent interface -func (a *agentClientUDP) EmitBatch(batch *gen.Batch) error { - a.thriftBuffer.Reset() - a.client.SeqId = 0 // we have no need for distinct SeqIds for our one-way UDP messages - if err := a.client.EmitBatch(batch); err != nil { - return err - } - if a.thriftBuffer.Len() > a.maxPacketSize { - return fmt.Errorf("Data does not fit within one UDP packet; size %d, max %d, spans %d", - a.thriftBuffer.Len(), a.maxPacketSize, len(batch.Spans)) - } - _, err := a.connUDP.Write(a.thriftBuffer.Bytes()) - return err -} - -// Close implements Close() of io.Closer and closes the underlying UDP connection. -func (a *agentClientUDP) Close() error { - return a.connUDP.Close() -} diff --git a/exporter/jaeger/example/main.go b/exporter/jaeger/example/main.go deleted file mode 100644 index 12e7a9052..000000000 --- a/exporter/jaeger/example/main.go +++ /dev/null @@ -1,60 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Command jaeger is an example program that creates spans -// and uploads to Jaeger. -package main - -import ( - "context" - "log" - - "go.opencensus.io/exporter/jaeger" - "go.opencensus.io/trace" -) - -func main() { - ctx := context.Background() - - // Register the Jaeger exporter to be able to retrieve - // the collected spans. - exporter, err := jaeger.NewExporter(jaeger.Options{ - CollectorEndpoint: "http://localhost:14268/api/traces", - Process: jaeger.Process{ - ServiceName: "trace-demo", - }, - }) - if err != nil { - log.Fatal(err) - } - trace.RegisterExporter(exporter) - - // For demoing purposes, always sample. In a production application, you should - // configure this to a trace.ProbabilitySampler set at the desired - // probability. - trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - ctx, span := trace.StartSpan(ctx, "/foo") - bar(ctx) - span.End() - - exporter.Flush() -} - -func bar(ctx context.Context) { - ctx, span := trace.StartSpan(ctx, "/bar") - defer span.End() - - // Do bar... -} diff --git a/exporter/jaeger/example_test.go b/exporter/jaeger/example_test.go deleted file mode 100644 index bb21207e4..000000000 --- a/exporter/jaeger/example_test.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger_test - -import ( - "log" - - "go.opencensus.io/exporter/jaeger" - "go.opencensus.io/trace" -) - -func ExampleNewExporter_collector() { - // Register the Jaeger exporter to be able to retrieve - // the collected spans. - exporter, err := jaeger.NewExporter(jaeger.Options{ - Endpoint: "http://localhost:14268", - Process: jaeger.Process{ - ServiceName: "trace-demo", - }, - }) - if err != nil { - log.Fatal(err) - } - trace.RegisterExporter(exporter) -} - -func ExampleNewExporter_agent() { - // Register the Jaeger exporter to be able to retrieve - // the collected spans. - exporter, err := jaeger.NewExporter(jaeger.Options{ - AgentEndpoint: "localhost:6831", - Process: jaeger.Process{ - ServiceName: "trace-demo", - }, - }) - if err != nil { - log.Fatal(err) - } - trace.RegisterExporter(exporter) -} - -// ExampleNewExporter_processTags shows how to set ProcessTags -// on a Jaeger exporter. These tags will be added to the exported -// Jaeger process. -func ExampleNewExporter_processTags() { - // Register the Jaeger exporter to be able to retrieve - // the collected spans. 
- exporter, err := jaeger.NewExporter(jaeger.Options{ - AgentEndpoint: "localhost:6831", - Process: jaeger.Process{ - ServiceName: "trace-demo", - Tags: []jaeger.Tag{ - jaeger.StringTag("ip", "127.0.0.1"), - jaeger.BoolTag("demo", true), - }, - }, - }) - if err != nil { - log.Fatal(err) - } - trace.RegisterExporter(exporter) -} diff --git a/exporter/jaeger/internal/gen-go/README b/exporter/jaeger/internal/gen-go/README deleted file mode 100644 index cda0c56ff..000000000 --- a/exporter/jaeger/internal/gen-go/README +++ /dev/null @@ -1,2 +0,0 @@ -Files autogenerated by the Thrift compiler -from the files at https://github.com/jaegertracing/jaeger-idl. \ No newline at end of file diff --git a/exporter/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go b/exporter/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go deleted file mode 100644 index 345a65acb..000000000 --- a/exporter/jaeger/internal/gen-go/jaeger/GoUnusedProtection__.go +++ /dev/null @@ -1,6 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -var GoUnusedProtection__ int diff --git a/exporter/jaeger/internal/gen-go/jaeger/agent.go b/exporter/jaeger/internal/gen-go/jaeger/agent.go deleted file mode 100644 index 88d2df576..000000000 --- a/exporter/jaeger/internal/gen-go/jaeger/agent.go +++ /dev/null @@ -1,244 +0,0 @@ -// Autogenerated by Thrift Compiler (0.9.3) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "context" - "fmt" - - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = bytes.Equal - -type Agent interface { - // Parameters: - // - Batch - EmitBatch(batch *Batch) (err error) -} - -type AgentClient struct { - Transport thrift.TTransport - ProtocolFactory thrift.TProtocolFactory - InputProtocol thrift.TProtocol - OutputProtocol thrift.TProtocol - SeqId int32 -} - -func NewAgentClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: f, - InputProtocol: f.GetProtocol(t), - OutputProtocol: f.GetProtocol(t), - SeqId: 0, - } -} - -func NewAgentClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *AgentClient { - return &AgentClient{Transport: t, - ProtocolFactory: nil, - InputProtocol: iprot, - OutputProtocol: oprot, - SeqId: 0, - } -} - -// Parameters: -// - Batch -func (p *AgentClient) EmitBatch(batch *Batch) (err error) { - if err = p.sendEmitBatch(batch); err != nil { - return - } - return -} - -func (p *AgentClient) sendEmitBatch(batch *Batch) (err error) { - oprot := p.OutputProtocol - if oprot == nil { - oprot = p.ProtocolFactory.GetProtocol(p.Transport) - p.OutputProtocol = oprot - } - p.SeqId++ - if err = oprot.WriteMessageBegin("emitBatch", thrift.ONEWAY, p.SeqId); err != nil { - return - } - args := AgentEmitBatchArgs{ - Batch: batch, - } - if err = args.Write(oprot); err != nil { - return - } - if err = oprot.WriteMessageEnd(); err != nil { - return - } - return oprot.Flush(context.Background()) -} - -type AgentProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Agent -} - -func (p *AgentProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *AgentProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *AgentProcessor) ProcessorMap() 
map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewAgentProcessor(handler Agent) *AgentProcessor { - - self0 := &AgentProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self0.processorMap["emitBatch"] = &agentProcessorEmitBatch{handler: handler} - return self0 -} - -func (p *AgentProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - ctx := context.Background() - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x1 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x1.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x1 -} - -type agentProcessorEmitBatch struct { - handler Agent -} - -func (p *agentProcessorEmitBatch) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := AgentEmitBatchArgs{} - if err = args.Read(iprot); err != nil { - iprot.ReadMessageEnd() - return false, err - } - - iprot.ReadMessageEnd() - var err2 error - if err2 = p.handler.EmitBatch(args.Batch); err2 != nil { - return true, err2 - } - return true, nil -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batch -type AgentEmitBatchArgs struct { - Batch *Batch `thrift:"batch,1" json:"batch"` -} - -func NewAgentEmitBatchArgs() *AgentEmitBatchArgs { - return &AgentEmitBatchArgs{} -} - -var AgentEmitBatchArgs_Batch_DEFAULT *Batch - -func (p *AgentEmitBatchArgs) GetBatch() *Batch { - if !p.IsSetBatch() { - return AgentEmitBatchArgs_Batch_DEFAULT - } - return p.Batch -} -func (p *AgentEmitBatchArgs) IsSetBatch() bool { - return p.Batch != nil -} - -func (p *AgentEmitBatchArgs) Read(iprot 
thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if err := p.readField1(iprot); err != nil { - return err - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) readField1(iprot thrift.TProtocol) error { - p.Batch = &Batch{} - if err := p.Batch.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Batch), err) - } - return nil -} - -func (p *AgentEmitBatchArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("emitBatch_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if err := p.writeField1(oprot); err != nil { - return err - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *AgentEmitBatchArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batch", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batch: ", p), err) - } - if err := p.Batch.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Batch), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:batch: ", p), err) - } - return err -} - -func (p *AgentEmitBatchArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("AgentEmitBatchArgs(%+v)", *p) -} diff --git a/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go b/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go deleted file mode 100755 index 157559e0a..000000000 --- a/exporter/jaeger/internal/gen-go/jaeger/collector-remote/collector-remote.go +++ /dev/null @@ -1,155 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package main - -import ( - "context" - "flag" - "fmt" - "math" - "net" - "net/url" - "os" - "strconv" - "strings" - - "github.com/apache/thrift/lib/go/thrift" - "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" -) - -func Usage() { - fmt.Fprintln(os.Stderr, "Usage of ", os.Args[0], " [-h host:port] [-u url] [-f[ramed]] function [arg1 [arg2...]]:") - flag.PrintDefaults() - fmt.Fprintln(os.Stderr, "\nFunctions:") - fmt.Fprintln(os.Stderr, " submitBatches( batches)") - fmt.Fprintln(os.Stderr) - os.Exit(0) -} - -func main() { - flag.Usage = Usage - var host string - var port int - var protocol string - var urlString string - var framed bool - var useHttp bool - var parsedUrl *url.URL - var trans thrift.TTransport - _ = strconv.Atoi - _ = math.Abs - flag.Usage = Usage - flag.StringVar(&host, "h", "localhost", "Specify host and port") - flag.IntVar(&port, "p", 9090, "Specify port") - flag.StringVar(&protocol, "P", "binary", "Specify the protocol (binary, compact, simplejson, json)") - flag.StringVar(&urlString, "u", "", "Specify the url") - flag.BoolVar(&framed, "framed", false, "Use framed transport") - flag.BoolVar(&useHttp, "http", false, "Use http") - flag.Parse() - - if len(urlString) > 0 { - var err error - parsedUrl, err = url.Parse(urlString) - if err != nil { - 
fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - host = parsedUrl.Host - useHttp = len(parsedUrl.Scheme) <= 0 || parsedUrl.Scheme == "http" - } else if useHttp { - _, err := url.Parse(fmt.Sprint("http://", host, ":", port)) - if err != nil { - fmt.Fprintln(os.Stderr, "Error parsing URL: ", err) - flag.Usage() - } - } - - cmd := flag.Arg(0) - var err error - if useHttp { - trans, err = thrift.NewTHttpClient(parsedUrl.String()) - } else { - portStr := fmt.Sprint(port) - if strings.Contains(host, ":") { - host, portStr, err = net.SplitHostPort(host) - if err != nil { - fmt.Fprintln(os.Stderr, "error with host:", err) - os.Exit(1) - } - } - trans, err = thrift.NewTSocket(net.JoinHostPort(host, portStr)) - if err != nil { - fmt.Fprintln(os.Stderr, "error resolving address:", err) - os.Exit(1) - } - if framed { - trans = thrift.NewTFramedTransport(trans) - } - } - if err != nil { - fmt.Fprintln(os.Stderr, "Error creating transport", err) - os.Exit(1) - } - defer trans.Close() - var protocolFactory thrift.TProtocolFactory - switch protocol { - case "compact": - protocolFactory = thrift.NewTCompactProtocolFactory() - break - case "simplejson": - protocolFactory = thrift.NewTSimpleJSONProtocolFactory() - break - case "json": - protocolFactory = thrift.NewTJSONProtocolFactory() - break - case "binary", "": - protocolFactory = thrift.NewTBinaryProtocolFactoryDefault() - break - default: - fmt.Fprintln(os.Stderr, "Invalid protocol specified: ", protocol) - Usage() - os.Exit(1) - } - iprot := protocolFactory.GetProtocol(trans) - oprot := protocolFactory.GetProtocol(trans) - client := jaeger.NewCollectorClient(thrift.NewTStandardClient(iprot, oprot)) - if err := trans.Open(); err != nil { - fmt.Fprintln(os.Stderr, "Error opening socket to ", host, ":", port, " ", err) - os.Exit(1) - } - - switch cmd { - case "submitBatches": - if flag.NArg()-1 != 1 { - fmt.Fprintln(os.Stderr, "SubmitBatches requires 1 args") - flag.Usage() - } - arg12 := flag.Arg(1) - 
mbTrans13 := thrift.NewTMemoryBufferLen(len(arg12)) - defer mbTrans13.Close() - _, err14 := mbTrans13.WriteString(arg12) - if err14 != nil { - Usage() - return - } - factory15 := thrift.NewTSimpleJSONProtocolFactory() - jsProt16 := factory15.GetProtocol(mbTrans13) - containerStruct0 := jaeger.NewCollectorSubmitBatchesArgs() - err17 := containerStruct0.ReadField1(jsProt16) - if err17 != nil { - Usage() - return - } - argvalue0 := containerStruct0.Batches - value0 := argvalue0 - fmt.Print(client.SubmitBatches(context.Background(), value0)) - fmt.Print("\n") - break - case "": - Usage() - break - default: - fmt.Fprintln(os.Stderr, "Invalid function ", cmd) - } -} diff --git a/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go b/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go deleted file mode 100644 index d2b0fa9a9..000000000 --- a/exporter/jaeger/internal/gen-go/jaeger/jaeger-consts.go +++ /dev/null @@ -1,23 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger - -import ( - "bytes" - "context" - "fmt" - "reflect" - - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) 
-var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -func init() { -} diff --git a/exporter/jaeger/internal/gen-go/jaeger/jaeger.go b/exporter/jaeger/internal/gen-go/jaeger/jaeger.go deleted file mode 100644 index 0f913633d..000000000 --- a/exporter/jaeger/internal/gen-go/jaeger/jaeger.go +++ /dev/null @@ -1,2443 +0,0 @@ -// Autogenerated by Thrift Compiler (0.11.0) -// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING - -package jaeger // import "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - -import ( - "bytes" - "context" - "database/sql/driver" - "errors" - "fmt" - "reflect" - - "github.com/apache/thrift/lib/go/thrift" -) - -// (needed to ensure safety because of naive import list construction.) -var _ = thrift.ZERO -var _ = fmt.Printf -var _ = context.Background -var _ = reflect.DeepEqual -var _ = bytes.Equal - -type TagType int64 - -const ( - TagType_STRING TagType = 0 - TagType_DOUBLE TagType = 1 - TagType_BOOL TagType = 2 - TagType_LONG TagType = 3 - TagType_BINARY TagType = 4 -) - -func (p TagType) String() string { - switch p { - case TagType_STRING: - return "STRING" - case TagType_DOUBLE: - return "DOUBLE" - case TagType_BOOL: - return "BOOL" - case TagType_LONG: - return "LONG" - case TagType_BINARY: - return "BINARY" - } - return "" -} - -func TagTypeFromString(s string) (TagType, error) { - switch s { - case "STRING": - return TagType_STRING, nil - case "DOUBLE": - return TagType_DOUBLE, nil - case "BOOL": - return TagType_BOOL, nil - case "LONG": - return TagType_LONG, nil - case "BINARY": - return TagType_BINARY, nil - } - return TagType(0), fmt.Errorf("not a valid TagType string") -} - -func TagTypePtr(v TagType) *TagType { return &v } - -func (p TagType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *TagType) UnmarshalText(text []byte) error { - q, err := TagTypeFromString(string(text)) - if err != nil { - return err - } 
- *p = q - return nil -} - -func (p *TagType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = TagType(v) - return nil -} - -func (p *TagType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -type SpanRefType int64 - -const ( - SpanRefType_CHILD_OF SpanRefType = 0 - SpanRefType_FOLLOWS_FROM SpanRefType = 1 -) - -func (p SpanRefType) String() string { - switch p { - case SpanRefType_CHILD_OF: - return "CHILD_OF" - case SpanRefType_FOLLOWS_FROM: - return "FOLLOWS_FROM" - } - return "" -} - -func SpanRefTypeFromString(s string) (SpanRefType, error) { - switch s { - case "CHILD_OF": - return SpanRefType_CHILD_OF, nil - case "FOLLOWS_FROM": - return SpanRefType_FOLLOWS_FROM, nil - } - return SpanRefType(0), fmt.Errorf("not a valid SpanRefType string") -} - -func SpanRefTypePtr(v SpanRefType) *SpanRefType { return &v } - -func (p SpanRefType) MarshalText() ([]byte, error) { - return []byte(p.String()), nil -} - -func (p *SpanRefType) UnmarshalText(text []byte) error { - q, err := SpanRefTypeFromString(string(text)) - if err != nil { - return err - } - *p = q - return nil -} - -func (p *SpanRefType) Scan(value interface{}) error { - v, ok := value.(int64) - if !ok { - return errors.New("Scan value is not int64") - } - *p = SpanRefType(v) - return nil -} - -func (p *SpanRefType) Value() (driver.Value, error) { - if p == nil { - return nil, nil - } - return int64(*p), nil -} - -// Attributes: -// - Key -// - VType -// - VStr -// - VDouble -// - VBool -// - VLong -// - VBinary -type Tag struct { - Key string `thrift:"key,1,required" db:"key" json:"key"` - VType TagType `thrift:"vType,2,required" db:"vType" json:"vType"` - VStr *string `thrift:"vStr,3" db:"vStr" json:"vStr,omitempty"` - VDouble *float64 `thrift:"vDouble,4" db:"vDouble" json:"vDouble,omitempty"` - VBool *bool `thrift:"vBool,5" db:"vBool" json:"vBool,omitempty"` - VLong *int64 
`thrift:"vLong,6" db:"vLong" json:"vLong,omitempty"` - VBinary []byte `thrift:"vBinary,7" db:"vBinary" json:"vBinary,omitempty"` -} - -func NewTag() *Tag { - return &Tag{} -} - -func (p *Tag) GetKey() string { - return p.Key -} - -func (p *Tag) GetVType() TagType { - return p.VType -} - -var Tag_VStr_DEFAULT string - -func (p *Tag) GetVStr() string { - if !p.IsSetVStr() { - return Tag_VStr_DEFAULT - } - return *p.VStr -} - -var Tag_VDouble_DEFAULT float64 - -func (p *Tag) GetVDouble() float64 { - if !p.IsSetVDouble() { - return Tag_VDouble_DEFAULT - } - return *p.VDouble -} - -var Tag_VBool_DEFAULT bool - -func (p *Tag) GetVBool() bool { - if !p.IsSetVBool() { - return Tag_VBool_DEFAULT - } - return *p.VBool -} - -var Tag_VLong_DEFAULT int64 - -func (p *Tag) GetVLong() int64 { - if !p.IsSetVLong() { - return Tag_VLong_DEFAULT - } - return *p.VLong -} - -var Tag_VBinary_DEFAULT []byte - -func (p *Tag) GetVBinary() []byte { - return p.VBinary -} -func (p *Tag) IsSetVStr() bool { - return p.VStr != nil -} - -func (p *Tag) IsSetVDouble() bool { - return p.VDouble != nil -} - -func (p *Tag) IsSetVBool() bool { - return p.VBool != nil -} - -func (p *Tag) IsSetVLong() bool { - return p.VLong != nil -} - -func (p *Tag) IsSetVBinary() bool { - return p.VBinary != nil -} - -func (p *Tag) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetKey bool = false - var issetVType bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetKey = true - case 2: - if fieldTypeId 
== thrift.I32 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetVType = true - case 3: - if fieldTypeId == thrift.STRING { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 4: - if fieldTypeId == thrift.DOUBLE { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 5: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 6: - if fieldTypeId == thrift.I64 { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.STRING { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetKey { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Key is not set")) - } - if !issetVType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field VType is not set")) - } - return nil -} - -func (p *Tag) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Key = v - } - return nil -} - -func (p *Tag) ReadField2(iprot thrift.TProtocol) error { - if v, err := 
iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - temp := TagType(v) - p.VType = temp - } - return nil -} - -func (p *Tag) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.VStr = &v - } - return nil -} - -func (p *Tag) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadDouble(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.VDouble = &v - } - return nil -} - -func (p *Tag) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.VBool = &v - } - return nil -} - -func (p *Tag) ReadField6(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 6: ", err) - } else { - p.VLong = &v - } - return nil -} - -func (p *Tag) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBinary(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.VBinary = v - } - return nil -} - -func (p *Tag) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Tag"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop 
error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Tag) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err) - } - if err := oprot.WriteString(string(p.Key)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err) - } - return err -} - -func (p *Tag) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("vType", thrift.I32, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:vType: ", p), err) - } - if err := oprot.WriteI32(int32(p.VType)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vType (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:vType: ", p), err) - } - return err -} - -func (p *Tag) writeField3(oprot thrift.TProtocol) (err error) { - if p.IsSetVStr() { - if err := oprot.WriteFieldBegin("vStr", thrift.STRING, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:vStr: ", p), err) - } - if err := oprot.WriteString(string(*p.VStr)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vStr (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:vStr: ", p), err) - } - } - return err -} - -func (p *Tag) writeField4(oprot thrift.TProtocol) (err error) { - if p.IsSetVDouble() { - if err := oprot.WriteFieldBegin("vDouble", thrift.DOUBLE, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write field begin error 4:vDouble: ", p), err) - } - if err := oprot.WriteDouble(float64(*p.VDouble)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vDouble (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:vDouble: ", p), err) - } - } - return err -} - -func (p *Tag) writeField5(oprot thrift.TProtocol) (err error) { - if p.IsSetVBool() { - if err := oprot.WriteFieldBegin("vBool", thrift.BOOL, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:vBool: ", p), err) - } - if err := oprot.WriteBool(bool(*p.VBool)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBool (5) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:vBool: ", p), err) - } - } - return err -} - -func (p *Tag) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetVLong() { - if err := oprot.WriteFieldBegin("vLong", thrift.I64, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:vLong: ", p), err) - } - if err := oprot.WriteI64(int64(*p.VLong)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vLong (6) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:vLong: ", p), err) - } - } - return err -} - -func (p *Tag) writeField7(oprot thrift.TProtocol) (err error) { - if p.IsSetVBinary() { - if err := oprot.WriteFieldBegin("vBinary", thrift.STRING, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:vBinary: ", p), err) - } - if err := oprot.WriteBinary(p.VBinary); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.vBinary (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 7:vBinary: ", p), err) - } - } - return err -} - -func (p *Tag) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Tag(%+v)", *p) -} - -// Attributes: -// - Timestamp -// - Fields -type Log struct { - Timestamp int64 `thrift:"timestamp,1,required" db:"timestamp" json:"timestamp"` - Fields []*Tag `thrift:"fields,2,required" db:"fields" json:"fields"` -} - -func NewLog() *Log { - return &Log{} -} - -func (p *Log) GetTimestamp() int64 { - return p.Timestamp -} - -func (p *Log) GetFields() []*Tag { - return p.Fields -} -func (p *Log) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTimestamp bool = false - var issetFields bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTimestamp = true - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetFields = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTimestamp { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Timestamp is not set")) - } - if !issetFields { - return 
thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Fields is not set")) - } - return nil -} - -func (p *Log) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Timestamp = v - } - return nil -} - -func (p *Log) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Fields = tSlice - for i := 0; i < size; i++ { - _elem0 := &Tag{} - if err := _elem0.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err) - } - p.Fields = append(p.Fields, _elem0) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Log) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Log"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Log) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err) - } - if err := oprot.WriteI64(int64(p.Timestamp)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err) - } - return err -} - -func (p *Log) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("fields", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:fields: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Fields)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Fields { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:fields: ", p), err) - } - return err -} - -func (p *Log) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Log(%+v)", *p) -} - -// Attributes: -// - RefType -// - TraceIdLow -// - TraceIdHigh -// - SpanId -type SpanRef struct { - RefType SpanRefType `thrift:"refType,1,required" db:"refType" json:"refType"` - TraceIdLow int64 `thrift:"traceIdLow,2,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,3,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,4,required" db:"spanId" json:"spanId"` -} - -func NewSpanRef() *SpanRef { - return &SpanRef{} -} - -func (p *SpanRef) GetRefType() SpanRefType { - return p.RefType -} - -func (p *SpanRef) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *SpanRef) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *SpanRef) GetSpanId() int64 { - return p.SpanId -} -func (p *SpanRef) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetRefType bool = false - 
var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I32 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetRefType = true - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceIdLow = true - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceIdHigh = true - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetSpanId = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetRefType { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field RefType is not set")) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field SpanId is not set")) - } - return nil -} - -func (p *SpanRef) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - temp := SpanRefType(v) - p.RefType = temp - } - return nil -} - -func (p *SpanRef) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *SpanRef) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *SpanRef) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *SpanRef) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("SpanRef"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *SpanRef) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("refType", thrift.I32, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:refType: ", p), err) - } - if err := oprot.WriteI32(int32(p.RefType)); err != nil { 
- return thrift.PrependError(fmt.Sprintf("%T.refType (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:refType: ", p), err) - } - return err -} - -func (p *SpanRef) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdLow: ", p), err) - } - return err -} - -func (p *SpanRef) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:traceIdHigh: ", p), err) - } - return err -} - -func (p *SpanRef) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:spanId: ", p), err) - } - return err -} - -func (p *SpanRef) String() string { - if p == nil { - return "" - 
} - return fmt.Sprintf("SpanRef(%+v)", *p) -} - -// Attributes: -// - TraceIdLow -// - TraceIdHigh -// - SpanId -// - ParentSpanId -// - OperationName -// - References -// - Flags -// - StartTime -// - Duration -// - Tags -// - Logs -type Span struct { - TraceIdLow int64 `thrift:"traceIdLow,1,required" db:"traceIdLow" json:"traceIdLow"` - TraceIdHigh int64 `thrift:"traceIdHigh,2,required" db:"traceIdHigh" json:"traceIdHigh"` - SpanId int64 `thrift:"spanId,3,required" db:"spanId" json:"spanId"` - ParentSpanId int64 `thrift:"parentSpanId,4,required" db:"parentSpanId" json:"parentSpanId"` - OperationName string `thrift:"operationName,5,required" db:"operationName" json:"operationName"` - References []*SpanRef `thrift:"references,6" db:"references" json:"references,omitempty"` - Flags int32 `thrift:"flags,7,required" db:"flags" json:"flags"` - StartTime int64 `thrift:"startTime,8,required" db:"startTime" json:"startTime"` - Duration int64 `thrift:"duration,9,required" db:"duration" json:"duration"` - Tags []*Tag `thrift:"tags,10" db:"tags" json:"tags,omitempty"` - Logs []*Log `thrift:"logs,11" db:"logs" json:"logs,omitempty"` -} - -func NewSpan() *Span { - return &Span{} -} - -func (p *Span) GetTraceIdLow() int64 { - return p.TraceIdLow -} - -func (p *Span) GetTraceIdHigh() int64 { - return p.TraceIdHigh -} - -func (p *Span) GetSpanId() int64 { - return p.SpanId -} - -func (p *Span) GetParentSpanId() int64 { - return p.ParentSpanId -} - -func (p *Span) GetOperationName() string { - return p.OperationName -} - -var Span_References_DEFAULT []*SpanRef - -func (p *Span) GetReferences() []*SpanRef { - return p.References -} - -func (p *Span) GetFlags() int32 { - return p.Flags -} - -func (p *Span) GetStartTime() int64 { - return p.StartTime -} - -func (p *Span) GetDuration() int64 { - return p.Duration -} - -var Span_Tags_DEFAULT []*Tag - -func (p *Span) GetTags() []*Tag { - return p.Tags -} - -var Span_Logs_DEFAULT []*Log - -func (p *Span) GetLogs() []*Log { - return 
p.Logs -} -func (p *Span) IsSetReferences() bool { - return p.References != nil -} - -func (p *Span) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Span) IsSetLogs() bool { - return p.Logs != nil -} - -func (p *Span) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetTraceIdLow bool = false - var issetTraceIdHigh bool = false - var issetSpanId bool = false - var issetParentSpanId bool = false - var issetOperationName bool = false - var issetFlags bool = false - var issetStartTime bool = false - var issetDuration bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.I64 { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceIdLow = true - case 2: - if fieldTypeId == thrift.I64 { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetTraceIdHigh = true - case 3: - if fieldTypeId == thrift.I64 { - if err := p.ReadField3(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetSpanId = true - case 4: - if fieldTypeId == thrift.I64 { - if err := p.ReadField4(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetParentSpanId = true - case 5: - if fieldTypeId == thrift.STRING { - if err := p.ReadField5(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetOperationName = true - case 6: - if 
fieldTypeId == thrift.LIST { - if err := p.ReadField6(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 7: - if fieldTypeId == thrift.I32 { - if err := p.ReadField7(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetFlags = true - case 8: - if fieldTypeId == thrift.I64 { - if err := p.ReadField8(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetStartTime = true - case 9: - if fieldTypeId == thrift.I64 { - if err := p.ReadField9(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetDuration = true - case 10: - if fieldTypeId == thrift.LIST { - if err := p.ReadField10(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - case 11: - if fieldTypeId == thrift.LIST { - if err := p.ReadField11(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetTraceIdLow { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdLow is not set")) - } - if !issetTraceIdHigh { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field TraceIdHigh is not set")) - } - if !issetSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field SpanId is not set")) - } - if !issetParentSpanId { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, 
fmt.Errorf("Required field ParentSpanId is not set")) - } - if !issetOperationName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field OperationName is not set")) - } - if !issetFlags { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Flags is not set")) - } - if !issetStartTime { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field StartTime is not set")) - } - if !issetDuration { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Duration is not set")) - } - return nil -} - -func (p *Span) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.TraceIdLow = v - } - return nil -} - -func (p *Span) ReadField2(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 2: ", err) - } else { - p.TraceIdHigh = v - } - return nil -} - -func (p *Span) ReadField3(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 3: ", err) - } else { - p.SpanId = v - } - return nil -} - -func (p *Span) ReadField4(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 4: ", err) - } else { - p.ParentSpanId = v - } - return nil -} - -func (p *Span) ReadField5(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 5: ", err) - } else { - p.OperationName = v - } - return nil -} - -func (p *Span) ReadField6(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*SpanRef, 0, size) - p.References = tSlice - for i 
:= 0; i < size; i++ { - _elem1 := &SpanRef{} - if err := _elem1.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err) - } - p.References = append(p.References, _elem1) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField7(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI32(); err != nil { - return thrift.PrependError("error reading field 7: ", err) - } else { - p.Flags = v - } - return nil -} - -func (p *Span) ReadField8(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 8: ", err) - } else { - p.StartTime = v - } - return nil -} - -func (p *Span) ReadField9(iprot thrift.TProtocol) error { - if v, err := iprot.ReadI64(); err != nil { - return thrift.PrependError("error reading field 9: ", err) - } else { - p.Duration = v - } - return nil -} - -func (p *Span) ReadField10(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem2 := &Tag{} - if err := _elem2.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem2), err) - } - p.Tags = append(p.Tags, _elem2) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) ReadField11(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Log, 0, size) - p.Logs = tSlice - for i := 0; i < size; i++ { - _elem3 := &Log{} - if err := _elem3.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem3), 
err) - } - p.Logs = append(p.Logs, _elem3) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Span) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Span"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - if err := p.writeField3(oprot); err != nil { - return err - } - if err := p.writeField4(oprot); err != nil { - return err - } - if err := p.writeField5(oprot); err != nil { - return err - } - if err := p.writeField6(oprot); err != nil { - return err - } - if err := p.writeField7(oprot); err != nil { - return err - } - if err := p.writeField8(oprot); err != nil { - return err - } - if err := p.writeField9(oprot); err != nil { - return err - } - if err := p.writeField10(oprot); err != nil { - return err - } - if err := p.writeField11(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Span) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("traceIdLow", thrift.I64, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:traceIdLow: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdLow)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdLow (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:traceIdLow: ", p), err) - } - return err -} - -func (p *Span) writeField2(oprot thrift.TProtocol) (err error) { - if err := 
oprot.WriteFieldBegin("traceIdHigh", thrift.I64, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:traceIdHigh: ", p), err) - } - if err := oprot.WriteI64(int64(p.TraceIdHigh)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.traceIdHigh (2) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:traceIdHigh: ", p), err) - } - return err -} - -func (p *Span) writeField3(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spanId", thrift.I64, 3); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:spanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.SpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.spanId (3) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 3:spanId: ", p), err) - } - return err -} - -func (p *Span) writeField4(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("parentSpanId", thrift.I64, 4); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:parentSpanId: ", p), err) - } - if err := oprot.WriteI64(int64(p.ParentSpanId)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.parentSpanId (4) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 4:parentSpanId: ", p), err) - } - return err -} - -func (p *Span) writeField5(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("operationName", thrift.STRING, 5); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:operationName: ", p), err) - } - if err := oprot.WriteString(string(p.OperationName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.operationName (5) field write 
error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 5:operationName: ", p), err) - } - return err -} - -func (p *Span) writeField6(oprot thrift.TProtocol) (err error) { - if p.IsSetReferences() { - if err := oprot.WriteFieldBegin("references", thrift.LIST, 6); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:references: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.References)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.References { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 6:references: ", p), err) - } - } - return err -} - -func (p *Span) writeField7(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("flags", thrift.I32, 7); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 7:flags: ", p), err) - } - if err := oprot.WriteI32(int32(p.Flags)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.flags (7) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 7:flags: ", p), err) - } - return err -} - -func (p *Span) writeField8(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("startTime", thrift.I64, 8); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:startTime: ", p), err) - } - if err := oprot.WriteI64(int64(p.StartTime)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.startTime (8) field write error: ", p), err) - } - if err := 
oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 8:startTime: ", p), err) - } - return err -} - -func (p *Span) writeField9(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("duration", thrift.I64, 9); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:duration: ", p), err) - } - if err := oprot.WriteI64(int64(p.Duration)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.duration (9) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 9:duration: ", p), err) - } - return err -} - -func (p *Span) writeField10(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 10); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 10:tags: ", p), err) - } - } - return err -} - -func (p *Span) writeField11(oprot thrift.TProtocol) (err error) { - if p.IsSetLogs() { - if err := oprot.WriteFieldBegin("logs", thrift.LIST, 11); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:logs: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Logs)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Logs { - if err := v.Write(oprot); err != nil { 
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 11:logs: ", p), err) - } - } - return err -} - -func (p *Span) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Span(%+v)", *p) -} - -// Attributes: -// - ServiceName -// - Tags -type Process struct { - ServiceName string `thrift:"serviceName,1,required" db:"serviceName" json:"serviceName"` - Tags []*Tag `thrift:"tags,2" db:"tags" json:"tags,omitempty"` -} - -func NewProcess() *Process { - return &Process{} -} - -func (p *Process) GetServiceName() string { - return p.ServiceName -} - -var Process_Tags_DEFAULT []*Tag - -func (p *Process) GetTags() []*Tag { - return p.Tags -} -func (p *Process) IsSetTags() bool { - return p.Tags != nil -} - -func (p *Process) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetServiceName bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRING { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetServiceName = true - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - 
} - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetServiceName { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field ServiceName is not set")) - } - return nil -} - -func (p *Process) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadString(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.ServiceName = v - } - return nil -} - -func (p *Process) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Tag, 0, size) - p.Tags = tSlice - for i := 0; i < size; i++ { - _elem4 := &Tag{} - if err := _elem4.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err) - } - p.Tags = append(p.Tags, _elem4) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Process) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Process"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Process) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("serviceName", thrift.STRING, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:serviceName: ", p), err) - } - if err := 
oprot.WriteString(string(p.ServiceName)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.serviceName (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:serviceName: ", p), err) - } - return err -} - -func (p *Process) writeField2(oprot thrift.TProtocol) (err error) { - if p.IsSetTags() { - if err := oprot.WriteFieldBegin("tags", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:tags: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Tags)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Tags { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 2:tags: ", p), err) - } - } - return err -} - -func (p *Process) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Process(%+v)", *p) -} - -// Attributes: -// - Process -// - Spans -type Batch struct { - Process *Process `thrift:"process,1,required" db:"process" json:"process"` - Spans []*Span `thrift:"spans,2,required" db:"spans" json:"spans"` -} - -func NewBatch() *Batch { - return &Batch{} -} - -var Batch_Process_DEFAULT *Process - -func (p *Batch) GetProcess() *Process { - if !p.IsSetProcess() { - return Batch_Process_DEFAULT - } - return p.Process -} - -func (p *Batch) GetSpans() []*Span { - return p.Spans -} -func (p *Batch) IsSetProcess() bool { - return p.Process != nil -} - -func (p *Batch) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var 
issetProcess bool = false - var issetSpans bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.STRUCT { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetProcess = true - case 2: - if fieldTypeId == thrift.LIST { - if err := p.ReadField2(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetSpans = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetProcess { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Process is not set")) - } - if !issetSpans { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Spans is not set")) - } - return nil -} - -func (p *Batch) ReadField1(iprot thrift.TProtocol) error { - p.Process = &Process{} - if err := p.Process.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Process), err) - } - return nil -} - -func (p *Batch) ReadField2(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Span, 0, size) - p.Spans = tSlice - for i := 0; i < size; i++ { - _elem5 := &Span{} - if err := _elem5.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem5), err) - } - p.Spans = append(p.Spans, _elem5) - 
} - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *Batch) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("Batch"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - if err := p.writeField2(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *Batch) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("process", thrift.STRUCT, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:process: ", p), err) - } - if err := p.Process.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Process), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:process: ", p), err) - } - return err -} - -func (p *Batch) writeField2(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("spans", thrift.LIST, 2); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:spans: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Spans)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Spans { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return 
thrift.PrependError(fmt.Sprintf("%T write field end error 2:spans: ", p), err) - } - return err -} - -func (p *Batch) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("Batch(%+v)", *p) -} - -// Attributes: -// - Ok -type BatchSubmitResponse struct { - Ok bool `thrift:"ok,1,required" db:"ok" json:"ok"` -} - -func NewBatchSubmitResponse() *BatchSubmitResponse { - return &BatchSubmitResponse{} -} - -func (p *BatchSubmitResponse) GetOk() bool { - return p.Ok -} -func (p *BatchSubmitResponse) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - var issetOk bool = false - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 1: - if fieldTypeId == thrift.BOOL { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - issetOk = true - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - if !issetOk { - return thrift.NewTProtocolExceptionWithType(thrift.INVALID_DATA, fmt.Errorf("Required field Ok is not set")) - } - return nil -} - -func (p *BatchSubmitResponse) ReadField1(iprot thrift.TProtocol) error { - if v, err := iprot.ReadBool(); err != nil { - return thrift.PrependError("error reading field 1: ", err) - } else { - p.Ok = v - } - return nil -} - -func (p *BatchSubmitResponse) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("BatchSubmitResponse"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T 
write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *BatchSubmitResponse) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("ok", thrift.BOOL, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ok: ", p), err) - } - if err := oprot.WriteBool(bool(p.Ok)); err != nil { - return thrift.PrependError(fmt.Sprintf("%T.ok (1) field write error: ", p), err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ok: ", p), err) - } - return err -} - -func (p *BatchSubmitResponse) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("BatchSubmitResponse(%+v)", *p) -} - -type Collector interface { - // Parameters: - // - Batches - SubmitBatches(ctx context.Context, batches []*Batch) (r []*BatchSubmitResponse, err error) -} - -type CollectorClient struct { - c thrift.TClient -} - -// Deprecated: Use NewCollector instead -func NewCollectorClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(f.GetProtocol(t), f.GetProtocol(t)), - } -} - -// Deprecated: Use NewCollector instead -func NewCollectorClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *CollectorClient { - return &CollectorClient{ - c: thrift.NewTStandardClient(iprot, oprot), - } -} - -func NewCollectorClient(c thrift.TClient) *CollectorClient { - return &CollectorClient{ - c: c, - } -} - -// Parameters: -// - Batches -func (p *CollectorClient) SubmitBatches(ctx context.Context, batches []*Batch) (r []*BatchSubmitResponse, err 
error) { - var _args6 CollectorSubmitBatchesArgs - _args6.Batches = batches - var _result7 CollectorSubmitBatchesResult - if err = p.c.Call(ctx, "submitBatches", &_args6, &_result7); err != nil { - return - } - return _result7.GetSuccess(), nil -} - -type CollectorProcessor struct { - processorMap map[string]thrift.TProcessorFunction - handler Collector -} - -func (p *CollectorProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) { - p.processorMap[key] = processor -} - -func (p *CollectorProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) { - processor, ok = p.processorMap[key] - return processor, ok -} - -func (p *CollectorProcessor) ProcessorMap() map[string]thrift.TProcessorFunction { - return p.processorMap -} - -func NewCollectorProcessor(handler Collector) *CollectorProcessor { - - self8 := &CollectorProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)} - self8.processorMap["submitBatches"] = &collectorProcessorSubmitBatches{handler: handler} - return self8 -} - -func (p *CollectorProcessor) Process(ctx context.Context, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - name, _, seqId, err := iprot.ReadMessageBegin() - if err != nil { - return false, err - } - if processor, ok := p.GetProcessorFunction(name); ok { - return processor.Process(ctx, seqId, iprot, oprot) - } - iprot.Skip(thrift.STRUCT) - iprot.ReadMessageEnd() - x9 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name) - oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId) - x9.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, x9 - -} - -type collectorProcessorSubmitBatches struct { - handler Collector -} - -func (p *collectorProcessorSubmitBatches) Process(ctx context.Context, seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) { - args := CollectorSubmitBatchesArgs{} - if err = args.Read(iprot); 
err != nil { - iprot.ReadMessageEnd() - x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error()) - oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return false, err - } - - iprot.ReadMessageEnd() - result := CollectorSubmitBatchesResult{} - var retval []*BatchSubmitResponse - var err2 error - if retval, err2 = p.handler.SubmitBatches(ctx, args.Batches); err2 != nil { - x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing submitBatches: "+err2.Error()) - oprot.WriteMessageBegin("submitBatches", thrift.EXCEPTION, seqId) - x.Write(oprot) - oprot.WriteMessageEnd() - oprot.Flush(ctx) - return true, err2 - } else { - result.Success = retval - } - if err2 = oprot.WriteMessageBegin("submitBatches", thrift.REPLY, seqId); err2 != nil { - err = err2 - } - if err2 = result.Write(oprot); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil { - err = err2 - } - if err2 = oprot.Flush(ctx); err == nil && err2 != nil { - err = err2 - } - if err != nil { - return - } - return true, err -} - -// HELPER FUNCTIONS AND STRUCTURES - -// Attributes: -// - Batches -type CollectorSubmitBatchesArgs struct { - Batches []*Batch `thrift:"batches,1" db:"batches" json:"batches"` -} - -func NewCollectorSubmitBatchesArgs() *CollectorSubmitBatchesArgs { - return &CollectorSubmitBatchesArgs{} -} - -func (p *CollectorSubmitBatchesArgs) GetBatches() []*Batch { - return p.Batches -} -func (p *CollectorSubmitBatchesArgs) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - 
case 1: - if fieldTypeId == thrift.LIST { - if err := p.ReadField1(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) ReadField1(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*Batch, 0, size) - p.Batches = tSlice - for i := 0; i < size; i++ { - _elem10 := &Batch{} - if err := _elem10.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem10), err) - } - p.Batches = append(p.Batches, _elem10) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitBatches_args"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField1(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesArgs) writeField1(oprot thrift.TProtocol) (err error) { - if err := oprot.WriteFieldBegin("batches", thrift.LIST, 1); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:batches: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, 
len(p.Batches)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Batches { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 1:batches: ", p), err) - } - return err -} - -func (p *CollectorSubmitBatchesArgs) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesArgs(%+v)", *p) -} - -// Attributes: -// - Success -type CollectorSubmitBatchesResult struct { - Success []*BatchSubmitResponse `thrift:"success,0" db:"success" json:"success,omitempty"` -} - -func NewCollectorSubmitBatchesResult() *CollectorSubmitBatchesResult { - return &CollectorSubmitBatchesResult{} -} - -var CollectorSubmitBatchesResult_Success_DEFAULT []*BatchSubmitResponse - -func (p *CollectorSubmitBatchesResult) GetSuccess() []*BatchSubmitResponse { - return p.Success -} -func (p *CollectorSubmitBatchesResult) IsSetSuccess() bool { - return p.Success != nil -} - -func (p *CollectorSubmitBatchesResult) Read(iprot thrift.TProtocol) error { - if _, err := iprot.ReadStructBegin(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err) - } - - for { - _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin() - if err != nil { - return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err) - } - if fieldTypeId == thrift.STOP { - break - } - switch fieldId { - case 0: - if fieldTypeId == thrift.LIST { - if err := p.ReadField0(iprot); err != nil { - return err - } - } else { - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - default: - if err := iprot.Skip(fieldTypeId); err != nil { - return err - } - } - if err := iprot.ReadFieldEnd(); err != 
nil { - return err - } - } - if err := iprot.ReadStructEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) ReadField0(iprot thrift.TProtocol) error { - _, size, err := iprot.ReadListBegin() - if err != nil { - return thrift.PrependError("error reading list begin: ", err) - } - tSlice := make([]*BatchSubmitResponse, 0, size) - p.Success = tSlice - for i := 0; i < size; i++ { - _elem11 := &BatchSubmitResponse{} - if err := _elem11.Read(iprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem11), err) - } - p.Success = append(p.Success, _elem11) - } - if err := iprot.ReadListEnd(); err != nil { - return thrift.PrependError("error reading list end: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) Write(oprot thrift.TProtocol) error { - if err := oprot.WriteStructBegin("submitBatches_result"); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err) - } - if p != nil { - if err := p.writeField0(oprot); err != nil { - return err - } - } - if err := oprot.WriteFieldStop(); err != nil { - return thrift.PrependError("write field stop error: ", err) - } - if err := oprot.WriteStructEnd(); err != nil { - return thrift.PrependError("write struct stop error: ", err) - } - return nil -} - -func (p *CollectorSubmitBatchesResult) writeField0(oprot thrift.TProtocol) (err error) { - if p.IsSetSuccess() { - if err := oprot.WriteFieldBegin("success", thrift.LIST, 0); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err) - } - if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Success)); err != nil { - return thrift.PrependError("error writing list begin: ", err) - } - for _, v := range p.Success { - if err := v.Write(oprot); err != nil { - return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err) - } - } - if 
err := oprot.WriteListEnd(); err != nil { - return thrift.PrependError("error writing list end: ", err) - } - if err := oprot.WriteFieldEnd(); err != nil { - return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err) - } - } - return err -} - -func (p *CollectorSubmitBatchesResult) String() string { - if p == nil { - return "" - } - return fmt.Sprintf("CollectorSubmitBatchesResult(%+v)", *p) -} diff --git a/exporter/jaeger/jaeger.go b/exporter/jaeger/jaeger.go deleted file mode 100644 index 3574a0650..000000000 --- a/exporter/jaeger/jaeger.go +++ /dev/null @@ -1,369 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package jaeger contains an OpenCensus tracing exporter for Jaeger. -package jaeger // import "go.opencensus.io/exporter/jaeger" - -import ( - "bytes" - "encoding/binary" - "errors" - "fmt" - "io" - "io/ioutil" - "log" - "net/http" - - "github.com/apache/thrift/lib/go/thrift" - gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - "go.opencensus.io/trace" - "google.golang.org/api/support/bundler" -) - -const defaultServiceName = "OpenCensus" - -// Options are the options to be used when initializing a Jaeger exporter. -type Options struct { - // Endpoint is the Jaeger HTTP Thrift endpoint. - // For example, http://localhost:14268. - // - // Deprecated: Use CollectorEndpoint instead. 
- Endpoint string - - // CollectorEndpoint is the full url to the Jaeger HTTP Thrift collector. - // For example, http://localhost:14268/api/traces - CollectorEndpoint string - - // AgentEndpoint instructs exporter to send spans to jaeger-agent at this address. - // For example, localhost:6831. - AgentEndpoint string - - // OnError is the hook to be called when there is - // an error occurred when uploading the stats data. - // If no custom hook is set, errors are logged. - // Optional. - OnError func(err error) - - // Username to be used if basic auth is required. - // Optional. - Username string - - // Password to be used if basic auth is required. - // Optional. - Password string - - // ServiceName is the Jaeger service name. - // Deprecated: Specify Process instead. - ServiceName string - - // Process contains the information about the exporting process. - Process Process - - //BufferMaxCount defines the total number of traces that can be buffered in memory - BufferMaxCount int -} - -// NewExporter returns a trace.Exporter implementation that exports -// the collected spans to Jaeger. -func NewExporter(o Options) (*Exporter, error) { - if o.Endpoint == "" && o.CollectorEndpoint == "" && o.AgentEndpoint == "" { - return nil, errors.New("missing endpoint for Jaeger exporter") - } - - var endpoint string - var client *agentClientUDP - var err error - if o.Endpoint != "" { - endpoint = o.Endpoint + "/api/traces?format=jaeger.thrift" - log.Printf("Endpoint has been deprecated. 
Please use CollectorEndpoint instead.") - } else if o.CollectorEndpoint != "" { - endpoint = o.CollectorEndpoint - } else { - client, err = newAgentClientUDP(o.AgentEndpoint, udpPacketMaxLength) - if err != nil { - return nil, err - } - } - onError := func(err error) { - if o.OnError != nil { - o.OnError(err) - return - } - log.Printf("Error when uploading spans to Jaeger: %v", err) - } - service := o.Process.ServiceName - if service == "" && o.ServiceName != "" { - // fallback to old service name if specified - service = o.ServiceName - } else if service == "" { - service = defaultServiceName - } - tags := make([]*gen.Tag, len(o.Process.Tags)) - for i, tag := range o.Process.Tags { - tags[i] = attributeToTag(tag.key, tag.value) - } - e := &Exporter{ - endpoint: endpoint, - agentEndpoint: o.AgentEndpoint, - client: client, - username: o.Username, - password: o.Password, - process: &gen.Process{ - ServiceName: service, - Tags: tags, - }, - } - bundler := bundler.NewBundler((*gen.Span)(nil), func(bundle interface{}) { - if err := e.upload(bundle.([]*gen.Span)); err != nil { - onError(err) - } - }) - - // Set BufferedByteLimit with the total number of spans that are permissible to be held in memory. - // This needs to be done since the size of messages is always set to 1. Failing to set this would allow - // 1G messages to be held in memory since that is the default value of BufferedByteLimit. - if o.BufferMaxCount != 0 { - bundler.BufferedByteLimit = o.BufferMaxCount - } - - e.bundler = bundler - return e, nil -} - -// Process contains the information exported to jaeger about the source -// of the trace data. -type Process struct { - // ServiceName is the Jaeger service name. 
- ServiceName string - - // Tags are added to Jaeger Process exports - Tags []Tag -} - -// Tag defines a key-value pair -// It is limited to the possible conversions to *jaeger.Tag by attributeToTag -type Tag struct { - key string - value interface{} -} - -// BoolTag creates a new tag of type bool, exported as jaeger.TagType_BOOL -func BoolTag(key string, value bool) Tag { - return Tag{key, value} -} - -// StringTag creates a new tag of type string, exported as jaeger.TagType_STRING -func StringTag(key string, value string) Tag { - return Tag{key, value} -} - -// Int64Tag creates a new tag of type int64, exported as jaeger.TagType_LONG -func Int64Tag(key string, value int64) Tag { - return Tag{key, value} -} - -// Exporter is an implementation of trace.Exporter that uploads spans to Jaeger. -type Exporter struct { - endpoint string - agentEndpoint string - process *gen.Process - bundler *bundler.Bundler - client *agentClientUDP - - username, password string -} - -var _ trace.Exporter = (*Exporter)(nil) - -// ExportSpan exports a SpanData to Jaeger. -func (e *Exporter) ExportSpan(data *trace.SpanData) { - e.bundler.Add(spanDataToThrift(data), 1) - // TODO(jbd): Handle oversized bundlers. -} - -// As per the OpenCensus Status code mapping in -// https://opencensus.io/tracing/span/status/ -// the status is OK if the code is 0. -const opencensusStatusCodeOK = 0 - -func spanDataToThrift(data *trace.SpanData) *gen.Span { - tags := make([]*gen.Tag, 0, len(data.Attributes)) - for k, v := range data.Attributes { - tag := attributeToTag(k, v) - if tag != nil { - tags = append(tags, tag) - } - } - - tags = append(tags, - attributeToTag("status.code", data.Status.Code), - attributeToTag("status.message", data.Status.Message), - ) - - // Ensure that if Status.Code is not OK, that we set the "error" tag on the Jaeger span. 
- // See Issue https://github.com/census-instrumentation/opencensus-go/issues/1041 - if data.Status.Code != opencensusStatusCodeOK { - tags = append(tags, attributeToTag("error", true)) - } - - var logs []*gen.Log - for _, a := range data.Annotations { - fields := make([]*gen.Tag, 0, len(a.Attributes)) - for k, v := range a.Attributes { - tag := attributeToTag(k, v) - if tag != nil { - fields = append(fields, tag) - } - } - fields = append(fields, attributeToTag("message", a.Message)) - logs = append(logs, &gen.Log{ - Timestamp: a.Time.UnixNano() / 1000, - Fields: fields, - }) - } - var refs []*gen.SpanRef - for _, link := range data.Links { - refs = append(refs, &gen.SpanRef{ - TraceIdHigh: bytesToInt64(link.TraceID[0:8]), - TraceIdLow: bytesToInt64(link.TraceID[8:16]), - SpanId: bytesToInt64(link.SpanID[:]), - }) - } - return &gen.Span{ - TraceIdHigh: bytesToInt64(data.TraceID[0:8]), - TraceIdLow: bytesToInt64(data.TraceID[8:16]), - SpanId: bytesToInt64(data.SpanID[:]), - ParentSpanId: bytesToInt64(data.ParentSpanID[:]), - OperationName: name(data), - Flags: int32(data.TraceOptions), - StartTime: data.StartTime.UnixNano() / 1000, - Duration: data.EndTime.Sub(data.StartTime).Nanoseconds() / 1000, - Tags: tags, - Logs: logs, - References: refs, - } -} - -func name(sd *trace.SpanData) string { - n := sd.Name - switch sd.SpanKind { - case trace.SpanKindClient: - n = "Sent." + n - case trace.SpanKindServer: - n = "Recv." 
+ n - } - return n -} - -func attributeToTag(key string, a interface{}) *gen.Tag { - var tag *gen.Tag - switch value := a.(type) { - case bool: - tag = &gen.Tag{ - Key: key, - VBool: &value, - VType: gen.TagType_BOOL, - } - case string: - tag = &gen.Tag{ - Key: key, - VStr: &value, - VType: gen.TagType_STRING, - } - case int64: - tag = &gen.Tag{ - Key: key, - VLong: &value, - VType: gen.TagType_LONG, - } - case int32: - v := int64(value) - tag = &gen.Tag{ - Key: key, - VLong: &v, - VType: gen.TagType_LONG, - } - case float64: - v := float64(value) - tag = &gen.Tag{ - Key: key, - VDouble: &v, - VType: gen.TagType_DOUBLE, - } - } - return tag -} - -// Flush waits for exported trace spans to be uploaded. -// -// This is useful if your program is ending and you do not want to lose recent spans. -func (e *Exporter) Flush() { - e.bundler.Flush() -} - -func (e *Exporter) upload(spans []*gen.Span) error { - batch := &gen.Batch{ - Spans: spans, - Process: e.process, - } - if e.endpoint != "" { - return e.uploadCollector(batch) - } - return e.uploadAgent(batch) -} - -func (e *Exporter) uploadAgent(batch *gen.Batch) error { - return e.client.EmitBatch(batch) -} - -func (e *Exporter) uploadCollector(batch *gen.Batch) error { - body, err := serialize(batch) - if err != nil { - return err - } - req, err := http.NewRequest("POST", e.endpoint, body) - if err != nil { - return err - } - if e.username != "" && e.password != "" { - req.SetBasicAuth(e.username, e.password) - } - req.Header.Set("Content-Type", "application/x-thrift") - - resp, err := http.DefaultClient.Do(req) - if err != nil { - return err - } - - io.Copy(ioutil.Discard, resp.Body) - resp.Body.Close() - - if resp.StatusCode < 200 || resp.StatusCode >= 300 { - return fmt.Errorf("failed to upload traces; HTTP status code: %d", resp.StatusCode) - } - return nil -} - -func serialize(obj thrift.TStruct) (*bytes.Buffer, error) { - buf := thrift.NewTMemoryBuffer() - if err := 
obj.Write(thrift.NewTBinaryProtocolTransport(buf)); err != nil { - return nil, err - } - return buf.Buffer, nil -} - -func bytesToInt64(buf []byte) int64 { - u := binary.BigEndian.Uint64(buf) - return int64(u) -} diff --git a/exporter/jaeger/jaeger_test.go b/exporter/jaeger/jaeger_test.go deleted file mode 100644 index f93b5dd0e..000000000 --- a/exporter/jaeger/jaeger_test.go +++ /dev/null @@ -1,148 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package jaeger - -import ( - "fmt" - "reflect" - "testing" - "time" - - gen "go.opencensus.io/exporter/jaeger/internal/gen-go/jaeger" - "go.opencensus.io/trace" - "sort" -) - -// TODO(jbd): Test export. 
- -func Test_bytesToInt64(t *testing.T) { - type args struct { - } - tests := []struct { - buf []byte - want int64 - }{ - { - buf: []byte{255, 0, 0, 0, 0, 0, 0, 0}, - want: -72057594037927936, - }, - { - buf: []byte{0, 0, 0, 0, 0, 0, 0, 1}, - want: 1, - }, - { - buf: []byte{0, 0, 0, 0, 0, 0, 0, 0}, - want: 0, - }, - } - for _, tt := range tests { - t.Run(fmt.Sprintf("%d", tt.want), func(t *testing.T) { - if got := bytesToInt64(tt.buf); got != tt.want { - t.Errorf("bytesToInt64() = \n%v, \n want \n%v", got, tt.want) - } - }) - } -} - -func Test_spanDataToThrift(t *testing.T) { - now := time.Now() - - answerValue := int64(42) - keyValue := "value" - resultValue := true - statusCodeValue := int64(2) - doubleValue := float64(123.456) - boolTrue := true - statusMessage := "error" - - tests := []struct { - name string - data *trace.SpanData - want *gen.Span - }{ - { - name: "no parent", - data: &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - SpanID: trace.SpanID{1, 2, 3, 4, 5, 6, 7, 8}, - }, - Name: "/foo", - StartTime: now, - EndTime: now, - Attributes: map[string]interface{}{ - "double": doubleValue, - "key": keyValue, - }, - Annotations: []trace.Annotation{ - { - Time: now, - Message: statusMessage, - Attributes: map[string]interface{}{ - "answer": answerValue, - }, - }, - { - Time: now, - Message: statusMessage, - Attributes: map[string]interface{}{ - "result": resultValue, - }, - }, - }, - Status: trace.Status{Code: trace.StatusCodeUnknown, Message: "error"}, - }, - want: &gen.Span{ - TraceIdLow: 651345242494996240, - TraceIdHigh: 72623859790382856, - SpanId: 72623859790382856, - OperationName: "/foo", - StartTime: now.UnixNano() / 1000, - Duration: 0, - Tags: []*gen.Tag{ - {Key: "double", VType: gen.TagType_DOUBLE, VDouble: &doubleValue}, - {Key: "key", VType: gen.TagType_STRING, VStr: &keyValue}, - {Key: "error", VType: gen.TagType_BOOL, VBool: &boolTrue}, - {Key: "status.code", 
VType: gen.TagType_LONG, VLong: &statusCodeValue}, - {Key: "status.message", VType: gen.TagType_STRING, VStr: &statusMessage}, - }, - Logs: []*gen.Log{ - {Timestamp: now.UnixNano() / 1000, Fields: []*gen.Tag{ - {Key: "answer", VType: gen.TagType_LONG, VLong: &answerValue}, - {Key: "message", VType: gen.TagType_STRING, VStr: &statusMessage}, - }}, - {Timestamp: now.UnixNano() / 1000, Fields: []*gen.Tag{ - {Key: "result", VType: gen.TagType_BOOL, VBool: &resultValue}, - {Key: "message", VType: gen.TagType_STRING, VStr: &statusMessage}, - }}, - }, - }, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got := spanDataToThrift(tt.data) - sort.Slice(got.Tags, func(i, j int) bool { - return got.Tags[i].Key < got.Tags[j].Key - }) - sort.Slice(tt.want.Tags, func(i, j int) bool { - return tt.want.Tags[i].Key < tt.want.Tags[j].Key - }) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("spanDataToThrift()\nGot:\n%v\nWant;\n%v", got, tt.want) - } - }) - } -} diff --git a/exporter/prometheus/example/main.go b/exporter/prometheus/example/main.go deleted file mode 100644 index 838cf3603..000000000 --- a/exporter/prometheus/example/main.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Command prometheus is an example program that collects data for -// video size. Collected data is exported to Prometheus. 
-package main - -import ( - "context" - "log" - "math/rand" - "net/http" - "time" - - "go.opencensus.io/exporter/prometheus" - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" -) - -// Create measures. The program will record measures for the size of -// processed videos and the number of videos marked as spam. -var ( - videoCount = stats.Int64("example.com/measures/video_count", "number of processed videos", stats.UnitDimensionless) - videoSize = stats.Int64("example.com/measures/video_size", "size of processed video", stats.UnitBytes) -) - -func main() { - ctx := context.Background() - - exporter, err := prometheus.NewExporter(prometheus.Options{}) - if err != nil { - log.Fatal(err) - } - view.RegisterExporter(exporter) - - // Create view to see the number of processed videos cumulatively. - // Create view to see the amount of video processed - // Subscribe will allow view data to be exported. - // Once no longer needed, you can unsubscribe from the view. - if err = view.Register( - &view.View{ - Name: "video_count", - Description: "number of videos processed over time", - Measure: videoCount, - Aggregation: view.Count(), - }, - &view.View{ - Name: "video_size", - Description: "processed video size over time", - Measure: videoSize, - Aggregation: view.Distribution(0, 1<<16, 1<<32), - }, - ); err != nil { - log.Fatalf("Cannot register the view: %v", err) - } - - // Set reporting period to report data at every second. - view.SetReportingPeriod(1 * time.Second) - - // Record some data points... 
- go func() { - for { - stats.Record(ctx, videoCount.M(1), videoSize.M(rand.Int63())) - <-time.After(time.Millisecond * time.Duration(1+rand.Intn(400))) - } - }() - - addr := ":9999" - log.Printf("Serving at %s", addr) - http.Handle("/metrics", exporter) - log.Fatal(http.ListenAndServe(addr, nil)) -} diff --git a/exporter/prometheus/example_test.go b/exporter/prometheus/example_test.go deleted file mode 100644 index 182ad2003..000000000 --- a/exporter/prometheus/example_test.go +++ /dev/null @@ -1,33 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus_test - -import ( - "log" - "net/http" - - "go.opencensus.io/exporter/prometheus" -) - -func Example() { - exporter, err := prometheus.NewExporter(prometheus.Options{}) - if err != nil { - log.Fatal(err) - } - - // Serve the scrape endpoint on port 9999. - http.Handle("/metrics", exporter) - log.Fatal(http.ListenAndServe(":9999", nil)) -} diff --git a/exporter/prometheus/prometheus.go b/exporter/prometheus/prometheus.go deleted file mode 100644 index 9fbffb306..000000000 --- a/exporter/prometheus/prometheus.go +++ /dev/null @@ -1,278 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package prometheus contains a Prometheus exporter that supports exporting -// OpenCensus views as Prometheus metrics. -package prometheus // import "go.opencensus.io/exporter/prometheus" - -import ( - "fmt" - "log" - "net/http" - "sync" - - "context" - "github.com/prometheus/client_golang/prometheus" - "github.com/prometheus/client_golang/prometheus/promhttp" - "go.opencensus.io/internal" - "go.opencensus.io/metric/metricdata" - "go.opencensus.io/metric/metricexport" - "go.opencensus.io/stats/view" -) - -// Exporter exports stats to Prometheus, users need -// to register the exporter as an http.Handler to be -// able to export. -type Exporter struct { - opts Options - g prometheus.Gatherer - c *collector - handler http.Handler -} - -// Options contains options for configuring the exporter. -type Options struct { - Namespace string - Registry *prometheus.Registry - OnError func(err error) - ConstLabels prometheus.Labels // ConstLabels will be set as labels on all views. -} - -// NewExporter returns an exporter that exports stats to Prometheus. 
-func NewExporter(o Options) (*Exporter, error) { - if o.Registry == nil { - o.Registry = prometheus.NewRegistry() - } - collector := newCollector(o, o.Registry) - e := &Exporter{ - opts: o, - g: o.Registry, - c: collector, - handler: promhttp.HandlerFor(o.Registry, promhttp.HandlerOpts{}), - } - collector.ensureRegisteredOnce() - - return e, nil -} - -var _ http.Handler = (*Exporter)(nil) - -// ensureRegisteredOnce invokes reg.Register on the collector itself -// exactly once to ensure that we don't get errors such as -// cannot register the collector: descriptor Desc{fqName: *} -// already exists with the same fully-qualified name and const label values -// which is documented by Prometheus at -// https://github.com/prometheus/client_golang/blob/fcc130e101e76c5d303513d0e28f4b6d732845c7/prometheus/registry.go#L89-L101 -func (c *collector) ensureRegisteredOnce() { - c.registerOnce.Do(func() { - if err := c.reg.Register(c); err != nil { - c.opts.onError(fmt.Errorf("cannot register the collector: %v", err)) - } - }) - -} - -func (o *Options) onError(err error) { - if o.OnError != nil { - o.OnError(err) - } else { - log.Printf("Failed to export to Prometheus: %v", err) - } -} - -// ExportView exports to the Prometheus if view data has one or more rows. -// Each OpenCensus AggregationData will be converted to -// corresponding Prometheus Metric: SumData will be converted -// to Untyped Metric, CountData will be a Counter Metric, -// DistributionData will be a Histogram Metric. -// Deprecated in lieu of metricexport.Reader interface. -func (e *Exporter) ExportView(vd *view.Data) { -} - -// ServeHTTP serves the Prometheus endpoint. -func (e *Exporter) ServeHTTP(w http.ResponseWriter, r *http.Request) { - e.handler.ServeHTTP(w, r) -} - -// collector implements prometheus.Collector -type collector struct { - opts Options - mu sync.Mutex // mu guards all the fields. - - registerOnce sync.Once - - // reg helps collector register views dynamically. 
- reg *prometheus.Registry - - // reader reads metrics from all registered producers. - reader *metricexport.Reader -} - -func (c *collector) Describe(ch chan<- *prometheus.Desc) { - de := &descExporter{c: c, descCh: ch} - c.reader.ReadAndExport(de) -} - -// Collect fetches the statistics from OpenCensus -// and delivers them as Prometheus Metrics. -// Collect is invoked every time a prometheus.Gatherer is run -// for example when the HTTP endpoint is invoked by Prometheus. -func (c *collector) Collect(ch chan<- prometheus.Metric) { - me := &metricExporter{c: c, metricCh: ch} - c.reader.ReadAndExport(me) -} - -func newCollector(opts Options, registrar *prometheus.Registry) *collector { - return &collector{ - reg: registrar, - opts: opts, - reader: metricexport.NewReader()} -} - -func (c *collector) toDesc(metric *metricdata.Metric) *prometheus.Desc { - return prometheus.NewDesc( - metricName(c.opts.Namespace, metric), - metric.Descriptor.Description, - toPromLabels(metric.Descriptor.LabelKeys), - c.opts.ConstLabels) -} - -type metricExporter struct { - c *collector - metricCh chan<- prometheus.Metric -} - -// ExportMetrics exports to the Prometheus. -// Each OpenCensus Metric will be converted to -// corresponding Prometheus Metric: -// TypeCumulativeInt64 and TypeCumulativeFloat64 will be a Counter Metric, -// TypeCumulativeDistribution will be a Histogram Metric. 
-// TypeGaugeFloat64 and TypeGaugeInt64 will be a Gauge Metric -func (me *metricExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - for _, metric := range metrics { - desc := me.c.toDesc(metric) - for _, ts := range metric.TimeSeries { - tvs := toLabelValues(ts.LabelValues) - for _, point := range ts.Points { - metric, err := toPromMetric(desc, metric, point, tvs) - if err != nil { - me.c.opts.onError(err) - } else if metric != nil { - me.metricCh <- metric - } - } - } - } - return nil -} - -type descExporter struct { - c *collector - descCh chan<- *prometheus.Desc -} - -// ExportMetrics exports descriptor to the Prometheus. -// It is invoked when request to scrape descriptors is received. -func (me *descExporter) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { - for _, metric := range metrics { - desc := me.c.toDesc(metric) - me.descCh <- desc - } - return nil -} - -func toPromLabels(mls []metricdata.LabelKey) (labels []string) { - for _, ml := range mls { - labels = append(labels, internal.Sanitize(ml.Key)) - } - return labels -} - -func metricName(namespace string, m *metricdata.Metric) string { - var name string - if namespace != "" { - name = namespace + "_" - } - return name + internal.Sanitize(m.Descriptor.Name) -} - -func toPromMetric( - desc *prometheus.Desc, - metric *metricdata.Metric, - point metricdata.Point, - labelValues []string) (prometheus.Metric, error) { - switch metric.Descriptor.Type { - case metricdata.TypeCumulativeFloat64, metricdata.TypeCumulativeInt64: - pv, err := toPromValue(point) - if err != nil { - return nil, err - } - return prometheus.NewConstMetric(desc, prometheus.CounterValue, pv, labelValues...) - - case metricdata.TypeGaugeFloat64, metricdata.TypeGaugeInt64: - pv, err := toPromValue(point) - if err != nil { - return nil, err - } - return prometheus.NewConstMetric(desc, prometheus.GaugeValue, pv, labelValues...) 
- - case metricdata.TypeCumulativeDistribution: - switch v := point.Value.(type) { - case *metricdata.Distribution: - points := make(map[float64]uint64) - // Histograms are cumulative in Prometheus. - // Get cumulative bucket counts. - cumCount := uint64(0) - for i, b := range v.BucketOptions.Bounds { - cumCount += uint64(v.Buckets[i].Count) - points[b] = cumCount - } - return prometheus.NewConstHistogram(desc, uint64(v.Count), v.Sum, points, labelValues...) - default: - return nil, typeMismatchError(point) - } - case metricdata.TypeSummary: - // TODO: [rghetia] add support for TypeSummary. - return nil, nil - default: - return nil, fmt.Errorf("aggregation %T is not yet supported", metric.Descriptor.Type) - } -} - -func toLabelValues(labelValues []metricdata.LabelValue) (values []string) { - for _, lv := range labelValues { - if lv.Present { - values = append(values, lv.Value) - } else { - values = append(values, "") - } - } - return values -} - -func typeMismatchError(point metricdata.Point) error { - return fmt.Errorf("point type %T does not match metric type", point) - -} - -func toPromValue(point metricdata.Point) (float64, error) { - switch v := point.Value.(type) { - case float64: - return v, nil - case int64: - return float64(v), nil - default: - return 0.0, typeMismatchError(point) - } -} diff --git a/exporter/prometheus/prometheus_test.go b/exporter/prometheus/prometheus_test.go deleted file mode 100644 index 83fc90abb..000000000 --- a/exporter/prometheus/prometheus_test.go +++ /dev/null @@ -1,450 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package prometheus - -import ( - "context" - "io/ioutil" - "net/http" - "net/http/httptest" - "strings" - "testing" - "time" - - "go.opencensus.io/stats" - "go.opencensus.io/stats/view" - "go.opencensus.io/tag" - - "github.com/prometheus/client_golang/prometheus" -) - -type mSlice []*stats.Int64Measure - -func (measures *mSlice) createAndAppend(name, desc, unit string) { - m := stats.Int64(name, desc, unit) - *measures = append(*measures, m) -} - -type vCreator []*view.View - -func (vc *vCreator) createAndAppend(name, description string, keys []tag.Key, measure stats.Measure, agg *view.Aggregation) { - v := &view.View{ - Name: name, - Description: description, - TagKeys: keys, - Measure: measure, - Aggregation: agg, - } - *vc = append(*vc, v) -} - -func TestMetricsEndpointOutput(t *testing.T) { - exporter, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("failed to create prometheus exporter: %v", err) - } - - names := []string{"foo", "bar", "baz"} - - var measures mSlice - for _, name := range names { - measures.createAndAppend("tests/"+name, name, "") - } - - var vc vCreator - for _, m := range measures { - vc.createAndAppend(m.Name(), m.Description(), nil, m, view.Count()) - } - - if err := view.Register(vc...); err != nil { - t.Fatalf("failed to create views: %v", err) - } - defer view.Unregister(vc...) 
- - view.SetReportingPeriod(time.Millisecond) - - for _, m := range measures { - stats.Record(context.Background(), m.M(1)) - } - - srv := httptest.NewServer(exporter) - defer srv.Close() - - var i int - var output string - for { - time.Sleep(10 * time.Millisecond) - if i == 1000 { - t.Fatal("no output at /metrics (10s wait)") - } - i++ - - resp, err := http.Get(srv.URL) - if err != nil { - t.Fatalf("failed to get /metrics: %v", err) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read body: %v", err) - } - resp.Body.Close() - - output = string(body) - if output != "" { - break - } - } - - if strings.Contains(output, "collected before with the same name and label values") { - t.Fatal("metric name and labels being duplicated but must be unique") - } - - if strings.Contains(output, "error(s) occurred") { - t.Fatal("error reported by prometheus registry") - } - - for _, name := range names { - if !strings.Contains(output, "tests_"+name+" 1") { - t.Fatalf("measurement missing in output: %v", name) - } - } -} - -func TestCumulativenessFromHistograms(t *testing.T) { - exporter, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("failed to create prometheus exporter: %v", err) - } - - m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless) - v := &view.View{ - Name: "cash/register", - Description: "this is a test", - Measure: m, - - // Intentionally used repeated elements in the ascending distribution. - // to ensure duplicate distribution items are handles. 
- Aggregation: view.Distribution(1, 5, 5, 5, 5, 10, 20, 50, 100, 250), - } - - if err := view.Register(v); err != nil { - t.Fatalf("Register error: %v", err) - } - defer view.Unregister(v) - - // Give the reporter ample time to process registration - //<-time.After(10 * reportPeriod) - - values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12} - // We want the results that look like this: - // 1: [0.25] | 1 + prev(i) = 1 + 0 = 1 - // 5: [1.45] | 1 + prev(i) = 1 + 1 = 2 - // 10: [7.69] | 1 + prev(i) = 1 + 2 = 3 - // 20: [12] | 1 + prev(i) = 1 + 3 = 4 - // 50: [] | 0 + prev(i) = 0 + 4 = 4 - // 100: [] | 0 + prev(i) = 0 + 4 = 4 - // 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7 - wantLines := []string{ - `cash_register_bucket{le="1"} 1`, - `cash_register_bucket{le="5"} 2`, - `cash_register_bucket{le="10"} 3`, - `cash_register_bucket{le="20"} 4`, - `cash_register_bucket{le="50"} 4`, - `cash_register_bucket{le="100"} 4`, - `cash_register_bucket{le="250"} 7`, - `cash_register_bucket{le="+Inf"} 7`, - `cash_register_sum 654.0799999999999`, // Summation of the input values - `cash_register_count 7`, - } - - ctx := context.Background() - ms := make([]stats.Measurement, 0, len(values)) - for _, value := range values { - mx := m.M(value) - ms = append(ms, mx) - } - stats.Record(ctx, ms...) 
- - // Give the recorder ample time to process recording - //<-time.After(10 * reportPeriod) - - cst := httptest.NewServer(exporter) - defer cst.Close() - res, err := http.Get(cst.URL) - if err != nil { - t.Fatalf("http.Get error: %v", err) - } - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("Read body error: %v", err) - } - str := strings.Trim(string(blob), "\n") - lines := strings.Split(str, "\n") - nonComments := make([]string, 0, len(lines)) - for _, line := range lines { - if !strings.Contains(line, "#") { - nonComments = append(nonComments, line) - } - } - - got := strings.Join(nonComments, "\n") - want := strings.Join(wantLines, "\n") - if got != want { - t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want) - } -} - -func TestHistogramUnorderedBucketBounds(t *testing.T) { - exporter, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("failed to create prometheus exporter: %v", err) - } - - m := stats.Float64("tests/bills", "payments by denomination", stats.UnitDimensionless) - v := &view.View{ - Name: "cash/register", - Description: "this is a test", - Measure: m, - - // Intentionally used unordered and duplicated elements in the distribution - // to ensure unordered bucket bounds are handled. 
- Aggregation: view.Distribution(10, 5, 1, 1, 50, 5, 20, 100, 250), - } - - if err := view.Register(v); err != nil { - t.Fatalf("Register error: %v", err) - } - defer view.Unregister(v) - - // Give the reporter ample time to process registration - //<-time.After(10 * reportPeriod) - - values := []float64{0.25, 245.67, 12, 1.45, 199.9, 7.69, 187.12} - // We want the results that look like this: - // 1: [0.25] | 1 + prev(i) = 1 + 0 = 1 - // 5: [1.45] | 1 + prev(i) = 1 + 1 = 2 - // 10: [7.69] | 1 + prev(i) = 1 + 2 = 3 - // 20: [12] | 1 + prev(i) = 1 + 3 = 4 - // 50: [] | 0 + prev(i) = 0 + 4 = 4 - // 100: [] | 0 + prev(i) = 0 + 4 = 4 - // 250: [187.12, 199.9, 245.67] | 3 + prev(i) = 3 + 4 = 7 - wantLines := []string{ - `cash_register_bucket{le="1"} 1`, - `cash_register_bucket{le="5"} 2`, - `cash_register_bucket{le="10"} 3`, - `cash_register_bucket{le="20"} 4`, - `cash_register_bucket{le="50"} 4`, - `cash_register_bucket{le="100"} 4`, - `cash_register_bucket{le="250"} 7`, - `cash_register_bucket{le="+Inf"} 7`, - `cash_register_sum 654.0799999999999`, // Summation of the input values - `cash_register_count 7`, - } - - ctx := context.Background() - ms := make([]stats.Measurement, 0, len(values)) - for _, value := range values { - mx := m.M(value) - ms = append(ms, mx) - } - stats.Record(ctx, ms...) 
- - // Give the recorder ample time to process recording - //<-time.After(10 * reportPeriod) - - cst := httptest.NewServer(exporter) - defer cst.Close() - res, err := http.Get(cst.URL) - if err != nil { - t.Fatalf("http.Get error: %v", err) - } - blob, err := ioutil.ReadAll(res.Body) - if err != nil { - t.Fatalf("Read body error: %v", err) - } - str := strings.Trim(string(blob), "\n") - lines := strings.Split(str, "\n") - nonComments := make([]string, 0, len(lines)) - for _, line := range lines { - if !strings.Contains(line, "#") { - nonComments = append(nonComments, line) - } - } - - got := strings.Join(nonComments, "\n") - want := strings.Join(wantLines, "\n") - if got != want { - t.Fatalf("\ngot:\n%s\n\nwant:\n%s\n", got, want) - } -} - -func TestConstLabelsIncluded(t *testing.T) { - constLabels := prometheus.Labels{ - "service": "spanner", - } - measureLabel, _ := tag.NewKey("method") - - exporter, err := NewExporter(Options{ - ConstLabels: constLabels, - }) - if err != nil { - t.Fatalf("failed to create prometheus exporter: %v", err) - } - - names := []string{"foo", "bar", "baz"} - - var measures mSlice - for _, name := range names { - measures.createAndAppend("tests/"+name, name, "") - } - - var vc vCreator - for _, m := range measures { - vc.createAndAppend(m.Name(), m.Description(), []tag.Key{measureLabel}, m, view.Count()) - } - - if err := view.Register(vc...); err != nil { - t.Fatalf("failed to create views: %v", err) - } - defer view.Unregister(vc...) 
- - view.SetReportingPeriod(time.Millisecond) - - ctx, _ := tag.New(context.Background(), tag.Upsert(measureLabel, "issue961")) - for _, m := range measures { - stats.Record(ctx, m.M(1)) - } - - srv := httptest.NewServer(exporter) - defer srv.Close() - - var i int - var output string - for { - time.Sleep(10 * time.Millisecond) - if i == 1000 { - t.Fatal("no output at /metrics (10s wait)") - } - i++ - - resp, err := http.Get(srv.URL) - if err != nil { - t.Fatalf("failed to get /metrics: %v", err) - } - - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read body: %v", err) - } - resp.Body.Close() - - output = string(body) - if output != "" { - break - } - } - - if strings.Contains(output, "collected before with the same name and label values") { - t.Fatal("metric name and labels being duplicated but must be unique") - } - - if strings.Contains(output, "error(s) occurred") { - t.Fatal("error reported by prometheus registry") - } - - want := `# HELP tests_bar bar -# TYPE tests_bar counter -tests_bar{method="issue961",service="spanner"} 1 -# HELP tests_baz baz -# TYPE tests_baz counter -tests_baz{method="issue961",service="spanner"} 1 -# HELP tests_foo foo -# TYPE tests_foo counter -tests_foo{method="issue961",service="spanner"} 1 -` - if output != want { - t.Fatal("output differed from expected") - } -} - -func TestViewMeasureWithoutTag(t *testing.T) { - exporter, err := NewExporter(Options{}) - if err != nil { - t.Fatalf("failed to create prometheus exporter: %v", err) - } - m := stats.Int64("tests/foo", "foo", stats.UnitDimensionless) - k1, _ := tag.NewKey("key/1") - k2, _ := tag.NewKey("key/2") - k3, _ := tag.NewKey("key/3") - k4, _ := tag.NewKey("key/4") - k5, _ := tag.NewKey("key/5") - randomKey, _ := tag.NewKey("issue659") - v := &view.View{ - Name: m.Name(), - Description: m.Description(), - TagKeys: []tag.Key{k2, k5, k3, k1, k4}, // Ensure view has a tag - Measure: m, - Aggregation: view.Count(), - } - if err := view.Register(v); 
err != nil { - t.Fatalf("failed to create views: %v", err) - } - defer view.Unregister(v) - view.SetReportingPeriod(time.Millisecond) - // Make a measure without some tags in the view. - ctx1, _ := tag.New(context.Background(), tag.Upsert(k4, "issue659"), tag.Upsert(randomKey, "value"), tag.Upsert(k2, "issue659")) - stats.Record(ctx1, m.M(1)) - ctx2, _ := tag.New(context.Background(), tag.Upsert(k5, "issue659"), tag.Upsert(k3, "issue659"), tag.Upsert(k1, "issue659")) - stats.Record(ctx2, m.M(2)) - srv := httptest.NewServer(exporter) - defer srv.Close() - var i int - var output string - for { - time.Sleep(10 * time.Millisecond) - if i == 1000 { - t.Fatal("no output at /metrics (10s wait)") - } - i++ - resp, err := http.Get(srv.URL) - if err != nil { - t.Fatalf("failed to get /metrics: %v", err) - } - body, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("failed to read body: %v", err) - } - resp.Body.Close() - output = string(body) - if output != "" { - break - } - } - if strings.Contains(output, "collected before with the same name and label values") { - t.Fatal("metric name and labels being duplicated but must be unique") - } - if strings.Contains(output, "error(s) occurred") { - t.Fatal("error reported by prometheus registry") - } - want := `# HELP tests_foo foo -# TYPE tests_foo counter -tests_foo{key_1="",key_2="issue659",key_3="",key_4="issue659",key_5=""} 1 -tests_foo{key_1="issue659",key_2="",key_3="issue659",key_4="",key_5="issue659"} 1 -` - if output != want { - t.Fatalf("output differed from expected output: %s want: %s", output, want) - } -} diff --git a/exporter/zipkin/example/main.go b/exporter/zipkin/example/main.go deleted file mode 100644 index 9466c9809..000000000 --- a/exporter/zipkin/example/main.go +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. 
-// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -package main - -import ( - "context" - "log" - "time" - - openzipkin "github.com/openzipkin/zipkin-go" - "github.com/openzipkin/zipkin-go/reporter/http" - "go.opencensus.io/exporter/zipkin" - "go.opencensus.io/trace" -) - -func main() { - // The localEndpoint stores the name and address of the local service - localEndpoint, err := openzipkin.NewEndpoint("example-server", "192.168.1.5:5454") - if err != nil { - log.Println(err) - } - - // The Zipkin reporter takes collected spans from the app and reports them to the backend - // http://localhost:9411/api/v2/spans is the default for the Zipkin Span v2 - reporter := http.NewReporter("http://localhost:9411/api/v2/spans") - defer reporter.Close() - - // The OpenCensus exporter wraps the Zipkin reporter - exporter := zipkin.NewExporter(reporter, localEndpoint) - trace.RegisterExporter(exporter) - - // For example purposes, sample every trace. In a production application, you should - // configure this to a trace.ProbabilitySampler set at the desired - // probability. 
- trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - - ctx := context.Background() - foo(ctx) -} - -func foo(ctx context.Context) { - // Name the current span "/foo" - ctx, span := trace.StartSpan(ctx, "/foo") - defer span.End() - - // Foo calls bar and baz - bar(ctx) - baz(ctx) -} - -func bar(ctx context.Context) { - ctx, span := trace.StartSpan(ctx, "/bar") - defer span.End() - - // Do bar - time.Sleep(2 * time.Millisecond) -} - -func baz(ctx context.Context) { - ctx, span := trace.StartSpan(ctx, "/baz") - defer span.End() - - // Do baz - time.Sleep(4 * time.Millisecond) -} diff --git a/exporter/zipkin/example_test.go b/exporter/zipkin/example_test.go deleted file mode 100644 index 7ef147014..000000000 --- a/exporter/zipkin/example_test.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin_test - -import ( - "log" - - openzipkin "github.com/openzipkin/zipkin-go" - "github.com/openzipkin/zipkin-go/reporter/http" - "go.opencensus.io/exporter/zipkin" - "go.opencensus.io/trace" -) - -func Example() { - // import ( - // openzipkin "github.com/openzipkin/zipkin-go" - // "github.com/openzipkin/zipkin-go/reporter/http" - // "go.opencensus.io/exporter/trace/zipkin" - // ) - - localEndpoint, err := openzipkin.NewEndpoint("server", "192.168.1.5:5454") - if err != nil { - log.Print(err) - } - reporter := http.NewReporter("http://localhost:9411/api/v2/spans") - exporter := zipkin.NewExporter(reporter, localEndpoint) - trace.RegisterExporter(exporter) -} diff --git a/exporter/zipkin/zipkin.go b/exporter/zipkin/zipkin.go deleted file mode 100644 index 69de70571..000000000 --- a/exporter/zipkin/zipkin.go +++ /dev/null @@ -1,196 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package zipkin contains an trace exporter for Zipkin. -package zipkin // import "go.opencensus.io/exporter/zipkin" - -import ( - "encoding/binary" - "strconv" - - "github.com/openzipkin/zipkin-go/model" - "github.com/openzipkin/zipkin-go/reporter" - "go.opencensus.io/trace" -) - -// Exporter is an implementation of trace.Exporter that uploads spans to a -// Zipkin server. 
-type Exporter struct { - reporter reporter.Reporter - localEndpoint *model.Endpoint -} - -// NewExporter returns an implementation of trace.Exporter that uploads spans -// to a Zipkin server. -// -// reporter is a Zipkin Reporter which will be used to send the spans. These -// can be created with the openzipkin library, using one of the packages under -// github.com/openzipkin/zipkin-go/reporter. -// -// localEndpoint sets the local endpoint of exported spans. It can be -// constructed with github.com/openzipkin/zipkin-go.NewEndpoint, e.g.: -// localEndpoint, err := NewEndpoint("my server", listener.Addr().String()) -// localEndpoint can be nil. -func NewExporter(reporter reporter.Reporter, localEndpoint *model.Endpoint) *Exporter { - return &Exporter{ - reporter: reporter, - localEndpoint: localEndpoint, - } -} - -// ExportSpan exports a span to a Zipkin server. -func (e *Exporter) ExportSpan(s *trace.SpanData) { - e.reporter.Send(zipkinSpan(s, e.localEndpoint)) -} - -const ( - statusCodeTagKey = "error" - statusDescriptionTagKey = "opencensus.status_description" -) - -var ( - sampledTrue = true - canonicalCodes = [...]string{ - "OK", - "CANCELLED", - "UNKNOWN", - "INVALID_ARGUMENT", - "DEADLINE_EXCEEDED", - "NOT_FOUND", - "ALREADY_EXISTS", - "PERMISSION_DENIED", - "RESOURCE_EXHAUSTED", - "FAILED_PRECONDITION", - "ABORTED", - "OUT_OF_RANGE", - "UNIMPLEMENTED", - "INTERNAL", - "UNAVAILABLE", - "DATA_LOSS", - "UNAUTHENTICATED", - } -) - -func canonicalCodeString(code int32) string { - if code < 0 || int(code) >= len(canonicalCodes) { - return "error code " + strconv.FormatInt(int64(code), 10) - } - return canonicalCodes[code] -} - -func convertTraceID(t trace.TraceID) model.TraceID { - return model.TraceID{ - High: binary.BigEndian.Uint64(t[:8]), - Low: binary.BigEndian.Uint64(t[8:]), - } -} - -func convertSpanID(s trace.SpanID) model.ID { - return model.ID(binary.BigEndian.Uint64(s[:])) -} - -func spanKind(s *trace.SpanData) model.Kind { - switch s.SpanKind { - 
case trace.SpanKindClient: - return model.Client - case trace.SpanKindServer: - return model.Server - } - return model.Undetermined -} - -func zipkinSpan(s *trace.SpanData, localEndpoint *model.Endpoint) model.SpanModel { - sc := s.SpanContext - z := model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: convertTraceID(sc.TraceID), - ID: convertSpanID(sc.SpanID), - Sampled: &sampledTrue, - }, - Kind: spanKind(s), - Name: s.Name, - Timestamp: s.StartTime, - Shared: false, - LocalEndpoint: localEndpoint, - } - - if s.ParentSpanID != (trace.SpanID{}) { - id := convertSpanID(s.ParentSpanID) - z.ParentID = &id - } - - if s, e := s.StartTime, s.EndTime; !s.IsZero() && !e.IsZero() { - z.Duration = e.Sub(s) - } - - // construct Tags from s.Attributes and s.Status. - if len(s.Attributes) != 0 { - m := make(map[string]string, len(s.Attributes)+2) - for key, value := range s.Attributes { - switch v := value.(type) { - case string: - m[key] = v - case bool: - if v { - m[key] = "true" - } else { - m[key] = "false" - } - case int64: - m[key] = strconv.FormatInt(v, 10) - case float64: - m[key] = strconv.FormatFloat(v, 'f', -1, 64) - } - } - z.Tags = m - } - if s.Status.Code != 0 || s.Status.Message != "" { - if z.Tags == nil { - z.Tags = make(map[string]string, 2) - } - if s.Status.Code != 0 { - z.Tags[statusCodeTagKey] = canonicalCodeString(s.Status.Code) - } - if s.Status.Message != "" { - z.Tags[statusDescriptionTagKey] = s.Status.Message - } - } - - // construct Annotations from s.Annotations and s.MessageEvents. 
- if len(s.Annotations) != 0 || len(s.MessageEvents) != 0 { - z.Annotations = make([]model.Annotation, 0, len(s.Annotations)+len(s.MessageEvents)) - for _, a := range s.Annotations { - z.Annotations = append(z.Annotations, model.Annotation{ - Timestamp: a.Time, - Value: a.Message, - }) - } - for _, m := range s.MessageEvents { - a := model.Annotation{ - Timestamp: m.Time, - } - switch m.EventType { - case trace.MessageEventTypeSent: - a.Value = "SENT" - case trace.MessageEventTypeRecv: - a.Value = "RECV" - default: - a.Value = "" - } - z.Annotations = append(z.Annotations, a) - } - } - - return z -} diff --git a/exporter/zipkin/zipkin_test.go b/exporter/zipkin/zipkin_test.go deleted file mode 100644 index c2b5420de..000000000 --- a/exporter/zipkin/zipkin_test.go +++ /dev/null @@ -1,258 +0,0 @@ -// Copyright 2017, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. 
- -package zipkin - -import ( - "encoding/json" - "io/ioutil" - "net/http" - "reflect" - "strings" - "testing" - "time" - - "github.com/openzipkin/zipkin-go/model" - httpreporter "github.com/openzipkin/zipkin-go/reporter/http" - "go.opencensus.io/trace" -) - -type roundTripper func(*http.Request) (*http.Response, error) - -func (r roundTripper) RoundTrip(req *http.Request) (*http.Response, error) { - return r(req) -} - -func TestExport(t *testing.T) { - // Since Zipkin reports in microsecond resolution let's round our Timestamp, - // so when deserializing Zipkin data in this test we can properly compare. - now := time.Now().Round(time.Microsecond) - tests := []struct { - span *trace.SpanData - want model.SpanModel - }{ - { - span: &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - SpanID: trace.SpanID{17, 18, 19, 20, 21, 22, 23, 24}, - TraceOptions: 1, - }, - Name: "name", - SpanKind: trace.SpanKindClient, - StartTime: now, - EndTime: now.Add(24 * time.Hour), - Attributes: map[string]interface{}{ - "stringkey": "value", - "intkey": int64(42), - "boolkey1": true, - "boolkey2": false, - "doublekey": float64(123.456), - }, - MessageEvents: []trace.MessageEvent{ - { - Time: now, - EventType: trace.MessageEventTypeSent, - MessageID: 12, - UncompressedByteSize: 99, - CompressedByteSize: 98, - }, - }, - Annotations: []trace.Annotation{ - { - Time: now, - Message: "Annotation", - Attributes: map[string]interface{}{ - "stringkey": "value", - "intkey": int64(42), - "boolkey1": true, - "boolkey2": false, - "doublekey": float64(123.456), - }, - }, - }, - Status: trace.Status{ - Code: 3, - Message: "error", - }, - }, - want: model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: model.TraceID{ - High: 0x0102030405060708, - Low: 0x090a0b0c0d0e0f10, - }, - ID: 0x1112131415161718, - Sampled: &sampledTrue, - }, - Name: "name", - Kind: model.Client, - Timestamp: now, - Duration: 24 * time.Hour, 
- Shared: false, - Annotations: []model.Annotation{ - { - Timestamp: now, - Value: "Annotation", - }, - { - Timestamp: now, - Value: "SENT", - }, - }, - Tags: map[string]string{ - "stringkey": "value", - "intkey": "42", - "boolkey1": "true", - "boolkey2": "false", - "doublekey": "123.456", - "error": "INVALID_ARGUMENT", - "opencensus.status_description": "error", - }, - }, - }, - { - span: &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - SpanID: trace.SpanID{17, 18, 19, 20, 21, 22, 23, 24}, - TraceOptions: 1, - }, - Name: "name", - StartTime: now, - EndTime: now.Add(24 * time.Hour), - }, - want: model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: model.TraceID{ - High: 0x0102030405060708, - Low: 0x090a0b0c0d0e0f10, - }, - ID: 0x1112131415161718, - Sampled: &sampledTrue, - }, - Name: "name", - Timestamp: now, - Duration: 24 * time.Hour, - Shared: false, - }, - }, - { - span: &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - SpanID: trace.SpanID{17, 18, 19, 20, 21, 22, 23, 24}, - TraceOptions: 1, - }, - Name: "name", - StartTime: now, - EndTime: now.Add(24 * time.Hour), - Status: trace.Status{ - Code: 0, - Message: "there is no cause for alarm", - }, - }, - want: model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: model.TraceID{ - High: 0x0102030405060708, - Low: 0x090a0b0c0d0e0f10, - }, - ID: 0x1112131415161718, - Sampled: &sampledTrue, - }, - Name: "name", - Timestamp: now, - Duration: 24 * time.Hour, - Shared: false, - Tags: map[string]string{ - "opencensus.status_description": "there is no cause for alarm", - }, - }, - }, - { - span: &trace.SpanData{ - SpanContext: trace.SpanContext{ - TraceID: trace.TraceID{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, - SpanID: trace.SpanID{17, 18, 19, 20, 21, 22, 23, 24}, - TraceOptions: 1, - }, - Name: "name", - StartTime: now, - 
EndTime: now.Add(24 * time.Hour), - Status: trace.Status{ - Code: 1234, - }, - }, - want: model.SpanModel{ - SpanContext: model.SpanContext{ - TraceID: model.TraceID{ - High: 0x0102030405060708, - Low: 0x090a0b0c0d0e0f10, - }, - ID: 0x1112131415161718, - Sampled: &sampledTrue, - }, - Name: "name", - Timestamp: now, - Duration: 24 * time.Hour, - Shared: false, - Tags: map[string]string{ - "error": "error code 1234", - }, - }, - }, - } - for _, tt := range tests { - got := zipkinSpan(tt.span, nil) - if len(got.Annotations) != len(tt.want.Annotations) { - t.Fatalf("zipkinSpan: got %d annotations in span, want %d", len(got.Annotations), len(tt.want.Annotations)) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("zipkinSpan:\n\tgot %#v\n\twant %#v", got, tt.want) - } - } - for _, tt := range tests { - ch := make(chan []byte) - client := http.Client{ - Transport: roundTripper(func(req *http.Request) (*http.Response, error) { - body, _ := ioutil.ReadAll(req.Body) - ch <- body - return &http.Response{StatusCode: 200, Body: ioutil.NopCloser(strings.NewReader(""))}, nil - }), - } - reporter := httpreporter.NewReporter("foo", httpreporter.Client(&client), httpreporter.BatchInterval(time.Millisecond)) - exporter := NewExporter(reporter, nil) - exporter.ExportSpan(tt.span) - var data []byte - select { - case data = <-ch: - case <-time.After(2 * time.Second): - t.Fatalf("span was not exported") - } - var spans []model.SpanModel - json.Unmarshal(data, &spans) - if len(spans) != 1 { - t.Fatalf("Export: got %d spans, want 1", len(spans)) - } - got := spans[0] - got.SpanContext.Sampled = &sampledTrue // Sampled is not set when the span is reported. 
- if len(got.Annotations) != len(tt.want.Annotations) { - t.Fatalf("Export: got %d annotations in span, want %d", len(got.Annotations), len(tt.want.Annotations)) - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("Export:\n\tgot %#v\n\twant %#v", got, tt.want) - } - } -} diff --git a/go.mod b/go.mod index cc9febc02..621601a44 100644 --- a/go.mod +++ b/go.mod @@ -1,13 +1,11 @@ module go.opencensus.io require ( - github.com/apache/thrift v0.12.0 github.com/golang/protobuf v1.2.0 github.com/google/go-cmp v0.2.0 github.com/hashicorp/golang-lru v0.5.0 - github.com/openzipkin/zipkin-go v0.1.6 - github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 golang.org/x/net v0.0.0-20190311183353-d8887717615a google.golang.org/api v0.3.1 + google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect google.golang.org/grpc v1.19.0 ) diff --git a/go.sum b/go.sum index 954fadf79..e3de6e6c6 100644 --- a/go.sum +++ b/go.sum @@ -1,14 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= -github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= -github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= -github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= -github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/apache/thrift v0.12.0 h1:pODnxUFNcjP9UTLZGTdeh+j16A8lJbRvD3rOtrk/7bs= -github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973 h1:xJ4a3vCFaGF/jqvzLMYoU8P317H5OQ+Via4RmuPwCS0= -github.com/beorn7/perks 
v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= @@ -25,7 +17,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= @@ -68,11 +59,12 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint 
v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= -golang.org/x/lint v0.0.0-20190301231843-5614ed5bae6f/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -89,8 +81,6 @@ golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6Zh golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4 h1:YUO/7uOKsKeq9UokNS62b8FYywz3ker1l1vDZRCRefw= -golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -108,6 +98,8 @@ golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3 golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= +golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools 
v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= @@ -124,4 +116,6 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= +google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 6161d2e35a4cdc5aab068cd3be67a33172ce6510 Mon Sep 17 00:00:00 2001 From: rghetia Date: Tue, 23 Apr 2019 15:06:44 -0700 Subject: [PATCH 174/212] Add support for Tag metadata (#1125) * Add support for tag metadata. * update ocgrpc and ochttp to use new insert/update/upsert api. : * updated existing method optional metadata option. * make TTLNoPropagation and TTLUnlimitedPropagation a function. * changed ttl api. * add test case for multiple TTL metadata. * add test case and note for update/insert api. 
--- tag/map.go | 66 ++++++++++---- tag/map_codec.go | 15 ++-- tag/map_codec_test.go | 2 +- tag/map_test.go | 194 +++++++++++++++++++++++++++++++++++++++--- tag/metadata.go | 52 +++++++++++ tag/profile_19.go | 2 +- 6 files changed, 293 insertions(+), 38 deletions(-) create mode 100644 tag/metadata.go diff --git a/tag/map.go b/tag/map.go index 5b72ba6ad..0272ef85a 100644 --- a/tag/map.go +++ b/tag/map.go @@ -28,10 +28,15 @@ type Tag struct { Value string } +type tagContent struct { + value string + m metadatas +} + // Map is a map of tags. Use New to create a context containing // a new Map. type Map struct { - m map[Key]string + m map[Key]tagContent } // Value returns the value for the key if a value for the key exists. @@ -40,7 +45,7 @@ func (m *Map) Value(k Key) (string, bool) { return "", false } v, ok := m.m[k] - return v, ok + return v.value, ok } func (m *Map) String() string { @@ -62,21 +67,21 @@ func (m *Map) String() string { return buffer.String() } -func (m *Map) insert(k Key, v string) { +func (m *Map) insert(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { return } - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } -func (m *Map) update(k Key, v string) { +func (m *Map) update(k Key, v string, md metadatas) { if _, ok := m.m[k]; ok { - m.m[k] = v + m.m[k] = tagContent{value: v, m: md} } } -func (m *Map) upsert(k Key, v string) { - m.m[k] = v +func (m *Map) upsert(k Key, v string, md metadatas) { + m.m[k] = tagContent{value: v, m: md} } func (m *Map) delete(k Key) { @@ -84,7 +89,7 @@ func (m *Map) delete(k Key) { } func newMap() *Map { - return &Map{m: make(map[Key]string)} + return &Map{m: make(map[Key]tagContent)} } // Mutator modifies a tag map. @@ -95,13 +100,17 @@ type Mutator interface { // Insert returns a mutator that inserts a // value associated with k. If k already exists in the tag map, // mutator doesn't update the value. -func Insert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. 
+// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Insert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.insert(k, v) + m.insert(k, v, createMetadatas(mds...)) return m, nil }, } @@ -110,13 +119,17 @@ func Insert(k Key, v string) Mutator { // Update returns a mutator that updates the // value of the tag associated with k with v. If k doesn't // exists in the tag map, the mutator doesn't insert the value. -func Update(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. +func Update(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.update(k, v) + m.update(k, v, createMetadatas(mds...)) return m, nil }, } @@ -126,18 +139,37 @@ func Update(k Key, v string) Mutator { // value of the tag associated with k with v. It inserts the // value if k doesn't exist already. It mutates the value // if k already exists. -func Upsert(k Key, v string) Mutator { +// Metadata applies metadata to the tag. It is optional. +// Metadatas are applied in the order in which it is provided. +// If more than one metadata updates the same attribute then +// the update from the last metadata prevails. 
+func Upsert(k Key, v string, mds ...Metadata) Mutator { return &mutator{ fn: func(m *Map) (*Map, error) { if !checkValue(v) { return nil, errInvalidValue } - m.upsert(k, v) + m.upsert(k, v, createMetadatas(mds...)) return m, nil }, } } +func createMetadatas(mds ...Metadata) metadatas { + var metas metadatas + if len(mds) > 0 { + for _, md := range mds { + if md != nil { + md(&metas) + } + } + } else { + WithTTL(TTLUnlimitedPropagation)(&metas) + } + return metas + +} + // Delete returns a mutator that deletes // the value associated with k. func Delete(k Key) Mutator { @@ -160,10 +192,10 @@ func New(ctx context.Context, mutator ...Mutator) (context.Context, error) { if !checkKeyName(k.Name()) { return ctx, fmt.Errorf("key:%q: %v", k, errInvalidKeyName) } - if !checkValue(v) { + if !checkValue(v.value) { return ctx, fmt.Errorf("key:%q value:%q: %v", k.Name(), v, errInvalidValue) } - m.insert(k, v) + m.insert(k, v.value, v.m) } } var err error diff --git a/tag/map_codec.go b/tag/map_codec.go index c14c7f6db..f8b582761 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -170,9 +170,11 @@ func Encode(m *Map) []byte { } eg.writeByte(byte(tagsVersionID)) for k, v := range m.m { - eg.writeByte(byte(keyTypeString)) - eg.writeStringWithVarintLen(k.name) - eg.writeBytesWithVarintLen([]byte(v)) + if v.m.ttl.ttl == valueTTLUnlimitedPropagation { + eg.writeByte(byte(keyTypeString)) + eg.writeStringWithVarintLen(k.name) + eg.writeBytesWithVarintLen([]byte(v.value)) + } } return eg.bytes() } @@ -190,7 +192,7 @@ func Decode(bytes []byte) (*Map, error) { // DecodeEach decodes the given serialized tag map, calling handler for each // tag key and value decoded. 
-func DecodeEach(bytes []byte, fn func(key Key, val string)) error { +func DecodeEach(bytes []byte, fn func(key Key, val string, md metadatas)) error { eg := &encoderGRPC{ buf: bytes, } @@ -228,7 +230,10 @@ func DecodeEach(bytes []byte, fn func(key Key, val string)) error { if !checkValue(val) { return errInvalidValue } - fn(key, val) + fn(key, val, createMetadatas(WithTTL(TTLUnlimitedPropagation))) + if err != nil { + return err + } } return nil } diff --git a/tag/map_codec_test.go b/tag/map_codec_test.go index a5605e690..344ab0e87 100644 --- a/tag/map_codec_test.go +++ b/tag/map_codec_test.go @@ -91,7 +91,7 @@ func TestEncodeDecode(t *testing.T) { got := make([]keyValue, 0) for k, v := range decoded.m { - got = append(got, keyValue{k, string(v)}) + got = append(got, keyValue{k, string(v.value)}) } want := tc.pairs diff --git a/tag/map_test.go b/tag/map_test.go index 855b747a4..04471a88e 100644 --- a/tag/map_test.go +++ b/tag/map_test.go @@ -23,6 +23,11 @@ import ( "testing" ) +var ( + ttlUnlimitedPropMd = createMetadatas(WithTTL(TTLUnlimitedPropagation)) + ttlNoPropMd = createMetadatas(WithTTL(TTLNoPropagation)) +) + func TestContext(t *testing.T) { k1, _ := NewKey("k1") k2, _ := NewKey("k2") @@ -34,8 +39,8 @@ func TestContext(t *testing.T) { ) got := FromContext(ctx) want := newMap() - want.insert(k1, "v1") - want.insert(k2, "v2") + want.insert(k1, "v1", ttlUnlimitedPropMd) + want.insert(k2, "v2", ttlUnlimitedPropMd) if !reflect.DeepEqual(got, want) { t.Errorf("Map = %#v; want %#v", got, want) @@ -52,8 +57,8 @@ func TestDo(t *testing.T) { ) got := FromContext(ctx) want := newMap() - want.insert(k1, "v1") - want.insert(k2, "v2") + want.insert(k1, "v1", ttlUnlimitedPropMd) + want.insert(k2, "v2", ttlUnlimitedPropMd) Do(ctx, func(ctx context.Context) { got = FromContext(ctx) }) @@ -168,23 +173,175 @@ func TestNewMap(t *testing.T) { } } +func TestNewMapWithMetadata(t *testing.T) { + k3, _ := NewKey("k3") + k4, _ := NewKey("k4") + k5, _ := NewKey("k5") + + tests := 
[]struct { + name string + initial *Map + mods []Mutator + want *Map + }{ + { + name: "from empty; insert", + initial: nil, + mods: []Mutator{ + Insert(k5, "5", WithTTL(TTLNoPropagation)), + Insert(k4, "4"), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}, + tagContent{"4", ttlUnlimitedPropMd}), + }, + { + name: "from existing; insert existing", + initial: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), + mods: []Mutator{ + Insert(k5, "5", WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), + }, + { + name: "from existing; update non-existing", + initial: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), + mods: []Mutator{ + Update(k4, "4", WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata(tagContent{"5", ttlNoPropMd}), + }, + { + name: "from existing; update existing", + initial: makeTestTagMapWithMetadata( + tagContent{"5", ttlUnlimitedPropMd}, + tagContent{"4", ttlNoPropMd}), + mods: []Mutator{ + Update(k5, "5"), + Update(k4, "4", WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlUnlimitedPropMd}, + tagContent{"4", ttlUnlimitedPropMd}), + }, + { + name: "from existing; upsert existing", + initial: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}, + tagContent{"4", ttlNoPropMd}), + mods: []Mutator{ + Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}, + tagContent{"4", ttlUnlimitedPropMd}), + }, + { + name: "from existing; upsert non-existing", + initial: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}), + mods: []Mutator{ + Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation)), + Upsert(k3, "3"), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}, + tagContent{"4", ttlUnlimitedPropMd}, + tagContent{"3", ttlUnlimitedPropMd}), + }, + { + name: "from existing; delete", + initial: 
makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}, + tagContent{"4", ttlNoPropMd}), + mods: []Mutator{ + Delete(k5), + }, + want: makeTestTagMapWithMetadata( + tagContent{"4", ttlNoPropMd}), + }, + { + name: "from non-existing; upsert with multiple-metadata", + initial: nil, + mods: []Mutator{ + Upsert(k4, "4", WithTTL(TTLUnlimitedPropagation), WithTTL(TTLNoPropagation)), + Upsert(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata( + tagContent{"4", ttlNoPropMd}, + tagContent{"5", ttlUnlimitedPropMd}), + }, + { + name: "from non-existing; insert with multiple-metadata", + initial: nil, + mods: []Mutator{ + Insert(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlUnlimitedPropMd}), + }, + { + name: "from existing; update with multiple-metadata", + initial: makeTestTagMapWithMetadata( + tagContent{"5", ttlNoPropMd}), + mods: []Mutator{ + Update(k5, "5", WithTTL(TTLNoPropagation), WithTTL(TTLUnlimitedPropagation)), + }, + want: makeTestTagMapWithMetadata( + tagContent{"5", ttlUnlimitedPropMd}), + }, + { + name: "from empty; update invalid", + initial: nil, + mods: []Mutator{ + Insert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), + Upsert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), + Update(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), + }, + want: nil, + }, + { + name: "from empty; insert partial", + initial: nil, + mods: []Mutator{ + Upsert(k3, "3", WithTTL(TTLUnlimitedPropagation)), + Upsert(k4, "4\x19", WithTTL(TTLUnlimitedPropagation)), + }, + want: nil, + }, + } + + // Test api for insert, update, and upsert using metadata. + for _, tt := range tests { + ctx := NewContext(context.Background(), tt.initial) + ctx, err := New(ctx, tt.mods...) 
+ if tt.want != nil && err != nil { + t.Errorf("%v: New = %v", tt.name, err) + } + + if got, want := FromContext(ctx), tt.want; !reflect.DeepEqual(got, want) { + t.Errorf("%v: got %v; want %v", tt.name, got, want) + } + } +} + func TestNewValidation(t *testing.T) { tests := []struct { err string seed *Map }{ // Key name validation in seed - {err: "invalid key", seed: &Map{m: map[Key]string{{name: ""}: "foo"}}}, - {err: "", seed: &Map{m: map[Key]string{{name: "key"}: "foo"}}}, - {err: "", seed: &Map{m: map[Key]string{{name: strings.Repeat("a", 255)}: "census"}}}, - {err: "invalid key", seed: &Map{m: map[Key]string{{name: strings.Repeat("a", 256)}: "census"}}}, - {err: "invalid key", seed: &Map{m: map[Key]string{{name: "Приве́т"}: "census"}}}, + {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: ""}: {"foo", ttlNoPropMd}}}}, + {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"foo", ttlNoPropMd}}}}, + {err: "", seed: &Map{m: map[Key]tagContent{{name: strings.Repeat("a", 255)}: {"census", ttlNoPropMd}}}}, + {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: strings.Repeat("a", 256)}: {"census", ttlNoPropMd}}}}, + {err: "invalid key", seed: &Map{m: map[Key]tagContent{{name: "Приве́т"}: {"census", ttlNoPropMd}}}}, // Value validation - {err: "", seed: &Map{m: map[Key]string{{name: "key"}: ""}}}, - {err: "", seed: &Map{m: map[Key]string{{name: "key"}: strings.Repeat("a", 255)}}}, - {err: "invalid value", seed: &Map{m: map[Key]string{{name: "key"}: "Приве́т"}}}, - {err: "invalid value", seed: &Map{m: map[Key]string{{name: "key"}: strings.Repeat("a", 256)}}}, + {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"", ttlNoPropMd}}}}, + {err: "", seed: &Map{m: map[Key]tagContent{{name: "key"}: {strings.Repeat("a", 255), ttlNoPropMd}}}}, + {err: "invalid value", seed: &Map{m: map[Key]tagContent{{name: "key"}: {"Приве́т", ttlNoPropMd}}}}, + {err: "invalid value", seed: &Map{m: map[Key]tagContent{{name: "key"}: {strings.Repeat("a", 256), 
ttlNoPropMd}}}}, } for i, tt := range tests { @@ -216,7 +373,16 @@ func makeTestTagMap(ids ...int) *Map { m := newMap() for _, v := range ids { k, _ := NewKey(fmt.Sprintf("k%d", v)) - m.m[k] = fmt.Sprintf("v%d", v) + m.m[k] = tagContent{fmt.Sprintf("v%d", v), ttlUnlimitedPropMd} + } + return m +} + +func makeTestTagMapWithMetadata(tcs ...tagContent) *Map { + m := newMap() + for _, tc := range tcs { + k, _ := NewKey(fmt.Sprintf("k%s", tc.value)) + m.m[k] = tc } return m } diff --git a/tag/metadata.go b/tag/metadata.go new file mode 100644 index 000000000..6571a583e --- /dev/null +++ b/tag/metadata.go @@ -0,0 +1,52 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +package tag + +const ( + // valueTTLNoPropagation prevents tag from propagating. + valueTTLNoPropagation = 0 + + // valueTTLUnlimitedPropagation allows tag to propagate without any limits on number of hops. + valueTTLUnlimitedPropagation = -1 +) + +// TTL is metadata that specifies number of hops a tag can propagate. +// Details about TTL metadata is specified at https://github.com/census-instrumentation/opencensus-specs/blob/master/tags/TagMap.md#tagmetadata +type TTL struct { + ttl int +} + +var ( + // TTLUnlimitedPropagation is TTL metadata that allows tag to propagate without any limits on number of hops. + TTLUnlimitedPropagation = TTL{ttl: valueTTLUnlimitedPropagation} + + // TTLNoPropagation is TTL metadata that prevents tag from propagating. 
+ TTLNoPropagation = TTL{ttl: valueTTLNoPropagation} +) + +type metadatas struct { + ttl TTL +} + +// Metadata applies metadatas specified by the function. +type Metadata func(*metadatas) + +// WithTTL applies metadata with provided ttl. +func WithTTL(ttl TTL) Metadata { + return func(m *metadatas) { + m.ttl = ttl + } +} diff --git a/tag/profile_19.go b/tag/profile_19.go index f81cd0b4a..b34d95e34 100644 --- a/tag/profile_19.go +++ b/tag/profile_19.go @@ -25,7 +25,7 @@ func do(ctx context.Context, f func(ctx context.Context)) { m := FromContext(ctx) keyvals := make([]string, 0, 2*len(m.m)) for k, v := range m.m { - keyvals = append(keyvals, k.Name(), v) + keyvals = append(keyvals, k.Name(), v.value) } pprof.Do(ctx, pprof.Labels(keyvals...), f) } From d00fa0cebf4100c68eb95c9afc5252e4dde0b3fa Mon Sep 17 00:00:00 2001 From: Alex Amies Date: Wed, 24 Apr 2019 15:08:58 -0700 Subject: [PATCH 175/212] Added payload size to http server integration (#1129) * Added payload size to http server integration, logged in the LogExporter, and modified example to use LogExporter * Fixed format problems * Corrected comment --- examples/exporter/logexporter.go | 25 +++++++++++++++++++++++++ examples/http/helloworld_server/main.go | 21 +++++++++++++++++---- plugin/ochttp/server.go | 9 +++++++++ 3 files changed, 51 insertions(+), 4 deletions(-) diff --git a/examples/exporter/logexporter.go b/examples/exporter/logexporter.go index d868b5a65..3b9159dd1 100644 --- a/examples/exporter/logexporter.go +++ b/examples/exporter/logexporter.go @@ -177,6 +177,14 @@ func (e *LogExporter) ExportSpan(sd *trace.SpanData) { e.tLogger.Printf("Status: %v [%v]\n", sd.Status.Message, sd.Status.Code) e.tLogger.Printf("Elapsed: %v\n", sd.EndTime.Sub(sd.StartTime).Round(time.Millisecond)) + spanKinds := map[int]string{ + 1: "Server", + 2: "Client", + } + if spanKind, ok := spanKinds[sd.SpanKind]; ok { + e.tLogger.Printf("SpanKind: %s\n", spanKind) + } + if len(sd.Annotations) > 0 { e.tLogger.Println() 
e.tLogger.Println("Annotations:") @@ -196,4 +204,21 @@ func (e *LogExporter) ExportSpan(sd *trace.SpanData) { e.tLogger.Printf("%v- %v=%v\n", indent, k, v) } } + + if len(sd.MessageEvents) > 0 { + eventTypes := map[trace.MessageEventType]string{ + trace.MessageEventTypeSent: "Sent", + trace.MessageEventTypeRecv: "Received", + } + e.tLogger.Println() + e.tLogger.Println("MessageEvents:") + for _, item := range sd.MessageEvents { + if eventType, ok := eventTypes[item.EventType]; ok { + e.tLogger.Print(eventType) + } + e.tLogger.Printf("UncompressedByteSize: %v", item.UncompressedByteSize) + e.tLogger.Printf("CompressedByteSize: %v", item.CompressedByteSize) + e.tLogger.Println() + } + } } diff --git a/examples/http/helloworld_server/main.go b/examples/http/helloworld_server/main.go index 16ed55261..551915d10 100644 --- a/examples/http/helloworld_server/main.go +++ b/examples/http/helloworld_server/main.go @@ -28,6 +28,11 @@ import ( "go.opencensus.io/trace" ) +const ( + metricsLogFile = "/tmp/metrics.log" + tracesLogFile = "/tmp/trace.log" +) + func main() { // Start z-Pages server. go func() { @@ -36,10 +41,18 @@ func main() { log.Fatal(http.ListenAndServe("127.0.0.1:8081", mux)) }() - // Register stats and trace exporters to export the collected data. - exporter := &exporter.PrintExporter{} - view.RegisterExporter(exporter) - trace.RegisterExporter(exporter) + // Using log exporter to export metrics but you can choose any supported exporter. + exporter, err := exporter.NewLogExporter(exporter.Options{ + ReportingInterval: time.Duration(10 * time.Second), + MetricsLogFile: metricsLogFile, + TracesLogFile: tracesLogFile, + }) + if err != nil { + log.Fatalf("Error creating log exporter: %v", err) + } + exporter.Start() + defer exporter.Stop() + defer exporter.Close() // Always trace for this demo. 
In a production application, you should // configure this to a trace.ProbabilitySampler set at the desired diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 5fe15e89f..4f6404fa7 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -124,6 +124,12 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ } } span.AddAttributes(requestAttrs(r)...) + if r.Body == nil { + // TODO: Handle cases where ContentLength is not set. + } else if r.ContentLength > 0 { + span.AddMessageReceiveEvent(0, /* TODO: messageID */ + int64(r.ContentLength), -1) + } return r.WithContext(ctx), span.End } @@ -201,6 +207,9 @@ func (t *trackingResponseWriter) Header() http.Header { func (t *trackingResponseWriter) Write(data []byte) (int, error) { n, err := t.writer.Write(data) t.respSize += int64(n) + // Add message event for request bytes sent. + span := trace.FromContext(t.ctx) + span.AddMessageSendEvent(0 /* TODO: messageID */, int64(n), -1) return n, err } From 648e9a0a3cf396d583eaac525b80af509be823b0 Mon Sep 17 00:00:00 2001 From: Han Kang Date: Wed, 24 Apr 2019 15:09:24 -0700 Subject: [PATCH 176/212] fix buckets in example/quickstart (#1132) --- examples/quickstart/stats.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 35208774d..7e9735188 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -90,7 +90,7 @@ var ( Description: "Groups the lengths of keys in buckets", Measure: mLineLengths, // Lengths: [>=0B, >=5B, >=10B, >=15B, >=20B, >=40B, >=60B, >=80, >=100B, >=200B, >=400, >=600, >=800, >=1000] - Aggregation: view.Distribution(5, 2000, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), + Aggregation: view.Distribution(5, 10, 15, 20, 40, 60, 80, 100, 200, 400, 600, 800, 1000), } ) From beff310c05d38f26989b9fb231bdd8d1ef9c8667 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Wed, 24 Apr 2019 16:37:48 -0700 Subject: 
[PATCH 177/212] Exemplar: Record with sampled SpanContext in gRPC plugin. (#1127) --- plugin/ocgrpc/client_stats_handler_test.go | 76 ++++++++++++++++++++++ plugin/ocgrpc/server_stats_handler_test.go | 68 +++++++++++++++++++ plugin/ocgrpc/stats_common.go | 53 ++++++++++----- 3 files changed, 180 insertions(+), 17 deletions(-) diff --git a/plugin/ocgrpc/client_stats_handler_test.go b/plugin/ocgrpc/client_stats_handler_test.go index 53f924823..e9197fcb0 100644 --- a/plugin/ocgrpc/client_stats_handler_test.go +++ b/plugin/ocgrpc/client_stats_handler_test.go @@ -16,14 +16,19 @@ package ocgrpc import ( + "reflect" "testing" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" "golang.org/x/net/context" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" "go.opencensus.io/tag" @@ -334,6 +339,72 @@ func TestClientDefaultCollections(t *testing.T) { } } +func TestClientRecordExemplar(t *testing.T) { + key, _ := tag.NewKey("test_key") + tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} + out := &stats.OutPayload{Length: 2000} + end := &stats.End{Error: nil} + + if err := view.Register(ClientSentBytesPerRPCView); err != nil { + t.Error(err) + } + h := &ClientHandler{} + h.StartOptions.Sampler = trace.AlwaysSample() + ctx, err := tag.New(context.Background(), tag.Upsert(key, "test_val")) + if err != nil { + t.Error(err) + } + encoded := tag.Encode(tag.FromContext(ctx)) + ctx = stats.SetTags(context.Background(), encoded) + ctx = h.TagRPC(ctx, tagInfo) + + out.Client = true + h.HandleRPC(ctx, out) + end.Client = true + h.HandleRPC(ctx, end) + + span := trace.FromContext(ctx) + if span == nil { + t.Fatal("expected non-nil span, got nil") + } + if !span.IsRecordingEvents() { + t.Errorf("span should be sampled") + } + attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: span.SpanContext()} + wantExemplar := 
&metricdata.Exemplar{Value: 2000, Attachments: attachments} + + rows, err := view.RetrieveData(ClientSentBytesPerRPCView.Name) + if err != nil { + t.Fatal("Error RetrieveData ", err) + } + if len(rows) == 0 { + t.Fatal("No data was recorded.") + } + data := rows[0].Data + dis, ok := data.(*view.DistributionData) + if !ok { + t.Fatal("want DistributionData, got ", data) + } + // Only recorded value is 2000, which falls into the second bucket (1024, 2048]. + wantBuckets := []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { + t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) + } + for i, e := range dis.ExemplarsPerBucket { + // Only the second bucket should have an exemplar. + if i == 1 { + if diff := cmpExemplar(e, wantExemplar); diff != "" { + t.Fatalf("Unexpected Exemplar -got +want: %s", diff) + } + } else if e != nil { + t.Errorf("want nil exemplar, got %v", e) + } + } + + // Unregister views to cleanup. + view.Unregister(ClientSentBytesPerRPCView) +} + // containsRow returns true if rows contain r. func containsRow(rows []*view.Row, r *view.Row) bool { for _, x := range rows { @@ -343,3 +414,8 @@ func containsRow(rows []*view.Row, r *view.Row) bool { } return false } + +// Compare exemplars while ignoring exemplar timestamp, since timestamp is non-deterministic. 
+func cmpExemplar(got, want *metricdata.Exemplar) string { + return cmp.Diff(got, want, cmpopts.IgnoreFields(metricdata.Exemplar{}, "Timestamp"), cmpopts.IgnoreUnexported(metricdata.Exemplar{})) +} diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go index cab232a68..921155e06 100644 --- a/plugin/ocgrpc/server_stats_handler_test.go +++ b/plugin/ocgrpc/server_stats_handler_test.go @@ -16,11 +16,13 @@ package ocgrpc import ( + "reflect" "testing" "go.opencensus.io/trace" "golang.org/x/net/context" + "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" "go.opencensus.io/tag" @@ -334,3 +336,69 @@ func newDistributionData(countPerBucket []int64, count int64, min, max, mean, su CountPerBucket: countPerBucket, } } + +func TestServerRecordExemplar(t *testing.T) { + key, _ := tag.NewKey("test_key") + tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} + out := &stats.OutPayload{Length: 2000} + end := &stats.End{Error: nil} + + if err := view.Register(ServerSentBytesPerRPCView); err != nil { + t.Error(err) + } + h := &ServerHandler{} + h.StartOptions.Sampler = trace.AlwaysSample() + ctx, err := tag.New(context.Background(), tag.Upsert(key, "test_val")) + if err != nil { + t.Error(err) + } + encoded := tag.Encode(tag.FromContext(ctx)) + ctx = stats.SetTags(context.Background(), encoded) + ctx = h.TagRPC(ctx, tagInfo) + + out.Client = false + h.HandleRPC(ctx, out) + end.Client = false + h.HandleRPC(ctx, end) + + span := trace.FromContext(ctx) + if span == nil { + t.Fatal("expected non-nil span, got nil") + } + if !span.IsRecordingEvents() { + t.Errorf("span should be sampled") + } + attachments := map[string]interface{}{metricdata.AttachmentKeySpanContext: span.SpanContext()} + wantExemplar := &metricdata.Exemplar{Value: 2000, Attachments: attachments} + + rows, err := view.RetrieveData(ServerSentBytesPerRPCView.Name) + if err != nil { + t.Fatal("Error RetrieveData ", err) + } + if len(rows) == 0 
{ + t.Fatal("No data was recorded.") + } + data := rows[0].Data + dis, ok := data.(*view.DistributionData) + if !ok { + t.Fatal("want DistributionData, got ", data) + } + // Only recorded value is 2000, which falls into the second bucket (1024, 2048]. + wantBuckets := []int64{0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + if !reflect.DeepEqual(dis.CountPerBucket, wantBuckets) { + t.Errorf("want buckets %v, got %v", wantBuckets, dis.CountPerBucket) + } + for i, e := range dis.ExemplarsPerBucket { + // Only the second bucket should have an exemplar. + if i == 1 { + if diff := cmpExemplar(e, wantExemplar); diff != "" { + t.Fatalf("Unexpected Exemplar -got +want: %s", diff) + } + } else if e != nil { + t.Errorf("want nil exemplar, got %v", e) + } + } + + // Unregister views to cleanup. + view.Unregister(ServerSentBytesPerRPCView) +} diff --git a/plugin/ocgrpc/stats_common.go b/plugin/ocgrpc/stats_common.go index e9991fe0f..0ae569182 100644 --- a/plugin/ocgrpc/stats_common.go +++ b/plugin/ocgrpc/stats_common.go @@ -22,9 +22,11 @@ import ( "sync/atomic" "time" + "go.opencensus.io/metric/metricdata" ocstats "go.opencensus.io/stats" "go.opencensus.io/stats/view" "go.opencensus.io/tag" + "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" @@ -141,27 +143,31 @@ func handleRPCEnd(ctx context.Context, s *stats.End) { } latencyMillis := float64(elapsedTime) / float64(time.Millisecond) + attachments := getSpanCtxAttachment(ctx) if s.Client { - ocstats.RecordWithTags(ctx, - []tag.Mutator{ + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( tag.Upsert(KeyClientMethod, methodName(d.method)), - tag.Upsert(KeyClientStatus, st), - }, - ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - 
ClientRoundtripLatency.M(latencyMillis)) + tag.Upsert(KeyClientStatus, st)), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ClientSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ClientSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ClientReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ClientReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ClientRoundtripLatency.M(latencyMillis))) } else { - ocstats.RecordWithTags(ctx, - []tag.Mutator{ + ocstats.RecordWithOptions(ctx, + ocstats.WithTags( tag.Upsert(KeyServerStatus, st), - }, - ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), - ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), - ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), - ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), - ServerLatency.M(latencyMillis)) + ), + ocstats.WithAttachments(attachments), + ocstats.WithMeasurements( + ServerSentBytesPerRPC.M(atomic.LoadInt64(&d.sentBytes)), + ServerSentMessagesPerRPC.M(atomic.LoadInt64(&d.sentCount)), + ServerReceivedMessagesPerRPC.M(atomic.LoadInt64(&d.recvCount)), + ServerReceivedBytesPerRPC.M(atomic.LoadInt64(&d.recvBytes)), + ServerLatency.M(latencyMillis))) } } @@ -206,3 +212,16 @@ func statusCodeToString(s *status.Status) string { return "CODE_" + strconv.FormatInt(int64(c), 10) } } + +func getSpanCtxAttachment(ctx context.Context) metricdata.Attachments { + attachments := map[string]interface{}{} + span := trace.FromContext(ctx) + if span == nil { + return attachments + } + spanCtx := span.SpanContext() + if spanCtx.IsSampled() { + attachments[metricdata.AttachmentKeySpanContext] = spanCtx + } + return attachments +} From 9328d4452712b1c5de920db06d8dffdb1c29cdab Mon Sep 17 00:00:00 2001 From: Yang Song Date: Wed, 24 Apr 2019 16:38:01 -0700 Subject: [PATCH 178/212] Remove deprecated exemplar package. 
(#1124) --- exemplar/exemplar.go | 83 -------------------------------------------- 1 file changed, 83 deletions(-) delete mode 100644 exemplar/exemplar.go diff --git a/exemplar/exemplar.go b/exemplar/exemplar.go deleted file mode 100644 index 5a4c4345e..000000000 --- a/exemplar/exemplar.go +++ /dev/null @@ -1,83 +0,0 @@ -// Copyright 2018, OpenCensus Authors -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package exemplar implements support for exemplars. Exemplars are additional -// data associated with each measurement. -// -// Their purpose it to provide an example of the kind of thing -// (request, RPC, trace span, etc.) that resulted in that measurement. -// -// Deprecated: Use go.opencensus.io/metric/metricdata instead. -package exemplar - -import ( - "context" - "time" -) - -// Exemplars keys. -const ( - KeyTraceID = "trace_id" - KeySpanID = "span_id" - KeyPrefixTag = "tag:" -) - -// Exemplar is an example data point associated with each bucket of a -// distribution type aggregation. -// -// Deprecated: Use go.opencensus.io/metric/metricdata/exemplar instead. -type Exemplar struct { - Value float64 // the value that was recorded - Timestamp time.Time // the time the value was recorded - Attachments Attachments // attachments (if any) -} - -// Attachments is a map of extra values associated with a recorded data point. -// The map should only be mutated from AttachmentExtractor functions. 
-type Attachments map[string]string - -// AttachmentExtractor is a function capable of extracting exemplar attachments -// from the context used to record measurements. -// The map passed to the function should be mutated and returned. It will -// initially be nil: the first AttachmentExtractor that would like to add keys to the -// map is responsible for initializing it. -type AttachmentExtractor func(ctx context.Context, a Attachments) Attachments - -var extractors []AttachmentExtractor - -// RegisterAttachmentExtractor registers the given extractor associated with the exemplar -// type name. -// -// Extractors will be used to attempt to extract exemplars from the context -// associated with each recorded measurement. -// -// Packages that support exemplars should register their extractor functions on -// initialization. -// -// RegisterAttachmentExtractor should not be called after any measurements have -// been recorded. -func RegisterAttachmentExtractor(e AttachmentExtractor) { - extractors = append(extractors, e) -} - -// AttachmentsFromContext extracts exemplars from the given context. -// Each registered AttachmentExtractor (see RegisterAttachmentExtractor) is called in an -// unspecified order to add attachments to the exemplar. -func AttachmentsFromContext(ctx context.Context) Attachments { - var a Attachments - for _, extractor := range extractors { - a = extractor(ctx, a) - } - return a -} From df6e2001952312404b06f5f6f03fcb4aec1648e5 Mon Sep 17 00:00:00 2001 From: rahulpa Date: Wed, 24 Apr 2019 18:30:28 -0700 Subject: [PATCH 179/212] Run go mod tidy before merging dev to master. 
--- go.mod | 1 - go.sum | 77 +++------------------------------------------------------- 2 files changed, 3 insertions(+), 75 deletions(-) diff --git a/go.mod b/go.mod index 621601a44..8b7d38e91 100644 --- a/go.mod +++ b/go.mod @@ -5,7 +5,6 @@ require ( github.com/google/go-cmp v0.2.0 github.com/hashicorp/golang-lru v0.5.0 golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/api v0.3.1 google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect google.golang.org/grpc v1.19.0 ) diff --git a/go.sum b/go.sum index e3de6e6c6..cbb37036d 100644 --- a/go.sum +++ b/go.sum @@ -1,17 +1,6 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= -cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= -github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5mFgVsvEsIPBvNs= -github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= -github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= -github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= -github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= -github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= -github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= -github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= -github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b 
h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= @@ -19,64 +8,20 @@ github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/gorilla/context v1.1.1 h1:AWwleXJkX/nhcU9bZSnZoi3h/qGYqQAGhq6zZe/aQW8= -github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= -github.com/gorilla/mux v1.6.2 h1:Pgr17XVTNXAk3q/r4CpKzC5xBM/qW1uVLV+IhRZpIIk= -github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= -github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= -github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/onsi/ginkgo 
v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/openzipkin/zipkin-go v0.1.6 h1:yXiysv1CSK7Q5yjGy1710zZGnsbMUIjluWBxtLXHPBo= -github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= -github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= -github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829 h1:D+CiwcpGTW6pL6bv6KI3KbyEyCKyS+1JWS2h8PNDnGA= -github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910 h1:idejC8f05m9MGOsuEi1ATq9shN03HrxNkD/luQvxCv8= -github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f h1:BVwpUVJDADN2ufcGik7W992pyps0wZ888b/y9GXcLTU= -github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= -github.com/prometheus/common v0.2.0 h1:kUZDBDTdBVBYBj5Tmh2NZLlF60mfjA27rM34b+cVwNU= -github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1 h1:/K3IL0Z1quvmJ7X0A1AwNEK7CRkVK3YwfOU/QAL4WGg= -github.com/prometheus/procfs 
v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= -github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= -github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= -golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3 
h1:ulvT7fqt0yHWzpJwI57MezWnYDVpCAYBVuYst/L+fAY= -golang.org/x/net v0.0.0-20190125091013-d26f9f9a57f3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f h1:Bl/8QSvNqXvPGPGXa2z5xUTmV7VDcZyvRZ+QQXkXTZQ= @@ -84,38 +29,22 @@ golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6 h1:bjcUS9ztw9kFmmIxJInhon/0Is3p+EHBKNgquIzo1OI= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys 
v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= -golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -google.golang.org/api v0.3.1 h1:oJra/lMfmtm13/rgY/8i3MzjFWYXvQIAKjQ3HqofMk8= -google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= -google.golang.org/grpc 
v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= -gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= -gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= -gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= -gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= -gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 5c6a904e408b5ba73f0e0abd48bdb7325c796f8f Mon Sep 17 00:00:00 2001 From: rahulpa Date: Wed, 24 Apr 2019 21:37:35 -0700 Subject: [PATCH 180/212] Bump up the version to v0.22.0 --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index d2565f1e2..626d73645 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.21.0" + return "0.22.0" } From 3f87460392ecb6a7d045e65de9b0dee340fa3a40 Mon Sep 17 00:00:00 2001 From: rahulpa Date: Fri, 26 Apr 2019 15:51:01 -0700 Subject: [PATCH 181/212] remove triage-me label from template. 
--- .github/ISSUE_TEMPLATE/bug_report.md | 2 +- .github/ISSUE_TEMPLATE/feature_request.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 9d1067b51..66d132d01 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -1,7 +1,7 @@ --- name: Bug report about: Create a report to help us improve -labels: bug, triage-me +labels: bug --- Please answer these questions before submitting a bug report. diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md index 64f3d4678..05aea226a 100644 --- a/.github/ISSUE_TEMPLATE/feature_request.md +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -1,7 +1,7 @@ --- name: Feature request about: Suggest an idea for this project -labels: feature-request, triage-me +labels: feature-request --- **NB:** Before opening a feature request against this repo, consider whether the feature should/could be implemented in OpenCensus libraries in other languages. If so, please [open an issue on opencensus-specs](https://github.com/census-instrumentation/opencensus-specs/issues/new) first. From a2c785e1ef970451110c62e968897b762df15c94 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Thu, 2 May 2019 11:25:50 -0700 Subject: [PATCH 182/212] Upgrade dependencies. 
(#1137) --- go.mod | 14 ++++++++------ go.sum | 27 +++++++++++++++++++-------- 2 files changed, 27 insertions(+), 14 deletions(-) diff --git a/go.mod b/go.mod index 8b7d38e91..cb4de80f3 100644 --- a/go.mod +++ b/go.mod @@ -1,10 +1,12 @@ module go.opencensus.io require ( - github.com/golang/protobuf v1.2.0 - github.com/google/go-cmp v0.2.0 - github.com/hashicorp/golang-lru v0.5.0 - golang.org/x/net v0.0.0-20190311183353-d8887717615a - google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 // indirect - google.golang.org/grpc v1.19.0 + github.com/golang/protobuf v1.3.1 + github.com/google/go-cmp v0.3.0 + github.com/hashicorp/golang-lru v0.5.1 + golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect + golang.org/x/text v0.3.2 // indirect + google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect + google.golang.org/grpc v1.20.1 ) diff --git a/go.sum b/go.sum index cbb37036d..0b948c2b4 100644 --- a/go.sum +++ b/go.sum @@ -6,21 +6,24 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/google/go-cmp v0.2.0 h1:+dTQ8DZQJz0Mb/HjFlkptS1FeQ4cWSnN941F8aEG4SQ= -github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0 h1:CL2msUPvZTLb5O648aiLNJw3hnBxN2+1Jq8rCOH9wdo= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= -github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= +github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg= 
+github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= +github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= +github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= +golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod 
h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -31,20 +34,28 @@ golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a h1:1BGLXjeY4akVXGgbC9HugT3Jv3hCI0z56oJR5vAMgBU= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd h1:r7DufRZuZbWB7j439YfAzP8RPDa9unLkpwQKUYbIMPI= +golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8 h1:Nw54tB0rB7hY/N0NQvRW8DG4Yk3Q6T9cu9RcFQDu1tc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod 
h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19 h1:Lj2SnHtxkRGJDqnGaSjo+CCdIieEnwVazbOXILwQemk= -google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb h1:i1Ppqkc3WQXikh8bXiwHqAN5Rv3/qDCcRk0/Otx73BY= +google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0 h1:cfg4PD8YEdSFnm7qLV4++93WcmhH2nIUhMjhdCvl3j8= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= +google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= +google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= From 17d7955af9d42886455ce010dd46878208041a58 Mon Sep 17 00:00:00 2001 From: Yang Song Date: Fri, 3 May 2019 13:09:27 -0700 Subject: [PATCH 183/212] Replace "x/net/context" import with "context" (#1138) * Replace "x/net/context" import with "context" * go mod tidy --- examples/grpc/helloworld_client/main.go | 2 +- examples/grpc/helloworld_server/main.go | 2 +- plugin/ocgrpc/client.go | 2 +- plugin/ocgrpc/client_stats_handler.go | 2 +- plugin/ocgrpc/client_stats_handler_test.go | 2 +- plugin/ocgrpc/grpc_test.go | 2 +- plugin/ocgrpc/server.go | 2 +- plugin/ocgrpc/server_stats_handler.go | 2 +- plugin/ocgrpc/server_stats_handler_test.go | 2 +- plugin/ocgrpc/trace_common.go | 2 +- plugin/ocgrpc/trace_common_test.go | 2 +- plugin/ocgrpc/trace_test.go | 2 +- trace/examples_test.go | 2 +- 13 files changed, 13 insertions(+), 13 deletions(-) diff --git 
a/examples/grpc/helloworld_client/main.go b/examples/grpc/helloworld_client/main.go index 12d845090..e0a3edfbb 100644 --- a/examples/grpc/helloworld_client/main.go +++ b/examples/grpc/helloworld_client/main.go @@ -19,11 +19,11 @@ import ( "os" "time" + "context" "go.opencensus.io/examples/exporter" pb "go.opencensus.io/examples/grpc/proto" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" - "golang.org/x/net/context" "google.golang.org/grpc" ) diff --git a/examples/grpc/helloworld_server/main.go b/examples/grpc/helloworld_server/main.go index c0215a921..5cbe06b9f 100644 --- a/examples/grpc/helloworld_server/main.go +++ b/examples/grpc/helloworld_server/main.go @@ -23,13 +23,13 @@ import ( "net/http" "time" + "context" "go.opencensus.io/examples/exporter" pb "go.opencensus.io/examples/grpc/proto" "go.opencensus.io/plugin/ocgrpc" "go.opencensus.io/stats/view" "go.opencensus.io/trace" "go.opencensus.io/zpages" - "golang.org/x/net/context" "google.golang.org/grpc" ) diff --git a/plugin/ocgrpc/client.go b/plugin/ocgrpc/client.go index a6c466ae8..28fddb844 100644 --- a/plugin/ocgrpc/client.go +++ b/plugin/ocgrpc/client.go @@ -15,8 +15,8 @@ package ocgrpc import ( + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "google.golang.org/grpc/stats" ) diff --git a/plugin/ocgrpc/client_stats_handler.go b/plugin/ocgrpc/client_stats_handler.go index 303c607f6..18821c7f5 100644 --- a/plugin/ocgrpc/client_stats_handler.go +++ b/plugin/ocgrpc/client_stats_handler.go @@ -18,8 +18,8 @@ package ocgrpc import ( "time" + "context" "go.opencensus.io/tag" - "golang.org/x/net/context" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" ) diff --git a/plugin/ocgrpc/client_stats_handler_test.go b/plugin/ocgrpc/client_stats_handler_test.go index e9197fcb0..5bf7ef4b0 100644 --- a/plugin/ocgrpc/client_stats_handler_test.go +++ b/plugin/ocgrpc/client_stats_handler_test.go @@ -26,7 +26,7 @@ import ( "google.golang.org/grpc/codes" 
"google.golang.org/grpc/status" - "golang.org/x/net/context" + "context" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" diff --git a/plugin/ocgrpc/grpc_test.go b/plugin/ocgrpc/grpc_test.go index 73b0f5c13..ab275277d 100644 --- a/plugin/ocgrpc/grpc_test.go +++ b/plugin/ocgrpc/grpc_test.go @@ -19,8 +19,8 @@ import ( "testing" "time" + "context" "go.opencensus.io/stats/view" - "golang.org/x/net/context" "google.golang.org/grpc/metadata" "go.opencensus.io/trace" diff --git a/plugin/ocgrpc/server.go b/plugin/ocgrpc/server.go index b67b3e2be..15ada839d 100644 --- a/plugin/ocgrpc/server.go +++ b/plugin/ocgrpc/server.go @@ -15,8 +15,8 @@ package ocgrpc import ( + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "google.golang.org/grpc/stats" ) diff --git a/plugin/ocgrpc/server_stats_handler.go b/plugin/ocgrpc/server_stats_handler.go index 7847c1a91..afcef023a 100644 --- a/plugin/ocgrpc/server_stats_handler.go +++ b/plugin/ocgrpc/server_stats_handler.go @@ -18,7 +18,7 @@ package ocgrpc import ( "time" - "golang.org/x/net/context" + "context" "go.opencensus.io/tag" "google.golang.org/grpc/grpclog" diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go index 921155e06..bb3ca9a5b 100644 --- a/plugin/ocgrpc/server_stats_handler_test.go +++ b/plugin/ocgrpc/server_stats_handler_test.go @@ -19,8 +19,8 @@ import ( "reflect" "testing" + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats/view" diff --git a/plugin/ocgrpc/trace_common.go b/plugin/ocgrpc/trace_common.go index 720f381c2..fef582756 100644 --- a/plugin/ocgrpc/trace_common.go +++ b/plugin/ocgrpc/trace_common.go @@ -19,9 +19,9 @@ import ( "google.golang.org/grpc/codes" + "context" "go.opencensus.io/trace" "go.opencensus.io/trace/propagation" - "golang.org/x/net/context" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" "google.golang.org/grpc/status" diff 
--git a/plugin/ocgrpc/trace_common_test.go b/plugin/ocgrpc/trace_common_test.go index 9e590d994..2b883b60f 100644 --- a/plugin/ocgrpc/trace_common_test.go +++ b/plugin/ocgrpc/trace_common_test.go @@ -17,8 +17,8 @@ package ocgrpc import ( "testing" + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" "google.golang.org/grpc/metadata" "google.golang.org/grpc/stats" ) diff --git a/plugin/ocgrpc/trace_test.go b/plugin/ocgrpc/trace_test.go index 7c2243db7..f7bd182b4 100644 --- a/plugin/ocgrpc/trace_test.go +++ b/plugin/ocgrpc/trace_test.go @@ -19,9 +19,9 @@ import ( "testing" "time" + "context" "go.opencensus.io/internal/testpb" "go.opencensus.io/trace" - "golang.org/x/net/context" ) type testExporter struct { diff --git a/trace/examples_test.go b/trace/examples_test.go index 41925d6c2..15f5b7973 100644 --- a/trace/examples_test.go +++ b/trace/examples_test.go @@ -17,8 +17,8 @@ package trace_test import ( "fmt" + "context" "go.opencensus.io/trace" - "golang.org/x/net/context" ) // This example shows how to use StartSpan and (*Span).End to capture From f24e56296b3a742d13b8368bbb1e831f9b0d2c7f Mon Sep 17 00:00:00 2001 From: Yang Song Date: Mon, 13 May 2019 13:13:55 -0700 Subject: [PATCH 184/212] Fix typos and imports. (#1139) * Fix a typo: Lable -> Label. * Fix import order. 
--- metric/metricexport/reader.go | 4 ++-- stats/view/view_to_metric.go | 4 ++-- stats/view/view_to_metric_test.go | 1 + 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/metric/metricexport/reader.go b/metric/metricexport/reader.go index 44ace7008..b920bacd8 100644 --- a/metric/metricexport/reader.go +++ b/metric/metricexport/reader.go @@ -16,14 +16,14 @@ package metricexport import ( + "context" "fmt" + "sync" "time" - "context" "go.opencensus.io/metric/metricdata" "go.opencensus.io/metric/metricproducer" "go.opencensus.io/trace" - "sync" ) var ( diff --git a/stats/view/view_to_metric.go b/stats/view/view_to_metric.go index 010f81bab..f67b5c464 100644 --- a/stats/view/view_to_metric.go +++ b/stats/view/view_to_metric.go @@ -73,7 +73,7 @@ func getType(v *View) metricdata.Type { } } -func getLableKeys(v *View) []metricdata.LabelKey { +func getLabelKeys(v *View) []metricdata.LabelKey { labelKeys := []metricdata.LabelKey{} for _, k := range v.TagKeys { labelKeys = append(labelKeys, metricdata.LabelKey{Key: k.Name()}) @@ -87,7 +87,7 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { Description: v.Description, Unit: getUnit(v.Measure.Unit()), Type: getType(v), - LabelKeys: getLableKeys(v), + LabelKeys: getLabelKeys(v), } } diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index c73d9a248..82ba96943 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -21,6 +21,7 @@ import ( "time" "encoding/json" + "github.com/google/go-cmp/cmp" "go.opencensus.io/metric/metricdata" "go.opencensus.io/stats" From 54a91f56dc0739b255dcb4ad2e03fd464bbb3a1f Mon Sep 17 00:00:00 2001 From: Edwin Ikechukwu Date: Tue, 28 May 2019 21:54:24 +0200 Subject: [PATCH 185/212] Fix a couple of broken links (#1142) ### Description Fix a couple of broken links --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 3f40ed5cb..fabab2e06 100644 --- 
a/README.md +++ b/README.md @@ -253,10 +253,10 @@ release in which the functionality was marked *Deprecated*. [new-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap [new-replace-ex]: https://godoc.org/go.opencensus.io/tag#example-NewMap--Replace -[exporter-prom]: https://godoc.org/go.opencensus.io/exporter/prometheus +[exporter-prom]: https://godoc.org/contrib.go.opencensus.io/exporter/prometheus [exporter-stackdriver]: https://godoc.org/contrib.go.opencensus.io/exporter/stackdriver -[exporter-zipkin]: https://godoc.org/go.opencensus.io/exporter/zipkin -[exporter-jaeger]: https://godoc.org/go.opencensus.io/exporter/jaeger +[exporter-zipkin]: https://godoc.org/contrib.go.opencensus.io/exporter/zipkin +[exporter-jaeger]: https://godoc.org/contrib.go.opencensus.io/exporter/jaeger [exporter-xray]: https://github.com/census-ecosystem/opencensus-go-exporter-aws [exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite From fff365efde290a8255695a28d0042a052e2196d4 Mon Sep 17 00:00:00 2001 From: rahulpa Date: Wed, 29 May 2019 11:21:58 -0700 Subject: [PATCH 186/212] add constant keys for resources. --- resource/resourcekeys/const.go | 66 ++++++++++++++++++++++++++++++++++ 1 file changed, 66 insertions(+) create mode 100644 resource/resourcekeys/const.go diff --git a/resource/resourcekeys/const.go b/resource/resourcekeys/const.go new file mode 100644 index 000000000..d43fb1a6f --- /dev/null +++ b/resource/resourcekeys/const.go @@ -0,0 +1,66 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package resourcekeys contains well known type and label keys for resources. +package resourcekeys // import "go.opencensus.io/resource/resourcekeys" + +// Constants for Kubernetes resources. +const ( + K8SType = "k8s" + + // A uniquely identifying name for the Kubernetes cluster. Kubernetes + // does not have cluster names as an internal concept so this may be + // set to any meaningful value within the environment. For example, + // GKE clusters have a name which can be used for this label. + K8SKeyClusterName = "k8s.cluster.name" + K8SKeyNamespaceName = "k8s.namespace.name" + K8SKeyPodName = "k8s.pod.name" +) + +// Constants for Container resources. +const ( + ContainerType = "container" + + // A uniquely identifying name for the Container. + ContainerKeyName = "container.name" + ContainerKeyImageName = "container.image.name" + ContainerKeyImageTag = "container.image.tag" +) + +// Constants for Cloud resources. +const ( + CloudType = "cloud" + + // A uniquely identifying name for the Container. + CloudKeyProvider = "cloud.provider" + CloudKeyAccountID = "cloud.account.id" + CloudKeyRegion = "cloud.region" + CloudKeyZone = "cloud.zone" + CloudProviderAWS = "aws" + CloudProviderGCP = "gcp" + CloudProviderAZURE = "azure" +) + +// Constants for Host resources. +const ( + HostType = "host" + + // A uniquely identifying name for the host. + HostKeyName = "host.name" + + // A hostname as returned by the 'hostname' command on host machine. 
+ HostKeyHostName = "host.hostname" + HostKeyID = "host.id" + HostKeyType = "host.type" +) From 766cc5be833088517f0a505203556dd2e7cb6a4b Mon Sep 17 00:00:00 2001 From: rahulpa Date: Wed, 29 May 2019 11:28:58 -0700 Subject: [PATCH 187/212] update comment for cloud resources. --- resource/resourcekeys/const.go | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/resource/resourcekeys/const.go b/resource/resourcekeys/const.go index d43fb1a6f..c8b7b2938 100644 --- a/resource/resourcekeys/const.go +++ b/resource/resourcekeys/const.go @@ -42,11 +42,12 @@ const ( const ( CloudType = "cloud" - // A uniquely identifying name for the Container. - CloudKeyProvider = "cloud.provider" - CloudKeyAccountID = "cloud.account.id" - CloudKeyRegion = "cloud.region" - CloudKeyZone = "cloud.zone" + CloudKeyProvider = "cloud.provider" + CloudKeyAccountID = "cloud.account.id" + CloudKeyRegion = "cloud.region" + CloudKeyZone = "cloud.zone" + + // Cloud Providers CloudProviderAWS = "aws" CloudProviderGCP = "gcp" CloudProviderAZURE = "azure" From 9c377598961b706d1542bd2d84d538b5094d596e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Olivier=20Mengu=C3=A9?= Date: Wed, 29 May 2019 21:10:40 +0200 Subject: [PATCH 188/212] tag: add func MustNewKey to wrap NewKey with panic (#1141) --- tag/key.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/tag/key.go b/tag/key.go index ebbed9500..4e63d08c9 100644 --- a/tag/key.go +++ b/tag/key.go @@ -29,6 +29,16 @@ func NewKey(name string) (Key, error) { return Key{name: name}, nil } +// MustNewKey creates or retrieves a string key identified by name. +// An invalid key name raises a panic. +func MustNewKey(name string) Key { + k, err := NewKey(name) + if err != nil { + panic(err) + } + return k +} + // Name returns the name of the key. 
func (k Key) Name() string { return k.name From 6325d764b2d4a66576c5623aa1e6010b4148a429 Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 30 May 2019 17:48:01 -0700 Subject: [PATCH 189/212] Bump up the version to v0.23.0 (#1144) --- opencensus.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/opencensus.go b/opencensus.go index 626d73645..e5e4b4368 100644 --- a/opencensus.go +++ b/opencensus.go @@ -17,5 +17,5 @@ package opencensus // import "go.opencensus.io" // Version is the current release version of OpenCensus in use. func Version() string { - return "0.22.0" + return "0.23.0" } From a092815c29e3a8fb79dfa966ad048ed20f1f8c01 Mon Sep 17 00:00:00 2001 From: Ran Tavory Date: Tue, 4 Jun 2019 21:45:50 +0300 Subject: [PATCH 190/212] Fix typo in docs An => A (#1145) `An distribution` fixed to `A distribution` --- stats/view/aggregation.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/stats/view/aggregation.go b/stats/view/aggregation.go index b7f169b4a..8bd25314e 100644 --- a/stats/view/aggregation.go +++ b/stats/view/aggregation.go @@ -82,7 +82,7 @@ func Sum() *Aggregation { // Distribution indicates that the desired aggregation is // a histogram distribution. // -// An distribution aggregation may contain a histogram of the values in the +// A distribution aggregation may contain a histogram of the values in the // population. The bucket boundaries for that histogram are described // by the bounds. This defines len(bounds)+1 buckets. 
// From f6172e422374225c6748bd2a9c872e88c2eb43d4 Mon Sep 17 00:00:00 2001 From: Ramon Nogueira Date: Tue, 11 Jun 2019 09:53:23 -0700 Subject: [PATCH 191/212] Replace tag.NewKey calls with tag.MustNewKey where appropriate (#1146) --- README.md | 2 +- examples/helloworld/main.go | 5 +---- examples/quickstart/stats.go | 2 +- internal/readme/README.md | 2 +- internal/readme/tags.go | 12 +++--------- plugin/ocgrpc/client_stats_handler_test.go | 6 +++--- plugin/ocgrpc/end_to_end_test.go | 2 +- plugin/ocgrpc/server_stats_handler_test.go | 6 +++--- plugin/ocgrpc/stats_common.go | 8 ++++---- plugin/ochttp/stats.go | 18 +++++++++--------- stats/benchmark_test.go | 16 ++++++++-------- stats/record_test.go | 4 ++-- stats/view/benchmark_test.go | 21 +++++++++++---------- stats/view/collector_test.go | 15 +++------------ stats/view/view_test.go | 22 +++++++++++----------- stats/view/view_to_metric_test.go | 6 +++--- stats/view/worker_test.go | 4 ++-- tag/example_test.go | 15 +++++++-------- tag/key.go | 5 ++--- 19 files changed, 76 insertions(+), 95 deletions(-) diff --git a/README.md b/README.md index fabab2e06..a8cd09eaf 100644 --- a/README.md +++ b/README.md @@ -78,7 +78,7 @@ Package `tag` allows adding or modifying tags in the current context. 
[embedmd]:# (internal/readme/tags.go new) ```go -ctx, err = tag.New(ctx, +ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index 5428d6e2a..ce647f6ad 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -54,10 +54,7 @@ func main() { trace.ApplyConfig(trace.Config{DefaultSampler: trace.AlwaysSample()}) - frontendKey, err = tag.NewKey("example.com/keys/frontend") - if err != nil { - log.Fatal(err) - } + frontendKey = tag.MustNewKey("example.com/keys/frontend") videoSize = stats.Int64("example.com/measure/video_size", "size of processed videos", stats.UnitBytes) view.SetReportingPeriod(2 * time.Second) diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 7e9735188..04937523d 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -56,7 +56,7 @@ var ( // TagKeys for the stats quickstart. var ( - keyMethod, _ = tag.NewKey("method") + keyMethod = tag.MustNewKey("method") ) // Views for the stats quickstart. diff --git a/internal/readme/README.md b/internal/readme/README.md index c744016da..c98f2a152 100644 --- a/internal/readme/README.md +++ b/internal/readme/README.md @@ -1,6 +1,6 @@ Use the following commands to regenerate the README. 
```bash -$ go get github.com/rakyll/embedmd +$ GO11MODULE=off go get github.com/rakyll/embedmd $ embedmd -w ../../README.md ``` diff --git a/internal/readme/tags.go b/internal/readme/tags.go index 09d9ac12f..1219dd965 100644 --- a/internal/readme/tags.go +++ b/internal/readme/tags.go @@ -24,17 +24,11 @@ import ( func tagsExamples() { ctx := context.Background() - osKey, err := tag.NewKey("example.com/keys/user-os") - if err != nil { - log.Fatal(err) - } - userIDKey, err := tag.NewKey("example.com/keys/user-id") - if err != nil { - log.Fatal(err) - } + osKey := tag.MustNewKey("example.com/keys/user-os") + userIDKey := tag.MustNewKey("example.com/keys/user-id") // START new - ctx, err = tag.New(ctx, + ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), tag.Upsert(userIDKey, "cde36753ed"), ) diff --git a/plugin/ocgrpc/client_stats_handler_test.go b/plugin/ocgrpc/client_stats_handler_test.go index 5bf7ef4b0..7a2c366c0 100644 --- a/plugin/ocgrpc/client_stats_handler_test.go +++ b/plugin/ocgrpc/client_stats_handler_test.go @@ -36,8 +36,8 @@ import ( ) func TestClientDefaultCollections(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") type tagPair struct { k tag.Key @@ -340,7 +340,7 @@ func TestClientDefaultCollections(t *testing.T) { } func TestClientRecordExemplar(t *testing.T) { - key, _ := tag.NewKey("test_key") + key := tag.MustNewKey("test_key") tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} out := &stats.OutPayload{Length: 2000} end := &stats.End{Error: nil} diff --git a/plugin/ocgrpc/end_to_end_test.go b/plugin/ocgrpc/end_to_end_test.go index a305bbcff..8715079d7 100644 --- a/plugin/ocgrpc/end_to_end_test.go +++ b/plugin/ocgrpc/end_to_end_test.go @@ -27,7 +27,7 @@ import ( "go.opencensus.io/tag" ) -var keyAccountId, _ = tag.NewKey("account_id") +var keyAccountId = tag.MustNewKey("account_id") func TestEndToEnd_Single(t *testing.T) { 
view.Register(ocgrpc.DefaultClientViews...) diff --git a/plugin/ocgrpc/server_stats_handler_test.go b/plugin/ocgrpc/server_stats_handler_test.go index bb3ca9a5b..4b4cad69f 100644 --- a/plugin/ocgrpc/server_stats_handler_test.go +++ b/plugin/ocgrpc/server_stats_handler_test.go @@ -32,8 +32,8 @@ import ( ) func TestServerDefaultCollections(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") type tagPair struct { k tag.Key @@ -338,7 +338,7 @@ func newDistributionData(countPerBucket []int64, count int64, min, max, mean, su } func TestServerRecordExemplar(t *testing.T) { - key, _ := tag.NewKey("test_key") + key := tag.MustNewKey("test_key") tagInfo := &stats.RPCTagInfo{FullMethodName: "/package.service/method"} out := &stats.OutPayload{Length: 2000} end := &stats.End{Error: nil} diff --git a/plugin/ocgrpc/stats_common.go b/plugin/ocgrpc/stats_common.go index 0ae569182..89cac9c4e 100644 --- a/plugin/ocgrpc/stats_common.go +++ b/plugin/ocgrpc/stats_common.go @@ -61,14 +61,14 @@ var ( // Server tags are applied to the context used to process each RPC, as well as // the measures at the end of each RPC. var ( - KeyServerMethod, _ = tag.NewKey("grpc_server_method") - KeyServerStatus, _ = tag.NewKey("grpc_server_status") + KeyServerMethod = tag.MustNewKey("grpc_server_method") + KeyServerStatus = tag.MustNewKey("grpc_server_status") ) // Client tags are applied to measures at the end of each RPC. 
var ( - KeyClientMethod, _ = tag.NewKey("grpc_client_method") - KeyClientStatus, _ = tag.NewKey("grpc_client_status") + KeyClientMethod = tag.MustNewKey("grpc_client_method") + KeyClientStatus = tag.MustNewKey("grpc_client_status") ) var ( diff --git a/plugin/ochttp/stats.go b/plugin/ochttp/stats.go index 63bbcda5e..ee3729040 100644 --- a/plugin/ochttp/stats.go +++ b/plugin/ochttp/stats.go @@ -92,38 +92,38 @@ var ( // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Host, _ = tag.NewKey("http.host") + Host = tag.MustNewKey("http.host") // StatusCode is the numeric HTTP response status code, // or "error" if a transport error occurred and no status code was read. - StatusCode, _ = tag.NewKey("http.status") + StatusCode = tag.MustNewKey("http.status") // Path is the URL path (not including query string) in the request. // // The value of this tag can be controlled by the HTTP client, so you need // to watch out for potentially generating high-cardinality labels in your // metrics backend if you use this tag in views. - Path, _ = tag.NewKey("http.path") + Path = tag.MustNewKey("http.path") // Method is the HTTP method of the request, capitalized (GET, POST, etc.). - Method, _ = tag.NewKey("http.method") + Method = tag.MustNewKey("http.method") // KeyServerRoute is a low cardinality string representing the logical // handler of the request. This is usually the pattern registered on the a // ServeMux (or similar string). - KeyServerRoute, _ = tag.NewKey("http_server_route") + KeyServerRoute = tag.MustNewKey("http_server_route") ) // Client tag keys. var ( // KeyClientMethod is the HTTP method, capitalized (i.e. GET, POST, PUT, DELETE, etc.). - KeyClientMethod, _ = tag.NewKey("http_client_method") + KeyClientMethod = tag.MustNewKey("http_client_method") // KeyClientPath is the URL path (not including query string). 
- KeyClientPath, _ = tag.NewKey("http_client_path") + KeyClientPath = tag.MustNewKey("http_client_path") // KeyClientStatus is the HTTP status code as an integer (e.g. 200, 404, 500.), or "error" if no response status line was received. - KeyClientStatus, _ = tag.NewKey("http_client_status") + KeyClientStatus = tag.MustNewKey("http_client_status") // KeyClientHost is the value of the request Host header. - KeyClientHost, _ = tag.NewKey("http_client_host") + KeyClientHost = tag.MustNewKey("http_client_host") ) // Default distributions used by views in this package. diff --git a/stats/benchmark_test.go b/stats/benchmark_test.go index 3e0264fa5..1c467ec54 100644 --- a/stats/benchmark_test.go +++ b/stats/benchmark_test.go @@ -65,14 +65,14 @@ func BenchmarkRecord8_Parallel(b *testing.B) { func BenchmarkRecord8_8Tags(b *testing.B) { ctx := context.Background() - key1, _ := tag.NewKey("key1") - key2, _ := tag.NewKey("key2") - key3, _ := tag.NewKey("key3") - key4, _ := tag.NewKey("key4") - key5, _ := tag.NewKey("key5") - key6, _ := tag.NewKey("key6") - key7, _ := tag.NewKey("key7") - key8, _ := tag.NewKey("key8") + key1 := tag.MustNewKey("key1") + key2 := tag.MustNewKey("key2") + key3 := tag.MustNewKey("key3") + key4 := tag.MustNewKey("key4") + key5 := tag.MustNewKey("key5") + key6 := tag.MustNewKey("key6") + key7 := tag.MustNewKey("key7") + key8 := tag.MustNewKey("key8") tag.New(ctx, tag.Insert(key1, "value"), diff --git a/stats/record_test.go b/stats/record_test.go index ca46ed540..93a652200 100644 --- a/stats/record_test.go +++ b/stats/record_test.go @@ -42,8 +42,8 @@ var ( ) func TestRecordWithAttachments(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") distribution := view.Distribution(5, 10) m := stats.Int64("TestRecordWithAttachments/m1", "", stats.UnitDimensionless) v := &view.View{ diff --git a/stats/view/benchmark_test.go b/stats/view/benchmark_test.go index 0f195d43b..5937b57e6 
100644 --- a/stats/view/benchmark_test.go +++ b/stats/view/benchmark_test.go @@ -26,16 +26,17 @@ import ( ) var ( - m = stats.Float64("m", "", "") - k1, _ = tag.NewKey("k1") - k2, _ = tag.NewKey("k2") - k3, _ = tag.NewKey("k3") - k4, _ = tag.NewKey("k4") - k5, _ = tag.NewKey("k5") - k6, _ = tag.NewKey("k6") - k7, _ = tag.NewKey("k7") - k8, _ = tag.NewKey("k8") - view = &View{ + m = stats.Float64("m", "", "") + k1 = tag.MustNewKey("k1") + k2 = tag.MustNewKey("k2") + k3 = tag.MustNewKey("k3") + k4 = tag.MustNewKey("k4") + k5 = tag.MustNewKey("k5") + k6 = tag.MustNewKey("k6") + k7 = tag.MustNewKey("k7") + k8 = tag.MustNewKey("k8") + + view = &View{ Measure: m, Aggregation: Distribution(1, 2, 3, 4, 5, 6, 7, 8, 9, 10), TagKeys: []tag.Key{k1, k2}, diff --git a/stats/view/collector_test.go b/stats/view/collector_test.go index 57720c10a..2905a180d 100644 --- a/stats/view/collector_test.go +++ b/stats/view/collector_test.go @@ -29,18 +29,9 @@ func TestEncodeDecodeTags(t *testing.T) { want map[tag.Key][]byte } - k1, err := tag.NewKey("/encodedecodetest/k1") - if err != nil { - t.Fatal(err) - } - k2, err := tag.NewKey("/encodedecodetest/k2") - if err != nil { - t.Fatal(err) - } - k3, err := tag.NewKey("/encodedecodetest/k3") - if err != nil { - t.Fatal(err) - } + k1 = tag.MustNewKey("/encodedecodetest/k1") + k2 = tag.MustNewKey("/encodedecodetest/k2") + k3 = tag.MustNewKey("/encodedecodetest/k3") ctx1, _ := tag.New(ctx) ctx2, _ := tag.New(ctx, tag.Insert(k2, "v2")) diff --git a/stats/view/view_test.go b/stats/view/view_test.go index 7d2bed9c4..2a2bde494 100644 --- a/stats/view/view_test.go +++ b/stats/view/view_test.go @@ -29,9 +29,9 @@ import ( ) func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") - k3, _ := tag.NewKey("k3") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") + k3 := tag.MustNewKey("k3") agg1 := Distribution(2) m := 
stats.Int64("Test_View_MeasureFloat64_AggregationDistribution/m1", "", stats.UnitDimensionless) view1 := &View{ @@ -199,9 +199,9 @@ func Test_View_MeasureFloat64_AggregationDistribution(t *testing.T) { } func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") - k3, _ := tag.NewKey("k3") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") + k3 := tag.MustNewKey("k3") m := stats.Int64("Test_View_MeasureFloat64_AggregationSum/m1", "", stats.UnitDimensionless) view, err := newViewInternal(&View{TagKeys: []tag.Key{k1, k2}, Measure: m, Aggregation: Sum()}) if err != nil { @@ -315,8 +315,8 @@ func Test_View_MeasureFloat64_AggregationSum(t *testing.T) { } func TestCanonicalize(t *testing.T) { - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") m := stats.Int64("TestCanonicalize/m1", "desc desc", stats.UnitDimensionless) v := &View{TagKeys: []tag.Key{k2, k1}, Measure: m, Aggregation: Sum()} err := v.canonicalize() @@ -338,9 +338,9 @@ func TestCanonicalize(t *testing.T) { } func TestViewSortedKeys(t *testing.T) { - k1, _ := tag.NewKey("a") - k2, _ := tag.NewKey("b") - k3, _ := tag.NewKey("c") + k1 := tag.MustNewKey("a") + k2 := tag.MustNewKey("b") + k3 := tag.MustNewKey("c") ks := []tag.Key{k1, k3, k2} m := stats.Int64("TestViewSortedKeys/m1", "", stats.UnitDimensionless) diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index 82ba96943..da4135404 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -111,9 +111,9 @@ func init() { } func initTags() { - tk1, _ = tag.NewKey("k1") - tk2, _ = tag.NewKey("k2") - tk3, _ = tag.NewKey("k3") + tk1 = tag.MustNewKey("k1") + tk2 = tag.MustNewKey("k2") + tk3 = tag.MustNewKey("k3") tk1v1 = tag.Tag{Key: tk1, Value: v1} tk2v2 = tag.Tag{Key: tk2, Value: v2} diff --git a/stats/view/worker_test.go b/stats/view/worker_test.go index 
8d4546ea4..6c15d37ef 100644 --- a/stats/view/worker_test.go +++ b/stats/view/worker_test.go @@ -124,8 +124,8 @@ func Test_Worker_RecordFloat64(t *testing.T) { someError := errors.New("some error") m := stats.Float64("Test_Worker_RecordFloat64/MF1", "desc MF1", "unit") - k1, _ := tag.NewKey("k1") - k2, _ := tag.NewKey("k2") + k1 := tag.MustNewKey("k1") + k2 := tag.MustNewKey("k2") ctx, err := tag.New(context.Background(), tag.Insert(k1, "v1"), tag.Insert(k2, "v2"), diff --git a/tag/example_test.go b/tag/example_test.go index fe0c5d9e9..9e3a59724 100644 --- a/tag/example_test.go +++ b/tag/example_test.go @@ -37,15 +37,14 @@ func ExampleNewKey() { _ = key // use key } +func ExampleMustNewKey() { + key := tag.MustNewKey("example.com/keys/user-os") + _ = key // use key +} + func ExampleNew() { - osKey, err := tag.NewKey("example.com/keys/user-os") - if err != nil { - log.Fatal(err) - } - userIDKey, err := tag.NewKey("example.com/keys/user-id") - if err != nil { - log.Fatal(err) - } + osKey := tag.MustNewKey("example.com/keys/user-os") + userIDKey := tag.MustNewKey("example.com/keys/user-id") ctx, err := tag.New(ctx, tag.Insert(osKey, "macOS-10.12.5"), diff --git a/tag/key.go b/tag/key.go index 4e63d08c9..71ec91365 100644 --- a/tag/key.go +++ b/tag/key.go @@ -21,7 +21,7 @@ type Key struct { } // NewKey creates or retrieves a string key identified by name. -// Calling NewKey consequently with the same name returns the same key. +// Calling NewKey more than once with the same name returns the same key. func NewKey(name string) (Key, error) { if !checkKeyName(name) { return Key{}, errInvalidKeyName @@ -29,8 +29,7 @@ func NewKey(name string) (Key, error) { return Key{name: name}, nil } -// MustNewKey creates or retrieves a string key identified by name. -// An invalid key name raises a panic. +// MustNewKey returns a key with the given name, and panics if name is an invalid key name. 
func MustNewKey(name string) Key { k, err := NewKey(name) if err != nil { From df42942ad08fc1d7526b5a0361f909c351fabfa3 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 19 Jun 2019 11:41:31 -0700 Subject: [PATCH 192/212] Add deployment key for k8s resource. (#1148) --- resource/resourcekeys/const.go | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/resource/resourcekeys/const.go b/resource/resourcekeys/const.go index c8b7b2938..1f2246662 100644 --- a/resource/resourcekeys/const.go +++ b/resource/resourcekeys/const.go @@ -23,9 +23,10 @@ const ( // does not have cluster names as an internal concept so this may be // set to any meaningful value within the environment. For example, // GKE clusters have a name which can be used for this label. - K8SKeyClusterName = "k8s.cluster.name" - K8SKeyNamespaceName = "k8s.namespace.name" - K8SKeyPodName = "k8s.pod.name" + K8SKeyClusterName = "k8s.cluster.name" + K8SKeyNamespaceName = "k8s.namespace.name" + K8SKeyPodName = "k8s.pod.name" + K8SKeyDeploymentName = "k8s.deployment.name" ) // Constants for Container resources. 
From 19a91518e33fbd42c94402bb5758d88f3c3738eb Mon Sep 17 00:00:00 2001 From: Luke Cawood Date: Wed, 3 Jul 2019 01:12:00 +1000 Subject: [PATCH 193/212] Map 422 Unprocessable Entity to INVALID_ARGUMENT (#1149) --- plugin/ochttp/trace.go | 2 ++ plugin/ochttp/trace_test.go | 3 +++ 2 files changed, 5 insertions(+) diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index c23b97fb1..53e71305a 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -186,6 +186,8 @@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeCancelled case http.StatusBadRequest: code = trace.StatusCodeInvalidArgument + case http.StatusUnprocessableEntity: + code = trace.StatusCodeInvalidArgument case http.StatusGatewayTimeout: code = trace.StatusCodeDeadlineExceeded case http.StatusNotFound: diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index 13ef30cab..e1d13d155 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -668,6 +668,9 @@ func TestStatusUnitTest(t *testing.T) { {204, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {100, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {500, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, + {400, trace.Status{Code: trace.StatusCodeInvalidArgument, Message: `INVALID_ARGUMENT`}}, + {422, trace.Status{Code: trace.StatusCodeInvalidArgument, Message: `INVALID_ARGUMENT`}}, + {499, trace.Status{Code: trace.StatusCodeCancelled, Message: `CANCELLED`}}, {404, trace.Status{Code: trace.StatusCodeNotFound, Message: `NOT_FOUND`}}, {600, trace.Status{Code: trace.StatusCodeUnknown, Message: `UNKNOWN`}}, {401, trace.Status{Code: trace.StatusCodeUnauthenticated, Message: `UNAUTHENTICATED`}}, From b4a14686f0a98096416fe1b4cb848e384fb2b22b Mon Sep 17 00:00:00 2001 From: Andrew Sinclair Date: Sat, 13 Jul 2019 00:22:01 -0700 Subject: [PATCH 194/212] Clean up comments in view.go (#1152) --- stats/view/view.go | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/stats/view/view.go b/stats/view/view.go index 37f88e1d9..293b54ecb 100644 --- a/stats/view/view.go +++ b/stats/view/view.go @@ -30,7 +30,7 @@ import ( ) // View allows users to aggregate the recorded stats.Measurements. -// Views need to be passed to the Register function to be before data will be +// Views need to be passed to the Register function before data will be // collected and sent to Exporters. type View struct { Name string // Name of View. Must be unique. If unset, will default to the name of the Measure. @@ -43,7 +43,7 @@ type View struct { // Measure is a stats.Measure to aggregate in this view. Measure stats.Measure - // Aggregation is the aggregation function tp apply to the set of Measurements. + // Aggregation is the aggregation function to apply to the set of Measurements. Aggregation *Aggregation } @@ -189,7 +189,7 @@ func (r *Row) String() string { } // Equal returns true if both rows are equal. Tags are expected to be ordered -// by the key name. Even both rows have the same tags but the tags appear in +// by the key name. Even if both rows have the same tags but the tags appear in // different orders it will return false. func (r *Row) Equal(other *Row) bool { if r == other { From 29aa3cabbf25be9f1c3c6d78cecfbe0c3e20cf5a Mon Sep 17 00:00:00 2001 From: rghetia Date: Thu, 29 Aug 2019 10:56:57 -0700 Subject: [PATCH 195/212] change unit to dimensionless when aggregation is count. 
(#1157) --- stats/view/view_to_metric.go | 11 +++++- stats/view/view_to_metric_test.go | 59 +++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+), 1 deletion(-) diff --git a/stats/view/view_to_metric.go b/stats/view/view_to_metric.go index f67b5c464..293c1646d 100644 --- a/stats/view/view_to_metric.go +++ b/stats/view/view_to_metric.go @@ -85,12 +85,21 @@ func viewToMetricDescriptor(v *View) *metricdata.Descriptor { return &metricdata.Descriptor{ Name: v.Name, Description: v.Description, - Unit: getUnit(v.Measure.Unit()), + Unit: convertUnit(v), Type: getType(v), LabelKeys: getLabelKeys(v), } } +func convertUnit(v *View) metricdata.Unit { + switch v.Aggregation.Type { + case AggTypeCount: + return metricdata.UnitDimensionless + default: + return getUnit(v.Measure.Unit()) + } +} + func toLabelValues(row *Row, expectedKeys []metricdata.LabelKey) []metricdata.LabelValue { labelValues := []metricdata.LabelValue{} tagMap := make(map[string]string) diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index da4135404..e4a0b2716 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -457,6 +457,65 @@ func Test_ViewToMetric(t *testing.T) { } } +// Test to verify that a metric converted from a view with Aggregation Count should always +// have Dimensionless unit. 
+func TestUnitConversionForAggCount(t *testing.T) { + startTime := time.Now().Add(-time.Duration(60 * time.Second)) + now := time.Now() + tests := []*struct { + name string + vi *viewInternal + v *View + wantUnit metricdata.Unit + }{ + { + name: "View with Count Aggregation on Latency measurement", + v: &View{ + Name: "request_count1", + Measure: stats.Int64("request_latency", "", stats.UnitMilliseconds), + Aggregation: aggCnt, + }, + wantUnit: metricdata.UnitDimensionless, + }, + { + name: "View with Count Aggregation on bytes measurement", + v: &View{ + Name: "request_count2", + Measure: stats.Int64("request_bytes", "", stats.UnitBytes), + Aggregation: aggCnt, + }, + wantUnit: metricdata.UnitDimensionless, + }, + { + name: "View with aggregation other than Count Aggregation on Latency measurement", + v: &View{ + Name: "request_latency", + Measure: stats.Int64("request_latency", "", stats.UnitMilliseconds), + Aggregation: aggSum, + }, + wantUnit: metricdata.UnitMilliseconds, + }, + } + var err error + for _, tc := range tests { + tc.vi, err = defaultWorker.tryRegisterView(tc.v) + if err != nil { + t.Fatalf("error registering view: %v, err: %v\n", tc.v, err) + } + tc.vi.clearRows() + tc.vi.subscribe() + } + + for _, tc := range tests { + tc.vi.addSample(tag.FromContext(context.Background()), 5.0, nil, now) + gotMetric := viewToMetric(tc.vi, now, startTime) + gotUnit := gotMetric.Descriptor.Unit + if !cmp.Equal(gotUnit, tc.wantUnit) { + t.Errorf("Verify Unit: %s: Got:%v Want:%v", tc.name, gotUnit, tc.wantUnit) + } + } +} + func serializeAsJSON(v interface{}) string { blob, _ := json.MarshalIndent(v, "", " ") return string(blob) From 19caf3aebd1153094beef53e7449948b2d403406 Mon Sep 17 00:00:00 2001 From: Ben Wells Date: Tue, 3 Sep 2019 20:14:13 +0100 Subject: [PATCH 196/212] Use time.Since rather than time.Now().Sub (#1160) --- examples/derived_gauges/README.md | 4 ++-- examples/derived_gauges/derived_gauge.go | 2 +- internal/internal.go | 2 +- zpages/rpcz.go | 2 +- 
4 files changed, 5 insertions(+), 5 deletions(-) diff --git a/examples/derived_gauges/README.md b/examples/derived_gauges/README.md index 8d6f0db1a..f811d1f80 100644 --- a/examples/derived_gauges/README.md +++ b/examples/derived_gauges/README.md @@ -131,7 +131,7 @@ func (q *queue) Size() int64 { func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() - return time.Now().Sub(q.lastConsumed).Seconds() + return time.Since(q.lastConsumed).Seconds() } ``` @@ -254,7 +254,7 @@ func (q *queue) Size() int64 { func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() - return time.Now().Sub(q.lastConsumed).Seconds() + return time.Since(q.lastConsumed).Seconds() } diff --git a/examples/derived_gauges/derived_gauge.go b/examples/derived_gauges/derived_gauge.go index 721742de5..07c130119 100644 --- a/examples/derived_gauges/derived_gauge.go +++ b/examples/derived_gauges/derived_gauge.go @@ -130,7 +130,7 @@ func (q *queue) Size() int64 { func (q *queue) Elapsed() float64 { q.mu.Lock() defer q.mu.Unlock() - return time.Now().Sub(q.lastConsumed).Seconds() + return time.Since(q.lastConsumed).Seconds() } // END tofloat64 diff --git a/internal/internal.go b/internal/internal.go index 9a638781c..81dc7183e 100644 --- a/internal/internal.go +++ b/internal/internal.go @@ -33,5 +33,5 @@ var UserAgent = fmt.Sprintf("opencensus-go/%s", opencensus.Version()) // end as a monotonic time. 
// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks func MonotonicEndTime(start time.Time) time.Time { - return start.Add(time.Now().Sub(start)) + return start.Add(time.Since(start)) } diff --git a/zpages/rpcz.go b/zpages/rpcz.go index dee28f982..ef4dd15b3 100644 --- a/zpages/rpcz.go +++ b/zpages/rpcz.go @@ -205,7 +205,7 @@ func (s snapExporter) ExportView(vd *view.Data) { if len(vd.Rows) == 0 { return } - ageSec := float64(time.Now().Sub(programStartTime)) / float64(time.Second) + ageSec := float64(time.Since(programStartTime)) / float64(time.Second) computeRate := func(maxSec, x float64) float64 { dur := ageSec From c3153da60838155e855bf92946f877d763607410 Mon Sep 17 00:00:00 2001 From: Ben Wells Date: Tue, 3 Sep 2019 22:01:08 +0100 Subject: [PATCH 197/212] Fix bug in ocgrpc client logging (#1161) --- plugin/ocgrpc/client_stats_handler.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/plugin/ocgrpc/client_stats_handler.go b/plugin/ocgrpc/client_stats_handler.go index 18821c7f5..b36349820 100644 --- a/plugin/ocgrpc/client_stats_handler.go +++ b/plugin/ocgrpc/client_stats_handler.go @@ -16,9 +16,9 @@ package ocgrpc import ( + "context" "time" - "context" "go.opencensus.io/tag" "google.golang.org/grpc/grpclog" "google.golang.org/grpc/stats" @@ -30,7 +30,7 @@ func (h *ClientHandler) statsTagRPC(ctx context.Context, info *stats.RPCTagInfo) startTime := time.Now() if info == nil { if grpclog.V(2) { - grpclog.Infof("clientHandler.TagRPC called with nil info.", info.FullMethodName) + grpclog.Info("clientHandler.TagRPC called with nil info.") } return ctx } From 556cb5bcd57b4e3e6bdb538f7ac80f161f10393c Mon Sep 17 00:00:00 2001 From: Ben Wells Date: Wed, 4 Sep 2019 17:25:04 +0100 Subject: [PATCH 198/212] Fix typo in view documentation and remove unnecessary type conversions (#1162) * Remove unnecessary type conversions * Fix typo in view documentation --- metric/cumulative.go | 2 +- metric/metricexport/reader_test.go | 6 +++--- 
plugin/ochttp/server.go | 4 +--- stats/view/doc.go | 2 +- tag/map_codec.go | 2 +- tag/map_codec_test.go | 2 +- 6 files changed, 8 insertions(+), 10 deletions(-) diff --git a/metric/cumulative.go b/metric/cumulative.go index 549d09199..b25ba9439 100644 --- a/metric/cumulative.go +++ b/metric/cumulative.go @@ -116,7 +116,7 @@ func (e *Int64CumulativeEntry) Inc(val int64) { if val <= 0 { return } - atomic.AddInt64(&e.val, int64(val)) + atomic.AddInt64(&e.val, val) } // Int64DerivedCumulative represents int64 cumulative value that is derived from an object. diff --git a/metric/metricexport/reader_test.go b/metric/metricexport/reader_test.go index 756792486..33313dcf2 100644 --- a/metric/metricexport/reader_test.go +++ b/metric/metricexport/reader_test.go @@ -33,8 +33,8 @@ var ( exporter1 = &metricExporter{} exporter2 = &metricExporter{} gaugeEntry *metric.Int64GaugeEntry - duration1 = time.Duration(1000 * time.Millisecond) - duration2 = time.Duration(2000 * time.Millisecond) + duration1 = 1000 * time.Millisecond + duration2 = 2000 * time.Millisecond ) type metricExporter struct { @@ -194,7 +194,7 @@ func TestNewIntervalReaderWithNilExporter(t *testing.T) { func TestNewIntervalReaderStartWithInvalidInterval(t *testing.T) { ir, err := NewIntervalReader(reader1, exporter1) - ir.ReportingInterval = time.Duration(500 * time.Millisecond) + ir.ReportingInterval = 500 * time.Millisecond err = ir.Start() if err == nil { t.Fatalf("expected error but got nil\n") diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index 4f6404fa7..dc6563a15 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -128,7 +128,7 @@ func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Requ // TODO: Handle cases where ContentLength is not set. 
} else if r.ContentLength > 0 { span.AddMessageReceiveEvent(0, /* TODO: messageID */ - int64(r.ContentLength), -1) + r.ContentLength, -1) } return r.WithContext(ctx), span.End } @@ -174,8 +174,6 @@ type trackingResponseWriter struct { // Compile time assertion for ResponseWriter interface var _ http.ResponseWriter = (*trackingResponseWriter)(nil) -var logTagsErrorOnce sync.Once - func (t *trackingResponseWriter) end(tags *addedTags) { t.endOnce.Do(func() { if t.statusCode == 0 { diff --git a/stats/view/doc.go b/stats/view/doc.go index dced225c3..7bbedfe1f 100644 --- a/stats/view/doc.go +++ b/stats/view/doc.go @@ -29,7 +29,7 @@ // LastValue just keeps track of the most recently recorded measurement value. // All aggregations are cumulative. // -// Views can be registerd and unregistered at any time during program execution. +// Views can be registered and unregistered at any time during program execution. // // Libraries can define views but it is recommended that in most cases registering // views be left up to applications. 
diff --git a/tag/map_codec.go b/tag/map_codec.go index f8b582761..c242e695c 100644 --- a/tag/map_codec.go +++ b/tag/map_codec.go @@ -168,7 +168,7 @@ func Encode(m *Map) []byte { eg := &encoderGRPC{ buf: make([]byte, len(m.m)), } - eg.writeByte(byte(tagsVersionID)) + eg.writeByte(tagsVersionID) for k, v := range m.m { if v.m.ttl.ttl == valueTTLUnlimitedPropagation { eg.writeByte(byte(keyTypeString)) diff --git a/tag/map_codec_test.go b/tag/map_codec_test.go index 344ab0e87..b607d2fc0 100644 --- a/tag/map_codec_test.go +++ b/tag/map_codec_test.go @@ -91,7 +91,7 @@ func TestEncodeDecode(t *testing.T) { got := make([]keyValue, 0) for k, v := range decoded.m { - got = append(got, keyValue{k, string(v.value)}) + got = append(got, keyValue{k, v.value}) } want := tc.pairs From 59d1ce35d30f3c25ba762169da2a37eab6ffa041 Mon Sep 17 00:00:00 2001 From: Joshua Seaton Date: Wed, 4 Sep 2019 09:38:45 -0700 Subject: [PATCH 199/212] Update LRU usage (#1164) This change updates the LRU cache implementation used in the trace package, replacing github.com/hashicorp/golang-lru with github.com/golang/groupcache, while preserving the manner in which trace abstractions make use of the data structure. The reason for this change is that github.com/hashicorp/golang-lru has a Mozilla license, which is different from that of github.com/census-instrumentation/opencensus-go. The difference (and the mere presence of the former) makes it more difficult for open-source projects (e.g., https://fuchsia.dev) to vendor the latter. 
--- go.mod | 6 ++++-- go.sum | 10 ++++++---- trace/lrumap.go | 40 ++++++++++++++++++++++++++++++++-------- trace/trace.go | 6 +++--- 4 files changed, 45 insertions(+), 17 deletions(-) diff --git a/go.mod b/go.mod index cb4de80f3..7c1886e9e 100644 --- a/go.mod +++ b/go.mod @@ -1,12 +1,14 @@ module go.opencensus.io require ( + github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 github.com/golang/protobuf v1.3.1 github.com/google/go-cmp v0.3.0 - github.com/hashicorp/golang-lru v0.5.1 - golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 + golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) + +go 1.13 diff --git a/go.sum b/go.sum index 0b948c2b4..212b6b73b 100644 --- a/go.sum +++ b/go.sum @@ -3,6 +3,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03 github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= +github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -10,20 +12,19 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= 
github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= -github.com/hashicorp/golang-lru v0.5.1 h1:0hERBMJE1eitiLkihrMvRVBYAkpHzc/J3QdDN+dAcgU= -github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= +golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3 h1:XQyxROzUlZH+WIQwySDgnISgOivlhjIEwaQaJEJrrN0= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a h1:oWX7TPOiFAMXLq8o0ikBYfCJVlRHBcsciT5bXOrH628= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09 h1:KaQtG+aDELoNmXYas3TVkGNYRuq8JQ1aa7LJt8EXVyo= -golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859 h1:R/3boaszxrf1GEUWTVDzSKVwLmSJpwZ1yqXm8j0v2QI= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/oauth2 
v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -45,6 +46,7 @@ golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGm golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd h1:/e+gpKk9r3dJobndpTytxS2gOy6m5uvpg+ISQoEcusQ= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= diff --git a/trace/lrumap.go b/trace/lrumap.go index 3f80a3368..dc7a295c7 100644 --- a/trace/lrumap.go +++ b/trace/lrumap.go @@ -15,23 +15,47 @@ package trace import ( - "github.com/hashicorp/golang-lru/simplelru" + "github.com/golang/groupcache/lru" ) +// A simple lru.Cache wrapper that tracks the keys of the current contents and +// the cumulative number of evicted items. 
type lruMap struct { - simpleLruMap *simplelru.LRU + cacheKeys map[lru.Key]bool + cache *lru.Cache droppedCount int } func newLruMap(size int) *lruMap { - lm := &lruMap{} - lm.simpleLruMap, _ = simplelru.NewLRU(size, nil) + lm := &lruMap{ + cacheKeys: make(map[lru.Key]bool), + cache: lru.New(size), + droppedCount: 0, + } + lm.cache.OnEvicted = func(key lru.Key, value interface{}) { + delete(lm.cacheKeys, key) + lm.droppedCount++ + } return lm } -func (lm *lruMap) add(key, value interface{}) { - evicted := lm.simpleLruMap.Add(key, value) - if evicted { - lm.droppedCount++ +func (lm lruMap) len() int { + return lm.cache.Len() +} + +func (lm lruMap) keys() []interface{} { + keys := []interface{}{} + for k := range lm.cacheKeys { + keys = append(keys, k) } + return keys +} + +func (lm *lruMap) add(key, value interface{}) { + lm.cacheKeys[lru.Key(key)] = true + lm.cache.Add(lru.Key(key), value) +} + +func (lm *lruMap) get(key interface{}) (interface{}, bool) { + return lm.cache.Get(key) } diff --git a/trace/trace.go b/trace/trace.go index 38ead7bf0..3f8977b41 100644 --- a/trace/trace.go +++ b/trace/trace.go @@ -296,7 +296,7 @@ func (s *Span) makeSpanData() *SpanData { var sd SpanData s.mu.Lock() sd = *s.data - if s.lruAttributes.simpleLruMap.Len() > 0 { + if s.lruAttributes.len() > 0 { sd.Attributes = s.lruAttributesToAttributeMap() sd.DroppedAttributeCount = s.lruAttributes.droppedCount } @@ -370,8 +370,8 @@ func (s *Span) interfaceArrayToAnnotationArray() []Annotation { func (s *Span) lruAttributesToAttributeMap() map[string]interface{} { attributes := make(map[string]interface{}) - for _, key := range s.lruAttributes.simpleLruMap.Keys() { - value, ok := s.lruAttributes.simpleLruMap.Get(key) + for _, key := range s.lruAttributes.keys() { + value, ok := s.lruAttributes.get(key) if ok { keyStr := key.(string) attributes[keyStr] = value From ce85b6d0fba87f4b9f1bef96b9692e7ece47514b Mon Sep 17 00:00:00 2001 From: Ben Wells Date: Tue, 10 Sep 2019 17:01:06 +0100 Subject: 
[PATCH 200/212] Use stats.UnitDimensionless rather than deprecated stats.UnitNone (#1166) --- examples/quickstart/stats.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 04937523d..436032967 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -45,10 +45,10 @@ var ( mLatencyMs = stats.Float64("repl/latency", "The latency in milliseconds per REPL loop", stats.UnitMilliseconds) // Counts the number of lines read in from standard input - mLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", stats.UnitNone) + mLinesIn = stats.Int64("repl/lines_in", "The number of lines read in", stats.UnitDimensionless) // Encounters the number of non EOF(end-of-file) errors. - mErrors = stats.Int64("repl/errors", "The number of errors encountered", stats.UnitNone) + mErrors = stats.Int64("repl/errors", "The number of errors encountered", stats.UnitDimensionless) // Counts/groups the lengths of lines read in. 
mLineLengths = stats.Int64("repl/line_lengths", "The distribution of line lengths", stats.UnitBytes) From 3da91ae5e6af5a3e42dac7761866fa513a753d5e Mon Sep 17 00:00:00 2001 From: Ben Wells Date: Tue, 10 Sep 2019 22:32:05 +0100 Subject: [PATCH 201/212] Simplify use of time package (#1167) --- examples/derived_gauges/README.md | 2 +- examples/derived_gauges/derived_gauge.go | 2 +- examples/gauges/README.md | 2 +- examples/gauges/gauge.go | 2 +- examples/helloworld/main.go | 2 +- examples/http/helloworld_server/main.go | 2 +- examples/quickstart/stats.go | 2 +- plugin/ochttp/trace_test.go | 2 +- stats/view/view_to_metric_test.go | 4 ++-- 9 files changed, 10 insertions(+), 10 deletions(-) diff --git a/examples/derived_gauges/README.md b/examples/derived_gauges/README.md index f811d1f80..7ba33dbd8 100644 --- a/examples/derived_gauges/README.md +++ b/examples/derived_gauges/README.md @@ -295,7 +295,7 @@ func doWork() { func main() { // Using logexporter but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { diff --git a/examples/derived_gauges/derived_gauge.go b/examples/derived_gauges/derived_gauge.go index 07c130119..88da8a653 100644 --- a/examples/derived_gauges/derived_gauge.go +++ b/examples/derived_gauges/derived_gauge.go @@ -172,7 +172,7 @@ func doWork() { func main() { // Using logexporter but you can choose any supported exporter. 
exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { diff --git a/examples/gauges/README.md b/examples/gauges/README.md index c1853b173..9b21b8d87 100644 --- a/examples/gauges/README.md +++ b/examples/gauges/README.md @@ -253,7 +253,7 @@ func work() { func main() { // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { diff --git a/examples/gauges/gauge.go b/examples/gauges/gauge.go index 896effe95..cc5449072 100644 --- a/examples/gauges/gauge.go +++ b/examples/gauges/gauge.go @@ -148,7 +148,7 @@ func work() { func main() { // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { diff --git a/examples/helloworld/main.go b/examples/helloworld/main.go index ce647f6ad..f5f0294e1 100644 --- a/examples/helloworld/main.go +++ b/examples/helloworld/main.go @@ -44,7 +44,7 @@ func main() { // Register an exporter to be able to retrieve // the data from the subscribed views. 
- e, err := exporter.NewLogExporter(exporter.Options{ReportingInterval: time.Duration(time.Second)}) + e, err := exporter.NewLogExporter(exporter.Options{ReportingInterval: time.Second}) if err != nil { log.Fatal(err) } diff --git a/examples/http/helloworld_server/main.go b/examples/http/helloworld_server/main.go index 551915d10..fb5f7b411 100644 --- a/examples/http/helloworld_server/main.go +++ b/examples/http/helloworld_server/main.go @@ -43,7 +43,7 @@ func main() { // Using log exporter to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, TracesLogFile: tracesLogFile, }) diff --git a/examples/quickstart/stats.go b/examples/quickstart/stats.go index 436032967..51fc707f6 100644 --- a/examples/quickstart/stats.go +++ b/examples/quickstart/stats.go @@ -100,7 +100,7 @@ func main() { // Using log exporter here to export metrics but you can choose any supported exporter. exporter, err := exporter.NewLogExporter(exporter.Options{ - ReportingInterval: time.Duration(10 * time.Second), + ReportingInterval: 10 * time.Second, MetricsLogFile: metricsLogFile, }) if err != nil { diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index e1d13d155..759103781 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -353,7 +353,7 @@ func serveHTTP(handler *Handler, done chan struct{}, wait chan time.Time, status // Simulate a slow-responding server. 
sleepUntil := <-wait for time.Now().Before(sleepUntil) { - time.Sleep(sleepUntil.Sub(time.Now())) + time.Sleep(time.Until(sleepUntil)) } io.WriteString(w, "expected-response") diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go index e4a0b2716..6c82fb9dc 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -252,7 +252,7 @@ func initMetricDescriptors() { } func Test_ViewToMetric(t *testing.T) { - startTime := time.Now().Add(-time.Duration(60 * time.Second)) + startTime := time.Now().Add(-60 * time.Second) now := time.Now() tests := []*testToMetrics{ { @@ -460,7 +460,7 @@ func Test_ViewToMetric(t *testing.T) { // Test to verify that a metric converted from a view with Aggregation Count should always // have Dimensionless unit. func TestUnitConversionForAggCount(t *testing.T) { - startTime := time.Now().Add(-time.Duration(60 * time.Second)) + startTime := time.Now().Add(-60 * time.Second) now := time.Now() tests := []*struct { name string From 1b2595daaf5b3b90f83bdba55486caa1d38c61a2 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 11 Sep 2019 10:02:45 -0700 Subject: [PATCH 202/212] fix appveyor build. 
(#1168) --- appveyor.yml | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 12bd7c4c7..d08f0edaf 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -6,13 +6,12 @@ clone_folder: c:\gopath\src\go.opencensus.io environment: GOPATH: 'c:\gopath' - GOVERSION: '1.11' GO111MODULE: 'on' CGO_ENABLED: '0' # See: https://github.com/appveyor/ci/issues/2613 -install: - - set PATH=%GOPATH%\bin;c:\go\bin;%PATH% - - choco upgrade golang --version 1.11.5 # Temporary fix because of a go.sum bug in 1.11 +stack: go 1.11 + +before_test: - go version - go env From 65310139a05de5c10077b75ac45eac743aa01214 Mon Sep 17 00:00:00 2001 From: rghetia Date: Wed, 11 Sep 2019 14:19:48 -0700 Subject: [PATCH 203/212] Remove Gopkg.toml (#1170) --- Gopkg.lock | 231 ----------------------------------------------------- Gopkg.toml | 36 --------- 2 files changed, 267 deletions(-) delete mode 100644 Gopkg.lock delete mode 100644 Gopkg.toml diff --git a/Gopkg.lock b/Gopkg.lock deleted file mode 100644 index 3be12ac8f..000000000 --- a/Gopkg.lock +++ /dev/null @@ -1,231 +0,0 @@ -# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
- - -[[projects]] - branch = "master" - digest = "1:eee9386329f4fcdf8d6c0def0c9771b634bdd5ba460d888aa98c17d59b37a76c" - name = "git.apache.org/thrift.git" - packages = ["lib/go/thrift"] - pruneopts = "UT" - revision = "6e67faa92827ece022380b211c2caaadd6145bf5" - source = "github.com/apache/thrift" - -[[projects]] - branch = "master" - digest = "1:d6afaeed1502aa28e80a4ed0981d570ad91b2579193404256ce672ed0a609e0d" - name = "github.com/beorn7/perks" - packages = ["quantile"] - pruneopts = "UT" - revision = "3a771d992973f24aa725d07868b467d1ddfceafb" - -[[projects]] - digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" - name = "github.com/golang/protobuf" - packages = [ - "proto", - "ptypes", - "ptypes/any", - "ptypes/duration", - "ptypes/timestamp", - ] - pruneopts = "UT" - revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" - version = "v1.2.0" - -[[projects]] - digest = "1:ff5ebae34cfbf047d505ee150de27e60570e8c394b3b8fdbb720ff6ac71985fc" - name = "github.com/matttproud/golang_protobuf_extensions" - packages = ["pbutil"] - pruneopts = "UT" - revision = "c12348ce28de40eed0136aa2b644d0ee0650e56c" - version = "v1.0.1" - -[[projects]] - digest = "1:824c8f3aa4c5f23928fa84ebbd5ed2e9443b3f0cb958a40c1f2fbed5cf5e64b1" - name = "github.com/openzipkin/zipkin-go" - packages = [ - ".", - "idgenerator", - "model", - "propagation", - "reporter", - "reporter/http", - ] - pruneopts = "UT" - revision = "d455a5674050831c1e187644faa4046d653433c2" - version = "v0.1.1" - -[[projects]] - digest = "1:d14a5f4bfecf017cb780bdde1b6483e5deb87e12c332544d2c430eda58734bcb" - name = "github.com/prometheus/client_golang" - packages = [ - "prometheus", - "prometheus/promhttp", - ] - pruneopts = "UT" - revision = "c5b7fccd204277076155f10851dad72b76a49317" - version = "v0.8.0" - -[[projects]] - branch = "master" - digest = "1:2d5cd61daa5565187e1d96bae64dbbc6080dacf741448e9629c64fd93203b0d4" - name = "github.com/prometheus/client_model" - packages = ["go"] - pruneopts = "UT" - 
revision = "5c3871d89910bfb32f5fcab2aa4b9ec68e65a99f" - -[[projects]] - branch = "master" - digest = "1:63b68062b8968092eb86bedc4e68894bd096ea6b24920faca8b9dcf451f54bb5" - name = "github.com/prometheus/common" - packages = [ - "expfmt", - "internal/bitbucket.org/ww/goautoneg", - "model", - ] - pruneopts = "UT" - revision = "c7de2306084e37d54b8be01f3541a8464345e9a5" - -[[projects]] - branch = "master" - digest = "1:8c49953a1414305f2ff5465147ee576dd705487c35b15918fcd4efdc0cb7a290" - name = "github.com/prometheus/procfs" - packages = [ - ".", - "internal/util", - "nfs", - "xfs", - ] - pruneopts = "UT" - revision = "05ee40e3a273f7245e8777337fc7b46e533a9a92" - -[[projects]] - branch = "master" - digest = "1:deafe4ab271911fec7de5b693d7faae3f38796d9eb8622e2b9e7df42bb3dfea9" - name = "golang.org/x/net" - packages = [ - "context", - "http/httpguts", - "http2", - "http2/hpack", - "idna", - "internal/timeseries", - "trace", - ] - pruneopts = "UT" - revision = "922f4815f713f213882e8ef45e0d315b164d705c" - -[[projects]] - branch = "master" - digest = "1:e0140c0c868c6e0f01c0380865194592c011fe521d6e12d78bfd33e756fe018a" - name = "golang.org/x/sync" - packages = ["semaphore"] - pruneopts = "UT" - revision = "1d60e4601c6fd243af51cc01ddf169918a5407ca" - -[[projects]] - branch = "master" - digest = "1:a3f00ac457c955fe86a41e1495e8f4c54cb5399d609374c5cc26aa7d72e542c8" - name = "golang.org/x/sys" - packages = ["unix"] - pruneopts = "UT" - revision = "3b58ed4ad3395d483fc92d5d14123ce2c3581fec" - -[[projects]] - digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" - name = "golang.org/x/text" - packages = [ - "collate", - "collate/build", - "internal/colltab", - "internal/gen", - "internal/tag", - "internal/triegen", - "internal/ucd", - "language", - "secure/bidirule", - "transform", - "unicode/bidi", - "unicode/cldr", - "unicode/norm", - "unicode/rangetable", - ] - pruneopts = "UT" - revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" - version = "v0.3.0" - 
-[[projects]] - branch = "master" - digest = "1:c0c17c94fe8bc1ab34e7f586a4a8b788c5e1f4f9f750ff23395b8b2f5a523530" - name = "google.golang.org/api" - packages = ["support/bundler"] - pruneopts = "UT" - revision = "e21acd801f91da814261b938941d193bb036441a" - -[[projects]] - branch = "master" - digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" - name = "google.golang.org/genproto" - packages = ["googleapis/rpc/status"] - pruneopts = "UT" - revision = "c66870c02cf823ceb633bcd05be3c7cda29976f4" - -[[projects]] - digest = "1:3dd7996ce6bf52dec6a2f69fa43e7c4cefea1d4dfa3c8ab7a5f8a9f7434e239d" - name = "google.golang.org/grpc" - packages = [ - ".", - "balancer", - "balancer/base", - "balancer/roundrobin", - "codes", - "connectivity", - "credentials", - "encoding", - "encoding/proto", - "grpclog", - "internal", - "internal/backoff", - "internal/channelz", - "internal/envconfig", - "internal/grpcrand", - "internal/transport", - "keepalive", - "metadata", - "naming", - "peer", - "resolver", - "resolver/dns", - "resolver/passthrough", - "stats", - "status", - "tap", - ] - pruneopts = "UT" - revision = "32fb0ac620c32ba40a4626ddf94d90d12cce3455" - version = "v1.14.0" - -[solve-meta] - analyzer-name = "dep" - analyzer-version = 1 - input-imports = [ - "git.apache.org/thrift.git/lib/go/thrift", - "github.com/golang/protobuf/proto", - "github.com/openzipkin/zipkin-go", - "github.com/openzipkin/zipkin-go/model", - "github.com/openzipkin/zipkin-go/reporter", - "github.com/openzipkin/zipkin-go/reporter/http", - "github.com/prometheus/client_golang/prometheus", - "github.com/prometheus/client_golang/prometheus/promhttp", - "golang.org/x/net/context", - "golang.org/x/net/http2", - "google.golang.org/api/support/bundler", - "google.golang.org/grpc", - "google.golang.org/grpc/codes", - "google.golang.org/grpc/grpclog", - "google.golang.org/grpc/metadata", - "google.golang.org/grpc/stats", - "google.golang.org/grpc/status", - ] - solver-name = "gps-cdcl" - 
solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml deleted file mode 100644 index a9f3cd68e..000000000 --- a/Gopkg.toml +++ /dev/null @@ -1,36 +0,0 @@ -# For v0.x.y dependencies, prefer adding a constraints of the form: version=">= 0.x.y" -# to avoid locking to a particular minor version which can cause dep to not be -# able to find a satisfying dependency graph. - -[[constraint]] - branch = "master" - name = "git.apache.org/thrift.git" - source = "github.com/apache/thrift" - -[[constraint]] - name = "github.com/golang/protobuf" - version = "1.0.0" - -[[constraint]] - name = "github.com/openzipkin/zipkin-go" - version = ">=0.1.0" - -[[constraint]] - name = "github.com/prometheus/client_golang" - version = ">=0.8.0" - -[[constraint]] - branch = "master" - name = "golang.org/x/net" - -[[constraint]] - branch = "master" - name = "google.golang.org/api" - -[[constraint]] - name = "google.golang.org/grpc" - version = "1.11.3" - -[prune] - go-tests = true - unused-packages = true From d7af601eacbd8e41ece0b1a9fbb9040c844d8fa4 Mon Sep 17 00:00:00 2001 From: Rey Abolofia Date: Wed, 18 Sep 2019 21:19:52 -0700 Subject: [PATCH 204/212] Add New Relic to list of available exporters. (#1172) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index a8cd09eaf..0ee4ae953 100644 --- a/README.md +++ b/README.md @@ -57,6 +57,7 @@ can implement their own exporters by implementing the exporter interfaces * [Datadog][exporter-datadog] for stats and traces * [Graphite][exporter-graphite] for stats * [Honeycomb][exporter-honeycomb] for traces +* [New Relic][exporter-newrelic] for stats and traces ## Overview @@ -261,3 +262,4 @@ release in which the functionality was marked *Deprecated*. 
[exporter-datadog]: https://github.com/DataDog/opencensus-go-exporter-datadog [exporter-graphite]: https://github.com/census-ecosystem/opencensus-go-exporter-graphite [exporter-honeycomb]: https://github.com/honeycombio/opencensus-exporter +[exporter-newrelic]: https://github.com/newrelic/newrelic-opencensus-exporter-go From df0549d970e2a64236cca48fa6a76f0d321c8254 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 20 Sep 2019 10:52:03 -0700 Subject: [PATCH 205/212] add opencensus and opentracing merger note (#1175) --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index 0ee4ae953..1d7e83711 100644 --- a/README.md +++ b/README.md @@ -9,6 +9,8 @@ OpenCensus Go is a Go implementation of OpenCensus, a toolkit for collecting application performance and behavior monitoring data. Currently it consists of three major components: tags, stats and tracing. +#### OpenCensus and OpenTracing have merged to form OpenTelemetry, which serves as the next major version of OpenCensus and OpenTracing. OpenTelemetry will offer backwards compatibility with existing OpenCensus integrations, and we will continue to make security patches to existing OpenCensus libraries for two years. Read more about the merger [here](https://medium.com/opentracing/a-roadmap-to-convergence-b074e5815289). 
+ ## Installation ``` From f58a71790be543eb35437f28370e9817b5d092bb Mon Sep 17 00:00:00 2001 From: Tomas Celaya Date: Mon, 30 Sep 2019 21:32:43 -0700 Subject: [PATCH 206/212] Allow overriding health endpoint check in handler (#1177) * Allow overriding health endpoint check in handler * Clarify fallback * Clarify godoc for IsHealthEndpoint * Combine TestIgnoreHealthz and TestHandlerIsHealthEndpoint into a single test TestIgnoreHealthEndpoints, and add cases * Even more explicit godoc * IsHealthEndpointFunc now accepts the http.Request --- plugin/ochttp/server.go | 8 +++- plugin/ochttp/server_test.go | 79 +++++++++++++++++++++++------------- 2 files changed, 57 insertions(+), 30 deletions(-) diff --git a/plugin/ochttp/server.go b/plugin/ochttp/server.go index dc6563a15..c7ea64235 100644 --- a/plugin/ochttp/server.go +++ b/plugin/ochttp/server.go @@ -70,6 +70,12 @@ type Handler struct { // from the information found in the incoming HTTP Request. By default the // name equals the URL Path. FormatSpanName func(*http.Request) string + + // IsHealthEndpoint holds the function to use for determining if the + // incoming HTTP request should be considered a health check. This is in + // addition to the private isHealthEndpoint func which may also indicate + // tracing should be skipped. 
+ IsHealthEndpoint func(*http.Request) bool } func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { @@ -87,7 +93,7 @@ func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } func (h *Handler) startTrace(w http.ResponseWriter, r *http.Request) (*http.Request, func()) { - if isHealthEndpoint(r.URL.Path) { + if h.IsHealthEndpoint != nil && h.IsHealthEndpoint(r) || isHealthEndpoint(r.URL.Path) { return r, func() {} } var name string diff --git a/plugin/ochttp/server_test.go b/plugin/ochttp/server_test.go index 4cca1bcb5..6e4ed5fbb 100644 --- a/plugin/ochttp/server_test.go +++ b/plugin/ochttp/server_test.go @@ -555,40 +555,61 @@ func TestHandlerImplementsHTTPCloseNotify(t *testing.T) { } } -func TestIgnoreHealthz(t *testing.T) { - var spans int +func testHealthEndpointSkipArray(r *http.Request) bool { + for _, toSkip := range []string{"/health", "/metrics"} { + if r.URL.Path == toSkip { + return true + } + } + return false +} - ts := httptest.NewServer(&Handler{ - Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - span := trace.FromContext(r.Context()) - if span != nil { - spans++ - } - fmt.Fprint(w, "ok") - }), - StartOptions: trace.StartOptions{ - Sampler: trace.AlwaysSample(), - }, - }) - defer ts.Close() +func TestIgnoreHealthEndpoints(t *testing.T) { + var spans int client := &http.Client{} + tests := []struct { + path string + healthEndpointFunc func(*http.Request) bool + }{ + {"/healthz", nil}, + {"/_ah/health", nil}, + {"/healthz", testHealthEndpointSkipArray}, + {"/_ah/health", testHealthEndpointSkipArray}, + {"/health", testHealthEndpointSkipArray}, + {"/metrics", testHealthEndpointSkipArray}, + } + for _, tt := range tests { + t.Run(tt.path, func(t *testing.T) { + ts := httptest.NewServer(&Handler{ + Handler: http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + span := trace.FromContext(r.Context()) + if span != nil { + spans++ + } + fmt.Fprint(w, "ok") + }), + StartOptions: 
trace.StartOptions{ + Sampler: trace.AlwaysSample(), + }, + IsHealthEndpoint: tt.healthEndpointFunc, + }) + defer ts.Close() - for _, path := range []string{"/healthz", "/_ah/health"} { - resp, err := client.Get(ts.URL + path) - if err != nil { - t.Fatalf("Cannot GET %q: %v", path, err) - } - - b, err := ioutil.ReadAll(resp.Body) - if err != nil { - t.Fatalf("Cannot read body for %q: %v", path, err) - } + resp, err := client.Get(ts.URL + tt.path) + if err != nil { + t.Fatalf("Cannot GET %q: %v", tt.path, err) + } + b, err := ioutil.ReadAll(resp.Body) + if err != nil { + t.Fatalf("Cannot read body for %q: %v", tt.path, err) + } - if got, want := string(b), "ok"; got != want { - t.Fatalf("Body for %q = %q; want %q", path, got, want) - } - resp.Body.Close() + if got, want := string(b), "ok"; got != want { + t.Fatalf("Body for %q = %q; want %q", tt.path, got, want) + } + resp.Body.Close() + }) } if spans > 0 { From fa651b05963cfb6060755dc887e7d156ba66e792 Mon Sep 17 00:00:00 2001 From: Brian Hoffmann Date: Tue, 1 Oct 2019 06:45:06 +0200 Subject: [PATCH 207/212] Add runtime metrics support (#1156) * Add runtime metrics support * Rename Options to RunMetricOptions * Make runmetrics producer registration easier with Enable/Disable * Rename and cleanup metric names --- go.mod | 2 + go.sum | 10 + plugin/runmetrics/doc.go | 23 +++ plugin/runmetrics/example_test.go | 77 ++++++++ plugin/runmetrics/producer.go | 290 +++++++++++++++++++++++++++++ plugin/runmetrics/producer_test.go | 177 ++++++++++++++++++ 6 files changed, 579 insertions(+) create mode 100644 plugin/runmetrics/doc.go create mode 100644 plugin/runmetrics/example_test.go create mode 100644 plugin/runmetrics/producer.go create mode 100644 plugin/runmetrics/producer_test.go diff --git a/go.mod b/go.mod index 7c1886e9e..139157cd3 100644 --- a/go.mod +++ b/go.mod @@ -4,9 +4,11 @@ require ( github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 github.com/golang/protobuf v1.3.1 github.com/google/go-cmp v0.3.0 + 
github.com/stretchr/testify v1.4.0 golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect + google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) diff --git a/go.sum b/go.sum index 212b6b73b..ed2a1d844 100644 --- a/go.sum +++ b/go.sum @@ -1,6 +1,8 @@ cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6 h1:ZgQEtGgCBiWRM39fZuwSd1LwSqqSW0hOdXCYYDX0R3I= @@ -12,6 +14,11 @@ github.com/golang/protobuf v1.3.1 h1:YF8+flBXS5eO826T4nzqPrxfhQThhXl0YzfuUPu4SBg github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/google/go-cmp v0.3.0 h1:crn/baboCvb5fXaQ0IJ1SGTsTVrWpDsCWC8EGETZijY= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod 
h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -60,4 +67,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/plugin/runmetrics/doc.go b/plugin/runmetrics/doc.go new file mode 100644 index 000000000..2bb53d4c7 --- /dev/null +++ b/plugin/runmetrics/doc.go @@ -0,0 +1,23 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package runmetrics contains support for runtime metrics. 
+// +// To enable collecting runtime metrics, just call Enable(): +// +// _ := runmetrics.Enable(runmetrics.RunMetricOptions{ +// EnableCPU: true, +// EnableMemory: true, +// }) +package runmetrics // import "go.opencensus.io/plugin/runmetrics" diff --git a/plugin/runmetrics/example_test.go b/plugin/runmetrics/example_test.go new file mode 100644 index 000000000..7cf8dbe03 --- /dev/null +++ b/plugin/runmetrics/example_test.go @@ -0,0 +1,77 @@ +package runmetrics_test + +import ( + "context" + "fmt" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/plugin/runmetrics" + "log" + "sort" +) + +type printExporter struct { +} + +func (l *printExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { + mapData := make(map[string]metricdata.Metric, 0) + + for _, v := range data { + mapData[v.Descriptor.Name] = *v + } + + mapKeys := make([]string, 0, len(mapData)) + for key := range mapData { + mapKeys = append(mapKeys, key) + } + sort.Strings(mapKeys) + + // for the sake of a simple example, we cannot use the real value here + simpleVal := func(v interface{}) int { return 42 } + + for _, k := range mapKeys { + v := mapData[k] + fmt.Printf("%s %d\n", k, simpleVal(v.TimeSeries[0].Points[0].Value)) + } + + return nil +} + +func ExampleEnable() { + + // Enable collection of runtime metrics and supply options + err := runmetrics.Enable(runmetrics.RunMetricOptions{ + EnableCPU: true, + EnableMemory: true, + Prefix: "mayapp/", + }) + if err != nil { + log.Fatal(err) + } + + // Use your reader/exporter to extract values + // This part is not specific to runtime metrics and only here to make it a complete example. 
+ metricexport.NewReader().ReadAndExport(&printExporter{}) + + // output: + // mayapp/process/cpu_cgo_calls 42 + // mayapp/process/cpu_goroutines 42 + // mayapp/process/heap_alloc 42 + // mayapp/process/heap_idle 42 + // mayapp/process/heap_inuse 42 + // mayapp/process/heap_objects 42 + // mayapp/process/heap_release 42 + // mayapp/process/memory_alloc 42 + // mayapp/process/memory_frees 42 + // mayapp/process/memory_lookups 42 + // mayapp/process/memory_malloc 42 + // mayapp/process/stack_inuse 42 + // mayapp/process/stack_mcache_inuse 42 + // mayapp/process/stack_mspan_inuse 42 + // mayapp/process/sys_heap 42 + // mayapp/process/sys_memory_alloc 42 + // mayapp/process/sys_stack 42 + // mayapp/process/sys_stack_mcache 42 + // mayapp/process/sys_stack_mspan 42 + // mayapp/process/total_memory_alloc 42 +} diff --git a/plugin/runmetrics/producer.go b/plugin/runmetrics/producer.go new file mode 100644 index 000000000..eb307fea9 --- /dev/null +++ b/plugin/runmetrics/producer.go @@ -0,0 +1,290 @@ +package runmetrics + +import ( + "errors" + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricproducer" + "runtime" + "sync" +) + +type ( + // producer produces runtime metrics. + // + // Enable collection of runtime metrics with Enable(). + producer struct { + options RunMetricOptions + reg *metric.Registry + + memStats *memStats + cpuStats *cpuStats + } + + // RunMetricOptions allows to configure runtime metrics. 
+ RunMetricOptions struct { + EnableCPU bool // EnableCPU whether CPU metrics shall be recorded + EnableMemory bool // EnableMemory whether memory metrics shall be recorded + Prefix string // Prefix is a custom prefix for metric names + } + + memStats struct { + memStats runtime.MemStats + + memAlloc *metric.Int64GaugeEntry + memTotal *metric.Int64GaugeEntry + memSys *metric.Int64GaugeEntry + memLookups *metric.Int64GaugeEntry + memMalloc *metric.Int64GaugeEntry + memFrees *metric.Int64GaugeEntry + + heapAlloc *metric.Int64GaugeEntry + heapSys *metric.Int64GaugeEntry + heapIdle *metric.Int64GaugeEntry + heapInuse *metric.Int64GaugeEntry + heapObjects *metric.Int64GaugeEntry + heapReleased *metric.Int64GaugeEntry + + stackInuse *metric.Int64GaugeEntry + stackSys *metric.Int64GaugeEntry + stackMSpanInuse *metric.Int64GaugeEntry + stackMSpanSys *metric.Int64GaugeEntry + stackMCacheInuse *metric.Int64GaugeEntry + stackMCacheSys *metric.Int64GaugeEntry + } + + cpuStats struct { + numGoroutines *metric.Int64GaugeEntry + numCgoCalls *metric.Int64GaugeEntry + } +) + +var ( + _ metricproducer.Producer = (*producer)(nil) + enableMutex sync.Mutex + enabledProducer *producer +) + +// Enable enables collection of runtime metrics. +// +// Supply RunMetricOptions to configure the behavior of metrics collection. +// An error might be returned, if creating metrics gauges fails. +// +// Previous calls will be overwritten by subsequent ones. 
+func Enable(options RunMetricOptions) error { + producer := &producer{options: options, reg: metric.NewRegistry()} + var err error + + if options.EnableMemory { + producer.memStats, err = newMemStats(producer) + if err != nil { + return err + } + } + + if options.EnableCPU { + producer.cpuStats, err = newCPUStats(producer) + if err != nil { + return err + } + } + + enableMutex.Lock() + defer enableMutex.Unlock() + + metricproducer.GlobalManager().DeleteProducer(enabledProducer) + metricproducer.GlobalManager().AddProducer(producer) + enabledProducer = producer + + return nil +} + +// Disable disables collection of runtime metrics. +func Disable() { + enableMutex.Lock() + defer enableMutex.Unlock() + + metricproducer.GlobalManager().DeleteProducer(enabledProducer) + enabledProducer = nil +} + +// Read reads the current runtime metrics. +func (p *producer) Read() []*metricdata.Metric { + if p.memStats != nil { + p.memStats.read() + } + + if p.cpuStats != nil { + p.cpuStats.read() + } + + return p.reg.Read() +} + +func newMemStats(producer *producer) (*memStats, error) { + var err error + memStats := &memStats{} + + // General + memStats.memAlloc, err = producer.createInt64GaugeEntry("process/memory_alloc", "Number of bytes currently allocated in use", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.memTotal, err = producer.createInt64GaugeEntry("process/total_memory_alloc", "Number of allocations in total", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.memSys, err = producer.createInt64GaugeEntry("process/sys_memory_alloc", "Number of bytes given to the process to use in total", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.memLookups, err = producer.createInt64GaugeEntry("process/memory_lookups", "Number of pointer lookups performed by the runtime", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + memStats.memMalloc, err = 
producer.createInt64GaugeEntry("process/memory_malloc", "Cumulative count of heap objects allocated", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + memStats.memFrees, err = producer.createInt64GaugeEntry("process/memory_frees", "Cumulative count of heap objects freed", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + // Heap + memStats.heapAlloc, err = producer.createInt64GaugeEntry("process/heap_alloc", "Process heap allocation", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.heapSys, err = producer.createInt64GaugeEntry("process/sys_heap", "Bytes of heap memory obtained from the OS", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.heapIdle, err = producer.createInt64GaugeEntry("process/heap_idle", "Bytes in idle (unused) spans", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.heapInuse, err = producer.createInt64GaugeEntry("process/heap_inuse", "Bytes in in-use spans", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.heapObjects, err = producer.createInt64GaugeEntry("process/heap_objects", "The number of objects allocated on the heap", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + memStats.heapReleased, err = producer.createInt64GaugeEntry("process/heap_release", "The number of objects released from the heap", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + // Stack + memStats.stackInuse, err = producer.createInt64GaugeEntry("process/stack_inuse", "Bytes in stack spans", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.stackSys, err = producer.createInt64GaugeEntry("process/sys_stack", "The memory used by stack spans and OS thread stacks", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.stackMSpanInuse, err = producer.createInt64GaugeEntry("process/stack_mspan_inuse", "Bytes of allocated mspan structures", 
metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.stackMSpanSys, err = producer.createInt64GaugeEntry("process/sys_stack_mspan", "Bytes of memory obtained from the OS for mspan structures", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.stackMCacheInuse, err = producer.createInt64GaugeEntry("process/stack_mcache_inuse", "Bytes of allocated mcache structures", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + memStats.stackMCacheSys, err = producer.createInt64GaugeEntry("process/sys_stack_mcache", "Bytes of memory obtained from the OS for mcache structures", metricdata.UnitBytes) + if err != nil { + return nil, err + } + + return memStats, nil +} + +func (m *memStats) read() { + runtime.ReadMemStats(&m.memStats) + + m.memAlloc.Set(int64(m.memStats.Alloc)) + m.memTotal.Set(int64(m.memStats.TotalAlloc)) + m.memSys.Set(int64(m.memStats.Sys)) + m.memLookups.Set(int64(m.memStats.Lookups)) + m.memMalloc.Set(int64(m.memStats.Mallocs)) + m.memFrees.Set(int64(m.memStats.Frees)) + + m.heapAlloc.Set(int64(m.memStats.HeapAlloc)) + m.heapSys.Set(int64(m.memStats.HeapSys)) + m.heapIdle.Set(int64(m.memStats.HeapIdle)) + m.heapInuse.Set(int64(m.memStats.HeapInuse)) + m.heapReleased.Set(int64(m.memStats.HeapReleased)) + m.heapObjects.Set(int64(m.memStats.HeapObjects)) + + m.stackInuse.Set(int64(m.memStats.StackInuse)) + m.stackSys.Set(int64(m.memStats.StackSys)) + m.stackMSpanInuse.Set(int64(m.memStats.MSpanInuse)) + m.stackMSpanSys.Set(int64(m.memStats.MSpanSys)) + m.stackMCacheInuse.Set(int64(m.memStats.MCacheInuse)) + m.stackMCacheSys.Set(int64(m.memStats.MCacheSys)) +} + +func newCPUStats(producer *producer) (*cpuStats, error) { + cpuStats := &cpuStats{} + var err error + + cpuStats.numGoroutines, err = producer.createInt64GaugeEntry("process/cpu_goroutines", "Number of goroutines that currently exist", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + cpuStats.numCgoCalls, err = 
producer.createInt64GaugeEntry("process/cpu_cgo_calls", "Number of cgo calls made by the current process", metricdata.UnitDimensionless) + if err != nil { + return nil, err + } + + return cpuStats, nil +} + +func (c *cpuStats) read() { + c.numGoroutines.Set(int64(runtime.NumGoroutine())) + c.numCgoCalls.Set(runtime.NumCgoCall()) +} + +func (p *producer) createInt64GaugeEntry(name string, description string, unit metricdata.Unit) (*metric.Int64GaugeEntry, error) { + if len(p.options.Prefix) > 0 { + name = p.options.Prefix + name + } + + gauge, err := p.reg.AddInt64Gauge( + name, + metric.WithDescription(description), + metric.WithUnit(unit)) + if err != nil { + return nil, errors.New("error creating gauge for " + name + ": " + err.Error()) + } + + entry, err := gauge.GetEntry() + if err != nil { + return nil, errors.New("error getting gauge entry for " + name + ": " + err.Error()) + } + + return entry, nil +} diff --git a/plugin/runmetrics/producer_test.go b/plugin/runmetrics/producer_test.go new file mode 100644 index 000000000..a89c1a55e --- /dev/null +++ b/plugin/runmetrics/producer_test.go @@ -0,0 +1,177 @@ +package runmetrics_test + +import ( + "context" + "github.com/stretchr/testify/assert" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/metric/metricproducer" + "go.opencensus.io/plugin/runmetrics" + "testing" +) + +type testExporter struct { + data []*metricdata.Metric +} + +func (t *testExporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { + t.data = append(t.data, data...) 
+ return nil +} + +func TestEnable(t *testing.T) { + tests := []struct { + name string + options runmetrics.RunMetricOptions + wantMetricNames [][]string + dontWantMetricNames [][]string + }{ + { + "no stats", + runmetrics.RunMetricOptions{ + EnableCPU: false, + EnableMemory: false, + }, + [][]string{}, + [][]string{}, + }, + { + "cpu and memory stats", + runmetrics.RunMetricOptions{ + EnableCPU: true, + EnableMemory: true, + }, + [][]string{ + {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, + {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, + {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, + {"process/cpu_goroutines", "process/cpu_cgo_calls"}, + }, + [][]string{}, + }, + { + "only cpu stats", + runmetrics.RunMetricOptions{ + EnableCPU: true, + EnableMemory: false, + }, + [][]string{ + {"process/cpu_goroutines", "process/cpu_cgo_calls"}, + }, + [][]string{ + {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, + {"process/heap_alloc", "process/sys_heap", "process/heap_idle", "process/heap_inuse", "process/heap_objects", "process/heap_release"}, + {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, + }, + }, + { + "only memory stats", + runmetrics.RunMetricOptions{ + EnableCPU: false, + EnableMemory: true, + }, + [][]string{ + {"process/memory_alloc", "process/total_memory_alloc", "process/sys_memory_alloc", "process/memory_lookups", "process/memory_malloc", "process/memory_frees"}, + {"process/heap_alloc", "process/sys_heap", "process/heap_idle", 
"process/heap_inuse", "process/heap_objects", "process/heap_release"}, + {"process/stack_inuse", "process/sys_stack", "process/stack_mspan_inuse", "process/sys_stack_mspan", "process/stack_mcache_inuse", "process/sys_stack_mcache"}, + }, + [][]string{ + {"process/cpu_goroutines", "process/cpu_cgo_calls"}, + }, + }, + { + "cpu and memory stats with custom prefix", + runmetrics.RunMetricOptions{ + EnableCPU: true, + EnableMemory: true, + Prefix: "test_", + }, + [][]string{ + {"test_process/memory_alloc", "test_process/total_memory_alloc", "test_process/sys_memory_alloc", "test_process/memory_lookups", "test_process/memory_malloc", "test_process/memory_frees"}, + {"test_process/heap_alloc", "test_process/sys_heap", "test_process/heap_idle", "test_process/heap_inuse", "test_process/heap_objects", "test_process/heap_release"}, + {"test_process/stack_inuse", "test_process/sys_stack", "test_process/stack_mspan_inuse", "test_process/sys_stack_mspan", "test_process/stack_mcache_inuse", "test_process/sys_stack_mcache"}, + {"test_process/cpu_goroutines", "test_process/cpu_cgo_calls"}, + }, + [][]string{}, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + + err := runmetrics.Enable(test.options) + + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + + defer runmetrics.Disable() + + exporter := &testExporter{} + reader := metricexport.NewReader() + reader.ReadAndExport(exporter) + + for _, want := range test.wantMetricNames { + assertNames(t, true, exporter, want) + } + + for _, dontWant := range test.dontWantMetricNames { + assertNames(t, false, exporter, dontWant) + } + }) + } +} + +func assertNames(t *testing.T, wantIncluded bool, exporter *testExporter, expectedNames []string) { + t.Helper() + + metricNames := make([]string, 0) + for _, v := range exporter.data { + metricNames = append(metricNames, v.Descriptor.Name) + } + + for _, want := range expectedNames { + if wantIncluded { + assert.Contains(t, metricNames, want) + } else { 
+ assert.NotContains(t, metricNames, want) + } + } +} + +func TestEnable_RegistersWithGlobalManager(t *testing.T) { + err := runmetrics.Enable(runmetrics.RunMetricOptions{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + + registeredCount := len(metricproducer.GlobalManager().GetAll()) + assert.Equal(t, 1, registeredCount, "expected a producer to be registered") +} + +func TestEnable_RegistersNoDuplicates(t *testing.T) { + err := runmetrics.Enable(runmetrics.RunMetricOptions{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + + err = runmetrics.Enable(runmetrics.RunMetricOptions{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + + producerCount := len(metricproducer.GlobalManager().GetAll()) + assert.Equal(t, 1, producerCount, "expected one registered producer") +} + +func TestDisable(t *testing.T) { + err := runmetrics.Enable(runmetrics.RunMetricOptions{}) + if err != nil { + t.Errorf("want: nil, got: %v", err) + } + + runmetrics.Disable() + + producerCount := len(metricproducer.GlobalManager().GetAll()) + assert.Equal(t, 0, producerCount, "expected one registered producer") +} From 3b5a343282fe4b4fccdb0f24cbd1d7169d20858a Mon Sep 17 00:00:00 2001 From: Javier Kohen Date: Tue, 15 Oct 2019 15:20:41 -0400 Subject: [PATCH 208/212] Added seconds unit (#1179) --- stats/units.go | 1 + 1 file changed, 1 insertion(+) diff --git a/stats/units.go b/stats/units.go index 6931a5f29..736399652 100644 --- a/stats/units.go +++ b/stats/units.go @@ -22,4 +22,5 @@ const ( UnitDimensionless = "1" UnitBytes = "By" UnitMilliseconds = "ms" + UnitSeconds = "s" ) From aad2c527c5defcf89b5afab7f37274304195a6b2 Mon Sep 17 00:00:00 2001 From: rghetia Date: Fri, 8 Nov 2019 16:00:05 -0800 Subject: [PATCH 209/212] exclude zero bucket from aggregation_data (#1183) * exclude zero bucket from aggregation_data * fix error string in test file. * add one more testcase. 
--- go.mod | 1 - stats/view/aggregation.go | 9 +- stats/view/aggregation_data.go | 6 +- stats/view/aggregation_data_test.go | 10 ++- stats/view/view_to_metric_test.go | 134 ++++++++++++++++++++++++++++ 5 files changed, 150 insertions(+), 10 deletions(-) diff --git a/go.mod b/go.mod index 139157cd3..c867df5f5 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect - google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) diff --git a/stats/view/aggregation.go b/stats/view/aggregation.go index 8bd25314e..9d7093728 100644 --- a/stats/view/aggregation.go +++ b/stats/view/aggregation.go @@ -99,13 +99,14 @@ func Sum() *Aggregation { // If len(bounds) is 1 then there is no finite buckets, and that single // element is the common boundary of the overflow and underflow buckets. 
func Distribution(bounds ...float64) *Aggregation { - return &Aggregation{ + agg := &Aggregation{ Type: AggTypeDistribution, Buckets: bounds, - newData: func() AggregationData { - return newDistributionData(bounds) - }, } + agg.newData = func() AggregationData { + return newDistributionData(agg) + } + return agg } // LastValue only reports the last value recorded using this diff --git a/stats/view/aggregation_data.go b/stats/view/aggregation_data.go index d500e67f7..f331d456e 100644 --- a/stats/view/aggregation_data.go +++ b/stats/view/aggregation_data.go @@ -128,12 +128,12 @@ type DistributionData struct { bounds []float64 // histogram distribution of the values } -func newDistributionData(bounds []float64) *DistributionData { - bucketCount := len(bounds) + 1 +func newDistributionData(agg *Aggregation) *DistributionData { + bucketCount := len(agg.Buckets) + 1 return &DistributionData{ CountPerBucket: make([]int64, bucketCount), ExemplarsPerBucket: make([]*metricdata.Exemplar, bucketCount), - bounds: bounds, + bounds: agg.Buckets, Min: math.MaxFloat64, Max: math.SmallestNonzeroFloat64, } diff --git a/stats/view/aggregation_data_test.go b/stats/view/aggregation_data_test.go index a7e056752..7d09a8fe4 100644 --- a/stats/view/aggregation_data_test.go +++ b/stats/view/aggregation_data_test.go @@ -26,7 +26,10 @@ import ( ) func TestDataClone(t *testing.T) { - dist := newDistributionData([]float64{1, 2, 3, 4}) + agg := &Aggregation{ + Buckets: []float64{1, 2, 3, 4}, + } + dist := newDistributionData(agg) dist.Count = 7 dist.Max = 11 dist.Min = 1 @@ -66,7 +69,10 @@ func TestDataClone(t *testing.T) { } func TestDistributionData_addSample(t *testing.T) { - dd := newDistributionData([]float64{1, 2}) + agg := &Aggregation{ + Buckets: []float64{1, 2}, + } + dd := newDistributionData(agg) attachments1 := map[string]interface{}{"key1": "value1"} t1 := time.Now() dd.addSample(0.5, attachments1, t1) diff --git a/stats/view/view_to_metric_test.go b/stats/view/view_to_metric_test.go 
index 6c82fb9dc..18c877117 100644 --- a/stats/view/view_to_metric_test.go +++ b/stats/view/view_to_metric_test.go @@ -24,6 +24,7 @@ import ( "github.com/google/go-cmp/cmp" "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" "go.opencensus.io/stats" "go.opencensus.io/tag" ) @@ -516,6 +517,139 @@ func TestUnitConversionForAggCount(t *testing.T) { } } +type mockExp struct { + metrics []*metricdata.Metric +} + +func (me *mockExp) ExportMetrics(ctx context.Context, metrics []*metricdata.Metric) error { + me.metrics = append(me.metrics, metrics...) + return nil +} + +var _ metricexport.Exporter = (*mockExp)(nil) + +func TestViewToMetric_OutOfOrderWithZeroBuckets(t *testing.T) { + m := stats.Int64("OutOfOrderWithZeroBuckets", "", "") + now := time.Now() + tts := []struct { + v *View + m *metricdata.Metric + }{ + { + v: &View{ + Name: m.Name() + "_order1", + Measure: m, + Aggregation: Distribution(10, 0, 2), + }, + m: &metricdata.Metric{ + Descriptor: metricdata.Descriptor{ + Name: "OutOfOrderWithZeroBuckets_order1", + Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, + LabelKeys: []metricdata.LabelKey{}, + }, + TimeSeries: []*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + {Value: &metricdata.Distribution{ + Count: 3, + Sum: 9.0, + SumOfSquaredDeviation: 8, + BucketOptions: &metricdata.BucketOptions{ + Bounds: []float64{2, 10}, + }, + Buckets: []metricdata.Bucket{ + {Count: 1, Exemplar: nil}, + {Count: 2, Exemplar: nil}, + {Count: 0, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + StartTime: now, + LabelValues: []metricdata.LabelValue{}, + }, + }, + }, + }, + { + v: &View{ + Name: m.Name() + "_order2", + Measure: m, + Aggregation: Distribution(0, 5, 10), + }, + m: &metricdata.Metric{ + Descriptor: metricdata.Descriptor{ + Name: "OutOfOrderWithZeroBuckets_order2", + Unit: metricdata.UnitDimensionless, + Type: metricdata.TypeCumulativeDistribution, + LabelKeys: []metricdata.LabelKey{}, + }, + TimeSeries: 
[]*metricdata.TimeSeries{ + {Points: []metricdata.Point{ + {Value: &metricdata.Distribution{ + Count: 3, + Sum: 9.0, + SumOfSquaredDeviation: 8, + BucketOptions: &metricdata.BucketOptions{ + Bounds: []float64{5, 10}, + }, + Buckets: []metricdata.Bucket{ + {Count: 2, Exemplar: nil}, + {Count: 1, Exemplar: nil}, + {Count: 0, Exemplar: nil}, + }, + }, + Time: now, + }, + }, + StartTime: now, + LabelValues: []metricdata.LabelValue{}, + }, + }, + }, + }, + } + for _, tt := range tts { + err := Register(tt.v) + if err != nil { + t.Fatalf("error registering view %v, err: %v", tt.v, err) + } + + } + + stats.Record(context.Background(), m.M(5), m.M(1), m.M(3)) + time.Sleep(1 * time.Second) + + me := &mockExp{} + reader := metricexport.NewReader() + reader.ReadAndExport(me) + + var got *metricdata.Metric + lookup := func(vname string, metrics []*metricdata.Metric) *metricdata.Metric { + for _, m := range metrics { + if m.Descriptor.Name == vname { + return m + } + } + return nil + } + + for _, tt := range tts { + got = lookup(tt.v.Name, me.metrics) + if got == nil { + t.Fatalf("metric %s not found in %v\n", tt.v.Name, me.metrics) + } + got.TimeSeries[0].Points[0].Time = now + got.TimeSeries[0].StartTime = now + + want := tt.m + if diff := cmp.Diff(got, want); diff != "" { + t.Errorf("buckets differ -got +want: %s \n Serialized got %v\n, Serialized want %v\n", diff, serializeAsJSON(got), serializeAsJSON(want)) + } + } +} + func serializeAsJSON(v interface{}) string { blob, _ := json.MarshalIndent(v, "", " ") return string(blob) From 643eada29081047b355cfaa1ceb9bc307a10423c Mon Sep 17 00:00:00 2001 From: Javier Kohen Date: Wed, 4 Dec 2019 22:02:48 +0000 Subject: [PATCH 210/212] Added test exporter for use in unit tests. (#1185) * Added test exporter for use in unit tests. With this exporter one can write unit tests to verify that the instrumentation is working. See the included code example. * Clarified comment. * Fixed copyright date. * Added type assertion. 
* Checke key vs value length. * Added example for the metric package. * Improved API usage for derived metrics. --- go.mod | 1 + metric/test/doc.go | 17 ++++++ metric/test/exporter.go | 104 +++++++++++++++++++++++++++++++++++ metric/test/exporter_test.go | 96 ++++++++++++++++++++++++++++++++ 4 files changed, 218 insertions(+) create mode 100644 metric/test/doc.go create mode 100644 metric/test/exporter.go create mode 100644 metric/test/exporter_test.go diff --git a/go.mod b/go.mod index c867df5f5..139157cd3 100644 --- a/go.mod +++ b/go.mod @@ -8,6 +8,7 @@ require ( golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect + google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) diff --git a/metric/test/doc.go b/metric/test/doc.go new file mode 100644 index 000000000..4ebb2b9d8 --- /dev/null +++ b/metric/test/doc.go @@ -0,0 +1,17 @@ +// Copyright 2019, OpenCensus Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// + +// Package test for testing code instrumented with the metric and stats packages. 
+package test diff --git a/metric/test/exporter.go b/metric/test/exporter.go new file mode 100644 index 000000000..7b579356f --- /dev/null +++ b/metric/test/exporter.go @@ -0,0 +1,104 @@ +package test + +import ( + "context" + "fmt" + "sort" + "strings" + "time" + + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats/view" +) + +// Exporter keeps exported metric data in memory to aid in testing the instrumentation. +// +// Metrics can be retrieved with `GetPoint()`. In order to deterministically retrieve the most recent values, you must first invoke `ReadAndExport()`. +type Exporter struct { + // points is a map from a label signature to the latest value for the time series represented by the signature. + // Use function `labelSignature` to get a signature from a `metricdata.Metric`. + points map[string]metricdata.Point + metricReader *metricexport.Reader +} + +var _ metricexport.Exporter = &Exporter{} + +// NewExporter returns a new exporter. +func NewExporter(metricReader *metricexport.Reader) *Exporter { + return &Exporter{points: make(map[string]metricdata.Point), metricReader: metricReader} +} + +// ExportMetrics records the view data. +func (e *Exporter) ExportMetrics(ctx context.Context, data []*metricdata.Metric) error { + for _, metric := range data { + for _, ts := range metric.TimeSeries { + signature := labelSignature(metric.Descriptor.Name, labelObjectsToKeyValue(metric.Descriptor.LabelKeys, ts.LabelValues)) + e.points[signature] = ts.Points[len(ts.Points)-1] + } + } + return nil +} + +// GetPoint returns the latest point for the time series identified by the given labels. +func (e *Exporter) GetPoint(metricName string, labels map[string]string) (metricdata.Point, bool) { + v, ok := e.points[labelSignature(metricName, labelMapToKeyValue(labels))] + return v, ok +} + +// ReadAndExport reads the current values for all metrics and makes them available to this exporter. 
+func (e *Exporter) ReadAndExport() { + // The next line forces the view worker to process all stats.Record* calls that + // happened within Store() before the call to ReadAndExport below. This abuses the + // worker implementation to work around lack of synchronization. + // TODO(jkohen,rghetia): figure out a clean way to make this deterministic. + view.SetReportingPeriod(time.Minute) + e.metricReader.ReadAndExport(e) +} + +// String defines the ``native'' format for the exporter. +func (e *Exporter) String() string { + return fmt.Sprintf("points{%v}", e.points) +} + +type keyValue struct { + Key string + Value string +} + +func sortKeyValue(kv []keyValue) { + sort.Slice(kv, func(i, j int) bool { return kv[i].Key < kv[j].Key }) +} + +func labelMapToKeyValue(labels map[string]string) []keyValue { + kv := make([]keyValue, 0, len(labels)) + for k, v := range labels { + kv = append(kv, keyValue{Key: k, Value: v}) + } + sortKeyValue(kv) + return kv +} + +func labelObjectsToKeyValue(keys []metricdata.LabelKey, values []metricdata.LabelValue) []keyValue { + if len(keys) != len(values) { + panic("keys and values must have the same length") + } + kv := make([]keyValue, 0, len(values)) + for i := range keys { + if values[i].Present { + kv = append(kv, keyValue{Key: keys[i].Key, Value: values[i].Value}) + } + } + sortKeyValue(kv) + return kv +} + +// labelSignature returns a string that uniquely identifies the list of labels given in the input. 
+func labelSignature(metricName string, kv []keyValue) string { + var builder strings.Builder + for _, x := range kv { + builder.WriteString(x.Key) + builder.WriteString(x.Value) + } + return fmt.Sprintf("%s{%s}", metricName, builder.String()) +} diff --git a/metric/test/exporter_test.go b/metric/test/exporter_test.go new file mode 100644 index 000000000..70d3ecc67 --- /dev/null +++ b/metric/test/exporter_test.go @@ -0,0 +1,96 @@ +package test + +import ( + "context" + "fmt" + + "go.opencensus.io/metric" + "go.opencensus.io/metric/metricdata" + "go.opencensus.io/metric/metricexport" + "go.opencensus.io/stats" + "go.opencensus.io/stats/view" + "go.opencensus.io/tag" +) + +var ( + myTag = tag.MustNewKey("my_label") + myMetric = stats.Int64("my_metric", "description", stats.UnitDimensionless) +) + +func init() { + if err := view.Register( + &view.View{ + Measure: myMetric, + TagKeys: []tag.Key{myTag}, + Aggregation: view.Sum(), + }, + ); err != nil { + panic(err) + } +} + +func ExampleExporter_stats() { + metricReader := metricexport.NewReader() + metrics := NewExporter(metricReader) + metrics.ReadAndExport() + metricBase := getCounter(metrics, myMetric.Name(), newMetricKey("label1")) + + for i := 1; i <= 3; i++ { + // The code under test begins here. + stats.RecordWithTags(context.Background(), + []tag.Mutator{tag.Upsert(myTag, "label1")}, + myMetric.M(int64(i))) + // The code under test ends here. 
+ + metrics.ReadAndExport() + metricValue := getCounter(metrics, myMetric.Name(), newMetricKey("label1")) + fmt.Printf("increased by %d\n", metricValue-metricBase) + } + // Output: + // increased by 1 + // increased by 3 + // increased by 6 +} + +type derivedMetric struct { + i int64 +} + +func (m *derivedMetric) ToInt64() int64 { + return m.i +} + +func ExampleExporter_metric() { + metricReader := metricexport.NewReader() + metrics := NewExporter(metricReader) + m := derivedMetric{} + r := metric.NewRegistry() + g, _ := r.AddInt64DerivedCumulative("derived", metric.WithLabelKeys(myTag.Name())) + g.UpsertEntry(m.ToInt64, metricdata.NewLabelValue("l1")) + for i := 1; i <= 3; i++ { + // The code under test begins here. + m.i = int64(i) + // The code under test ends here. + + metrics.ExportMetrics(context.Background(), r.Read()) + metricValue := getCounter(metrics, "derived", newMetricKey("l1")) + fmt.Println(metricValue) + } + // Output: + // 1 + // 2 + // 3 +} + +func newMetricKey(v string) map[string]string { + return map[string]string{myTag.Name(): v} +} + +func getCounter(metrics *Exporter, metricName string, metricKey map[string]string) int64 { + p, ok := metrics.GetPoint(metricName, metricKey) + if !ok { + // This is expected before the metric is recorded the first time. 
+ return 0 + } + return p.Value.(int64) +} From d851005f548fb76216358effa0fac2352fd15592 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Fredrik=20L=C3=B6nnblad?= Date: Mon, 13 Jan 2020 15:04:12 -0300 Subject: [PATCH 211/212] Added handling of HTTP code 409 when parsing trace status in ochttp (#1190) * Added handling of HTTP code 409 when parsing trace status on OCHTTP * Reverted the change of stats/view/aggregation_data.go and stats/view/view_to_metric_test.go --- go.mod | 1 - go.sum | 1 + plugin/ochttp/trace.go | 3 +++ plugin/ochttp/trace_test.go | 4 ++++ 4 files changed, 8 insertions(+), 1 deletion(-) diff --git a/go.mod b/go.mod index 139157cd3..c867df5f5 100644 --- a/go.mod +++ b/go.mod @@ -8,7 +8,6 @@ require ( golang.org/x/net v0.0.0-20190620200207-3b0461eec859 golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd // indirect golang.org/x/text v0.3.2 // indirect - google.golang.org/appengine v1.4.0 // indirect google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb // indirect google.golang.org/grpc v1.20.1 ) diff --git a/go.sum b/go.sum index ed2a1d844..01c02972c 100644 --- a/go.sum +++ b/go.sum @@ -67,6 +67,7 @@ google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZi google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1 h1:Hz2g2wirWK7H0qIIhGIqRGTuMwTE8HEKFnDZZ7lm9NU= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/plugin/ochttp/trace.go b/plugin/ochttp/trace.go index 53e71305a..ed3a5db56 100644 --- a/plugin/ochttp/trace.go +++ b/plugin/ochttp/trace.go @@ -204,7 +204,10 
@@ func TraceStatus(httpStatusCode int, statusLine string) trace.Status { code = trace.StatusCodeUnavailable case http.StatusOK: code = trace.StatusCodeOK + case http.StatusConflict: + code = trace.StatusCodeAlreadyExists } + return trace.Status{Code: code, Message: codeToStr[code]} } diff --git a/plugin/ochttp/trace_test.go b/plugin/ochttp/trace_test.go index 759103781..615c271eb 100644 --- a/plugin/ochttp/trace_test.go +++ b/plugin/ochttp/trace_test.go @@ -677,6 +677,10 @@ func TestStatusUnitTest(t *testing.T) { {403, trace.Status{Code: trace.StatusCodePermissionDenied, Message: `PERMISSION_DENIED`}}, {301, trace.Status{Code: trace.StatusCodeOK, Message: `OK`}}, {501, trace.Status{Code: trace.StatusCodeUnimplemented, Message: `UNIMPLEMENTED`}}, + {409, trace.Status{Code: trace.StatusCodeAlreadyExists, Message: `ALREADY_EXISTS`}}, + {429, trace.Status{Code: trace.StatusCodeResourceExhausted, Message: `RESOURCE_EXHAUSTED`}}, + {503, trace.Status{Code: trace.StatusCodeUnavailable, Message: `UNAVAILABLE`}}, + {504, trace.Status{Code: trace.StatusCodeDeadlineExceeded, Message: `DEADLINE_EXCEEDED`}}, } for _, tt := range tests { From d835ff86be02193d324330acdb7d65546b05f814 Mon Sep 17 00:00:00 2001 From: Rahul Patel Date: Sat, 1 Feb 2020 22:13:51 -0800 Subject: [PATCH 212/212] fix config_test to run in any order. 
(#1194) - fixes #1193 --- trace/config_test.go | 102 ++++++++++++++++++++++++++----------------- 1 file changed, 61 insertions(+), 41 deletions(-) diff --git a/trace/config_test.go b/trace/config_test.go index 547b817d0..1811230cd 100644 --- a/trace/config_test.go +++ b/trace/config_test.go @@ -20,69 +20,89 @@ import ( ) func TestApplyConfig(t *testing.T) { - testCfgs := []Config{ - {}, + cfg := config.Load().(*Config) + defaultCfg := Config{ + DefaultSampler: cfg.DefaultSampler, + IDGenerator: cfg.IDGenerator, + MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, + MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, + MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, + MaxLinksPerSpan: DefaultMaxLinksPerSpan, + } + testCases := []struct { + name string + newCfg Config + wantCfg Config + }{ { - MaxAttributesPerSpan: 1, - MaxAnnotationEventsPerSpan: 2, - MaxMessageEventsPerSpan: 3, - MaxLinksPerSpan: 4, + name: "Initialize to default config", + newCfg: defaultCfg, + wantCfg: defaultCfg, }, { - MaxAttributesPerSpan: -1, - MaxAnnotationEventsPerSpan: 3, - MaxMessageEventsPerSpan: -3, - MaxLinksPerSpan: 5, - }} - cfg := config.Load().(*Config) - wantCfgs := []Config{ - { - DefaultSampler: cfg.DefaultSampler, - IDGenerator: cfg.IDGenerator, - MaxAttributesPerSpan: DefaultMaxAttributesPerSpan, - MaxAnnotationEventsPerSpan: DefaultMaxAnnotationEventsPerSpan, - MaxMessageEventsPerSpan: DefaultMaxMessageEventsPerSpan, - MaxLinksPerSpan: DefaultMaxLinksPerSpan, + name: "Empty Config", + newCfg: Config{}, + wantCfg: defaultCfg, }, { - DefaultSampler: cfg.DefaultSampler, - IDGenerator: cfg.IDGenerator, - MaxAttributesPerSpan: 1, - MaxAnnotationEventsPerSpan: 2, - MaxMessageEventsPerSpan: 3, - MaxLinksPerSpan: 4, + name: "Valid non-default config", + newCfg: Config{ + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 2, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 4, + }, + wantCfg: Config{ + DefaultSampler: cfg.DefaultSampler, + IDGenerator: 
cfg.IDGenerator, + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 2, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 4, + }, }, { - DefaultSampler: cfg.DefaultSampler, - IDGenerator: cfg.IDGenerator, - MaxAttributesPerSpan: 1, - MaxAnnotationEventsPerSpan: 3, - MaxMessageEventsPerSpan: 3, - MaxLinksPerSpan: 5, - }} + name: "Partially invalid config", + newCfg: Config{ + MaxAttributesPerSpan: -1, + MaxAnnotationEventsPerSpan: 3, + MaxMessageEventsPerSpan: -3, + MaxLinksPerSpan: 5, + }, + wantCfg: Config{ + DefaultSampler: cfg.DefaultSampler, + IDGenerator: cfg.IDGenerator, + MaxAttributesPerSpan: 1, + MaxAnnotationEventsPerSpan: 3, + MaxMessageEventsPerSpan: 3, + MaxLinksPerSpan: 5, + }, + }, + } - for i, newCfg := range testCfgs { + for i, tt := range testCases { + newCfg := tt.newCfg ApplyConfig(newCfg) gotCfg := config.Load().(*Config) - wantCfg := wantCfgs[i] + wantCfg := tt.wantCfg if got, want := reflect.ValueOf(gotCfg.DefaultSampler).Pointer(), reflect.ValueOf(wantCfg.DefaultSampler).Pointer(); got != want { - t.Fatalf("testId = %d config.DefaultSampler = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.DefaultSampler = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.IDGenerator, wantCfg.IDGenerator; got != want { - t.Fatalf("testId = %d config.IDGenerator = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.IDGenerator = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxAttributesPerSpan, wantCfg.MaxAttributesPerSpan; got != want { - t.Fatalf("testId = %d config.MaxAttributesPerSpan = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.MaxAttributesPerSpan = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxLinksPerSpan, wantCfg.MaxLinksPerSpan; got != want { - t.Fatalf("testId = %d config.MaxLinksPerSpan = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.MaxLinksPerSpan = %#v; want %#v", i, 
tt.name, got, want) } if got, want := gotCfg.MaxAnnotationEventsPerSpan, wantCfg.MaxAnnotationEventsPerSpan; got != want { - t.Fatalf("testId = %d config.MaxAnnotationEventsPerSpan = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.MaxAnnotationEventsPerSpan = %#v; want %#v", i, tt.name, got, want) } if got, want := gotCfg.MaxMessageEventsPerSpan, wantCfg.MaxMessageEventsPerSpan; got != want { - t.Fatalf("testId = %d config.MaxMessageEventsPerSpan = %#v; want %#v", i, got, want) + t.Fatalf("testId = %d, testName = %s: config.MaxMessageEventsPerSpan = %#v; want %#v", i, tt.name, got, want) } }