diff --git a/Gopkg.lock b/Gopkg.lock
index 84b8dd70146..d68fb49bcd7 100644
--- a/Gopkg.lock
+++ b/Gopkg.lock
@@ -54,12 +54,6 @@
packages = ["."]
revision = "bbf7a2afc14f93e1e0a5c06df524fbd75e5031e5"
-[[projects]]
- name = "github.com/Shopify/sarama"
- packages = ["."]
- revision = "0fb560e5f7fbcaee2f75e3c34174320709f69944"
- version = "v1.11.0"
-
[[projects]]
name = "github.com/apache/thrift"
packages = ["lib/go/thrift"]
@@ -170,24 +164,6 @@
revision = "a25b9ef0c9fe242ac04bb20d3a028442b7d266b6"
version = "v2.6.1"
-[[projects]]
- name = "github.com/eapache/go-resiliency"
- packages = ["breaker"]
- revision = "6800482f2c813e689c88b7ed3282262385011890"
- version = "v1.0.0"
-
-[[projects]]
- branch = "master"
- name = "github.com/eapache/go-xerial-snappy"
- packages = ["."]
- revision = "bb955e01b9346ac19dc29eb16586c90ded99a98c"
-
-[[projects]]
- name = "github.com/eapache/queue"
- packages = ["."]
- revision = "ded5959c0d4e360646dc9e9908cff48666781367"
- version = "v1.0.2"
-
[[projects]]
name = "github.com/emicklei/go-restful"
packages = [
@@ -437,12 +413,6 @@
revision = "8c199fb6259ffc1af525cc3ad52ee60ba8359669"
version = "v1.1"
-[[projects]]
- name = "github.com/klauspost/crc32"
- packages = ["."]
- revision = "cb6bfca970f6908083f26f39a79009d608efd5cd"
- version = "v1.1"
-
[[projects]]
branch = "master"
name = "github.com/kr/logfmt"
@@ -587,19 +557,6 @@
revision = "6edb48674bd9467b8e91fda004f2bd7202d60ce4"
version = "v1.0.1"
-[[projects]]
- name = "github.com/openzipkin/zipkin-go-opentracing"
- packages = [
- ".",
- "_thrift/gen-go/scribe",
- "_thrift/gen-go/zipkincore",
- "flag",
- "types",
- "wire"
- ]
- revision = "6022d4d3ed39632fad842942bda1813a9b4f63c8"
- version = "v0.2.3"
-
[[projects]]
branch = "master"
name = "github.com/petar/GoLLRB"
@@ -612,18 +569,6 @@
packages = ["."]
revision = "98c11a7a6ec829d672b03833c3d69a7fae1ca972"
-[[projects]]
- branch = "master"
- name = "github.com/pierrec/lz4"
- packages = ["."]
- revision = "f5b77fd73d83122495309c0f459b810f83cc291f"
-
-[[projects]]
- name = "github.com/pierrec/xxHash"
- packages = ["xxHash32"]
- revision = "f051bb7f1d1aaf1b5a665d74fb6b0217712c69f7"
- version = "v0.1.1"
-
[[projects]]
name = "github.com/pkg/errors"
packages = ["."]
@@ -678,7 +623,6 @@
"expfmt",
"internal/bitbucket.org/ww/goautoneg",
"model",
- "promlog",
"route",
"version"
]
@@ -752,12 +696,6 @@
]
revision = "195bc0d286b077f5633eb4bda76743620588b2fa"
-[[projects]]
- branch = "master"
- name = "github.com/rcrowley/go-metrics"
- packages = ["."]
- revision = "1f30fe9094a513ce4c700b9a54458bbb0c96996c"
-
[[projects]]
branch = "master"
name = "github.com/samuel/go-zookeeper"
@@ -832,12 +770,6 @@
packages = ["codec"]
revision = "708a42d246822952f38190a8d8c4e6b16a0e600c"
-[[projects]]
- branch = "master"
- name = "github.com/weaveworks-experiments/loki"
- packages = ["pkg/client"]
- revision = "17ff1516db3dffe25452b4cbf6e6412f303cddea"
-
[[projects]]
branch = "master"
name = "github.com/weaveworks/billing-client"
@@ -864,7 +796,7 @@
"tracing",
"user"
]
- revision = "ebab3a78900a09cebce8d0b37f2a69d474796bf4"
+ revision = "b6e3b7fd56106061b1d60ec4da46c0d6770eb111"
[[projects]]
name = "github.com/weaveworks/mesh"
@@ -1190,6 +1122,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
- inputs-digest = "fc2b611011b916790e0e1f112a7e0359692b3963b08ca30fbdfc34c37278246c"
+ inputs-digest = "dfbcc50df522b353f27b1af1328cc6389e305dd0bdf23e8ea4200d7e9bca9d2b"
solver-name = "gps-cdcl"
solver-version = 1
diff --git a/pkg/ingester/ingester.go b/pkg/ingester/ingester.go
index 86a7eaa87ff..f7e4d859294 100644
--- a/pkg/ingester/ingester.go
+++ b/pkg/ingester/ingester.go
@@ -493,7 +493,7 @@ func (i *Ingester) ReadinessHandler(w http.ResponseWriter, r *http.Request) {
if i.lifecycler.IsReady() {
w.WriteHeader(http.StatusNoContent)
} else {
- w.WriteHeader(http.StatusInternalServerError)
+ w.WriteHeader(http.StatusServiceUnavailable)
}
}
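For context on the hunk above: 503 Service Unavailable is the conventional status for "temporarily not ready", which load balancers and Kubernetes readiness probes treat as a retryable condition, whereas 500 reads as an application failure and tends to trip error-rate alerting. A minimal standalone sketch of the same pattern, with `ready` as a hypothetical stand-in for the lifecycler check (not part of this codebase):

    package main

    import (
        "log"
        "net/http"
    )

    // readinessHandler mirrors the handler above: 204 No Content when ready,
    // 503 Service Unavailable when not. The ready func is a placeholder for
    // whatever readiness state the service tracks.
    func readinessHandler(ready func() bool) http.HandlerFunc {
        return func(w http.ResponseWriter, r *http.Request) {
            if ready() {
                w.WriteHeader(http.StatusNoContent)
            } else {
                w.WriteHeader(http.StatusServiceUnavailable)
            }
        }
    }

    func main() {
        http.Handle("/ready", readinessHandler(func() bool { return true }))
        log.Fatal(http.ListenAndServe(":8080", nil))
    }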
diff --git a/vendor/github.com/Shopify/sarama/.gitignore b/vendor/github.com/Shopify/sarama/.gitignore
deleted file mode 100644
index 3591f9ff305..00000000000
--- a/vendor/github.com/Shopify/sarama/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-*.test
-
-# Folders
-_obj
-_test
-.vagrant
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
diff --git a/vendor/github.com/Shopify/sarama/.travis.yml b/vendor/github.com/Shopify/sarama/.travis.yml
deleted file mode 100644
index cd5d3c4945f..00000000000
--- a/vendor/github.com/Shopify/sarama/.travis.yml
+++ /dev/null
@@ -1,33 +0,0 @@
-language: go
-go:
-- 1.6.3
-- 1.7.3
-
-env:
- global:
- - KAFKA_PEERS=localhost:9091,localhost:9092,localhost:9093,localhost:9094,localhost:9095
- - TOXIPROXY_ADDR=http://localhost:8474
- - KAFKA_INSTALL_ROOT=/home/travis/kafka
- - KAFKA_HOSTNAME=localhost
- - DEBUG=true
- matrix:
- - KAFKA_VERSION=0.9.0.1
- - KAFKA_VERSION=0.10.0.1
- - KAFKA_VERSION=0.10.1.0
-
-before_install:
-- export REPOSITORY_ROOT=${TRAVIS_BUILD_DIR}
-- vagrant/install_cluster.sh
-- vagrant/boot_cluster.sh
-- vagrant/create_topics.sh
-
-install:
-- make install_dependencies
-
-script:
-- make test
-- make vet
-- make errcheck
-- make fmt
-
-sudo: false
diff --git a/vendor/github.com/Shopify/sarama/CHANGELOG.md b/vendor/github.com/Shopify/sarama/CHANGELOG.md
deleted file mode 100644
index aad986fef9a..00000000000
--- a/vendor/github.com/Shopify/sarama/CHANGELOG.md
+++ /dev/null
@@ -1,350 +0,0 @@
-# Changelog
-
-#### Version 1.11.0 (2016-12-20)
-
-New Features:
- - Metrics! Thanks to Sébastien Launay for all his work on this feature
- ([#701](https://github.com/Shopify/sarama/pull/701),
- [#746](https://github.com/Shopify/sarama/pull/746),
- [#766](https://github.com/Shopify/sarama/pull/766)).
- - Add support for LZ4 compression
- ([#786](https://github.com/Shopify/sarama/pull/786)).
- - Add support for ListOffsetRequest v1 and Kafka 0.10.1
- ([#775](https://github.com/Shopify/sarama/pull/775)).
- - Added a `HighWaterMarks` method to the Consumer which aggregates the
- `HighWaterMarkOffset` values of its child topic/partitions
- ([#769](https://github.com/Shopify/sarama/pull/769)).
-
-Bug Fixes:
- - Fixed producing when using timestamps, compression and Kafka 0.10
- ([#759](https://github.com/Shopify/sarama/pull/759)).
- - Added missing decoder methods to DescribeGroups response
- ([#756](https://github.com/Shopify/sarama/pull/756)).
- - Fix producer shutdown when `Return.Errors` is disabled
- ([#787](https://github.com/Shopify/sarama/pull/787)).
- - Don't mutate configuration in SyncProducer
- ([#790](https://github.com/Shopify/sarama/pull/790)).
- - Fix crash on SASL initialization failure
- ([#795](https://github.com/Shopify/sarama/pull/795)).
-
-#### Version 1.10.1 (2016-08-30)
-
-Bug Fixes:
- - Fix the documentation for `HashPartitioner` which was incorrect
- ([#717](https://github.com/Shopify/sarama/pull/717)).
- - Permit client creation even when it is limited by ACLs
- ([#722](https://github.com/Shopify/sarama/pull/722)).
- - Several fixes to the consumer timer optimization code, regressions introduced
- in v1.10.0. Go's timers are finicky
- ([#730](https://github.com/Shopify/sarama/pull/730),
- [#733](https://github.com/Shopify/sarama/pull/733),
- [#734](https://github.com/Shopify/sarama/pull/734)).
- - Handle consuming compressed relative offsets with Kafka 0.10
- ([#735](https://github.com/Shopify/sarama/pull/735)).
-
-#### Version 1.10.0 (2016-08-02)
-
-_Important:_ As of Sarama 1.10 it is necessary to tell Sarama the version of
-Kafka you are running against (via the `config.Version` value) in order to use
-features that may not be compatible with old Kafka versions. If you don't
-specify this value it will default to 0.8.2 (the minimum supported), and trying
-to use more recent features (like the offset manager) will fail with an error.
-
-_Also:_ The offset-manager's behaviour has been changed to match the upstream
-java consumer (see [#705](https://github.com/Shopify/sarama/pull/705) and
-[#713](https://github.com/Shopify/sarama/pull/713)). If you use the
-offset-manager, please ensure that you are committing one *greater* than the
-last consumed message offset or else you may end up consuming duplicate
-messages.
-
-New Features:
- - Support for Kafka 0.10
- ([#672](https://github.com/Shopify/sarama/pull/672),
- [#678](https://github.com/Shopify/sarama/pull/678),
- [#681](https://github.com/Shopify/sarama/pull/681), and others).
- - Support for configuring the target Kafka version
- ([#676](https://github.com/Shopify/sarama/pull/676)).
- - Batch producing support in the SyncProducer
- ([#677](https://github.com/Shopify/sarama/pull/677)).
- - Extend producer mock to allow setting expectations on message contents
- ([#667](https://github.com/Shopify/sarama/pull/667)).
-
-Improvements:
- - Support `nil` compressed messages for deleting in compacted topics
- ([#634](https://github.com/Shopify/sarama/pull/634)).
- - Pre-allocate decoding errors, greatly reducing heap usage and GC time against
- misbehaving brokers ([#690](https://github.com/Shopify/sarama/pull/690)).
- - Re-use consumer expiry timers, removing one allocation per consumed message
- ([#707](https://github.com/Shopify/sarama/pull/707)).
-
-Bug Fixes:
- - Actually default the client ID to "sarama" like we say we do
- ([#664](https://github.com/Shopify/sarama/pull/664)).
- - Fix a rare issue where `Client.Leader` could return the wrong error
- ([#685](https://github.com/Shopify/sarama/pull/685)).
- - Fix a possible tight loop in the consumer
- ([#693](https://github.com/Shopify/sarama/pull/693)).
- - Match upstream's offset-tracking behaviour
- ([#705](https://github.com/Shopify/sarama/pull/705)).
- - Report UnknownTopicOrPartition errors from the offset manager
- ([#706](https://github.com/Shopify/sarama/pull/706)).
- - Fix possible negative partition value from the HashPartitioner
- ([#709](https://github.com/Shopify/sarama/pull/709)).
-
-#### Version 1.9.0 (2016-05-16)
-
-New Features:
- - Add support for custom offset manager retention durations
- ([#602](https://github.com/Shopify/sarama/pull/602)).
- - Publish low-level mocks to enable testing of third-party producer/consumer
- implementations ([#570](https://github.com/Shopify/sarama/pull/570)).
- - Declare support for Golang 1.6
- ([#611](https://github.com/Shopify/sarama/pull/611)).
- - Support for SASL plain-text auth
- ([#648](https://github.com/Shopify/sarama/pull/648)).
-
-Improvements:
- - Simplified broker locking scheme slightly
- ([#604](https://github.com/Shopify/sarama/pull/604)).
- - Documentation cleanup
- ([#605](https://github.com/Shopify/sarama/pull/605),
- [#621](https://github.com/Shopify/sarama/pull/621),
- [#654](https://github.com/Shopify/sarama/pull/654)).
-
-Bug Fixes:
- - Fix race condition shutting down the OffsetManager
- ([#658](https://github.com/Shopify/sarama/pull/658)).
-
-#### Version 1.8.0 (2016-02-01)
-
-New Features:
- - Full support for Kafka 0.9:
- - All protocol messages and fields
- ([#586](https://github.com/Shopify/sarama/pull/586),
- [#588](https://github.com/Shopify/sarama/pull/588),
- [#590](https://github.com/Shopify/sarama/pull/590)).
- - Verified that TLS support works
- ([#581](https://github.com/Shopify/sarama/pull/581)).
- - Fixed the OffsetManager compatibility
- ([#585](https://github.com/Shopify/sarama/pull/585)).
-
-Improvements:
- - Optimize for fewer system calls when reading from the network
- ([#584](https://github.com/Shopify/sarama/pull/584)).
- - Automatically retry `InvalidMessage` errors to match upstream behaviour
- ([#589](https://github.com/Shopify/sarama/pull/589)).
-
-#### Version 1.7.0 (2015-12-11)
-
-New Features:
- - Preliminary support for Kafka 0.9
- ([#572](https://github.com/Shopify/sarama/pull/572)). This comes with several
- caveats:
- - Protocol-layer support is mostly in place
- ([#577](https://github.com/Shopify/sarama/pull/577)), however Kafka 0.9
-    renamed some messages and fields, which we did not rename in order to
-    preserve API compatibility.
- - The producer and consumer work against 0.9, but the offset manager does
- not ([#573](https://github.com/Shopify/sarama/pull/573)).
- - TLS support may or may not work
- ([#581](https://github.com/Shopify/sarama/pull/581)).
-
-Improvements:
- - Don't wait for request timeouts on dead brokers, greatly speeding recovery
- when the TCP connection is left hanging
- ([#548](https://github.com/Shopify/sarama/pull/548)).
- - Refactored part of the producer. The new version provides a much more elegant
- solution to [#449](https://github.com/Shopify/sarama/pull/449). It is also
- slightly more efficient, and much more precise in calculating batch sizes
- when compression is used
- ([#549](https://github.com/Shopify/sarama/pull/549),
- [#550](https://github.com/Shopify/sarama/pull/550),
- [#551](https://github.com/Shopify/sarama/pull/551)).
-
-Bug Fixes:
- - Fix race condition in consumer test mock
- ([#553](https://github.com/Shopify/sarama/pull/553)).
-
-#### Version 1.6.1 (2015-09-25)
-
-Bug Fixes:
- - Fix panic that could occur if a user-supplied message value failed to encode
- ([#449](https://github.com/Shopify/sarama/pull/449)).
-
-#### Version 1.6.0 (2015-09-04)
-
-New Features:
- - Implementation of a consumer offset manager using the APIs introduced in
- Kafka 0.8.2. The API is designed mainly for integration into a future
- high-level consumer, not for direct use, although it is *possible* to use it
- directly.
- ([#461](https://github.com/Shopify/sarama/pull/461)).
-
-Improvements:
- - CRC32 calculation is much faster on machines with SSE4.2 instructions,
- removing a major hotspot from most profiles
- ([#255](https://github.com/Shopify/sarama/pull/255)).
-
-Bug Fixes:
- - Make protocol decoding more robust against some malformed packets generated
- by go-fuzz ([#523](https://github.com/Shopify/sarama/pull/523),
- [#525](https://github.com/Shopify/sarama/pull/525)) or found in other ways
- ([#528](https://github.com/Shopify/sarama/pull/528)).
- - Fix a potential race condition panic in the consumer on shutdown
- ([#529](https://github.com/Shopify/sarama/pull/529)).
-
-#### Version 1.5.0 (2015-08-17)
-
-New Features:
- - TLS-encrypted network connections are now supported. This feature is subject
- to change when Kafka releases built-in TLS support, but for now this is
- enough to work with TLS-terminating proxies
- ([#154](https://github.com/Shopify/sarama/pull/154)).
-
-Improvements:
- - The consumer will not block if a single partition is not drained by the user;
- all other partitions will continue to consume normally
- ([#485](https://github.com/Shopify/sarama/pull/485)).
- - Formatting of error strings has been much improved
- ([#495](https://github.com/Shopify/sarama/pull/495)).
- - Internal refactoring of the producer for code cleanliness and to enable
- future work ([#300](https://github.com/Shopify/sarama/pull/300)).
-
-Bug Fixes:
- - Fix a potential deadlock in the consumer on shutdown
- ([#475](https://github.com/Shopify/sarama/pull/475)).
-
-#### Version 1.4.3 (2015-07-21)
-
-Bug Fixes:
- - Don't include the partitioner in the producer's "fetch partitions"
- circuit-breaker ([#466](https://github.com/Shopify/sarama/pull/466)).
- - Don't retry messages until the broker is closed when abandoning a broker in
- the producer ([#468](https://github.com/Shopify/sarama/pull/468)).
- - Update the import path for snappy-go, it has moved again and the API has
- changed slightly ([#486](https://github.com/Shopify/sarama/pull/486)).
-
-#### Version 1.4.2 (2015-05-27)
-
-Bug Fixes:
- - Update the import path for snappy-go, it has moved from google code to github
- ([#456](https://github.com/Shopify/sarama/pull/456)).
-
-#### Version 1.4.1 (2015-05-25)
-
-Improvements:
- - Optimizations when decoding snappy messages, thanks to John Potocny
- ([#446](https://github.com/Shopify/sarama/pull/446)).
-
-Bug Fixes:
- - Fix hypothetical race conditions on producer shutdown
- ([#450](https://github.com/Shopify/sarama/pull/450),
- [#451](https://github.com/Shopify/sarama/pull/451)).
-
-#### Version 1.4.0 (2015-05-01)
-
-New Features:
- - The consumer now implements `Topics()` and `Partitions()` methods to enable
- users to dynamically choose what topics/partitions to consume without
- instantiating a full client
- ([#431](https://github.com/Shopify/sarama/pull/431)).
- - The partition-consumer now exposes the high water mark offset value returned
- by the broker via the `HighWaterMarkOffset()` method ([#339](https://github.com/Shopify/sarama/pull/339)).
- - Added a `kafka-console-consumer` tool capable of handling multiple
- partitions, and deprecated the now-obsolete `kafka-console-partitionConsumer`
- ([#439](https://github.com/Shopify/sarama/pull/439),
- [#442](https://github.com/Shopify/sarama/pull/442)).
-
-Improvements:
- - The producer's logging during retry scenarios is more consistent, more
- useful, and slightly less verbose
- ([#429](https://github.com/Shopify/sarama/pull/429)).
- - The client now shuffles its initial list of seed brokers in order to prevent
- thundering herd on the first broker in the list
- ([#441](https://github.com/Shopify/sarama/pull/441)).
-
-Bug Fixes:
- - The producer now correctly manages its state if retries occur when it is
- shutting down, fixing several instances of confusing behaviour and at least
- one potential deadlock ([#419](https://github.com/Shopify/sarama/pull/419)).
- - The consumer now handles messages for different partitions asynchronously,
- making it much more resilient to specific user code ordering
- ([#325](https://github.com/Shopify/sarama/pull/325)).
-
-#### Version 1.3.0 (2015-04-16)
-
-New Features:
- - The client now tracks consumer group coordinators using
- ConsumerMetadataRequests similar to how it tracks partition leadership using
- regular MetadataRequests ([#411](https://github.com/Shopify/sarama/pull/411)).
- This adds two methods to the client API:
- - `Coordinator(consumerGroup string) (*Broker, error)`
- - `RefreshCoordinator(consumerGroup string) error`
-
-Improvements:
- - ConsumerMetadataResponses now automatically create a Broker object out of the
- ID/address/port combination for the Coordinator; accessing the fields
- individually has been deprecated
- ([#413](https://github.com/Shopify/sarama/pull/413)).
- - Much improved handling of `OffsetOutOfRange` errors in the consumer.
- Consumers will fail to start if the provided offset is out of range
- ([#418](https://github.com/Shopify/sarama/pull/418))
- and they will automatically shut down if the offset falls out of range
- ([#424](https://github.com/Shopify/sarama/pull/424)).
- - Small performance improvement in encoding and decoding protocol messages
- ([#427](https://github.com/Shopify/sarama/pull/427)).
-
-Bug Fixes:
- - Fix a rare race condition in the client's background metadata refresher if
- it happens to be activated while the client is being closed
- ([#422](https://github.com/Shopify/sarama/pull/422)).
-
-#### Version 1.2.0 (2015-04-07)
-
-Improvements:
- - The producer's behaviour when `Flush.Frequency` is set is now more intuitive
- ([#389](https://github.com/Shopify/sarama/pull/389)).
- - The producer is now somewhat more memory-efficient during and after retrying
- messages due to an improved queue implementation
- ([#396](https://github.com/Shopify/sarama/pull/396)).
- - The consumer produces much more useful logging output when leadership
- changes ([#385](https://github.com/Shopify/sarama/pull/385)).
- - The client's `GetOffset` method will now automatically refresh metadata and
- retry once in the event of stale information or similar
- ([#394](https://github.com/Shopify/sarama/pull/394)).
- - Broker connections now have support for using TCP keepalives
- ([#407](https://github.com/Shopify/sarama/issues/407)).
-
-Bug Fixes:
- - The OffsetCommitRequest message now correctly implements all three possible
- API versions ([#390](https://github.com/Shopify/sarama/pull/390),
- [#400](https://github.com/Shopify/sarama/pull/400)).
-
-#### Version 1.1.0 (2015-03-20)
-
-Improvements:
- - Wrap the producer's partitioner call in a circuit-breaker so that repeatedly
- broken topics don't choke throughput
- ([#373](https://github.com/Shopify/sarama/pull/373)).
-
-Bug Fixes:
- - Fix the producer's internal reference counting in certain unusual scenarios
- ([#367](https://github.com/Shopify/sarama/pull/367)).
- - Fix the consumer's internal reference counting in certain unusual scenarios
- ([#369](https://github.com/Shopify/sarama/pull/369)).
- - Fix a condition where the producer's internal control messages could have
- gotten stuck ([#368](https://github.com/Shopify/sarama/pull/368)).
- - Fix an issue where invalid partition lists would be cached when asking for
-   metadata for a non-existent topic ([#372](https://github.com/Shopify/sarama/pull/372)).
-
-
-#### Version 1.0.0 (2015-03-17)
-
-Version 1.0.0 is the first tagged version, and is almost a complete rewrite. The primary differences with previous untagged versions are:
-
-- The producer has been rewritten; there is now a `SyncProducer` with a blocking API, and an `AsyncProducer` that is non-blocking.
-- The consumer has been rewritten to only open one connection per broker instead of one connection per partition.
-- The main types of Sarama are now interfaces to make dependency injection easy; mock implementations for `Consumer`, `SyncProducer` and `AsyncProducer` are provided in the `github.com/Shopify/sarama/mocks` package.
-- For most use cases, it is no longer necessary to open a `Client`; this will be done for you.
-- All the configuration values have been unified in the `Config` struct.
-- Much improved test suite.
diff --git a/vendor/github.com/Shopify/sarama/MIT-LICENSE b/vendor/github.com/Shopify/sarama/MIT-LICENSE
deleted file mode 100644
index 8121b63b1c4..00000000000
--- a/vendor/github.com/Shopify/sarama/MIT-LICENSE
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2013 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/Shopify/sarama/Makefile b/vendor/github.com/Shopify/sarama/Makefile
deleted file mode 100644
index 626b09a5451..00000000000
--- a/vendor/github.com/Shopify/sarama/Makefile
+++ /dev/null
@@ -1,21 +0,0 @@
-default: fmt vet errcheck test
-
-test:
- go test -v -timeout 60s -race ./...
-
-vet:
- go vet ./...
-
-errcheck:
- errcheck github.com/Shopify/sarama/...
-
-fmt:
- @if [ -n "$$(go fmt ./...)" ]; then echo 'Please run go fmt on your code.' && exit 1; fi
-
-install_dependencies: install_errcheck get
-
-install_errcheck:
- go get github.com/kisielk/errcheck
-
-get:
- go get -t
diff --git a/vendor/github.com/Shopify/sarama/README.md b/vendor/github.com/Shopify/sarama/README.md
deleted file mode 100644
index c2968b92c35..00000000000
--- a/vendor/github.com/Shopify/sarama/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-sarama
-======
-
-[](https://godoc.org/github.com/Shopify/sarama)
-[](https://travis-ci.org/Shopify/sarama)
-
-Sarama is an MIT-licensed Go client library for [Apache Kafka](https://kafka.apache.org/) version 0.8 (and later).
-
-### Getting started
-
-- API documentation and examples are available via [godoc](https://godoc.org/github.com/Shopify/sarama).
-- Mocks for testing are available in the [mocks](./mocks) subpackage.
-- The [examples](./examples) directory contains more elaborate example applications.
-- The [tools](./tools) directory contains command line tools that can be useful for testing, diagnostics, and instrumentation.
-
-### Compatibility and API stability
-
-Sarama provides a "2 releases + 2 months" compatibility guarantee: we support
-the two latest stable releases of Kafka and Go, and we provide a two month
-grace period for older releases. This means we currently officially support
-Go 1.7 and 1.6, and Kafka 0.10.0 and 0.9.0, although older releases are
-still likely to work.
-
-Sarama follows semantic versioning and provides API stability via the gopkg.in service.
-You can import a version with a guaranteed stable API via http://gopkg.in/Shopify/sarama.v1.
-A changelog is available [here](CHANGELOG.md).
-
-### Contributing
-
-* Get started by checking our [contribution guidelines](https://github.com/Shopify/sarama/blob/master/CONTRIBUTING.md).
-* Read the [Sarama wiki](https://github.com/Shopify/sarama/wiki) for more
- technical and design details.
-* The [Kafka Protocol Specification](https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol)
- contains a wealth of useful information.
-* For more general issues, there is [a google group](https://groups.google.com/forum/#!forum/kafka-clients) for Kafka client developers.
-* If you have any questions, just ask!
diff --git a/vendor/github.com/Shopify/sarama/Vagrantfile b/vendor/github.com/Shopify/sarama/Vagrantfile
deleted file mode 100644
index f4b848a301b..00000000000
--- a/vendor/github.com/Shopify/sarama/Vagrantfile
+++ /dev/null
@@ -1,20 +0,0 @@
-# -*- mode: ruby -*-
-# vi: set ft=ruby :
-
-# Vagrantfile API/syntax version. Don't touch unless you know what you're doing!
-VAGRANTFILE_API_VERSION = "2"
-
-# We have 5 * 192MB ZK processes and 5 * 320MB Kafka processes => 2560MB
-MEMORY = 3072
-
-Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
- config.vm.box = "ubuntu/trusty64"
-
- config.vm.provision :shell, path: "vagrant/provision.sh"
-
- config.vm.network "private_network", ip: "192.168.100.67"
-
- config.vm.provider "virtualbox" do |v|
- v.memory = MEMORY
- end
-end
diff --git a/vendor/github.com/Shopify/sarama/api_versions_request.go b/vendor/github.com/Shopify/sarama/api_versions_request.go
deleted file mode 100644
index ab65f01ccff..00000000000
--- a/vendor/github.com/Shopify/sarama/api_versions_request.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package sarama
-
-type ApiVersionsRequest struct {
-}
-
-func (r *ApiVersionsRequest) encode(pe packetEncoder) error {
- return nil
-}
-
-func (r *ApiVersionsRequest) decode(pd packetDecoder, version int16) (err error) {
- return nil
-}
-
-func (r *ApiVersionsRequest) key() int16 {
- return 18
-}
-
-func (r *ApiVersionsRequest) version() int16 {
- return 0
-}
-
-func (r *ApiVersionsRequest) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/api_versions_response.go b/vendor/github.com/Shopify/sarama/api_versions_response.go
deleted file mode 100644
index 16d62db2d30..00000000000
--- a/vendor/github.com/Shopify/sarama/api_versions_response.go
+++ /dev/null
@@ -1,86 +0,0 @@
-package sarama
-
-type ApiVersionsResponseBlock struct {
- ApiKey int16
- MinVersion int16
- MaxVersion int16
-}
-
-func (b *ApiVersionsResponseBlock) encode(pe packetEncoder) error {
- pe.putInt16(b.ApiKey)
- pe.putInt16(b.MinVersion)
- pe.putInt16(b.MaxVersion)
- return nil
-}
-
-func (b *ApiVersionsResponseBlock) decode(pd packetDecoder) error {
- var err error
-
- if b.ApiKey, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MinVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- if b.MaxVersion, err = pd.getInt16(); err != nil {
- return err
- }
-
- return nil
-}
-
-type ApiVersionsResponse struct {
- Err KError
- ApiVersions []*ApiVersionsResponseBlock
-}
-
-func (r *ApiVersionsResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- if err := pe.putArrayLength(len(r.ApiVersions)); err != nil {
- return err
- }
- for _, apiVersion := range r.ApiVersions {
- if err := apiVersion.encode(pe); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *ApiVersionsResponse) decode(pd packetDecoder, version int16) error {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- r.Err = KError(kerr)
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.ApiVersions = make([]*ApiVersionsResponseBlock, numBlocks)
- for i := 0; i < numBlocks; i++ {
- block := new(ApiVersionsResponseBlock)
- if err := block.decode(pd); err != nil {
- return err
- }
- r.ApiVersions[i] = block
- }
-
- return nil
-}
-
-func (r *ApiVersionsResponse) key() int16 {
- return 18
-}
-
-func (r *ApiVersionsResponse) version() int16 {
- return 0
-}
-
-func (r *ApiVersionsResponse) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/async_producer.go b/vendor/github.com/Shopify/sarama/async_producer.go
deleted file mode 100644
index 3af47fddd8e..00000000000
--- a/vendor/github.com/Shopify/sarama/async_producer.go
+++ /dev/null
@@ -1,905 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "sync"
- "time"
-
- "github.com/eapache/go-resiliency/breaker"
- "github.com/eapache/queue"
-)
-
-// AsyncProducer publishes Kafka messages using a non-blocking API. It routes messages
-// to the correct broker for the provided topic-partition, refreshing metadata as appropriate,
-// and parses responses for errors. You must read from the Errors() channel or the
-// producer will deadlock. You must call Close() or AsyncClose() on a producer to avoid
-// leaks: it will not be garbage-collected automatically when it passes out of
-// scope.
-type AsyncProducer interface {
-
- // AsyncClose triggers a shutdown of the producer, flushing any messages it may
- // have buffered. The shutdown has completed when both the Errors and Successes
- // channels have been closed. When calling AsyncClose, you *must* continue to
- // read from those channels in order to drain the results of any messages in
- // flight.
- AsyncClose()
-
- // Close shuts down the producer and flushes any messages it may have buffered.
- // You must call this function before a producer object passes out of scope, as
- // it may otherwise leak memory. You must call this before calling Close on the
- // underlying client.
- Close() error
-
- // Input is the input channel for the user to write messages to that they
- // wish to send.
- Input() chan<- *ProducerMessage
-
- // Successes is the success output channel back to the user when AckSuccesses is
- // enabled. If Return.Successes is true, you MUST read from this channel or the
- // Producer will deadlock. It is suggested that you send and read messages
- // together in a single select statement.
- Successes() <-chan *ProducerMessage
-
- // Errors is the error output channel back to the user. You MUST read from this
- // channel or the Producer will deadlock when the channel is full. Alternatively,
- // you can set Producer.Return.Errors in your config to false, which prevents
-	// you can set Producer.Return.Errors in your config to false, which prevents
-	// errors from being returned.
-}
-
-type asyncProducer struct {
- client Client
- conf *Config
- ownClient bool
-
- errors chan *ProducerError
- input, successes, retries chan *ProducerMessage
- inFlight sync.WaitGroup
-
- brokers map[*Broker]chan<- *ProducerMessage
- brokerRefs map[chan<- *ProducerMessage]int
- brokerLock sync.Mutex
-}
-
-// NewAsyncProducer creates a new AsyncProducer using the given broker addresses and configuration.
-func NewAsyncProducer(addrs []string, conf *Config) (AsyncProducer, error) {
- client, err := NewClient(addrs, conf)
- if err != nil {
- return nil, err
- }
-
- p, err := NewAsyncProducerFromClient(client)
- if err != nil {
- return nil, err
- }
- p.(*asyncProducer).ownClient = true
- return p, nil
-}
-
-// NewAsyncProducerFromClient creates a new Producer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewAsyncProducerFromClient(client Client) (AsyncProducer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- p := &asyncProducer{
- client: client,
- conf: client.Config(),
- errors: make(chan *ProducerError),
- input: make(chan *ProducerMessage),
- successes: make(chan *ProducerMessage),
- retries: make(chan *ProducerMessage),
- brokers: make(map[*Broker]chan<- *ProducerMessage),
- brokerRefs: make(map[chan<- *ProducerMessage]int),
- }
-
- // launch our singleton dispatchers
- go withRecover(p.dispatcher)
- go withRecover(p.retryHandler)
-
- return p, nil
-}
-
-type flagSet int8
-
-const (
- syn flagSet = 1 << iota // first message from partitionProducer to brokerProducer
- fin // final message from partitionProducer to brokerProducer and back
- shutdown // start the shutdown process
-)
-
-// ProducerMessage is the collection of elements passed to the Producer in order to send a message.
-type ProducerMessage struct {
- Topic string // The Kafka topic for this message.
- // The partitioning key for this message. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Key Encoder
- // The actual message to store in Kafka. Pre-existing Encoders include
- // StringEncoder and ByteEncoder.
- Value Encoder
-
- // This field is used to hold arbitrary data you wish to include so it
- // will be available when receiving on the Successes and Errors channels.
-	// Sarama completely ignores this field; it is only to be used for
-	// pass-through data.
- Metadata interface{}
-
- // Below this point are filled in by the producer as the message is processed
-
- // Offset is the offset of the message stored on the broker. This is only
- // guaranteed to be defined if the message was successfully delivered and
- // RequiredAcks is not NoResponse.
- Offset int64
- // Partition is the partition that the message was sent to. This is only
- // guaranteed to be defined if the message was successfully delivered.
- Partition int32
- // Timestamp is the timestamp assigned to the message by the broker. This
- // is only guaranteed to be defined if the message was successfully
- // delivered, RequiredAcks is not NoResponse, and the Kafka broker is at
- // least version 0.10.0.
- Timestamp time.Time
-
- retries int
- flags flagSet
-}
-
-const producerMessageOverhead = 26 // the metadata overhead of CRC, flags, etc.
-
-func (m *ProducerMessage) byteSize() int {
- size := producerMessageOverhead
- if m.Key != nil {
- size += m.Key.Length()
- }
- if m.Value != nil {
- size += m.Value.Length()
- }
- return size
-}
-
-func (m *ProducerMessage) clear() {
- m.flags = 0
- m.retries = 0
-}
-
-// ProducerError is the type of error generated when the producer fails to deliver a message.
-// It contains the original ProducerMessage as well as the actual error value.
-type ProducerError struct {
- Msg *ProducerMessage
- Err error
-}
-
-func (pe ProducerError) Error() string {
- return fmt.Sprintf("kafka: Failed to produce message to topic %s: %s", pe.Msg.Topic, pe.Err)
-}
-
-// ProducerErrors is a type that wraps a batch of "ProducerError"s and implements the Error interface.
-// It can be returned from the Producer's Close method to avoid the need to manually drain the Errors channel
-// when closing a producer.
-type ProducerErrors []*ProducerError
-
-func (pe ProducerErrors) Error() string {
- return fmt.Sprintf("kafka: Failed to deliver %d messages.", len(pe))
-}
-
-func (p *asyncProducer) Errors() <-chan *ProducerError {
- return p.errors
-}
-
-func (p *asyncProducer) Successes() <-chan *ProducerMessage {
- return p.successes
-}
-
-func (p *asyncProducer) Input() chan<- *ProducerMessage {
- return p.input
-}
-
-func (p *asyncProducer) Close() error {
- p.AsyncClose()
-
- if p.conf.Producer.Return.Successes {
- go withRecover(func() {
- for _ = range p.successes {
- }
- })
- }
-
- var errors ProducerErrors
- if p.conf.Producer.Return.Errors {
- for event := range p.errors {
- errors = append(errors, event)
- }
- } else {
- <-p.errors
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (p *asyncProducer) AsyncClose() {
- go withRecover(p.shutdown)
-}
-
-// singleton
-// dispatches messages by topic
-func (p *asyncProducer) dispatcher() {
- handlers := make(map[string]chan<- *ProducerMessage)
- shuttingDown := false
-
- for msg := range p.input {
- if msg == nil {
- Logger.Println("Something tried to send a nil message, it was ignored.")
- continue
- }
-
- if msg.flags&shutdown != 0 {
- shuttingDown = true
- p.inFlight.Done()
- continue
- } else if msg.retries == 0 {
- if shuttingDown {
- // we can't just call returnError here because that decrements the wait group,
- // which hasn't been incremented yet for this message, and shouldn't be
- pErr := &ProducerError{Msg: msg, Err: ErrShuttingDown}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- continue
- }
- p.inFlight.Add(1)
- }
-
- if msg.byteSize() > p.conf.Producer.MaxMessageBytes {
- p.returnError(msg, ErrMessageSizeTooLarge)
- continue
- }
-
- handler := handlers[msg.Topic]
- if handler == nil {
- handler = p.newTopicProducer(msg.Topic)
- handlers[msg.Topic] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range handlers {
- close(handler)
- }
-}
-
-// one per topic
-// partitions messages, then dispatches them by partition
-type topicProducer struct {
- parent *asyncProducer
- topic string
- input <-chan *ProducerMessage
-
- breaker *breaker.Breaker
- handlers map[int32]chan<- *ProducerMessage
- partitioner Partitioner
-}
-
-func (p *asyncProducer) newTopicProducer(topic string) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- tp := &topicProducer{
- parent: p,
- topic: topic,
- input: input,
- breaker: breaker.New(3, 1, 10*time.Second),
- handlers: make(map[int32]chan<- *ProducerMessage),
- partitioner: p.conf.Producer.Partitioner(topic),
- }
- go withRecover(tp.dispatch)
- return input
-}
-
-func (tp *topicProducer) dispatch() {
- for msg := range tp.input {
- if msg.retries == 0 {
- if err := tp.partitionMessage(msg); err != nil {
- tp.parent.returnError(msg, err)
- continue
- }
- }
-
- handler := tp.handlers[msg.Partition]
- if handler == nil {
- handler = tp.parent.newPartitionProducer(msg.Topic, msg.Partition)
- tp.handlers[msg.Partition] = handler
- }
-
- handler <- msg
- }
-
- for _, handler := range tp.handlers {
- close(handler)
- }
-}
-
-func (tp *topicProducer) partitionMessage(msg *ProducerMessage) error {
- var partitions []int32
-
- err := tp.breaker.Run(func() (err error) {
- if tp.partitioner.RequiresConsistency() {
- partitions, err = tp.parent.client.Partitions(msg.Topic)
- } else {
- partitions, err = tp.parent.client.WritablePartitions(msg.Topic)
- }
- return
- })
-
- if err != nil {
- return err
- }
-
- numPartitions := int32(len(partitions))
-
- if numPartitions == 0 {
- return ErrLeaderNotAvailable
- }
-
- choice, err := tp.partitioner.Partition(msg, numPartitions)
-
- if err != nil {
- return err
- } else if choice < 0 || choice >= numPartitions {
- return ErrInvalidPartition
- }
-
- msg.Partition = partitions[choice]
-
- return nil
-}
-
-// one per partition per topic
-// dispatches messages to the appropriate broker
-// also responsible for maintaining message order during retries
-type partitionProducer struct {
- parent *asyncProducer
- topic string
- partition int32
- input <-chan *ProducerMessage
-
- leader *Broker
- breaker *breaker.Breaker
- output chan<- *ProducerMessage
-
- // highWatermark tracks the "current" retry level, which is the only one where we actually let messages through,
- // all other messages get buffered in retryState[msg.retries].buf to preserve ordering
- // retryState[msg.retries].expectChaser simply tracks whether we've seen a fin message for a given level (and
- // therefore whether our buffer is complete and safe to flush)
- highWatermark int
- retryState []partitionRetryState
-}
-
-type partitionRetryState struct {
- buf []*ProducerMessage
- expectChaser bool
-}
-
-func (p *asyncProducer) newPartitionProducer(topic string, partition int32) chan<- *ProducerMessage {
- input := make(chan *ProducerMessage, p.conf.ChannelBufferSize)
- pp := &partitionProducer{
- parent: p,
- topic: topic,
- partition: partition,
- input: input,
-
- breaker: breaker.New(3, 1, 10*time.Second),
- retryState: make([]partitionRetryState, p.conf.Producer.Retry.Max+1),
- }
- go withRecover(pp.dispatch)
- return input
-}
-
-func (pp *partitionProducer) dispatch() {
- // try to prefetch the leader; if this doesn't work, we'll do a proper call to `updateLeader`
- // on the first message
- pp.leader, _ = pp.parent.client.Leader(pp.topic, pp.partition)
- if pp.leader != nil {
- pp.output = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
- }
-
- for msg := range pp.input {
- if msg.retries > pp.highWatermark {
- // a new, higher, retry level; handle it and then back off
- pp.newHighWatermark(msg.retries)
- time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
- } else if pp.highWatermark > 0 {
- // we are retrying something (else highWatermark would be 0) but this message is not a *new* retry level
- if msg.retries < pp.highWatermark {
-				// in fact this message is not even the current retry level, so buffer it for now (unless it's just a fin)
- if msg.flags&fin == fin {
- pp.retryState[msg.retries].expectChaser = false
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- } else {
- pp.retryState[msg.retries].buf = append(pp.retryState[msg.retries].buf, msg)
- }
- continue
- } else if msg.flags&fin == fin {
- // this message is of the current retry level (msg.retries == highWatermark) and the fin flag is set,
- // meaning this retry level is done and we can go down (at least) one level and flush that
- pp.retryState[pp.highWatermark].expectChaser = false
- pp.flushRetryBuffers()
- pp.parent.inFlight.Done() // this fin is now handled and will be garbage collected
- continue
- }
- }
-
- // if we made it this far then the current msg contains real data, and can be sent to the next goroutine
- // without breaking any of our ordering guarantees
-
- if pp.output == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnError(msg, err)
- time.Sleep(pp.parent.conf.Producer.Retry.Backoff)
- continue
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- pp.output <- msg
- }
-
- if pp.output != nil {
- pp.parent.unrefBrokerProducer(pp.leader, pp.output)
- }
-}
-
-func (pp *partitionProducer) newHighWatermark(hwm int) {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, hwm)
- pp.highWatermark = hwm
-
- // send off a fin so that we know when everything "in between" has made it
- // back to us and we can safely flush the backlog (otherwise we risk re-ordering messages)
- pp.retryState[pp.highWatermark].expectChaser = true
- pp.parent.inFlight.Add(1) // we're generating a fin message; track it so we don't shut down while it's still inflight
- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: fin, retries: pp.highWatermark - 1}
-
- // a new HWM means that our current broker selection is out of date
- Logger.Printf("producer/leader/%s/%d abandoning broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- pp.parent.unrefBrokerProducer(pp.leader, pp.output)
- pp.output = nil
-}
-
-func (pp *partitionProducer) flushRetryBuffers() {
- Logger.Printf("producer/leader/%s/%d state change to [flushing-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- for {
- pp.highWatermark--
-
- if pp.output == nil {
- if err := pp.updateLeader(); err != nil {
- pp.parent.returnErrors(pp.retryState[pp.highWatermark].buf, err)
- goto flushDone
- }
- Logger.Printf("producer/leader/%s/%d selected broker %d\n", pp.topic, pp.partition, pp.leader.ID())
- }
-
- for _, msg := range pp.retryState[pp.highWatermark].buf {
- pp.output <- msg
- }
-
- flushDone:
- pp.retryState[pp.highWatermark].buf = nil
- if pp.retryState[pp.highWatermark].expectChaser {
- Logger.Printf("producer/leader/%s/%d state change to [retrying-%d]\n", pp.topic, pp.partition, pp.highWatermark)
- break
- } else if pp.highWatermark == 0 {
- Logger.Printf("producer/leader/%s/%d state change to [normal]\n", pp.topic, pp.partition)
- break
- }
- }
-}
-
-func (pp *partitionProducer) updateLeader() error {
- return pp.breaker.Run(func() (err error) {
- if err = pp.parent.client.RefreshMetadata(pp.topic); err != nil {
- return err
- }
-
- if pp.leader, err = pp.parent.client.Leader(pp.topic, pp.partition); err != nil {
- return err
- }
-
- pp.output = pp.parent.getBrokerProducer(pp.leader)
- pp.parent.inFlight.Add(1) // we're generating a syn message; track it so we don't shut down while it's still inflight
- pp.output <- &ProducerMessage{Topic: pp.topic, Partition: pp.partition, flags: syn}
-
- return nil
- })
-}
-
-// one per broker; also constructs an associated flusher
-func (p *asyncProducer) newBrokerProducer(broker *Broker) chan<- *ProducerMessage {
- var (
- input = make(chan *ProducerMessage)
- bridge = make(chan *produceSet)
- responses = make(chan *brokerProducerResponse)
- )
-
- bp := &brokerProducer{
- parent: p,
- broker: broker,
- input: input,
- output: bridge,
- responses: responses,
- buffer: newProduceSet(p),
- currentRetries: make(map[string]map[int32]error),
- }
- go withRecover(bp.run)
-
- // minimal bridge to make the network response `select`able
- go withRecover(func() {
- for set := range bridge {
- request := set.buildRequest()
-
- response, err := broker.Produce(request)
-
- responses <- &brokerProducerResponse{
- set: set,
- err: err,
- res: response,
- }
- }
- close(responses)
- })
-
- return input
-}
-
-type brokerProducerResponse struct {
- set *produceSet
- err error
- res *ProduceResponse
-}
-
-// groups messages together into appropriately-sized batches for sending to the broker
-// handles state related to retries etc
-type brokerProducer struct {
- parent *asyncProducer
- broker *Broker
-
- input <-chan *ProducerMessage
- output chan<- *produceSet
- responses <-chan *brokerProducerResponse
-
- buffer *produceSet
- timer <-chan time.Time
- timerFired bool
-
- closing error
- currentRetries map[string]map[int32]error
-}
-
-func (bp *brokerProducer) run() {
- var output chan<- *produceSet
- Logger.Printf("producer/broker/%d starting up\n", bp.broker.ID())
-
- for {
- select {
- case msg := <-bp.input:
- if msg == nil {
- bp.shutdown()
- return
- }
-
- if msg.flags&syn == syn {
- Logger.Printf("producer/broker/%d state change to [open] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- if bp.currentRetries[msg.Topic] == nil {
- bp.currentRetries[msg.Topic] = make(map[int32]error)
- }
- bp.currentRetries[msg.Topic][msg.Partition] = nil
- bp.parent.inFlight.Done()
- continue
- }
-
- if reason := bp.needsRetry(msg); reason != nil {
- bp.parent.retryMessage(msg, reason)
-
- if bp.closing == nil && msg.flags&fin == fin {
- // we were retrying this partition but we can start processing again
- delete(bp.currentRetries[msg.Topic], msg.Partition)
- Logger.Printf("producer/broker/%d state change to [closed] on %s/%d\n",
- bp.broker.ID(), msg.Topic, msg.Partition)
- }
-
- continue
- }
-
- if bp.buffer.wouldOverflow(msg) {
- if err := bp.waitForSpace(msg); err != nil {
- bp.parent.retryMessage(msg, err)
- continue
- }
- }
-
- if err := bp.buffer.add(msg); err != nil {
- bp.parent.returnError(msg, err)
- continue
- }
-
- if bp.parent.conf.Producer.Flush.Frequency > 0 && bp.timer == nil {
- bp.timer = time.After(bp.parent.conf.Producer.Flush.Frequency)
- }
- case <-bp.timer:
- bp.timerFired = true
- case output <- bp.buffer:
- bp.rollOver()
- case response := <-bp.responses:
- bp.handleResponse(response)
- }
-
- if bp.timerFired || bp.buffer.readyToFlush() {
- output = bp.output
- } else {
- output = nil
- }
- }
-}
-
-func (bp *brokerProducer) shutdown() {
- for !bp.buffer.empty() {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- case bp.output <- bp.buffer:
- bp.rollOver()
- }
- }
- close(bp.output)
- for response := range bp.responses {
- bp.handleResponse(response)
- }
-
- Logger.Printf("producer/broker/%d shut down\n", bp.broker.ID())
-}
-
-func (bp *brokerProducer) needsRetry(msg *ProducerMessage) error {
- if bp.closing != nil {
- return bp.closing
- }
-
- return bp.currentRetries[msg.Topic][msg.Partition]
-}
-
-func (bp *brokerProducer) waitForSpace(msg *ProducerMessage) error {
- Logger.Printf("producer/broker/%d maximum request accumulated, waiting for space\n", bp.broker.ID())
-
- for {
- select {
- case response := <-bp.responses:
- bp.handleResponse(response)
- // handling a response can change our state, so re-check some things
- if reason := bp.needsRetry(msg); reason != nil {
- return reason
- } else if !bp.buffer.wouldOverflow(msg) {
- return nil
- }
- case bp.output <- bp.buffer:
- bp.rollOver()
- return nil
- }
- }
-}
-
-func (bp *brokerProducer) rollOver() {
- bp.timer = nil
- bp.timerFired = false
- bp.buffer = newProduceSet(bp.parent)
-}
-
-func (bp *brokerProducer) handleResponse(response *brokerProducerResponse) {
- if response.err != nil {
- bp.handleError(response.set, response.err)
- } else {
- bp.handleSuccess(response.set, response.res)
- }
-
- if bp.buffer.empty() {
- bp.rollOver() // this can happen if the response invalidated our buffer
- }
-}
-
-func (bp *brokerProducer) handleSuccess(sent *produceSet, response *ProduceResponse) {
- // we iterate through the blocks in the request set, not the response, so that we notice
- // if the response is missing a block completely
- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
- if response == nil {
- // this only happens when RequiredAcks is NoResponse, so we have to assume success
- bp.parent.returnSuccesses(msgs)
- return
- }
-
- block := response.GetBlock(topic, partition)
- if block == nil {
- bp.parent.returnErrors(msgs, ErrIncompleteResponse)
- return
- }
-
- switch block.Err {
- // Success
- case ErrNoError:
- if bp.parent.conf.Version.IsAtLeast(V0_10_0_0) && !block.Timestamp.IsZero() {
- for _, msg := range msgs {
- msg.Timestamp = block.Timestamp
- }
- }
- for i, msg := range msgs {
- msg.Offset = block.Offset + int64(i)
- }
- bp.parent.returnSuccesses(msgs)
- // Retriable errors
- case ErrInvalidMessage, ErrUnknownTopicOrPartition, ErrLeaderNotAvailable, ErrNotLeaderForPartition,
- ErrRequestTimedOut, ErrNotEnoughReplicas, ErrNotEnoughReplicasAfterAppend:
- Logger.Printf("producer/broker/%d state change to [retrying] on %s/%d because %v\n",
- bp.broker.ID(), topic, partition, block.Err)
- bp.currentRetries[topic][partition] = block.Err
- bp.parent.retryMessages(msgs, block.Err)
- bp.parent.retryMessages(bp.buffer.dropPartition(topic, partition), block.Err)
- // Other non-retriable errors
- default:
- bp.parent.returnErrors(msgs, block.Err)
- }
- })
-}
-
-func (bp *brokerProducer) handleError(sent *produceSet, err error) {
- switch err.(type) {
- case PacketEncodingError:
- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
- bp.parent.returnErrors(msgs, err)
- })
- default:
- Logger.Printf("producer/broker/%d state change to [closing] because %s\n", bp.broker.ID(), err)
- bp.parent.abandonBrokerConnection(bp.broker)
- _ = bp.broker.Close()
- bp.closing = err
- sent.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
- bp.parent.retryMessages(msgs, err)
- })
- bp.buffer.eachPartition(func(topic string, partition int32, msgs []*ProducerMessage) {
- bp.parent.retryMessages(msgs, err)
- })
- bp.rollOver()
- }
-}
-
-// singleton
-// effectively a "bridge" between the flushers and the dispatcher in order to avoid deadlock
-// based on https://godoc.org/github.com/eapache/channels#InfiniteChannel
-func (p *asyncProducer) retryHandler() {
- var msg *ProducerMessage
- buf := queue.New()
-
- for {
- if buf.Length() == 0 {
- msg = <-p.retries
- } else {
- select {
- case msg = <-p.retries:
- case p.input <- buf.Peek().(*ProducerMessage):
- buf.Remove()
- continue
- }
- }
-
- if msg == nil {
- return
- }
-
- buf.Add(msg)
- }
-}
-
-// utility functions
-
-func (p *asyncProducer) shutdown() {
- Logger.Println("Producer shutting down.")
- p.inFlight.Add(1)
- p.input <- &ProducerMessage{flags: shutdown}
-
- p.inFlight.Wait()
-
- if p.ownClient {
- err := p.client.Close()
- if err != nil {
- Logger.Println("producer/shutdown failed to close the embedded client:", err)
- }
- }
-
- close(p.input)
- close(p.retries)
- close(p.errors)
- close(p.successes)
-}
-
-func (p *asyncProducer) returnError(msg *ProducerMessage, err error) {
- msg.clear()
- pErr := &ProducerError{Msg: msg, Err: err}
- if p.conf.Producer.Return.Errors {
- p.errors <- pErr
- } else {
- Logger.Println(pErr)
- }
- p.inFlight.Done()
-}
-
-func (p *asyncProducer) returnErrors(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.returnError(msg, err)
- }
-}
-
-func (p *asyncProducer) returnSuccesses(batch []*ProducerMessage) {
- for _, msg := range batch {
- if p.conf.Producer.Return.Successes {
- msg.clear()
- p.successes <- msg
- }
- p.inFlight.Done()
- }
-}
-
-func (p *asyncProducer) retryMessage(msg *ProducerMessage, err error) {
- if msg.retries >= p.conf.Producer.Retry.Max {
- p.returnError(msg, err)
- } else {
- msg.retries++
- p.retries <- msg
- }
-}
-
-func (p *asyncProducer) retryMessages(batch []*ProducerMessage, err error) {
- for _, msg := range batch {
- p.retryMessage(msg, err)
- }
-}
-
-func (p *asyncProducer) getBrokerProducer(broker *Broker) chan<- *ProducerMessage {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- bp := p.brokers[broker]
-
- if bp == nil {
- bp = p.newBrokerProducer(broker)
- p.brokers[broker] = bp
- p.brokerRefs[bp] = 0
- }
-
- p.brokerRefs[bp]++
-
- return bp
-}
-
-func (p *asyncProducer) unrefBrokerProducer(broker *Broker, bp chan<- *ProducerMessage) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- p.brokerRefs[bp]--
- if p.brokerRefs[bp] == 0 {
- close(bp)
- delete(p.brokerRefs, bp)
-
- if p.brokers[broker] == bp {
- delete(p.brokers, broker)
- }
- }
-}
-
-func (p *asyncProducer) abandonBrokerConnection(broker *Broker) {
- p.brokerLock.Lock()
- defer p.brokerLock.Unlock()
-
- delete(p.brokers, broker)
-}
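The interface documentation in the file deleted above is explicit that an AsyncProducer deadlocks unless Errors() (and Successes(), when Return.Successes is enabled) is drained, and it suggests pairing sends and reads. A minimal sketch of that usage against sarama's public API; the broker address and topic are placeholders:

    package main

    import (
        "log"

        "github.com/Shopify/sarama"
    )

    func main() {
        config := sarama.NewConfig()
        config.Producer.Return.Successes = true // we read Successes below

        producer, err := sarama.NewAsyncProducer([]string{"localhost:9092"}, config)
        if err != nil {
            log.Fatal(err)
        }
        defer func() { _ = producer.Close() }()

        producer.Input() <- &sarama.ProducerMessage{
            Topic: "example",
            Value: sarama.StringEncoder("hello"),
        }

        // Exactly one success or error comes back per message; failing to
        // read these channels is the deadlock the docs above warn about.
        select {
        case msg := <-producer.Successes():
            log.Printf("delivered to partition %d at offset %d", msg.Partition, msg.Offset)
        case perr := <-producer.Errors():
            log.Printf("delivery failed: %v", perr.Err)
        }
    }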
diff --git a/vendor/github.com/Shopify/sarama/broker.go b/vendor/github.com/Shopify/sarama/broker.go
deleted file mode 100644
index 89beecc8648..00000000000
--- a/vendor/github.com/Shopify/sarama/broker.go
+++ /dev/null
@@ -1,674 +0,0 @@
-package sarama
-
-import (
- "crypto/tls"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// Broker represents a single Kafka broker connection. All operations on this object are entirely concurrency-safe.
-type Broker struct {
- id int32
- addr string
-
- conf *Config
- correlationID int32
- conn net.Conn
- connErr error
- lock sync.Mutex
- opened int32
-
- responses chan responsePromise
- done chan bool
-
- incomingByteRate metrics.Meter
- requestRate metrics.Meter
- requestSize metrics.Histogram
- requestLatency metrics.Histogram
- outgoingByteRate metrics.Meter
- responseRate metrics.Meter
- responseSize metrics.Histogram
- brokerIncomingByteRate metrics.Meter
- brokerRequestRate metrics.Meter
- brokerRequestSize metrics.Histogram
- brokerRequestLatency metrics.Histogram
- brokerOutgoingByteRate metrics.Meter
- brokerResponseRate metrics.Meter
- brokerResponseSize metrics.Histogram
-}
-
-type responsePromise struct {
- requestTime time.Time
- correlationID int32
- packets chan []byte
- errors chan error
-}
-
-// NewBroker creates and returns a Broker targeting the given host:port address.
-// This does not attempt to actually connect, you have to call Open() for that.
-func NewBroker(addr string) *Broker {
- return &Broker{id: -1, addr: addr}
-}
-
-// Open tries to connect to the Broker if it is not already connected or connecting, but does not block
-// waiting for the connection to complete. This means that any subsequent operations on the broker will
-// block waiting for the connection to succeed or fail. To get the effect of a fully synchronous Open call,
-// follow it by a call to Connected(). The only errors Open will return directly are ConfigurationError or
-// AlreadyConnected. If conf is nil, the result of NewConfig() is used.
-func (b *Broker) Open(conf *Config) error {
- if !atomic.CompareAndSwapInt32(&b.opened, 0, 1) {
- return ErrAlreadyConnected
- }
-
- if conf == nil {
- conf = NewConfig()
- }
-
- err := conf.Validate()
- if err != nil {
- return err
- }
-
- b.lock.Lock()
-
- go withRecover(func() {
- defer b.lock.Unlock()
-
- dialer := net.Dialer{
- Timeout: conf.Net.DialTimeout,
- KeepAlive: conf.Net.KeepAlive,
- }
-
- if conf.Net.TLS.Enable {
- b.conn, b.connErr = tls.DialWithDialer(&dialer, "tcp", b.addr, conf.Net.TLS.Config)
- } else {
- b.conn, b.connErr = dialer.Dial("tcp", b.addr)
- }
- if b.connErr != nil {
- Logger.Printf("Failed to connect to broker %s: %s\n", b.addr, b.connErr)
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- b.conn = newBufConn(b.conn)
-
- b.conf = conf
-
- // Create or reuse the global metrics shared between brokers
- b.incomingByteRate = metrics.GetOrRegisterMeter("incoming-byte-rate", conf.MetricRegistry)
- b.requestRate = metrics.GetOrRegisterMeter("request-rate", conf.MetricRegistry)
- b.requestSize = getOrRegisterHistogram("request-size", conf.MetricRegistry)
- b.requestLatency = getOrRegisterHistogram("request-latency-in-ms", conf.MetricRegistry)
- b.outgoingByteRate = metrics.GetOrRegisterMeter("outgoing-byte-rate", conf.MetricRegistry)
- b.responseRate = metrics.GetOrRegisterMeter("response-rate", conf.MetricRegistry)
- b.responseSize = getOrRegisterHistogram("response-size", conf.MetricRegistry)
-		// Do not gather broker-specific metrics for seed brokers (only used during bootstrap)
-		// because they share the same ID (-1) and are already exposed through the global metrics above
- if b.id >= 0 {
- b.brokerIncomingByteRate = getOrRegisterBrokerMeter("incoming-byte-rate", b, conf.MetricRegistry)
- b.brokerRequestRate = getOrRegisterBrokerMeter("request-rate", b, conf.MetricRegistry)
- b.brokerRequestSize = getOrRegisterBrokerHistogram("request-size", b, conf.MetricRegistry)
- b.brokerRequestLatency = getOrRegisterBrokerHistogram("request-latency-in-ms", b, conf.MetricRegistry)
- b.brokerOutgoingByteRate = getOrRegisterBrokerMeter("outgoing-byte-rate", b, conf.MetricRegistry)
- b.brokerResponseRate = getOrRegisterBrokerMeter("response-rate", b, conf.MetricRegistry)
- b.brokerResponseSize = getOrRegisterBrokerHistogram("response-size", b, conf.MetricRegistry)
- }
-
- if conf.Net.SASL.Enable {
- b.connErr = b.sendAndReceiveSASLPlainAuth()
- if b.connErr != nil {
- err = b.conn.Close()
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
- b.conn = nil
- atomic.StoreInt32(&b.opened, 0)
- return
- }
- }
-
- b.done = make(chan bool)
- b.responses = make(chan responsePromise, b.conf.Net.MaxOpenRequests-1)
-
- if b.id >= 0 {
- Logger.Printf("Connected to broker at %s (registered as #%d)\n", b.addr, b.id)
- } else {
- Logger.Printf("Connected to broker at %s (unregistered)\n", b.addr)
- }
- go withRecover(b.responseReceiver)
- })
-
- return nil
-}
-
-// Connected returns true if the broker is connected and false otherwise. If the broker is not
-// connected but it had tried to connect, the error from that connection attempt is also returned.
-func (b *Broker) Connected() (bool, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- return b.conn != nil, b.connErr
-}
-
-func (b *Broker) Close() error {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- return ErrNotConnected
- }
-
- close(b.responses)
- <-b.done
-
- err := b.conn.Close()
-
- b.conn = nil
- b.connErr = nil
- b.done = nil
- b.responses = nil
-
- if err == nil {
- Logger.Printf("Closed connection to broker %s\n", b.addr)
- } else {
- Logger.Printf("Error while closing connection to broker %s: %s\n", b.addr, err)
- }
-
- atomic.StoreInt32(&b.opened, 0)
-
- return err
-}
-
-// ID returns the broker ID retrieved from Kafka's metadata, or -1 if that is not known.
-func (b *Broker) ID() int32 {
- return b.id
-}
-
-// Addr returns the broker address as either retrieved from Kafka's metadata or passed to NewBroker.
-func (b *Broker) Addr() string {
- return b.addr
-}
-
-func (b *Broker) GetMetadata(request *MetadataRequest) (*MetadataResponse, error) {
- response := new(MetadataResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) GetConsumerMetadata(request *ConsumerMetadataRequest) (*ConsumerMetadataResponse, error) {
- response := new(ConsumerMetadataResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) GetAvailableOffsets(request *OffsetRequest) (*OffsetResponse, error) {
- response := new(OffsetResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) Produce(request *ProduceRequest) (*ProduceResponse, error) {
- var response *ProduceResponse
- var err error
-
- if request.RequiredAcks == NoResponse {
- err = b.sendAndReceive(request, nil)
- } else {
- response = new(ProduceResponse)
- err = b.sendAndReceive(request, response)
- }
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) Fetch(request *FetchRequest) (*FetchResponse, error) {
- response := new(FetchResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) CommitOffset(request *OffsetCommitRequest) (*OffsetCommitResponse, error) {
- response := new(OffsetCommitResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) FetchOffset(request *OffsetFetchRequest) (*OffsetFetchResponse, error) {
- response := new(OffsetFetchResponse)
-
- err := b.sendAndReceive(request, response)
-
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) JoinGroup(request *JoinGroupRequest) (*JoinGroupResponse, error) {
- response := new(JoinGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) SyncGroup(request *SyncGroupRequest) (*SyncGroupResponse, error) {
- response := new(SyncGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) LeaveGroup(request *LeaveGroupRequest) (*LeaveGroupResponse, error) {
- response := new(LeaveGroupResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) Heartbeat(request *HeartbeatRequest) (*HeartbeatResponse, error) {
- response := new(HeartbeatResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) ListGroups(request *ListGroupsRequest) (*ListGroupsResponse, error) {
- response := new(ListGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) DescribeGroups(request *DescribeGroupsRequest) (*DescribeGroupsResponse, error) {
- response := new(DescribeGroupsResponse)
-
- err := b.sendAndReceive(request, response)
- if err != nil {
- return nil, err
- }
-
- return response, nil
-}
-
-func (b *Broker) send(rb protocolBody, promiseResponse bool) (*responsePromise, error) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if b.conn == nil {
- if b.connErr != nil {
- return nil, b.connErr
- }
- return nil, ErrNotConnected
- }
-
- if !b.conf.Version.IsAtLeast(rb.requiredVersion()) {
- return nil, ErrUnsupportedVersion
- }
-
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return nil, err
- }
-
- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- return nil, err
- }
-
- requestTime := time.Now()
- bytes, err := b.conn.Write(buf)
- b.updateOutgoingCommunicationMetrics(bytes)
- if err != nil {
- return nil, err
- }
- b.correlationID++
-
- if !promiseResponse {
- // Record request latency without the response
- b.updateRequestLatencyMetrics(time.Since(requestTime))
- return nil, nil
- }
-
- promise := responsePromise{requestTime, req.correlationID, make(chan []byte), make(chan error)}
- b.responses <- promise
-
- return &promise, nil
-}
-
-func (b *Broker) sendAndReceive(req protocolBody, res versionedDecoder) error {
- promise, err := b.send(req, res != nil)
-
- if err != nil {
- return err
- }
-
- if promise == nil {
- return nil
- }
-
- select {
- case buf := <-promise.packets:
- return versionedDecode(buf, res, req.version())
- case err = <-promise.errors:
- return err
- }
-}
-
-func (b *Broker) decode(pd packetDecoder) (err error) {
- b.id, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- host, err := pd.getString()
- if err != nil {
- return err
- }
-
- port, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- b.addr = net.JoinHostPort(host, fmt.Sprint(port))
- if _, _, err := net.SplitHostPort(b.addr); err != nil {
- return err
- }
-
- return nil
-}
-
-func (b *Broker) encode(pe packetEncoder) (err error) {
-
- host, portstr, err := net.SplitHostPort(b.addr)
- if err != nil {
- return err
- }
- port, err := strconv.Atoi(portstr)
- if err != nil {
- return err
- }
-
- pe.putInt32(b.id)
-
- err = pe.putString(host)
- if err != nil {
- return err
- }
-
- pe.putInt32(int32(port))
-
- return nil
-}
-
-func (b *Broker) responseReceiver() {
- var dead error
- header := make([]byte, 8)
- for response := range b.responses {
- if dead != nil {
- response.errors <- dead
- continue
- }
-
- err := b.conn.SetReadDeadline(time.Now().Add(b.conf.Net.ReadTimeout))
- if err != nil {
- dead = err
- response.errors <- err
- continue
- }
-
- bytesReadHeader, err := io.ReadFull(b.conn, header)
- requestLatency := time.Since(response.requestTime)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
-
- decodedHeader := responseHeader{}
- err = decode(header, &decodedHeader)
- if err != nil {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- dead = err
- response.errors <- err
- continue
- }
- if decodedHeader.correlationID != response.correlationID {
- b.updateIncomingCommunicationMetrics(bytesReadHeader, requestLatency)
- // TODO if decoded ID < cur ID, discard until we catch up
- // TODO if decoded ID > cur ID, save it so when cur ID catches up we have a response
- dead = PacketDecodingError{fmt.Sprintf("correlation ID didn't match, wanted %d, got %d", response.correlationID, decodedHeader.correlationID)}
- response.errors <- dead
- continue
- }
-
- buf := make([]byte, decodedHeader.length-4)
- bytesReadBody, err := io.ReadFull(b.conn, buf)
- b.updateIncomingCommunicationMetrics(bytesReadHeader+bytesReadBody, requestLatency)
- if err != nil {
- dead = err
- response.errors <- err
- continue
- }
-
- response.packets <- buf
- }
- close(b.done)
-}
-
-func (b *Broker) sendAndReceiveSASLPlainHandshake() error {
- rb := &SaslHandshakeRequest{"PLAIN"}
- req := &request{correlationID: b.correlationID, clientID: b.conf.ClientID, body: rb}
- buf, err := encode(req, b.conf.MetricRegistry)
- if err != nil {
- return err
- }
-
- err = b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- return err
- }
-
- requestTime := time.Now()
- bytes, err := b.conn.Write(buf)
- b.updateOutgoingCommunicationMetrics(bytes)
- if err != nil {
- Logger.Printf("Failed to send SASL handshake %s: %s\n", b.addr, err.Error())
- return err
- }
- b.correlationID++
-	// wait for the response
- header := make([]byte, 8) // response header
- _, err = io.ReadFull(b.conn, header)
- if err != nil {
- Logger.Printf("Failed to read SASL handshake header : %s\n", err.Error())
- return err
- }
- length := binary.BigEndian.Uint32(header[:4])
- payload := make([]byte, length-4)
- n, err := io.ReadFull(b.conn, payload)
- if err != nil {
- Logger.Printf("Failed to read SASL handshake payload : %s\n", err.Error())
- return err
- }
- b.updateIncomingCommunicationMetrics(n+8, time.Since(requestTime))
- res := &SaslHandshakeResponse{}
- err = versionedDecode(payload, res, 0)
- if err != nil {
- Logger.Printf("Failed to parse SASL handshake : %s\n", err.Error())
- return err
- }
- if res.Err != ErrNoError {
- Logger.Printf("Invalid SASL Mechanism : %s\n", res.Err.Error())
- return res.Err
- }
- Logger.Print("Successful SASL handshake")
- return nil
-}
-
-// Kafka 0.10.0 plans to support SASL Plain and Kerberos as per PR #812 (KIP-43)/(JIRA KAFKA-3149)
-// Some hosted kafka services such as IBM Message Hub already offer SASL/PLAIN auth with Kafka 0.9
-//
-// In SASL Plain, Kafka expects the auth header to be in the following format
-// Message format (from https://tools.ietf.org/html/rfc4616):
-//
-// message = [authzid] UTF8NUL authcid UTF8NUL passwd
-// authcid = 1*SAFE ; MUST accept up to 255 octets
-// authzid = 1*SAFE ; MUST accept up to 255 octets
-// passwd = 1*SAFE ; MUST accept up to 255 octets
-// UTF8NUL = %x00 ; UTF-8 encoded NUL character
-//
-// SAFE = UTF1 / UTF2 / UTF3 / UTF4
-// ;; any UTF-8 encoded Unicode character except NUL
-//
-// When credentials are valid, Kafka returns a 4-byte array of null characters.
-// When credentials are invalid, Kafka closes the connection. This does not seem to be the ideal way
-// of responding to bad credentials, but that's how it's done today.
-func (b *Broker) sendAndReceiveSASLPlainAuth() error {
- if b.conf.Net.SASL.Handshake {
- handshakeErr := b.sendAndReceiveSASLPlainHandshake()
- if handshakeErr != nil {
- Logger.Printf("Error while performing SASL handshake %s\n", b.addr)
- return handshakeErr
- }
- }
- length := 1 + len(b.conf.Net.SASL.User) + 1 + len(b.conf.Net.SASL.Password)
-	authBytes := make([]byte, length+4) // 4-byte length header + auth data
- binary.BigEndian.PutUint32(authBytes, uint32(length))
- copy(authBytes[4:], []byte("\x00"+b.conf.Net.SASL.User+"\x00"+b.conf.Net.SASL.Password))
-
- err := b.conn.SetWriteDeadline(time.Now().Add(b.conf.Net.WriteTimeout))
- if err != nil {
- Logger.Printf("Failed to set write deadline when doing SASL auth with broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- requestTime := time.Now()
- bytesWritten, err := b.conn.Write(authBytes)
- b.updateOutgoingCommunicationMetrics(bytesWritten)
- if err != nil {
- Logger.Printf("Failed to write SASL auth header to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- header := make([]byte, 4)
- n, err := io.ReadFull(b.conn, header)
- b.updateIncomingCommunicationMetrics(n, time.Since(requestTime))
- // If the credentials are valid, we would get a 4 byte response filled with null characters.
- // Otherwise, the broker closes the connection and we get an EOF
- if err != nil {
- Logger.Printf("Failed to read response while authenticating with SASL to broker %s: %s\n", b.addr, err.Error())
- return err
- }
-
- Logger.Printf("SASL authentication successful with broker %s:%v - %v\n", b.addr, n, header)
- return nil
-}
-
-func (b *Broker) updateIncomingCommunicationMetrics(bytes int, requestLatency time.Duration) {
- b.updateRequestLatencyMetrics(requestLatency)
- b.responseRate.Mark(1)
- if b.brokerResponseRate != nil {
- b.brokerResponseRate.Mark(1)
- }
- responseSize := int64(bytes)
- b.incomingByteRate.Mark(responseSize)
- if b.brokerIncomingByteRate != nil {
- b.brokerIncomingByteRate.Mark(responseSize)
- }
- b.responseSize.Update(responseSize)
- if b.brokerResponseSize != nil {
- b.brokerResponseSize.Update(responseSize)
- }
-}
-
-func (b *Broker) updateRequestLatencyMetrics(requestLatency time.Duration) {
- requestLatencyInMs := int64(requestLatency / time.Millisecond)
- b.requestLatency.Update(requestLatencyInMs)
- if b.brokerRequestLatency != nil {
- b.brokerRequestLatency.Update(requestLatencyInMs)
- }
-}
-
-func (b *Broker) updateOutgoingCommunicationMetrics(bytes int) {
- b.requestRate.Mark(1)
- if b.brokerRequestRate != nil {
- b.brokerRequestRate.Mark(1)
- }
- requestSize := int64(bytes)
- b.outgoingByteRate.Mark(requestSize)
- if b.brokerOutgoingByteRate != nil {
- b.brokerOutgoingByteRate.Mark(requestSize)
- }
- b.requestSize.Update(requestSize)
- if b.brokerRequestSize != nil {
- b.brokerRequestSize.Update(requestSize)
- }
-}
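
The RFC 4616 framing implemented by sendAndReceiveSASLPlainAuth above is compact and easy to misread. A self-contained sketch of the same framing (the helper name is illustrative, not part of Sarama):

	package main

	import (
		"encoding/binary"
		"fmt"
	)

	// saslPlainFrame builds the payload sendAndReceiveSASLPlainAuth writes:
	// a 4-byte big-endian length header followed by
	// NUL authcid NUL passwd (the optional authzid is left empty).
	func saslPlainFrame(user, password string) []byte {
		msg := []byte("\x00" + user + "\x00" + password)
		buf := make([]byte, 4+len(msg))
		binary.BigEndian.PutUint32(buf, uint32(len(msg)))
		copy(buf[4:], msg)
		return buf
	}

	func main() {
		fmt.Printf("% x\n", saslPlainFrame("alice", "secret"))
	}
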
diff --git a/vendor/github.com/Shopify/sarama/client.go b/vendor/github.com/Shopify/sarama/client.go
deleted file mode 100644
index f869a1434f7..00000000000
--- a/vendor/github.com/Shopify/sarama/client.go
+++ /dev/null
@@ -1,736 +0,0 @@
-package sarama
-
-import (
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-// Client is a generic Kafka client. It manages connections to one or more Kafka brokers.
-// You MUST call Close() on a client to avoid leaks; it will not be garbage-collected
-// automatically when it passes out of scope. It is safe to share a client amongst many
-// users; however, Kafka will process requests from a single client strictly in serial,
-// so it is generally more efficient to use the default of one client per producer/consumer.
-type Client interface {
- // Config returns the Config struct of the client. This struct should not be
- // altered after it has been created.
- Config() *Config
-
- // Topics returns the set of available topics as retrieved from cluster metadata.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- Partitions(topic string) ([]int32, error)
-
- // WritablePartitions returns the sorted list of all writable partition IDs for
- // the given topic, where "writable" means "having a valid leader accepting
- // writes".
- WritablePartitions(topic string) ([]int32, error)
-
- // Leader returns the broker object that is the leader of the current
- // topic/partition, as determined by querying the cluster metadata.
- Leader(topic string, partitionID int32) (*Broker, error)
-
- // Replicas returns the set of all replica IDs for the given partition.
- Replicas(topic string, partitionID int32) ([]int32, error)
-
- // RefreshMetadata takes a list of topics and queries the cluster to refresh the
- // available metadata for those topics. If no topics are provided, it will refresh
- // metadata for all topics.
- RefreshMetadata(topics ...string) error
-
- // GetOffset queries the cluster to get the most recent available offset at the
- // given time on the topic/partition combination. Time should be OffsetOldest for
- // the earliest available offset, OffsetNewest for the offset of the message that
-	// will be produced next, or a timestamp in milliseconds since the Unix epoch.
- GetOffset(topic string, partitionID int32, time int64) (int64, error)
-
- // Coordinator returns the coordinating broker for a consumer group. It will
- // return a locally cached value if it's available. You can call
- // RefreshCoordinator to update the cached value. This function only works on
- // Kafka 0.8.2 and higher.
- Coordinator(consumerGroup string) (*Broker, error)
-
- // RefreshCoordinator retrieves the coordinator for a consumer group and stores it
- // in local cache. This function only works on Kafka 0.8.2 and higher.
- RefreshCoordinator(consumerGroup string) error
-
- // Close shuts down all broker connections managed by this client. It is required
- // to call this function before a client object passes out of scope, as it will
- // otherwise leak memory. You must close any Producers or Consumers using a client
- // before you close the client.
- Close() error
-
- // Closed returns true if the client has already had Close called on it
- Closed() bool
-}
-
-const (
- // OffsetNewest stands for the log head offset, i.e. the offset that will be
- // assigned to the next message that will be produced to the partition. You
- // can send this to a client's GetOffset method to get this offset, or when
- // calling ConsumePartition to start consuming new messages.
- OffsetNewest int64 = -1
- // OffsetOldest stands for the oldest offset available on the broker for a
- // partition. You can send this to a client's GetOffset method to get this
- // offset, or when calling ConsumePartition to start consuming from the
- // oldest offset that is still available on the broker.
- OffsetOldest int64 = -2
-)
-
-type client struct {
- conf *Config
- closer, closed chan none // for shutting down background metadata updater
-
- // the broker addresses given to us through the constructor are not guaranteed to be returned in
-	// the cluster metadata (I *think* it only returns brokers that are currently leading partitions?)
- // so we store them separately
- seedBrokers []*Broker
- deadSeeds []*Broker
-
- brokers map[int32]*Broker // maps broker ids to brokers
- metadata map[string]map[int32]*PartitionMetadata // maps topics to partition ids to metadata
- coordinators map[string]int32 // Maps consumer group names to coordinating broker IDs
-
- // If the number of partitions is large, we can get some churn calling cachedPartitions,
- // so the result is cached. It is important to update this value whenever metadata is changed
- cachedPartitionsResults map[string][maxPartitionIndex][]int32
-
- lock sync.RWMutex // protects access to the maps that hold cluster state.
-}
-
-// NewClient creates a new Client. It connects to one of the given broker addresses
-// and uses that broker to automatically fetch metadata on the rest of the kafka cluster. If metadata cannot
-// be retrieved from any of the given broker addresses, the client is not created.
-func NewClient(addrs []string, conf *Config) (Client, error) {
- Logger.Println("Initializing new client")
-
- if conf == nil {
- conf = NewConfig()
- }
-
- if err := conf.Validate(); err != nil {
- return nil, err
- }
-
- if len(addrs) < 1 {
- return nil, ConfigurationError("You must provide at least one broker address")
- }
-
- client := &client{
- conf: conf,
- closer: make(chan none),
- closed: make(chan none),
- brokers: make(map[int32]*Broker),
- metadata: make(map[string]map[int32]*PartitionMetadata),
- cachedPartitionsResults: make(map[string][maxPartitionIndex][]int32),
- coordinators: make(map[string]int32),
- }
-
- random := rand.New(rand.NewSource(time.Now().UnixNano()))
- for _, index := range random.Perm(len(addrs)) {
- client.seedBrokers = append(client.seedBrokers, NewBroker(addrs[index]))
- }
-
-	// do an initial fetch of all cluster metadata by specifying an empty list of topics
- err := client.RefreshMetadata()
- switch err {
- case nil:
- break
- case ErrLeaderNotAvailable, ErrReplicaNotAvailable, ErrTopicAuthorizationFailed, ErrClusterAuthorizationFailed:
- // indicates that maybe part of the cluster is down, but is not fatal to creating the client
- Logger.Println(err)
- default:
- close(client.closed) // we haven't started the background updater yet, so we have to do this manually
- _ = client.Close()
- return nil, err
- }
- go withRecover(client.backgroundMetadataUpdater)
-
- Logger.Println("Successfully initialized new client")
-
- return client, nil
-}
-
-func (client *client) Config() *Config {
- return client.conf
-}
-
-func (client *client) Close() error {
- if client.Closed() {
-		// Chances are this is being called from a defer() and the error will go unobserved,
-		// so we go ahead and log the event in this case.
- Logger.Printf("Close() called on already closed client")
- return ErrClosedClient
- }
-
- // shutdown and wait for the background thread before we take the lock, to avoid races
- close(client.closer)
- <-client.closed
-
- client.lock.Lock()
- defer client.lock.Unlock()
- Logger.Println("Closing Client")
-
- for _, broker := range client.brokers {
- safeAsyncClose(broker)
- }
-
- for _, broker := range client.seedBrokers {
- safeAsyncClose(broker)
- }
-
- client.brokers = nil
- client.metadata = nil
-
- return nil
-}
-
-func (client *client) Closed() bool {
- return client.brokers == nil
-}
-
-func (client *client) Topics() ([]string, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- ret := make([]string, 0, len(client.metadata))
- for topic := range client.metadata {
- ret = append(ret, topic)
- }
-
- return ret, nil
-}
-
-func (client *client) Partitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, allPartitions)
-
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, allPartitions)
- }
-
- if partitions == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) WritablePartitions(topic string) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- partitions := client.cachedPartitions(topic, writablePartitions)
-
-	// len==0 catches both nil (no such topic) and the odd case when every single
-	// partition is undergoing leader election simultaneously. Callers have to be able to
-	// handle this function returning an empty slice (which is a valid return value), but
-	// catching it here the first time (note we *don't* catch it below, where we return
-	// ErrUnknownTopicOrPartition) triggers a metadata refresh as a nicety, so callers can
-	// just try again instead of manually triggering a refresh (otherwise they'd just keep
-	// getting a stale cached copy).
- if len(partitions) == 0 {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- partitions = client.cachedPartitions(topic, writablePartitions)
- }
-
- if partitions == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- return partitions, nil
-}
-
-func (client *client) Replicas(topic string, partitionID int32) ([]int32, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- metadata := client.cachedMetadata(topic, partitionID)
-
- if metadata == nil {
- err := client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- metadata = client.cachedMetadata(topic, partitionID)
- }
-
- if metadata == nil {
- return nil, ErrUnknownTopicOrPartition
- }
-
- if metadata.Err == ErrReplicaNotAvailable {
- return nil, metadata.Err
- }
- return dupeAndSort(metadata.Replicas), nil
-}
-
-func (client *client) Leader(topic string, partitionID int32) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- leader, err := client.cachedLeader(topic, partitionID)
-
- if leader == nil {
- err = client.RefreshMetadata(topic)
- if err != nil {
- return nil, err
- }
- leader, err = client.cachedLeader(topic, partitionID)
- }
-
- return leader, err
-}
-
-func (client *client) RefreshMetadata(topics ...string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
-	// Prior to 0.8.2, Kafka would throw exceptions on an empty topic rather than returning
-	// a proper error. Handle that case here by returning an error instead of sending the
-	// request to Kafka. See: https://github.com/Shopify/sarama/pull/38#issuecomment-26362310
- for _, topic := range topics {
- if len(topic) == 0 {
- return ErrInvalidTopic // this is the error that 0.8.2 and later correctly return
- }
- }
-
- return client.tryRefreshMetadata(topics, client.conf.Metadata.Retry.Max)
-}
-
-func (client *client) GetOffset(topic string, partitionID int32, time int64) (int64, error) {
- if client.Closed() {
- return -1, ErrClosedClient
- }
-
- offset, err := client.getOffset(topic, partitionID, time)
-
- if err != nil {
- if err := client.RefreshMetadata(topic); err != nil {
- return -1, err
- }
- return client.getOffset(topic, partitionID, time)
- }
-
- return offset, err
-}
-
-func (client *client) Coordinator(consumerGroup string) (*Broker, error) {
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- coordinator := client.cachedCoordinator(consumerGroup)
-
- if coordinator == nil {
- if err := client.RefreshCoordinator(consumerGroup); err != nil {
- return nil, err
- }
- coordinator = client.cachedCoordinator(consumerGroup)
- }
-
- if coordinator == nil {
- return nil, ErrConsumerCoordinatorNotAvailable
- }
-
- _ = coordinator.Open(client.conf)
- return coordinator, nil
-}
-
-func (client *client) RefreshCoordinator(consumerGroup string) error {
- if client.Closed() {
- return ErrClosedClient
- }
-
- response, err := client.getConsumerMetadata(consumerGroup, client.conf.Metadata.Retry.Max)
- if err != nil {
- return err
- }
-
- client.lock.Lock()
- defer client.lock.Unlock()
- client.registerBroker(response.Coordinator)
- client.coordinators[consumerGroup] = response.Coordinator.ID()
- return nil
-}
-
-// private broker management helpers
-
-// registerBroker makes sure a broker received by a Metadata or Coordinator request is registered
-// in the brokers map. If a broker with the same ID is already registered under a stale
-// address, the old instance is closed and replaced. You must hold the write lock before
-// calling this function.
-func (client *client) registerBroker(broker *Broker) {
- if client.brokers[broker.ID()] == nil {
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers registered new broker #%d at %s", broker.ID(), broker.Addr())
- } else if broker.Addr() != client.brokers[broker.ID()].Addr() {
- safeAsyncClose(client.brokers[broker.ID()])
- client.brokers[broker.ID()] = broker
- Logger.Printf("client/brokers replaced registered broker #%d with %s", broker.ID(), broker.Addr())
- }
-}
-
-// deregisterBroker moves a broker from the seedBrokers list to the deadSeeds list, and if
-// it's not a seed broker, removes it from the brokers map completely.
-func (client *client) deregisterBroker(broker *Broker) {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- if len(client.seedBrokers) > 0 && broker == client.seedBrokers[0] {
- client.deadSeeds = append(client.deadSeeds, broker)
- client.seedBrokers = client.seedBrokers[1:]
- } else {
- // we do this so that our loop in `tryRefreshMetadata` doesn't go on forever,
- // but we really shouldn't have to; once that loop is made better this case can be
- // removed, and the function generally can be renamed from `deregisterBroker` to
- // `nextSeedBroker` or something
- Logger.Printf("client/brokers deregistered broker #%d at %s", broker.ID(), broker.Addr())
- delete(client.brokers, broker.ID())
- }
-}
-
-func (client *client) resurrectDeadBrokers() {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- Logger.Printf("client/brokers resurrecting %d dead seed brokers", len(client.deadSeeds))
- client.seedBrokers = append(client.seedBrokers, client.deadSeeds...)
- client.deadSeeds = nil
-}
-
-func (client *client) any() *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- if len(client.seedBrokers) > 0 {
- _ = client.seedBrokers[0].Open(client.conf)
- return client.seedBrokers[0]
- }
-
- // not guaranteed to be random *or* deterministic
- for _, broker := range client.brokers {
- _ = broker.Open(client.conf)
- return broker
- }
-
- return nil
-}
-
-// private caching/lazy metadata helpers
-
-type partitionType int
-
-const (
- allPartitions partitionType = iota
- writablePartitions
- // If you add any more types, update the partition cache in update()
-
- // Ensure this is the last partition type value
- maxPartitionIndex
-)
-
-func (client *client) cachedMetadata(topic string, partitionID int32) *PartitionMetadata {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- return partitions[partitionID]
- }
-
- return nil
-}
-
-func (client *client) cachedPartitions(topic string, partitionSet partitionType) []int32 {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions, exists := client.cachedPartitionsResults[topic]
-
- if !exists {
- return nil
- }
- return partitions[partitionSet]
-}
-
-func (client *client) setPartitionCache(topic string, partitionSet partitionType) []int32 {
- partitions := client.metadata[topic]
-
- if partitions == nil {
- return nil
- }
-
- ret := make([]int32, 0, len(partitions))
- for _, partition := range partitions {
- if partitionSet == writablePartitions && partition.Err == ErrLeaderNotAvailable {
- continue
- }
- ret = append(ret, partition.ID)
- }
-
- sort.Sort(int32Slice(ret))
- return ret
-}
-
-func (client *client) cachedLeader(topic string, partitionID int32) (*Broker, error) {
- client.lock.RLock()
- defer client.lock.RUnlock()
-
- partitions := client.metadata[topic]
- if partitions != nil {
- metadata, ok := partitions[partitionID]
- if ok {
- if metadata.Err == ErrLeaderNotAvailable {
- return nil, ErrLeaderNotAvailable
- }
- b := client.brokers[metadata.Leader]
- if b == nil {
- return nil, ErrLeaderNotAvailable
- }
- _ = b.Open(client.conf)
- return b, nil
- }
- }
-
- return nil, ErrUnknownTopicOrPartition
-}
-
-func (client *client) getOffset(topic string, partitionID int32, time int64) (int64, error) {
- broker, err := client.Leader(topic, partitionID)
- if err != nil {
- return -1, err
- }
-
- request := &OffsetRequest{}
- if client.conf.Version.IsAtLeast(V0_10_1_0) {
- request.Version = 1
- }
- request.AddBlock(topic, partitionID, time, 1)
-
- response, err := broker.GetAvailableOffsets(request)
- if err != nil {
- _ = broker.Close()
- return -1, err
- }
-
- block := response.GetBlock(topic, partitionID)
- if block == nil {
- _ = broker.Close()
- return -1, ErrIncompleteResponse
- }
- if block.Err != ErrNoError {
- return -1, block.Err
- }
- if len(block.Offsets) != 1 {
- return -1, ErrOffsetOutOfRange
- }
-
- return block.Offsets[0], nil
-}
-
-// core metadata update logic
-
-func (client *client) backgroundMetadataUpdater() {
- defer close(client.closed)
-
- if client.conf.Metadata.RefreshFrequency == time.Duration(0) {
- return
- }
-
- ticker := time.NewTicker(client.conf.Metadata.RefreshFrequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- if err := client.RefreshMetadata(); err != nil {
- Logger.Println("Client background metadata update:", err)
- }
- case <-client.closer:
- return
- }
- }
-}
-
-func (client *client) tryRefreshMetadata(topics []string, attemptsRemaining int) error {
- retry := func(err error) error {
- if attemptsRemaining > 0 {
- Logger.Printf("client/metadata retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
- time.Sleep(client.conf.Metadata.Retry.Backoff)
- return client.tryRefreshMetadata(topics, attemptsRemaining-1)
- }
- return err
- }
-
- for broker := client.any(); broker != nil; broker = client.any() {
- if len(topics) > 0 {
- Logger.Printf("client/metadata fetching metadata for %v from broker %s\n", topics, broker.addr)
- } else {
- Logger.Printf("client/metadata fetching metadata for all topics from broker %s\n", broker.addr)
- }
- response, err := broker.GetMetadata(&MetadataRequest{Topics: topics})
-
- switch err.(type) {
- case nil:
- // valid response, use it
- if shouldRetry, err := client.updateMetadata(response); shouldRetry {
- Logger.Println("client/metadata found some partitions to be leaderless")
- return retry(err) // note: err can be nil
- } else {
- return err
- }
-
- case PacketEncodingError:
- // didn't even send, return the error
- return err
- default:
- // some other error, remove that broker and try again
- Logger.Println("client/metadata got error from broker while fetching metadata:", err)
- _ = broker.Close()
- client.deregisterBroker(broker)
- }
- }
-
- Logger.Println("client/metadata no available broker to send metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
-
-// if no fatal error, returns a list of topics that need retrying due to ErrLeaderNotAvailable
-func (client *client) updateMetadata(data *MetadataResponse) (retry bool, err error) {
- client.lock.Lock()
- defer client.lock.Unlock()
-
- // For all the brokers we received:
- // - if it is a new ID, save it
- // - if it is an existing ID, but the address we have is stale, discard the old one and save it
- // - otherwise ignore it, replacing our existing one would just bounce the connection
- for _, broker := range data.Brokers {
- client.registerBroker(broker)
- }
-
- for _, topic := range data.Topics {
- delete(client.metadata, topic.Name)
- delete(client.cachedPartitionsResults, topic.Name)
-
- switch topic.Err {
- case ErrNoError:
- break
- case ErrInvalidTopic, ErrTopicAuthorizationFailed: // don't retry, don't store partial results
- err = topic.Err
- continue
- case ErrUnknownTopicOrPartition: // retry, do not store partial partition results
- err = topic.Err
- retry = true
- continue
- case ErrLeaderNotAvailable: // retry, but store partial partition results
- retry = true
- break
- default: // don't retry, don't store partial results
- Logger.Printf("Unexpected topic-level metadata error: %s", topic.Err)
- err = topic.Err
- continue
- }
-
- client.metadata[topic.Name] = make(map[int32]*PartitionMetadata, len(topic.Partitions))
- for _, partition := range topic.Partitions {
- client.metadata[topic.Name][partition.ID] = partition
- if partition.Err == ErrLeaderNotAvailable {
- retry = true
- }
- }
-
- var partitionCache [maxPartitionIndex][]int32
- partitionCache[allPartitions] = client.setPartitionCache(topic.Name, allPartitions)
- partitionCache[writablePartitions] = client.setPartitionCache(topic.Name, writablePartitions)
- client.cachedPartitionsResults[topic.Name] = partitionCache
- }
-
- return
-}
-
-func (client *client) cachedCoordinator(consumerGroup string) *Broker {
- client.lock.RLock()
- defer client.lock.RUnlock()
- if coordinatorID, ok := client.coordinators[consumerGroup]; ok {
- return client.brokers[coordinatorID]
- }
- return nil
-}
-
-func (client *client) getConsumerMetadata(consumerGroup string, attemptsRemaining int) (*ConsumerMetadataResponse, error) {
- retry := func(err error) (*ConsumerMetadataResponse, error) {
- if attemptsRemaining > 0 {
- Logger.Printf("client/coordinator retrying after %dms... (%d attempts remaining)\n", client.conf.Metadata.Retry.Backoff/time.Millisecond, attemptsRemaining)
- time.Sleep(client.conf.Metadata.Retry.Backoff)
- return client.getConsumerMetadata(consumerGroup, attemptsRemaining-1)
- }
- return nil, err
- }
-
- for broker := client.any(); broker != nil; broker = client.any() {
- Logger.Printf("client/coordinator requesting coordinator for consumergroup %s from %s\n", consumerGroup, broker.Addr())
-
- request := new(ConsumerMetadataRequest)
- request.ConsumerGroup = consumerGroup
-
- response, err := broker.GetConsumerMetadata(request)
-
- if err != nil {
- Logger.Printf("client/coordinator request to broker %s failed: %s\n", broker.Addr(), err)
-
- switch err.(type) {
- case PacketEncodingError:
- return nil, err
- default:
- _ = broker.Close()
- client.deregisterBroker(broker)
- continue
- }
- }
-
- switch response.Err {
- case ErrNoError:
- Logger.Printf("client/coordinator coordinator for consumergroup %s is #%d (%s)\n", consumerGroup, response.Coordinator.ID(), response.Coordinator.Addr())
- return response, nil
-
- case ErrConsumerCoordinatorNotAvailable:
- Logger.Printf("client/coordinator coordinator for consumer group %s is not available\n", consumerGroup)
-
- // This is very ugly, but this scenario will only happen once per cluster.
-			// The __consumer_offsets topic only has to be created once.
-			// The number of partitions is not configurable, but partition 0 should always exist.
- if _, err := client.Leader("__consumer_offsets", 0); err != nil {
- Logger.Printf("client/coordinator the __consumer_offsets topic is not initialized completely yet. Waiting 2 seconds...\n")
- time.Sleep(2 * time.Second)
- }
-
- return retry(ErrConsumerCoordinatorNotAvailable)
- default:
- return nil, response.Err
- }
- }
-
- Logger.Println("client/coordinator no available broker to send consumer metadata request to")
- client.resurrectDeadBrokers()
- return retry(ErrOutOfBrokers)
-}
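
For context, the Client interface above is typically used like this; the broker address, topic, and partition are illustrative:

	package main

	import (
		"log"

		"github.com/Shopify/sarama"
	)

	func main() {
		// A nil config falls back to NewConfig() defaults, per NewClient above.
		client, err := sarama.NewClient([]string{"localhost:9092"}, nil)
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close() // required; clients are not garbage-collected

		// OffsetNewest returns the offset of the next message to be produced;
		// OffsetOldest returns the earliest offset still held by the broker.
		next, err := client.GetOffset("events", 0, sarama.OffsetNewest)
		if err != nil {
			log.Fatal(err)
		}
		log.Printf("partition 0 head offset: %d", next)
	}
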
diff --git a/vendor/github.com/Shopify/sarama/config.go b/vendor/github.com/Shopify/sarama/config.go
deleted file mode 100644
index a417a38b2e6..00000000000
--- a/vendor/github.com/Shopify/sarama/config.go
+++ /dev/null
@@ -1,417 +0,0 @@
-package sarama
-
-import (
- "crypto/tls"
- "regexp"
- "time"
-
- "github.com/rcrowley/go-metrics"
-)
-
-const defaultClientID = "sarama"
-
-var validID = regexp.MustCompile(`\A[A-Za-z0-9._-]+\z`)
-
-// Config is used to pass multiple configuration options to Sarama's constructors.
-type Config struct {
- // Net is the namespace for network-level properties used by the Broker, and
- // shared by the Client/Producer/Consumer.
- Net struct {
- // How many outstanding requests a connection is allowed to have before
- // sending on it blocks (default 5).
- MaxOpenRequests int
-
- // All three of the below configurations are similar to the
- // `socket.timeout.ms` setting in JVM kafka. All of them default
- // to 30 seconds.
- DialTimeout time.Duration // How long to wait for the initial connection.
- ReadTimeout time.Duration // How long to wait for a response.
- WriteTimeout time.Duration // How long to wait for a transmit.
-
- TLS struct {
- // Whether or not to use TLS when connecting to the broker
- // (defaults to false).
- Enable bool
- // The TLS configuration to use for secure connections if
- // enabled (defaults to nil).
- Config *tls.Config
- }
-
- // SASL based authentication with broker. While there are multiple SASL authentication methods
- // the current implementation is limited to plaintext (SASL/PLAIN) authentication
- SASL struct {
- // Whether or not to use SASL authentication when connecting to the broker
- // (defaults to false).
- Enable bool
- // Whether or not to send the Kafka SASL handshake first if enabled
- // (defaults to true). You should only set this to false if you're using
- // a non-Kafka SASL proxy.
- Handshake bool
-			// Username and password for SASL/PLAIN authentication.
- User string
- Password string
- }
-
- // KeepAlive specifies the keep-alive period for an active network connection.
- // If zero, keep-alives are disabled. (default is 0: disabled).
- KeepAlive time.Duration
- }
-
- // Metadata is the namespace for metadata management properties used by the
- // Client, and shared by the Producer/Consumer.
- Metadata struct {
- Retry struct {
- // The total number of times to retry a metadata request when the
- // cluster is in the middle of a leader election (default 3).
- Max int
- // How long to wait for leader election to occur before retrying
- // (default 250ms). Similar to the JVM's `retry.backoff.ms`.
- Backoff time.Duration
- }
- // How frequently to refresh the cluster metadata in the background.
- // Defaults to 10 minutes. Set to 0 to disable. Similar to
- // `topic.metadata.refresh.interval.ms` in the JVM version.
- RefreshFrequency time.Duration
- }
-
- // Producer is the namespace for configuration related to producing messages,
- // used by the Producer.
- Producer struct {
- // The maximum permitted size of a message (defaults to 1000000). Should be
- // set equal to or smaller than the broker's `message.max.bytes`.
- MaxMessageBytes int
- // The level of acknowledgement reliability needed from the broker (defaults
- // to WaitForLocal). Equivalent to the `request.required.acks` setting of the
- // JVM producer.
- RequiredAcks RequiredAcks
-		// The maximum duration the broker will wait for the receipt of the number of
- // RequiredAcks (defaults to 10 seconds). This is only relevant when
- // RequiredAcks is set to WaitForAll or a number > 1. Only supports
- // millisecond resolution, nanoseconds will be truncated. Equivalent to
- // the JVM producer's `request.timeout.ms` setting.
- Timeout time.Duration
- // The type of compression to use on messages (defaults to no compression).
- // Similar to `compression.codec` setting of the JVM producer.
- Compression CompressionCodec
- // Generates partitioners for choosing the partition to send messages to
- // (defaults to hashing the message key). Similar to the `partitioner.class`
- // setting for the JVM producer.
- Partitioner PartitionerConstructor
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from the respective channels to prevent deadlock.
- Return struct {
- // If enabled, successfully delivered messages will be returned on the
- // Successes channel (default disabled).
- Successes bool
-
- // If enabled, messages that failed to deliver will be returned on the
- // Errors channel, including error (default enabled).
- Errors bool
- }
-
- // The following config options control how often messages are batched up and
- // sent to the broker. By default, messages are sent as fast as possible, and
- // all messages received while the current batch is in-flight are placed
- // into the subsequent batch.
- Flush struct {
- // The best-effort number of bytes needed to trigger a flush. Use the
- // global sarama.MaxRequestSize to set a hard upper limit.
- Bytes int
- // The best-effort number of messages needed to trigger a flush. Use
- // `MaxMessages` to set a hard upper limit.
- Messages int
- // The best-effort frequency of flushes. Equivalent to
- // `queue.buffering.max.ms` setting of JVM producer.
- Frequency time.Duration
- // The maximum number of messages the producer will send in a single
- // broker request. Defaults to 0 for unlimited. Similar to
- // `queue.buffering.max.messages` in the JVM producer.
- MaxMessages int
- }
-
- Retry struct {
- // The total number of times to retry sending a message (default 3).
- // Similar to the `message.send.max.retries` setting of the JVM producer.
- Max int
- // How long to wait for the cluster to settle between retries
- // (default 100ms). Similar to the `retry.backoff.ms` setting of the
- // JVM producer.
- Backoff time.Duration
- }
- }
-
- // Consumer is the namespace for configuration related to consuming messages,
- // used by the Consumer.
- //
- // Note that Sarama's Consumer type does not currently support automatic
- // consumer-group rebalancing and offset tracking. For Zookeeper-based
- // tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka
- // library builds on Sarama to add this support. For Kafka-based tracking
- // (Kafka 0.9 and later), the https://github.com/bsm/sarama-cluster library
- // builds on Sarama to add this support.
- Consumer struct {
- Retry struct {
- // How long to wait after a failing to read from a partition before
- // trying again (default 2s).
- Backoff time.Duration
- }
-
- // Fetch is the namespace for controlling how many bytes are retrieved by any
- // given request.
- Fetch struct {
- // The minimum number of message bytes to fetch in a request - the broker
- // will wait until at least this many are available. The default is 1,
- // as 0 causes the consumer to spin when no messages are available.
- // Equivalent to the JVM's `fetch.min.bytes`.
- Min int32
- // The default number of message bytes to fetch from the broker in each
- // request (default 32768). This should be larger than the majority of
- // your messages, or else the consumer will spend a lot of time
- // negotiating sizes and not actually consuming. Similar to the JVM's
- // `fetch.message.max.bytes`.
- Default int32
- // The maximum number of message bytes to fetch from the broker in a
- // single request. Messages larger than this will return
- // ErrMessageTooLarge and will not be consumable, so you must be sure
- // this is at least as large as your largest message. Defaults to 0
- // (no limit). Similar to the JVM's `fetch.message.max.bytes`. The
- // global `sarama.MaxResponseSize` still applies.
- Max int32
- }
- // The maximum amount of time the broker will wait for Consumer.Fetch.Min
-		// bytes to become available before it returns fewer than that anyway. The
- // default is 250ms, since 0 causes the consumer to spin when no events are
- // available. 100-500ms is a reasonable range for most cases. Kafka only
- // supports precision up to milliseconds; nanoseconds will be truncated.
- // Equivalent to the JVM's `fetch.wait.max.ms`.
- MaxWaitTime time.Duration
-
-		// The maximum amount of time the consumer expects a message to take to process
- // for the user. If writing to the Messages channel takes longer than this,
- // that partition will stop fetching more messages until it can proceed again.
- // Note that, since the Messages channel is buffered, the actual grace time is
-		// (MaxProcessingTime * ChannelBufferSize). Defaults to 100ms.
- MaxProcessingTime time.Duration
-
- // Return specifies what channels will be populated. If they are set to true,
- // you must read from them to prevent deadlock.
- Return struct {
- // If enabled, any errors that occurred while consuming are returned on
- // the Errors channel (default disabled).
- Errors bool
- }
-
- // Offsets specifies configuration for how and when to commit consumed
- // offsets. This currently requires the manual use of an OffsetManager
- // but will eventually be automated.
- Offsets struct {
- // How frequently to commit updated offsets. Defaults to 1s.
- CommitInterval time.Duration
-
- // The initial offset to use if no offset was previously committed.
- // Should be OffsetNewest or OffsetOldest. Defaults to OffsetNewest.
- Initial int64
-
- // The retention duration for committed offsets. If zero, disabled
- // (in which case the `offsets.retention.minutes` option on the
- // broker will be used). Kafka only supports precision up to
- // milliseconds; nanoseconds will be truncated. Requires Kafka
- // broker version 0.9.0 or later.
- // (default is 0: disabled).
- Retention time.Duration
- }
- }
-
- // A user-provided string sent with every request to the brokers for logging,
- // debugging, and auditing purposes. Defaults to "sarama", but you should
- // probably set it to something specific to your application.
- ClientID string
- // The number of events to buffer in internal and external channels. This
- // permits the producer and consumer to continue processing some messages
- // in the background while user code is working, greatly improving throughput.
- // Defaults to 256.
- ChannelBufferSize int
- // The version of Kafka that Sarama will assume it is running against.
- // Defaults to the oldest supported stable version. Since Kafka provides
- // backwards-compatibility, setting it to a version older than you have
- // will not break anything, although it may prevent you from using the
- // latest features. Setting it to a version greater than you are actually
- // running may lead to random breakage.
- Version KafkaVersion
- // The registry to define metrics into.
- // Defaults to a local registry.
- // If you want to disable metrics gathering, set "metrics.UseNilMetrics" to "true"
- // prior to starting Sarama.
-	// See the Examples for how to use the metrics registry.
- MetricRegistry metrics.Registry
-}
-
-// NewConfig returns a new configuration instance with sane defaults.
-func NewConfig() *Config {
- c := &Config{}
-
- c.Net.MaxOpenRequests = 5
- c.Net.DialTimeout = 30 * time.Second
- c.Net.ReadTimeout = 30 * time.Second
- c.Net.WriteTimeout = 30 * time.Second
- c.Net.SASL.Handshake = true
-
- c.Metadata.Retry.Max = 3
- c.Metadata.Retry.Backoff = 250 * time.Millisecond
- c.Metadata.RefreshFrequency = 10 * time.Minute
-
- c.Producer.MaxMessageBytes = 1000000
- c.Producer.RequiredAcks = WaitForLocal
- c.Producer.Timeout = 10 * time.Second
- c.Producer.Partitioner = NewHashPartitioner
- c.Producer.Retry.Max = 3
- c.Producer.Retry.Backoff = 100 * time.Millisecond
- c.Producer.Return.Errors = true
-
- c.Consumer.Fetch.Min = 1
- c.Consumer.Fetch.Default = 32768
- c.Consumer.Retry.Backoff = 2 * time.Second
- c.Consumer.MaxWaitTime = 250 * time.Millisecond
- c.Consumer.MaxProcessingTime = 100 * time.Millisecond
- c.Consumer.Return.Errors = false
- c.Consumer.Offsets.CommitInterval = 1 * time.Second
- c.Consumer.Offsets.Initial = OffsetNewest
-
- c.ClientID = defaultClientID
- c.ChannelBufferSize = 256
- c.Version = minVersion
- c.MetricRegistry = metrics.NewRegistry()
-
- return c
-}
-
-// Validate checks a Config instance. It will return a
-// ConfigurationError if the specified values don't make sense.
-func (c *Config) Validate() error {
- // some configuration values should be warned on but not fail completely, do those first
-	if !c.Net.TLS.Enable && c.Net.TLS.Config != nil {
- Logger.Println("Net.TLS is disabled but a non-nil configuration was provided.")
- }
-	if !c.Net.SASL.Enable {
- if c.Net.SASL.User != "" {
- Logger.Println("Net.SASL is disabled but a non-empty username was provided.")
- }
- if c.Net.SASL.Password != "" {
- Logger.Println("Net.SASL is disabled but a non-empty password was provided.")
- }
- }
- if c.Producer.RequiredAcks > 1 {
- Logger.Println("Producer.RequiredAcks > 1 is deprecated and will raise an exception with kafka >= 0.8.2.0.")
- }
- if c.Producer.MaxMessageBytes >= int(MaxRequestSize) {
- Logger.Println("Producer.MaxMessageBytes is larger than MaxRequestSize; it will be ignored.")
- }
- if c.Producer.Flush.Bytes >= int(MaxRequestSize) {
- Logger.Println("Producer.Flush.Bytes is larger than MaxRequestSize; it will be ignored.")
- }
- if c.Producer.Timeout%time.Millisecond != 0 {
- Logger.Println("Producer.Timeout only supports millisecond resolution; nanoseconds will be truncated.")
- }
- if c.Consumer.MaxWaitTime < 100*time.Millisecond {
- Logger.Println("Consumer.MaxWaitTime is very low, which can cause high CPU and network usage. See documentation for details.")
- }
- if c.Consumer.MaxWaitTime%time.Millisecond != 0 {
- Logger.Println("Consumer.MaxWaitTime only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.Consumer.Offsets.Retention%time.Millisecond != 0 {
- Logger.Println("Consumer.Offsets.Retention only supports millisecond precision; nanoseconds will be truncated.")
- }
- if c.ClientID == defaultClientID {
- Logger.Println("ClientID is the default of 'sarama', you should consider setting it to something application-specific.")
- }
-
- // validate Net values
- switch {
- case c.Net.MaxOpenRequests <= 0:
- return ConfigurationError("Net.MaxOpenRequests must be > 0")
- case c.Net.DialTimeout <= 0:
- return ConfigurationError("Net.DialTimeout must be > 0")
- case c.Net.ReadTimeout <= 0:
- return ConfigurationError("Net.ReadTimeout must be > 0")
- case c.Net.WriteTimeout <= 0:
- return ConfigurationError("Net.WriteTimeout must be > 0")
- case c.Net.KeepAlive < 0:
- return ConfigurationError("Net.KeepAlive must be >= 0")
-	case c.Net.SASL.Enable && c.Net.SASL.User == "":
-		return ConfigurationError("Net.SASL.User must not be empty when SASL is enabled")
-	case c.Net.SASL.Enable && c.Net.SASL.Password == "":
-		return ConfigurationError("Net.SASL.Password must not be empty when SASL is enabled")
- }
-
- // validate the Metadata values
- switch {
- case c.Metadata.Retry.Max < 0:
- return ConfigurationError("Metadata.Retry.Max must be >= 0")
- case c.Metadata.Retry.Backoff < 0:
- return ConfigurationError("Metadata.Retry.Backoff must be >= 0")
- case c.Metadata.RefreshFrequency < 0:
- return ConfigurationError("Metadata.RefreshFrequency must be >= 0")
- }
-
- // validate the Producer values
- switch {
- case c.Producer.MaxMessageBytes <= 0:
- return ConfigurationError("Producer.MaxMessageBytes must be > 0")
- case c.Producer.RequiredAcks < -1:
- return ConfigurationError("Producer.RequiredAcks must be >= -1")
- case c.Producer.Timeout <= 0:
- return ConfigurationError("Producer.Timeout must be > 0")
- case c.Producer.Partitioner == nil:
- return ConfigurationError("Producer.Partitioner must not be nil")
- case c.Producer.Flush.Bytes < 0:
- return ConfigurationError("Producer.Flush.Bytes must be >= 0")
- case c.Producer.Flush.Messages < 0:
- return ConfigurationError("Producer.Flush.Messages must be >= 0")
- case c.Producer.Flush.Frequency < 0:
- return ConfigurationError("Producer.Flush.Frequency must be >= 0")
- case c.Producer.Flush.MaxMessages < 0:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= 0")
- case c.Producer.Flush.MaxMessages > 0 && c.Producer.Flush.MaxMessages < c.Producer.Flush.Messages:
- return ConfigurationError("Producer.Flush.MaxMessages must be >= Producer.Flush.Messages when set")
- case c.Producer.Retry.Max < 0:
- return ConfigurationError("Producer.Retry.Max must be >= 0")
- case c.Producer.Retry.Backoff < 0:
- return ConfigurationError("Producer.Retry.Backoff must be >= 0")
- }
-
- if c.Producer.Compression == CompressionLZ4 && !c.Version.IsAtLeast(V0_10_0_0) {
- return ConfigurationError("lz4 compression requires Version >= V0_10_0_0")
- }
-
- // validate the Consumer values
- switch {
- case c.Consumer.Fetch.Min <= 0:
- return ConfigurationError("Consumer.Fetch.Min must be > 0")
- case c.Consumer.Fetch.Default <= 0:
- return ConfigurationError("Consumer.Fetch.Default must be > 0")
- case c.Consumer.Fetch.Max < 0:
- return ConfigurationError("Consumer.Fetch.Max must be >= 0")
- case c.Consumer.MaxWaitTime < 1*time.Millisecond:
- return ConfigurationError("Consumer.MaxWaitTime must be >= 1ms")
- case c.Consumer.MaxProcessingTime <= 0:
- return ConfigurationError("Consumer.MaxProcessingTime must be > 0")
- case c.Consumer.Retry.Backoff < 0:
- return ConfigurationError("Consumer.Retry.Backoff must be >= 0")
- case c.Consumer.Offsets.CommitInterval <= 0:
- return ConfigurationError("Consumer.Offsets.CommitInterval must be > 0")
- case c.Consumer.Offsets.Initial != OffsetOldest && c.Consumer.Offsets.Initial != OffsetNewest:
- return ConfigurationError("Consumer.Offsets.Initial must be OffsetOldest or OffsetNewest")
-	}
-
- // validate misc shared values
- switch {
- case c.ChannelBufferSize < 0:
- return ConfigurationError("ChannelBufferSize must be >= 0")
- case !validID.MatchString(c.ClientID):
- return ConfigurationError("ClientID is invalid")
- }
-
- return nil
-}
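
The Validate method above fails fast on the first invalid value and only logs the softer warnings. A minimal caller-side sketch, assuming sarama's public NewConfig constructor (defined elsewhere in the package, not in this diff):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Producer.RequiredAcks = -2 // invalid: must be >= -1

	// Validate returns a ConfigurationError naming the first offending field.
	if err := cfg.Validate(); err != nil {
		fmt.Println(err) // kafka: invalid configuration (Producer.RequiredAcks must be >= -1)
	}
}
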
diff --git a/vendor/github.com/Shopify/sarama/consumer.go b/vendor/github.com/Shopify/sarama/consumer.go
deleted file mode 100644
index ddac564ff5a..00000000000
--- a/vendor/github.com/Shopify/sarama/consumer.go
+++ /dev/null
@@ -1,735 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// ConsumerMessage encapsulates a Kafka message returned by the consumer.
-type ConsumerMessage struct {
- Key, Value []byte
- Topic string
- Partition int32
- Offset int64
- Timestamp time.Time // only set if kafka is version 0.10+
-}
-
-// ConsumerError is what is provided to the user when an error occurs.
-// It wraps an error and includes the topic and partition.
-type ConsumerError struct {
- Topic string
- Partition int32
- Err error
-}
-
-func (ce ConsumerError) Error() string {
- return fmt.Sprintf("kafka: error while consuming %s/%d: %s", ce.Topic, ce.Partition, ce.Err)
-}
-
-// ConsumerErrors is a type that wraps a batch of errors and implements the Error interface.
-// It can be returned from the PartitionConsumer's Close method to avoid the need to manually drain errors
-// when stopping.
-type ConsumerErrors []*ConsumerError
-
-func (ce ConsumerErrors) Error() string {
- return fmt.Sprintf("kafka: %d errors while consuming", len(ce))
-}
-
-// Consumer manages PartitionConsumers which process Kafka messages from brokers. You MUST call Close()
-// on a consumer to avoid leaks; it will not be garbage-collected automatically when it passes out of
-// scope.
-//
-// Sarama's Consumer type does not currently support automatic consumer-group rebalancing and offset tracking.
-// For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the https://github.com/wvanbergen/kafka library
-// builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9 and later), the
-// https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
-type Consumer interface {
-
- // Topics returns the set of available topics as retrieved from the cluster
- // metadata. This method is the same as Client.Topics(), and is provided for
- // convenience.
- Topics() ([]string, error)
-
- // Partitions returns the sorted list of all partition IDs for the given topic.
- // This method is the same as Client.Partitions(), and is provided for convenience.
- Partitions(topic string) ([]int32, error)
-
- // ConsumePartition creates a PartitionConsumer on the given topic/partition with
- // the given offset. It will return an error if this Consumer is already consuming
- // on the given topic/partition. Offset can be a literal offset, or OffsetNewest
- // or OffsetOldest
- ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error)
-
- // HighWaterMarks returns the current high water marks for each topic and partition.
- // Consistency between partitions is not guaranteed since high water marks are updated separately.
- HighWaterMarks() map[string]map[int32]int64
-
- // Close shuts down the consumer. It must be called after all child
- // PartitionConsumers have already been closed.
- Close() error
-}
-
-type consumer struct {
- client Client
- conf *Config
- ownClient bool
-
- lock sync.Mutex
- children map[string]map[int32]*partitionConsumer
- brokerConsumers map[*Broker]*brokerConsumer
-}
-
-// NewConsumer creates a new consumer using the given broker addresses and configuration.
-func NewConsumer(addrs []string, config *Config) (Consumer, error) {
- client, err := NewClient(addrs, config)
- if err != nil {
- return nil, err
- }
-
- c, err := NewConsumerFromClient(client)
- if err != nil {
- return nil, err
- }
- c.(*consumer).ownClient = true
- return c, nil
-}
-
-// NewConsumerFromClient creates a new consumer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this consumer.
-func NewConsumerFromClient(client Client) (Consumer, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- c := &consumer{
- client: client,
- conf: client.Config(),
- children: make(map[string]map[int32]*partitionConsumer),
- brokerConsumers: make(map[*Broker]*brokerConsumer),
- }
-
- return c, nil
-}
-
-func (c *consumer) Close() error {
- if c.ownClient {
- return c.client.Close()
- }
- return nil
-}
-
-func (c *consumer) Topics() ([]string, error) {
- return c.client.Topics()
-}
-
-func (c *consumer) Partitions(topic string) ([]int32, error) {
- return c.client.Partitions(topic)
-}
-
-func (c *consumer) ConsumePartition(topic string, partition int32, offset int64) (PartitionConsumer, error) {
- child := &partitionConsumer{
- consumer: c,
- conf: c.conf,
- topic: topic,
- partition: partition,
- messages: make(chan *ConsumerMessage, c.conf.ChannelBufferSize),
- errors: make(chan *ConsumerError, c.conf.ChannelBufferSize),
- feeder: make(chan *FetchResponse, 1),
- trigger: make(chan none, 1),
- dying: make(chan none),
- fetchSize: c.conf.Consumer.Fetch.Default,
- }
-
- if err := child.chooseStartingOffset(offset); err != nil {
- return nil, err
- }
-
- var leader *Broker
- var err error
- if leader, err = c.client.Leader(child.topic, child.partition); err != nil {
- return nil, err
- }
-
- if err := c.addChild(child); err != nil {
- return nil, err
- }
-
- go withRecover(child.dispatcher)
- go withRecover(child.responseFeeder)
-
- child.broker = c.refBrokerConsumer(leader)
- child.broker.input <- child
-
- return child, nil
-}
-
-func (c *consumer) HighWaterMarks() map[string]map[int32]int64 {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- hwms := make(map[string]map[int32]int64)
- for topic, p := range c.children {
- hwm := make(map[int32]int64, len(p))
- for partition, pc := range p {
- hwm[partition] = pc.HighWaterMarkOffset()
- }
- hwms[topic] = hwm
- }
-
- return hwms
-}
-
-func (c *consumer) addChild(child *partitionConsumer) error {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- topicChildren := c.children[child.topic]
- if topicChildren == nil {
- topicChildren = make(map[int32]*partitionConsumer)
- c.children[child.topic] = topicChildren
- }
-
- if topicChildren[child.partition] != nil {
- return ConfigurationError("That topic/partition is already being consumed")
- }
-
- topicChildren[child.partition] = child
- return nil
-}
-
-func (c *consumer) removeChild(child *partitionConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.children[child.topic], child.partition)
-}
-
-func (c *consumer) refBrokerConsumer(broker *Broker) *brokerConsumer {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- bc := c.brokerConsumers[broker]
- if bc == nil {
- bc = c.newBrokerConsumer(broker)
- c.brokerConsumers[broker] = bc
- }
-
- bc.refs++
-
- return bc
-}
-
-func (c *consumer) unrefBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- brokerWorker.refs--
-
- if brokerWorker.refs == 0 {
- close(brokerWorker.input)
- if c.brokerConsumers[brokerWorker.broker] == brokerWorker {
- delete(c.brokerConsumers, brokerWorker.broker)
- }
- }
-}
-
-func (c *consumer) abandonBrokerConsumer(brokerWorker *brokerConsumer) {
- c.lock.Lock()
- defer c.lock.Unlock()
-
- delete(c.brokerConsumers, brokerWorker.broker)
-}
-
-// PartitionConsumer
-
-// PartitionConsumer processes Kafka messages from a given topic and partition. You MUST call Close()
-// or AsyncClose() on a PartitionConsumer to avoid leaks; it will not be garbage-collected automatically
-// when it passes out of scope.
-//
-// The simplest way of using a PartitionConsumer is to loop over its Messages channel using a for/range
-// loop. The PartitionConsumer will only stop itself in one case: when the offset being consumed is reported
-// as out of range by the brokers. In this case you should decide what you want to do (try a different offset,
-// notify a human, etc) and handle it appropriately. For all other error cases, it will just keep retrying.
-// By default, it logs these errors to sarama.Logger; if you want to be notified directly of all errors, set
-// your config's Consumer.Return.Errors to true and read from the Errors channel, using a select statement
-// or a separate goroutine. Check out the Consumer examples to see implementations of these different approaches.
-type PartitionConsumer interface {
-
- // AsyncClose initiates a shutdown of the PartitionConsumer. This method will
- // return immediately, after which you should wait until the 'messages' and
-	// 'errors' channels are drained. It is required to call this function (or
-	// Close) before a consumer object passes out of scope, as it will otherwise
- // leak memory. You must call this before calling Close on the underlying client.
- AsyncClose()
-
- // Close stops the PartitionConsumer from fetching messages. It is required to
- // call this function (or AsyncClose) before a consumer object passes out of
- // scope, as it will otherwise leak memory. You must call this before calling
- // Close on the underlying client.
- Close() error
-
- // Messages returns the read channel for the messages that are returned by
- // the broker.
- Messages() <-chan *ConsumerMessage
-
- // Errors returns a read channel of errors that occurred during consuming, if
- // enabled. By default, errors are logged and not returned over this channel.
- // If you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan *ConsumerError
-
- // HighWaterMarkOffset returns the high water mark offset of the partition,
- // i.e. the offset that will be used for the next message that will be produced.
- // You can use this to determine how far behind the processing is.
- HighWaterMarkOffset() int64
-}
-
-type partitionConsumer struct {
- consumer *consumer
- conf *Config
- topic string
- partition int32
-
- broker *brokerConsumer
- messages chan *ConsumerMessage
- errors chan *ConsumerError
- feeder chan *FetchResponse
-
- trigger, dying chan none
- responseResult error
-
- fetchSize int32
- offset int64
- highWaterMarkOffset int64
-}
-
-var errTimedOut = errors.New("timed out feeding messages to the user") // not user-facing
-
-func (child *partitionConsumer) sendError(err error) {
- cErr := &ConsumerError{
- Topic: child.topic,
- Partition: child.partition,
- Err: err,
- }
-
- if child.conf.Consumer.Return.Errors {
- child.errors <- cErr
- } else {
- Logger.Println(cErr)
- }
-}
-
-func (child *partitionConsumer) dispatcher() {
-	for range child.trigger {
- select {
- case <-child.dying:
- close(child.trigger)
- case <-time.After(child.conf.Consumer.Retry.Backoff):
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- child.broker = nil
- }
-
- Logger.Printf("consumer/%s/%d finding new broker\n", child.topic, child.partition)
- if err := child.dispatch(); err != nil {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
- }
-
- if child.broker != nil {
- child.consumer.unrefBrokerConsumer(child.broker)
- }
- child.consumer.removeChild(child)
- close(child.feeder)
-}
-
-func (child *partitionConsumer) dispatch() error {
- if err := child.consumer.client.RefreshMetadata(child.topic); err != nil {
- return err
- }
-
- var leader *Broker
- var err error
- if leader, err = child.consumer.client.Leader(child.topic, child.partition); err != nil {
- return err
- }
-
- child.broker = child.consumer.refBrokerConsumer(leader)
-
- child.broker.input <- child
-
- return nil
-}
-
-func (child *partitionConsumer) chooseStartingOffset(offset int64) error {
- newestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetNewest)
- if err != nil {
- return err
- }
- oldestOffset, err := child.consumer.client.GetOffset(child.topic, child.partition, OffsetOldest)
- if err != nil {
- return err
- }
-
- switch {
- case offset == OffsetNewest:
- child.offset = newestOffset
- case offset == OffsetOldest:
- child.offset = oldestOffset
- case offset >= oldestOffset && offset <= newestOffset:
- child.offset = offset
- default:
- return ErrOffsetOutOfRange
- }
-
- return nil
-}
-
-func (child *partitionConsumer) Messages() <-chan *ConsumerMessage {
- return child.messages
-}
-
-func (child *partitionConsumer) Errors() <-chan *ConsumerError {
- return child.errors
-}
-
-func (child *partitionConsumer) AsyncClose() {
- // this triggers whatever broker owns this child to abandon it and close its trigger channel, which causes
-	// the dispatcher to exit its loop, which removes it from the consumer and then closes its 'messages' and
-	// 'errors' channels (alternatively, if the child is already at the dispatcher for some reason, that will
- // also just close itself)
- close(child.dying)
-}
-
-func (child *partitionConsumer) Close() error {
- child.AsyncClose()
-
- go withRecover(func() {
-		for range child.messages {
- // drain
- }
- })
-
- var errors ConsumerErrors
- for err := range child.errors {
- errors = append(errors, err)
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (child *partitionConsumer) HighWaterMarkOffset() int64 {
- return atomic.LoadInt64(&child.highWaterMarkOffset)
-}
-
-func (child *partitionConsumer) responseFeeder() {
- var msgs []*ConsumerMessage
- expiryTimer := time.NewTimer(child.conf.Consumer.MaxProcessingTime)
- expireTimedOut := false
-
-feederLoop:
- for response := range child.feeder {
- msgs, child.responseResult = child.parseResponse(response)
-
- for i, msg := range msgs {
- if !expiryTimer.Stop() && !expireTimedOut {
- // expiryTimer was expired; clear out the waiting msg
- <-expiryTimer.C
- }
- expiryTimer.Reset(child.conf.Consumer.MaxProcessingTime)
- expireTimedOut = false
-
- select {
- case child.messages <- msg:
- case <-expiryTimer.C:
- expireTimedOut = true
- child.responseResult = errTimedOut
- child.broker.acks.Done()
- for _, msg = range msgs[i:] {
- child.messages <- msg
- }
- child.broker.input <- child
- continue feederLoop
- }
- }
-
- child.broker.acks.Done()
- }
-
- close(child.messages)
- close(child.errors)
-}
-
-func (child *partitionConsumer) parseResponse(response *FetchResponse) ([]*ConsumerMessage, error) {
- block := response.GetBlock(child.topic, child.partition)
- if block == nil {
- return nil, ErrIncompleteResponse
- }
-
- if block.Err != ErrNoError {
- return nil, block.Err
- }
-
- if len(block.MsgSet.Messages) == 0 {
- // We got no messages. If we got a trailing one then we need to ask for more data.
- // Otherwise we just poll again and wait for one to be produced...
- if block.MsgSet.PartialTrailingMessage {
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize == child.conf.Consumer.Fetch.Max {
- // we can't ask for more data, we've hit the configured limit
- child.sendError(ErrMessageTooLarge)
- child.offset++ // skip this one so we can keep processing future messages
- } else {
- child.fetchSize *= 2
- if child.conf.Consumer.Fetch.Max > 0 && child.fetchSize > child.conf.Consumer.Fetch.Max {
- child.fetchSize = child.conf.Consumer.Fetch.Max
- }
- }
- }
-
- return nil, nil
- }
-
- // we got messages, reset our fetch size in case it was increased for a previous request
- child.fetchSize = child.conf.Consumer.Fetch.Default
- atomic.StoreInt64(&child.highWaterMarkOffset, block.HighWaterMarkOffset)
-
- incomplete := false
- prelude := true
- var messages []*ConsumerMessage
- for _, msgBlock := range block.MsgSet.Messages {
-
- for _, msg := range msgBlock.Messages() {
- offset := msg.Offset
- if msg.Msg.Version >= 1 {
- baseOffset := msgBlock.Offset - msgBlock.Messages()[len(msgBlock.Messages())-1].Offset
- offset += baseOffset
- }
- if prelude && offset < child.offset {
- continue
- }
- prelude = false
-
- if offset >= child.offset {
- messages = append(messages, &ConsumerMessage{
- Topic: child.topic,
- Partition: child.partition,
- Key: msg.Msg.Key,
- Value: msg.Msg.Value,
- Offset: offset,
- Timestamp: msg.Msg.Timestamp,
- })
- child.offset = offset + 1
- } else {
- incomplete = true
- }
- }
-
- }
-
- if incomplete || len(messages) == 0 {
- return nil, ErrIncompleteResponse
- }
- return messages, nil
-}
-
-// brokerConsumer
-
-type brokerConsumer struct {
- consumer *consumer
- broker *Broker
- input chan *partitionConsumer
- newSubscriptions chan []*partitionConsumer
- wait chan none
- subscriptions map[*partitionConsumer]none
- acks sync.WaitGroup
- refs int
-}
-
-func (c *consumer) newBrokerConsumer(broker *Broker) *brokerConsumer {
- bc := &brokerConsumer{
- consumer: c,
- broker: broker,
- input: make(chan *partitionConsumer),
- newSubscriptions: make(chan []*partitionConsumer),
- wait: make(chan none),
- subscriptions: make(map[*partitionConsumer]none),
- refs: 0,
- }
-
- go withRecover(bc.subscriptionManager)
- go withRecover(bc.subscriptionConsumer)
-
- return bc
-}
-
-func (bc *brokerConsumer) subscriptionManager() {
- var buffer []*partitionConsumer
-
-	// The subscriptionManager constantly accepts new subscriptions on `input` (even when the main subscriptionConsumer
-	// goroutine is in the middle of a network request) and batches them up. The main worker goroutine picks
-	// up a batch of new subscriptions between every network request by reading from `newSubscriptions`, so we give
-	// it nil if no new subscriptions are available. We also write to `wait` only when new subscriptions are available,
-	// so the main goroutine can block waiting for work if it has none.
- for {
- if len(buffer) > 0 {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- buffer:
- buffer = nil
- case bc.wait <- none{}:
- }
- } else {
- select {
- case event, ok := <-bc.input:
- if !ok {
- goto done
- }
- buffer = append(buffer, event)
- case bc.newSubscriptions <- nil:
- }
- }
- }
-
-done:
- close(bc.wait)
- if len(buffer) > 0 {
- bc.newSubscriptions <- buffer
- }
- close(bc.newSubscriptions)
-}
-
-func (bc *brokerConsumer) subscriptionConsumer() {
- <-bc.wait // wait for our first piece of work
-
-	// the subscriptionManager ensures we will get nil right away if no new subscriptions are available
- for newSubscriptions := range bc.newSubscriptions {
- bc.updateSubscriptions(newSubscriptions)
-
- if len(bc.subscriptions) == 0 {
- // We're about to be shut down or we're about to receive more subscriptions.
- // Either way, the signal just hasn't propagated to our goroutine yet.
- <-bc.wait
- continue
- }
-
- response, err := bc.fetchNewMessages()
-
- if err != nil {
- Logger.Printf("consumer/broker/%d disconnecting due to error processing FetchRequest: %s\n", bc.broker.ID(), err)
- bc.abort(err)
- return
- }
-
- bc.acks.Add(len(bc.subscriptions))
- for child := range bc.subscriptions {
- child.feeder <- response
- }
- bc.acks.Wait()
- bc.handleResponses()
- }
-}
-
-func (bc *brokerConsumer) updateSubscriptions(newSubscriptions []*partitionConsumer) {
- for _, child := range newSubscriptions {
- bc.subscriptions[child] = none{}
- Logger.Printf("consumer/broker/%d added subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- }
-
- for child := range bc.subscriptions {
- select {
- case <-child.dying:
- Logger.Printf("consumer/broker/%d closed dead subscription to %s/%d\n", bc.broker.ID(), child.topic, child.partition)
- close(child.trigger)
- delete(bc.subscriptions, child)
-		default:
- }
- }
-}
-
-func (bc *brokerConsumer) handleResponses() {
- // handles the response codes left for us by our subscriptions, and abandons ones that have been closed
- for child := range bc.subscriptions {
- result := child.responseResult
- child.responseResult = nil
-
- switch result {
-		case nil:
- case errTimedOut:
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because consuming was taking too long\n",
- bc.broker.ID(), child.topic, child.partition)
- delete(bc.subscriptions, child)
- case ErrOffsetOutOfRange:
-			// there's no point in retrying this; it will just fail the same way again,
-			// so shut it down and force the user to choose what to do
- child.sendError(result)
- Logger.Printf("consumer/%s/%d shutting down because %s\n", child.topic, child.partition, result)
- close(child.trigger)
- delete(bc.subscriptions, child)
- case ErrUnknownTopicOrPartition, ErrNotLeaderForPartition, ErrLeaderNotAvailable, ErrReplicaNotAvailable:
- // not an error, but does need redispatching
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- default:
-			// unknown error; tell the user and try redispatching
- child.sendError(result)
- Logger.Printf("consumer/broker/%d abandoned subscription to %s/%d because %s\n",
- bc.broker.ID(), child.topic, child.partition, result)
- child.trigger <- none{}
- delete(bc.subscriptions, child)
- }
- }
-}
-
-func (bc *brokerConsumer) abort(err error) {
- bc.consumer.abandonBrokerConsumer(bc)
- _ = bc.broker.Close() // we don't care about the error this might return, we already have one
-
- for child := range bc.subscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
-
- for newSubscriptions := range bc.newSubscriptions {
- if len(newSubscriptions) == 0 {
- <-bc.wait
- continue
- }
- for _, child := range newSubscriptions {
- child.sendError(err)
- child.trigger <- none{}
- }
- }
-}
-
-func (bc *brokerConsumer) fetchNewMessages() (*FetchResponse, error) {
- request := &FetchRequest{
- MinBytes: bc.consumer.conf.Consumer.Fetch.Min,
- MaxWaitTime: int32(bc.consumer.conf.Consumer.MaxWaitTime / time.Millisecond),
- }
- if bc.consumer.conf.Version.IsAtLeast(V0_10_0_0) {
- request.Version = 2
- }
-
- for child := range bc.subscriptions {
- request.AddBlock(child.topic, child.partition, child.offset, child.fetchSize)
- }
-
- return bc.broker.Fetch(request)
-}
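
The doc comments above spell out the ownership rules: a Consumer and each of its PartitionConsumers must be closed explicitly. A minimal sketch of the intended usage against the constructors shown above (NewConfig is assumed from elsewhere in the package; the broker address and topic name are placeholders):

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	cfg := sarama.NewConfig()
	cfg.Consumer.Return.Errors = true // receive errors on Errors() instead of the logger

	consumer, err := sarama.NewConsumer([]string{"localhost:9092"}, cfg)
	if err != nil {
		log.Fatal(err)
	}
	defer consumer.Close()

	pc, err := consumer.ConsumePartition("my-topic", 0, sarama.OffsetOldest)
	if err != nil {
		log.Fatal(err)
	}
	defer pc.Close()

	for i := 0; i < 10; i++ {
		select {
		case msg := <-pc.Messages():
			log.Printf("offset %d: %s", msg.Offset, msg.Value)
		case consErr := <-pc.Errors():
			log.Printf("consume error: %v", consErr)
		}
	}
}
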
diff --git a/vendor/github.com/Shopify/sarama/consumer_group_members.go b/vendor/github.com/Shopify/sarama/consumer_group_members.go
deleted file mode 100644
index 9d92d350a5d..00000000000
--- a/vendor/github.com/Shopify/sarama/consumer_group_members.go
+++ /dev/null
@@ -1,94 +0,0 @@
-package sarama
-
-type ConsumerGroupMemberMetadata struct {
- Version int16
- Topics []string
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberMetadata) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putStringArray(m.Topics); err != nil {
- return err
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberMetadata) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- if m.Topics, err = pd.getStringArray(); err != nil {
- return
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-type ConsumerGroupMemberAssignment struct {
- Version int16
- Topics map[string][]int32
- UserData []byte
-}
-
-func (m *ConsumerGroupMemberAssignment) encode(pe packetEncoder) error {
- pe.putInt16(m.Version)
-
- if err := pe.putArrayLength(len(m.Topics)); err != nil {
- return err
- }
-
- for topic, partitions := range m.Topics {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
-
- if err := pe.putBytes(m.UserData); err != nil {
- return err
- }
-
- return nil
-}
-
-func (m *ConsumerGroupMemberAssignment) decode(pd packetDecoder) (err error) {
- if m.Version, err = pd.getInt16(); err != nil {
- return
- }
-
- var topicLen int
- if topicLen, err = pd.getArrayLength(); err != nil {
- return
- }
-
- m.Topics = make(map[string][]int32, topicLen)
- for i := 0; i < topicLen; i++ {
- var topic string
- if topic, err = pd.getString(); err != nil {
- return
- }
- if m.Topics[topic], err = pd.getInt32Array(); err != nil {
- return
- }
- }
-
- if m.UserData, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
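
ConsumerGroupMemberMetadata and ConsumerGroupMemberAssignment are plain data carriers for the group-membership protocol; the encode/decode methods above are package-private and run through the packetEncoder/packetDecoder machinery. A sketch of the shapes a group client would build (all field values are illustrative only):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// What a member advertises when joining a group.
	meta := sarama.ConsumerGroupMemberMetadata{
		Version: 0,
		Topics:  []string{"orders", "payments"},
	}

	// What the group leader hands back to each member after balancing.
	assignment := sarama.ConsumerGroupMemberAssignment{
		Version: 0,
		Topics: map[string][]int32{
			"orders":   {0, 1, 2},
			"payments": {3},
		},
	}

	fmt.Println(meta.Topics, assignment.Topics)
}
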
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go b/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
deleted file mode 100644
index 483be3354df..00000000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_request.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package sarama
-
-type ConsumerMetadataRequest struct {
- ConsumerGroup string
-}
-
-func (r *ConsumerMetadataRequest) encode(pe packetEncoder) error {
- return pe.putString(r.ConsumerGroup)
-}
-
-func (r *ConsumerMetadataRequest) decode(pd packetDecoder, version int16) (err error) {
- r.ConsumerGroup, err = pd.getString()
- return err
-}
-
-func (r *ConsumerMetadataRequest) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataRequest) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataRequest) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
diff --git a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go b/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
deleted file mode 100644
index 6b9632bbafe..00000000000
--- a/vendor/github.com/Shopify/sarama/consumer_metadata_response.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-import (
- "net"
- "strconv"
-)
-
-type ConsumerMetadataResponse struct {
- Err KError
- Coordinator *Broker
- CoordinatorID int32 // deprecated: use Coordinator.ID()
- CoordinatorHost string // deprecated: use Coordinator.Addr()
- CoordinatorPort int32 // deprecated: use Coordinator.Addr()
-}
-
-func (r *ConsumerMetadataResponse) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Err = KError(tmp)
-
- coordinator := new(Broker)
- if err := coordinator.decode(pd); err != nil {
- return err
- }
- if coordinator.addr == ":0" {
- return nil
- }
- r.Coordinator = coordinator
-
- // this can all go away in 2.0, but we have to fill in deprecated fields to maintain
- // backwards compatibility
- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
- if err != nil {
- return err
- }
- port, err := strconv.ParseInt(portstr, 10, 32)
- if err != nil {
- return err
- }
- r.CoordinatorID = r.Coordinator.ID()
- r.CoordinatorHost = host
- r.CoordinatorPort = int32(port)
-
- return nil
-}
-
-func (r *ConsumerMetadataResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- if r.Coordinator != nil {
- host, portstr, err := net.SplitHostPort(r.Coordinator.Addr())
- if err != nil {
- return err
- }
- port, err := strconv.ParseInt(portstr, 10, 32)
- if err != nil {
- return err
- }
- pe.putInt32(r.Coordinator.ID())
- if err := pe.putString(host); err != nil {
- return err
- }
- pe.putInt32(int32(port))
- return nil
- }
- pe.putInt32(r.CoordinatorID)
- if err := pe.putString(r.CoordinatorHost); err != nil {
- return err
- }
- pe.putInt32(r.CoordinatorPort)
- return nil
-}
-
-func (r *ConsumerMetadataResponse) key() int16 {
- return 10
-}
-
-func (r *ConsumerMetadataResponse) version() int16 {
- return 0
-}
-
-func (r *ConsumerMetadataResponse) requiredVersion() KafkaVersion {
- return V0_8_2_0
-}
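
ConsumerMetadataRequest/Response (API key 10) locate the coordinator broker for a consumer group, with the deprecated Coordinator* scalar fields kept in sync for older callers. A hedged sketch of a lookup against an open broker; Broker.GetConsumerMetadata and NewBroker/Open are assumed from elsewhere in the package (not part of this diff), and the address and group name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	resp, err := broker.GetConsumerMetadata(&sarama.ConsumerMetadataRequest{ConsumerGroup: "my-group"})
	if err != nil {
		log.Fatal(err)
	}
	if resp.Err != sarama.ErrNoError {
		log.Fatal(resp.Err)
	}
	// Prefer the Coordinator broker over the deprecated scalar fields.
	log.Printf("coordinator: %d at %s", resp.Coordinator.ID(), resp.Coordinator.Addr())
}
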
diff --git a/vendor/github.com/Shopify/sarama/crc32_field.go b/vendor/github.com/Shopify/sarama/crc32_field.go
deleted file mode 100644
index 5c286079056..00000000000
--- a/vendor/github.com/Shopify/sarama/crc32_field.go
+++ /dev/null
@@ -1,36 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
-
- "github.com/klauspost/crc32"
-)
-
-// crc32Field implements the pushEncoder and pushDecoder interfaces for calculating CRC32s.
-type crc32Field struct {
- startOffset int
-}
-
-func (c *crc32Field) saveOffset(in int) {
- c.startOffset = in
-}
-
-func (c *crc32Field) reserveLength() int {
- return 4
-}
-
-func (c *crc32Field) run(curOffset int, buf []byte) error {
- crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
- binary.BigEndian.PutUint32(buf[c.startOffset:], crc)
- return nil
-}
-
-func (c *crc32Field) check(curOffset int, buf []byte) error {
- crc := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
-
- if crc != binary.BigEndian.Uint32(buf[c.startOffset:]) {
- return PacketDecodingError{"CRC didn't match"}
- }
-
- return nil
-}
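
crc32Field is a push encoder: encoding reserves four bytes, the message body is written after them, and run backfills the checksum over everything that followed the reservation. The same reserve-then-backfill layout can be shown with only the standard library (hash/crc32's ChecksumIEEE matches the klauspost fork imported above):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func main() {
	body := []byte("message body")
	buf := make([]byte, 4+len(body)) // 4 reserved bytes, then the payload
	copy(buf[4:], body)

	// Backfill the checksum over everything after the reservation,
	// exactly as crc32Field.run does with buf[startOffset+4 : curOffset].
	binary.BigEndian.PutUint32(buf[:4], crc32.ChecksumIEEE(buf[4:]))

	// The decode-side check recomputes and compares.
	fmt.Println(crc32.ChecksumIEEE(buf[4:]) == binary.BigEndian.Uint32(buf[:4])) // true
}
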
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_request.go b/vendor/github.com/Shopify/sarama/describe_groups_request.go
deleted file mode 100644
index 1fb35677708..00000000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_request.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package sarama
-
-type DescribeGroupsRequest struct {
- Groups []string
-}
-
-func (r *DescribeGroupsRequest) encode(pe packetEncoder) error {
- return pe.putStringArray(r.Groups)
-}
-
-func (r *DescribeGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Groups, err = pd.getStringArray()
- return
-}
-
-func (r *DescribeGroupsRequest) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *DescribeGroupsRequest) AddGroup(group string) {
- r.Groups = append(r.Groups, group)
-}
diff --git a/vendor/github.com/Shopify/sarama/describe_groups_response.go b/vendor/github.com/Shopify/sarama/describe_groups_response.go
deleted file mode 100644
index d2c2071e1e6..00000000000
--- a/vendor/github.com/Shopify/sarama/describe_groups_response.go
+++ /dev/null
@@ -1,186 +0,0 @@
-package sarama
-
-type DescribeGroupsResponse struct {
- Groups []*GroupDescription
-}
-
-func (r *DescribeGroupsResponse) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Groups)); err != nil {
- return err
- }
-
- for _, groupDescription := range r.Groups {
- if err := groupDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Groups = make([]*GroupDescription, n)
- for i := 0; i < n; i++ {
- r.Groups[i] = new(GroupDescription)
- if err := r.Groups[i].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *DescribeGroupsResponse) key() int16 {
- return 15
-}
-
-func (r *DescribeGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *DescribeGroupsResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-type GroupDescription struct {
- Err KError
- GroupId string
- State string
- ProtocolType string
- Protocol string
- Members map[string]*GroupMemberDescription
-}
-
-func (gd *GroupDescription) encode(pe packetEncoder) error {
- pe.putInt16(int16(gd.Err))
-
- if err := pe.putString(gd.GroupId); err != nil {
- return err
- }
- if err := pe.putString(gd.State); err != nil {
- return err
- }
- if err := pe.putString(gd.ProtocolType); err != nil {
- return err
- }
- if err := pe.putString(gd.Protocol); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(gd.Members)); err != nil {
- return err
- }
-
- for memberId, groupMemberDescription := range gd.Members {
- if err := pe.putString(memberId); err != nil {
- return err
- }
- if err := groupMemberDescription.encode(pe); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (gd *GroupDescription) decode(pd packetDecoder) (err error) {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- gd.Err = KError(kerr)
- }
-
- if gd.GroupId, err = pd.getString(); err != nil {
- return
- }
- if gd.State, err = pd.getString(); err != nil {
- return
- }
- if gd.ProtocolType, err = pd.getString(); err != nil {
- return
- }
- if gd.Protocol, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- gd.Members = make(map[string]*GroupMemberDescription)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
-
- gd.Members[memberId] = new(GroupMemberDescription)
- if err := gd.Members[memberId].decode(pd); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type GroupMemberDescription struct {
- ClientId string
- ClientHost string
- MemberMetadata []byte
- MemberAssignment []byte
-}
-
-func (gmd *GroupMemberDescription) encode(pe packetEncoder) error {
- if err := pe.putString(gmd.ClientId); err != nil {
- return err
- }
- if err := pe.putString(gmd.ClientHost); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberMetadata); err != nil {
- return err
- }
- if err := pe.putBytes(gmd.MemberAssignment); err != nil {
- return err
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) decode(pd packetDecoder) (err error) {
- if gmd.ClientId, err = pd.getString(); err != nil {
- return
- }
- if gmd.ClientHost, err = pd.getString(); err != nil {
- return
- }
- if gmd.MemberMetadata, err = pd.getBytes(); err != nil {
- return
- }
- if gmd.MemberAssignment, err = pd.getBytes(); err != nil {
- return
- }
-
- return nil
-}
-
-func (gmd *GroupMemberDescription) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
- assignment := new(ConsumerGroupMemberAssignment)
- err := decode(gmd.MemberAssignment, assignment)
- return assignment, err
-}
-
-func (gmd *GroupMemberDescription) GetMemberMetadata() (*ConsumerGroupMemberMetadata, error) {
- metadata := new(ConsumerGroupMemberMetadata)
- err := decode(gmd.MemberMetadata, metadata)
- return metadata, err
-}
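
DescribeGroups (API key 15) returns one GroupDescription per requested group, and GetMemberAssignment/GetMemberMetadata decode the raw member blobs into the consumer-group types above. A sketch against an open broker; Broker.DescribeGroups and NewBroker/Open are assumed from elsewhere in the package, and the address and group name are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	broker := sarama.NewBroker("localhost:9092")
	if err := broker.Open(sarama.NewConfig()); err != nil {
		log.Fatal(err)
	}
	defer broker.Close()

	req := new(sarama.DescribeGroupsRequest)
	req.AddGroup("my-group")

	resp, err := broker.DescribeGroups(req)
	if err != nil {
		log.Fatal(err)
	}
	for _, g := range resp.Groups {
		log.Printf("group %s state=%s protocol=%s", g.GroupId, g.State, g.Protocol)
		for id, m := range g.Members {
			assignment, err := m.GetMemberAssignment()
			if err != nil {
				continue // member blobs may be empty outside the Stable state
			}
			log.Printf("  member %s (%s) owns %v", id, m.ClientHost, assignment.Topics)
		}
	}
}
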
diff --git a/vendor/github.com/Shopify/sarama/dev.yml b/vendor/github.com/Shopify/sarama/dev.yml
deleted file mode 100644
index e014316feb0..00000000000
--- a/vendor/github.com/Shopify/sarama/dev.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-name: sarama
-
-up:
- - go: 1.7.3
-
-commands:
- test:
- run: make test
- desc: 'run unit tests'
-
-packages:
- - git@github.com:Shopify/dev-shopify.git
-
diff --git a/vendor/github.com/Shopify/sarama/encoder_decoder.go b/vendor/github.com/Shopify/sarama/encoder_decoder.go
deleted file mode 100644
index 7ce3bc0f6e2..00000000000
--- a/vendor/github.com/Shopify/sarama/encoder_decoder.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-import (
- "fmt"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// encoder is the interface that wraps the basic encode method.
-// Anything implementing encoder can be turned into bytes using Kafka's encoding rules.
-type encoder interface {
- encode(pe packetEncoder) error
-}
-
-// encode takes an encoder and turns it into bytes while potentially recording metrics.
-func encode(e encoder, metricRegistry metrics.Registry) ([]byte, error) {
- if e == nil {
- return nil, nil
- }
-
- var prepEnc prepEncoder
- var realEnc realEncoder
-
- err := e.encode(&prepEnc)
- if err != nil {
- return nil, err
- }
-
- if prepEnc.length < 0 || prepEnc.length > int(MaxRequestSize) {
- return nil, PacketEncodingError{fmt.Sprintf("invalid request size (%d)", prepEnc.length)}
- }
-
- realEnc.raw = make([]byte, prepEnc.length)
- realEnc.registry = metricRegistry
- err = e.encode(&realEnc)
- if err != nil {
- return nil, err
- }
-
- return realEnc.raw, nil
-}
-
-// decoder is the interface that wraps the basic decode method.
-// Anything implementing decoder can be extracted from bytes using Kafka's encoding rules.
-type decoder interface {
- decode(pd packetDecoder) error
-}
-
-type versionedDecoder interface {
- decode(pd packetDecoder, version int16) error
-}
-
-// decode takes bytes and a decoder and fills the fields of the decoder from the bytes,
-// interpreted using Kafka's encoding rules.
-func decode(buf []byte, in decoder) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
-
-func versionedDecode(buf []byte, in versionedDecoder, version int16) error {
- if buf == nil {
- return nil
- }
-
- helper := realDecoder{raw: buf}
- err := in.decode(&helper, version)
- if err != nil {
- return err
- }
-
- if helper.off != len(buf) {
- return PacketDecodingError{"invalid length"}
- }
-
- return nil
-}
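
encode above runs every message through two passes: a prepEncoder that only counts bytes, then a realEncoder that writes into an exactly-sized buffer, which is also where the request-size bound is enforced. The pattern itself, shown here with hypothetical minimal types independent of sarama's internals:

package main

import (
	"encoding/binary"
	"fmt"
)

// writer abstracts the two passes, much like sarama's packetEncoder
// (hypothetical single-primitive version).
type writer interface{ putInt32(v int32) }

// prepPass only measures, mirroring prepEncoder.
type prepPass struct{ length int }

func (p *prepPass) putInt32(int32) { p.length += 4 }

// realPass writes into a pre-sized buffer, mirroring realEncoder.
type realPass struct {
	raw []byte
	off int
}

func (r *realPass) putInt32(v int32) {
	binary.BigEndian.PutUint32(r.raw[r.off:], uint32(v))
	r.off += 4
}

func encodeInts(vals []int32) []byte {
	emit := func(w writer) {
		for _, v := range vals {
			w.putInt32(v)
		}
	}
	prep := &prepPass{}
	emit(prep) // pass 1: measure
	enc := &realPass{raw: make([]byte, prep.length)}
	emit(enc) // pass 2: write into an exactly-sized buffer
	return enc.raw
}

func main() {
	fmt.Printf("% x\n", encodeInts([]int32{1, 2})) // 00 00 00 01 00 00 00 02
}
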
diff --git a/vendor/github.com/Shopify/sarama/errors.go b/vendor/github.com/Shopify/sarama/errors.go
deleted file mode 100644
index cc3f623d0d7..00000000000
--- a/vendor/github.com/Shopify/sarama/errors.go
+++ /dev/null
@@ -1,197 +0,0 @@
-package sarama
-
-import (
- "errors"
- "fmt"
-)
-
-// ErrOutOfBrokers is the error returned when the client has run out of brokers to talk to because all of them errored
-// or otherwise failed to respond.
-var ErrOutOfBrokers = errors.New("kafka: client has run out of available brokers to talk to (Is your cluster reachable?)")
-
-// ErrClosedClient is the error returned when a method is called on a client that has been closed.
-var ErrClosedClient = errors.New("kafka: tried to use a client that was closed")
-
-// ErrIncompleteResponse is the error returned when the server returns a syntactically valid response, but it does
-// not contain the expected information.
-var ErrIncompleteResponse = errors.New("kafka: response did not contain all the expected topic/partition blocks")
-
-// ErrInvalidPartition is the error returned when a partitioner returns an invalid partition index
-// (meaning one outside of the range [0...numPartitions-1]).
-var ErrInvalidPartition = errors.New("kafka: partitioner returned an invalid partition index")
-
-// ErrAlreadyConnected is the error returned when calling Open() on a Broker that is already connected or connecting.
-var ErrAlreadyConnected = errors.New("kafka: broker connection already initiated")
-
-// ErrNotConnected is the error returned when trying to send or call Close() on a Broker that is not connected.
-var ErrNotConnected = errors.New("kafka: broker not connected")
-
-// ErrInsufficientData is returned when decoding and the packet is truncated. This can be expected
-// when requesting messages, since as an optimization the server is allowed to return a partial message at the end
-// of the message set.
-var ErrInsufficientData = errors.New("kafka: insufficient data to decode packet, more bytes expected")
-
-// ErrShuttingDown is returned when a producer receives a message during shutdown.
-var ErrShuttingDown = errors.New("kafka: message received by producer in process of shutting down")
-
-// ErrMessageTooLarge is returned when the next message to consume is larger than the configured Consumer.Fetch.Max
-var ErrMessageTooLarge = errors.New("kafka: message is larger than Consumer.Fetch.Max")
-
-// PacketEncodingError is returned from a failure while encoding a Kafka packet. This can happen, for example,
-// if you try to encode a string over 2^15 characters in length, since Kafka's encoding rules do not permit that.
-type PacketEncodingError struct {
- Info string
-}
-
-func (err PacketEncodingError) Error() string {
- return fmt.Sprintf("kafka: error encoding packet: %s", err.Info)
-}
-
-// PacketDecodingError is returned when there was an error (other than truncated data) decoding the Kafka broker's response.
-// This can be a bad CRC or length field, or any other invalid value.
-type PacketDecodingError struct {
- Info string
-}
-
-func (err PacketDecodingError) Error() string {
- return fmt.Sprintf("kafka: error decoding packet: %s", err.Info)
-}
-
-// ConfigurationError is the type of error returned from a constructor (e.g. NewClient, or NewConsumer)
-// when the specified configuration is invalid.
-type ConfigurationError string
-
-func (err ConfigurationError) Error() string {
- return "kafka: invalid configuration (" + string(err) + ")"
-}
-
-// KError is the type of error that can be returned directly by the Kafka broker.
-// See https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-ErrorCodes
-type KError int16
-
-// Numeric error codes returned by the Kafka server.
-const (
- ErrNoError KError = 0
- ErrUnknown KError = -1
- ErrOffsetOutOfRange KError = 1
- ErrInvalidMessage KError = 2
- ErrUnknownTopicOrPartition KError = 3
- ErrInvalidMessageSize KError = 4
- ErrLeaderNotAvailable KError = 5
- ErrNotLeaderForPartition KError = 6
- ErrRequestTimedOut KError = 7
- ErrBrokerNotAvailable KError = 8
- ErrReplicaNotAvailable KError = 9
- ErrMessageSizeTooLarge KError = 10
- ErrStaleControllerEpochCode KError = 11
- ErrOffsetMetadataTooLarge KError = 12
- ErrNetworkException KError = 13
- ErrOffsetsLoadInProgress KError = 14
- ErrConsumerCoordinatorNotAvailable KError = 15
- ErrNotCoordinatorForConsumer KError = 16
- ErrInvalidTopic KError = 17
- ErrMessageSetSizeTooLarge KError = 18
- ErrNotEnoughReplicas KError = 19
- ErrNotEnoughReplicasAfterAppend KError = 20
- ErrInvalidRequiredAcks KError = 21
- ErrIllegalGeneration KError = 22
- ErrInconsistentGroupProtocol KError = 23
- ErrInvalidGroupId KError = 24
- ErrUnknownMemberId KError = 25
- ErrInvalidSessionTimeout KError = 26
- ErrRebalanceInProgress KError = 27
- ErrInvalidCommitOffsetSize KError = 28
- ErrTopicAuthorizationFailed KError = 29
- ErrGroupAuthorizationFailed KError = 30
- ErrClusterAuthorizationFailed KError = 31
- ErrInvalidTimestamp KError = 32
- ErrUnsupportedSASLMechanism KError = 33
- ErrIllegalSASLState KError = 34
- ErrUnsupportedVersion KError = 35
- ErrUnsupportedForMessageFormat KError = 43
-)
-
-func (err KError) Error() string {
- // Error messages stolen/adapted from
- // https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
- switch err {
- case ErrNoError:
- return "kafka server: Not an error, why are you printing me?"
- case ErrUnknown:
- return "kafka server: Unexpected (unknown?) server error."
- case ErrOffsetOutOfRange:
- return "kafka server: The requested offset is outside the range of offsets maintained by the server for the given topic/partition."
- case ErrInvalidMessage:
- return "kafka server: Message contents does not match its CRC."
- case ErrUnknownTopicOrPartition:
- return "kafka server: Request was for a topic or partition that does not exist on this broker."
- case ErrInvalidMessageSize:
- return "kafka server: The message has a negative size."
- case ErrLeaderNotAvailable:
- return "kafka server: In the middle of a leadership election, there is currently no leader for this partition and hence it is unavailable for writes."
- case ErrNotLeaderForPartition:
- return "kafka server: Tried to send a message to a replica that is not the leader for some partition. Your metadata is out of date."
- case ErrRequestTimedOut:
- return "kafka server: Request exceeded the user-specified time limit in the request."
- case ErrBrokerNotAvailable:
- return "kafka server: Broker not available. Not a client facing error, we should never receive this!!!"
- case ErrReplicaNotAvailable:
- return "kafka server: Replica information not available, one or more brokers are down."
- case ErrMessageSizeTooLarge:
- return "kafka server: Message was too large, server rejected it to avoid allocation error."
- case ErrStaleControllerEpochCode:
- return "kafka server: StaleControllerEpochCode (internal error code for broker-to-broker communication)."
- case ErrOffsetMetadataTooLarge:
- return "kafka server: Specified a string larger than the configured maximum for offset metadata."
- case ErrNetworkException:
- return "kafka server: The server disconnected before a response was received."
- case ErrOffsetsLoadInProgress:
- return "kafka server: The broker is still loading offsets after a leader change for that offset's topic partition."
- case ErrConsumerCoordinatorNotAvailable:
- return "kafka server: Offset's topic has not yet been created."
- case ErrNotCoordinatorForConsumer:
- return "kafka server: Request was for a consumer group that is not coordinated by this broker."
- case ErrInvalidTopic:
- return "kafka server: The request attempted to perform an operation on an invalid topic."
- case ErrMessageSetSizeTooLarge:
- return "kafka server: The request included message batch larger than the configured segment size on the server."
- case ErrNotEnoughReplicas:
- return "kafka server: Messages are rejected since there are fewer in-sync replicas than required."
- case ErrNotEnoughReplicasAfterAppend:
- return "kafka server: Messages are written to the log, but to fewer in-sync replicas than required."
- case ErrInvalidRequiredAcks:
- return "kafka server: The number of required acks is invalid (should be either -1, 0, or 1)."
- case ErrIllegalGeneration:
- return "kafka server: The provided generation id is not the current generation."
- case ErrInconsistentGroupProtocol:
- return "kafka server: The provider group protocol type is incompatible with the other members."
- case ErrInvalidGroupId:
- return "kafka server: The provided group id was empty."
- case ErrUnknownMemberId:
- return "kafka server: The provided member is not known in the current generation."
- case ErrInvalidSessionTimeout:
- return "kafka server: The provided session timeout is outside the allowed range."
- case ErrRebalanceInProgress:
- return "kafka server: A rebalance for the group is in progress. Please re-join the group."
- case ErrInvalidCommitOffsetSize:
- return "kafka server: The provided commit metadata was too large."
- case ErrTopicAuthorizationFailed:
- return "kafka server: The client is not authorized to access this topic."
- case ErrGroupAuthorizationFailed:
- return "kafka server: The client is not authorized to access this group."
- case ErrClusterAuthorizationFailed:
- return "kafka server: The client is not authorized to send this request type."
- case ErrInvalidTimestamp:
- return "kafka server: The timestamp of the message is out of acceptable range."
- case ErrUnsupportedSASLMechanism:
- return "kafka server: The broker does not support the requested SASL mechanism."
- case ErrIllegalSASLState:
- return "kafka server: Request is not valid given the current SASL state."
- case ErrUnsupportedVersion:
- return "kafka server: The version of API is not supported."
- case ErrUnsupportedForMessageFormat:
- return "kafka server: The requested operation is not supported by the message format version."
- }
-
- return fmt.Sprintf("Unknown error, how did this happen? Error code = %d", err)
-}
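
Because KError implements error, broker-level codes flow through ordinary Go error handling, and callers can branch on the sentinel constants, much as handleResponses in consumer.go above separates retriable from fatal codes. A minimal sketch:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// retriable mirrors the redispatch cases in handleResponses: stale-metadata
// codes that a client should answer with a metadata refresh and a retry.
func retriable(err sarama.KError) bool {
	switch err {
	case sarama.ErrUnknownTopicOrPartition, sarama.ErrNotLeaderForPartition,
		sarama.ErrLeaderNotAvailable, sarama.ErrReplicaNotAvailable:
		return true
	}
	return false
}

func main() {
	fmt.Println(retriable(sarama.ErrNotLeaderForPartition)) // true
	fmt.Println(retriable(sarama.ErrOffsetOutOfRange))      // false: fatal for that partition
	fmt.Println(sarama.ErrOffsetOutOfRange.Error())
}
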
diff --git a/vendor/github.com/Shopify/sarama/fetch_request.go b/vendor/github.com/Shopify/sarama/fetch_request.go
deleted file mode 100644
index ae701a3f29a..00000000000
--- a/vendor/github.com/Shopify/sarama/fetch_request.go
+++ /dev/null
@@ -1,136 +0,0 @@
-package sarama
-
-type fetchRequestBlock struct {
- fetchOffset int64
- maxBytes int32
-}
-
-func (b *fetchRequestBlock) encode(pe packetEncoder) error {
- pe.putInt64(b.fetchOffset)
- pe.putInt32(b.maxBytes)
- return nil
-}
-
-func (b *fetchRequestBlock) decode(pd packetDecoder) (err error) {
- if b.fetchOffset, err = pd.getInt64(); err != nil {
- return err
- }
- if b.maxBytes, err = pd.getInt32(); err != nil {
- return err
- }
- return nil
-}
-
-type FetchRequest struct {
- MaxWaitTime int32
- MinBytes int32
- Version int16
- blocks map[string]map[int32]*fetchRequestBlock
-}
-
-func (r *FetchRequest) encode(pe packetEncoder) (err error) {
- pe.putInt32(-1) // replica ID is always -1 for clients
- pe.putInt32(r.MaxWaitTime)
- pe.putInt32(r.MinBytes)
- err = pe.putArrayLength(len(r.blocks))
- if err != nil {
- return err
- }
- for topic, blocks := range r.blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(blocks))
- if err != nil {
- return err
- }
- for partition, block := range blocks {
- pe.putInt32(partition)
- err = block.encode(pe)
- if err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *FetchRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- if _, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MaxWaitTime, err = pd.getInt32(); err != nil {
- return err
- }
- if r.MinBytes, err = pd.getInt32(); err != nil {
- return err
- }
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- fetchBlock := &fetchRequestBlock{}
- if err = fetchBlock.decode(pd); err != nil {
-				return err
- }
- r.blocks[topic][partition] = fetchBlock
- }
- }
- return nil
-}
-
-func (r *FetchRequest) key() int16 {
- return 1
-}
-
-func (r *FetchRequest) version() int16 {
- return r.Version
-}
-
-func (r *FetchRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- default:
- return minVersion
- }
-}
-
-func (r *FetchRequest) AddBlock(topic string, partitionID int32, fetchOffset int64, maxBytes int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*fetchRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*fetchRequestBlock)
- }
-
- tmp := new(fetchRequestBlock)
- tmp.maxBytes = maxBytes
- tmp.fetchOffset = fetchOffset
-
- r.blocks[topic][partitionID] = tmp
-}
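
This mirrors fetchNewMessages in consumer.go above: one FetchRequest carries a block per subscribed topic/partition, with Version bumped to 2 on 0.10+ clusters so message timestamps come back. A sketch of assembling one by hand (topic name, offsets, and sizes are illustrative):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.FetchRequest{
		MinBytes:    1,
		MaxWaitTime: 250, // milliseconds, as in Consumer.MaxWaitTime / time.Millisecond
	}
	// Version 2 requires a 0.10+ broker (see requiredVersion above).
	req.Version = 2

	// One block per topic/partition: a start offset plus a per-partition byte budget.
	req.AddBlock("orders", 0, 42, 32768)
	req.AddBlock("orders", 1, 7, 32768)

	fmt.Printf("%+v\n", req)
}
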
diff --git a/vendor/github.com/Shopify/sarama/fetch_response.go b/vendor/github.com/Shopify/sarama/fetch_response.go
deleted file mode 100644
index b56b166c282..00000000000
--- a/vendor/github.com/Shopify/sarama/fetch_response.go
+++ /dev/null
@@ -1,210 +0,0 @@
-package sarama
-
-import "time"
-
-type FetchResponseBlock struct {
- Err KError
- HighWaterMarkOffset int64
- MsgSet MessageSet
-}
-
-func (b *FetchResponseBlock) decode(pd packetDecoder) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- b.HighWaterMarkOffset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- msgSetSize, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- msgSetDecoder, err := pd.getSubset(int(msgSetSize))
- if err != nil {
- return err
- }
- err = (&b.MsgSet).decode(msgSetDecoder)
-
- return err
-}
-
-func (b *FetchResponseBlock) encode(pe packetEncoder) (err error) {
- pe.putInt16(int16(b.Err))
-
- pe.putInt64(b.HighWaterMarkOffset)
-
- pe.push(&lengthField{})
- err = b.MsgSet.encode(pe)
- if err != nil {
- return err
- }
- return pe.pop()
-}
-
-type FetchResponse struct {
- Blocks map[string]map[int32]*FetchResponseBlock
- ThrottleTime time.Duration
- Version int16 // v1 requires 0.9+, v2 requires 0.10+
-}
-
-func (r *FetchResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.Version >= 1 {
- throttle, err := pd.getInt32()
- if err != nil {
- return err
- }
- r.ThrottleTime = time.Duration(throttle) * time.Millisecond
- }
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*FetchResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(FetchResponseBlock)
- err = block.decode(pd)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *FetchResponse) encode(pe packetEncoder) (err error) {
- if r.Version >= 1 {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- }
-
- err = pe.putArrayLength(len(r.Blocks))
- if err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
-
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
-
- for id, block := range partitions {
- pe.putInt32(id)
- err = block.encode(pe)
- if err != nil {
- return err
- }
- }
-
- }
- return nil
-}
-
-func (r *FetchResponse) key() int16 {
- return 1
-}
-
-func (r *FetchResponse) version() int16 {
- return r.Version
-}
-
-func (r *FetchResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- default:
- return minVersion
- }
-}
-
-func (r *FetchResponse) GetBlock(topic string, partition int32) *FetchResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-func (r *FetchResponse) AddError(topic string, partition int32, err KError) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
- frb.Err = err
-}
-
-func (r *FetchResponse) AddMessage(topic string, partition int32, key, value Encoder, offset int64) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*FetchResponseBlock)
- }
- partitions, ok := r.Blocks[topic]
- if !ok {
- partitions = make(map[int32]*FetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- frb, ok := partitions[partition]
- if !ok {
- frb = new(FetchResponseBlock)
- partitions[partition] = frb
- }
- var kb []byte
- var vb []byte
- if key != nil {
- kb, _ = key.Encode()
- }
- if value != nil {
- vb, _ = value.Encode()
- }
- msg := &Message{Key: kb, Value: vb}
- msgBlock := &MessageBlock{Msg: msg, Offset: offset}
- frb.MsgSet.Messages = append(frb.MsgSet.Messages, msgBlock)
-}
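
AddError and AddMessage exist mostly so tests can fabricate broker responses without a network round trip. A sketch; StringEncoder is assumed from elsewhere in the package (an Encoder over a plain string), and the topic and offsets are illustrative:

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	resp := new(sarama.FetchResponse)

	// Fabricate two messages on orders/0 and an error on orders/1.
	resp.AddMessage("orders", 0, nil, sarama.StringEncoder("first"), 10)
	resp.AddMessage("orders", 0, nil, sarama.StringEncoder("second"), 11)
	resp.AddError("orders", 1, sarama.ErrNotLeaderForPartition)

	block := resp.GetBlock("orders", 0)
	fmt.Println(len(block.MsgSet.Messages), resp.GetBlock("orders", 1).Err)
}
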
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_request.go b/vendor/github.com/Shopify/sarama/heartbeat_request.go
deleted file mode 100644
index ce49c473972..00000000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_request.go
+++ /dev/null
@@ -1,47 +0,0 @@
-package sarama
-
-type HeartbeatRequest struct {
- GroupId string
- GenerationId int32
- MemberId string
-}
-
-func (r *HeartbeatRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
-
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- return nil
-}
-
-func (r *HeartbeatRequest) key() int16 {
- return 12
-}
-
-func (r *HeartbeatRequest) version() int16 {
- return 0
-}
-
-func (r *HeartbeatRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/heartbeat_response.go b/vendor/github.com/Shopify/sarama/heartbeat_response.go
deleted file mode 100644
index 3c51163ad1f..00000000000
--- a/vendor/github.com/Shopify/sarama/heartbeat_response.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package sarama
-
-type HeartbeatResponse struct {
- Err KError
-}
-
-func (r *HeartbeatResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return nil
-}
-
-func (r *HeartbeatResponse) decode(pd packetDecoder, version int16) error {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- r.Err = KError(kerr)
- }
-
- return nil
-}
-
-func (r *HeartbeatResponse) key() int16 {
- return 12
-}
-
-func (r *HeartbeatResponse) version() int16 {
- return 0
-}
-
-func (r *HeartbeatResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
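The heartbeat pair above shows the shape every request/response body in this package follows: encode/decode for the wire format, a shared key() (12 here) tying a response to its request, and requiredVersion() gating the call behind a minimum broker version. They all satisfy an unexported interface declared in request.go, which is not part of this diff; a rough reconstruction:

package sarama

// Rough reconstruction (an assumption, see request.go at this revision) of
// the internal contract the deleted request/response types satisfy.
type protocolBody interface {
	encoder                        // writes itself via a packetEncoder
	versionedDecoder               // reads itself via a packetDecoder plus a version
	key() int16                    // Kafka API key, e.g. 12 for the heartbeat pair
	version() int16                // API version this instance speaks
	requiredVersion() KafkaVersion // minimum broker version that accepts it
}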
diff --git a/vendor/github.com/Shopify/sarama/join_group_request.go b/vendor/github.com/Shopify/sarama/join_group_request.go
deleted file mode 100644
index 656db4562d3..00000000000
--- a/vendor/github.com/Shopify/sarama/join_group_request.go
+++ /dev/null
@@ -1,108 +0,0 @@
-package sarama
-
-type JoinGroupRequest struct {
- GroupId string
- SessionTimeout int32
- MemberId string
- ProtocolType string
- GroupProtocols map[string][]byte
-}
-
-func (r *JoinGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
- pe.putInt32(r.SessionTimeout)
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
- if err := pe.putString(r.ProtocolType); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.GroupProtocols)); err != nil {
- return err
- }
- for name, metadata := range r.GroupProtocols {
- if err := pe.putString(name); err != nil {
- return err
- }
- if err := pe.putBytes(metadata); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *JoinGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
-
- if r.SessionTimeout, err = pd.getInt32(); err != nil {
- return
- }
-
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- if r.ProtocolType, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupProtocols = make(map[string][]byte)
- for i := 0; i < n; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
- metadata, err := pd.getBytes()
- if err != nil {
- return err
- }
-
- r.GroupProtocols[name] = metadata
- }
-
- return nil
-}
-
-func (r *JoinGroupRequest) key() int16 {
- return 11
-}
-
-func (r *JoinGroupRequest) version() int16 {
- return 0
-}
-
-func (r *JoinGroupRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *JoinGroupRequest) AddGroupProtocol(name string, metadata []byte) {
- if r.GroupProtocols == nil {
- r.GroupProtocols = make(map[string][]byte)
- }
-
- r.GroupProtocols[name] = metadata
-}
-
-func (r *JoinGroupRequest) AddGroupProtocolMetadata(name string, metadata *ConsumerGroupMemberMetadata) error {
- bin, err := encode(metadata, nil)
- if err != nil {
- return err
- }
-
- r.AddGroupProtocol(name, bin)
- return nil
-}
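A sketch of building a join-group request with the helpers above. The group ID and topic name are hypothetical, and ConsumerGroupMemberMetadata is defined elsewhere in the package (consumer_group_members.go), so its exact fields here are an assumption:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	req := &sarama.JoinGroupRequest{
		GroupId:        "example-group", // hypothetical group ID
		SessionTimeout: 30000,           // milliseconds
		ProtocolType:   "consumer",
	}
	// AddGroupProtocolMetadata encodes the metadata struct and stores the
	// bytes under the protocol name via AddGroupProtocol.
	meta := &sarama.ConsumerGroupMemberMetadata{Topics: []string{"logs"}}
	if err := req.AddGroupProtocolMetadata("range", meta); err != nil {
		log.Fatal(err)
	}
}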
diff --git a/vendor/github.com/Shopify/sarama/join_group_response.go b/vendor/github.com/Shopify/sarama/join_group_response.go
deleted file mode 100644
index 94c7a7fde07..00000000000
--- a/vendor/github.com/Shopify/sarama/join_group_response.go
+++ /dev/null
@@ -1,114 +0,0 @@
-package sarama
-
-type JoinGroupResponse struct {
- Err KError
- GenerationId int32
- GroupProtocol string
- LeaderId string
- MemberId string
- Members map[string][]byte
-}
-
-func (r *JoinGroupResponse) GetMembers() (map[string]ConsumerGroupMemberMetadata, error) {
- members := make(map[string]ConsumerGroupMemberMetadata, len(r.Members))
- for id, bin := range r.Members {
- meta := new(ConsumerGroupMemberMetadata)
- if err := decode(bin, meta); err != nil {
- return nil, err
- }
- members[id] = *meta
- }
- return members, nil
-}
-
-func (r *JoinGroupResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.GroupProtocol); err != nil {
- return err
- }
- if err := pe.putString(r.LeaderId); err != nil {
- return err
- }
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.Members)); err != nil {
- return err
- }
-
- for memberId, memberMetadata := range r.Members {
- if err := pe.putString(memberId); err != nil {
- return err
- }
-
- if err := pe.putBytes(memberMetadata); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *JoinGroupResponse) decode(pd packetDecoder, version int16) (err error) {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- r.Err = KError(kerr)
- }
-
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
-
- if r.GroupProtocol, err = pd.getString(); err != nil {
- return
- }
-
- if r.LeaderId, err = pd.getString(); err != nil {
- return
- }
-
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.Members = make(map[string][]byte)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
-
- memberMetadata, err := pd.getBytes()
- if err != nil {
- return err
- }
-
- r.Members[memberId] = memberMetadata
- }
-
- return nil
-}
-
-func (r *JoinGroupResponse) key() int16 {
- return 11
-}
-
-func (r *JoinGroupResponse) version() int16 {
- return 0
-}
-
-func (r *JoinGroupResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_request.go b/vendor/github.com/Shopify/sarama/leave_group_request.go
deleted file mode 100644
index e177427482f..00000000000
--- a/vendor/github.com/Shopify/sarama/leave_group_request.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package sarama
-
-type LeaveGroupRequest struct {
- GroupId string
- MemberId string
-}
-
-func (r *LeaveGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *LeaveGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- return nil
-}
-
-func (r *LeaveGroupRequest) key() int16 {
- return 13
-}
-
-func (r *LeaveGroupRequest) version() int16 {
- return 0
-}
-
-func (r *LeaveGroupRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/leave_group_response.go b/vendor/github.com/Shopify/sarama/leave_group_response.go
deleted file mode 100644
index bd4a34f46ce..00000000000
--- a/vendor/github.com/Shopify/sarama/leave_group_response.go
+++ /dev/null
@@ -1,32 +0,0 @@
-package sarama
-
-type LeaveGroupResponse struct {
- Err KError
-}
-
-func (r *LeaveGroupResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return nil
-}
-
-func (r *LeaveGroupResponse) decode(pd packetDecoder, version int16) (err error) {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- r.Err = KError(kerr)
- }
-
- return nil
-}
-
-func (r *LeaveGroupResponse) key() int16 {
- return 13
-}
-
-func (r *LeaveGroupResponse) version() int16 {
- return 0
-}
-
-func (r *LeaveGroupResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/length_field.go b/vendor/github.com/Shopify/sarama/length_field.go
deleted file mode 100644
index 70078be5d9f..00000000000
--- a/vendor/github.com/Shopify/sarama/length_field.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package sarama
-
-import "encoding/binary"
-
-// lengthField implements the PushEncoder and PushDecoder interfaces for calculating 4-byte lengths.
-type lengthField struct {
- startOffset int
-}
-
-func (l *lengthField) saveOffset(in int) {
- l.startOffset = in
-}
-
-func (l *lengthField) reserveLength() int {
- return 4
-}
-
-func (l *lengthField) run(curOffset int, buf []byte) error {
- binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
- return nil
-}
-
-func (l *lengthField) check(curOffset int, buf []byte) error {
- if uint32(curOffset-l.startOffset-4) != binary.BigEndian.Uint32(buf[l.startOffset:]) {
- return PacketDecodingError{"length field invalid"}
- }
-
- return nil
-}
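A standalone sketch of the framing lengthField performs when pushed onto an encoder: reserve four bytes, write the payload, then backfill the big-endian length, which is the same arithmetic as run above:

package main

import (
	"encoding/binary"
	"fmt"
)

// frame prefixes payload with its 4-byte big-endian length, mirroring
// lengthField.run: length = current offset - start offset - 4 reserved bytes.
func frame(payload []byte) []byte {
	buf := make([]byte, 4+len(payload))
	copy(buf[4:], payload)
	binary.BigEndian.PutUint32(buf, uint32(len(payload)))
	return buf
}

func main() {
	fmt.Printf("% x\n", frame([]byte("kafka"))) // 00 00 00 05 6b 61 66 6b 61
}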
diff --git a/vendor/github.com/Shopify/sarama/list_groups_request.go b/vendor/github.com/Shopify/sarama/list_groups_request.go
deleted file mode 100644
index 3b16abf7fa8..00000000000
--- a/vendor/github.com/Shopify/sarama/list_groups_request.go
+++ /dev/null
@@ -1,24 +0,0 @@
-package sarama
-
-type ListGroupsRequest struct {
-}
-
-func (r *ListGroupsRequest) encode(pe packetEncoder) error {
- return nil
-}
-
-func (r *ListGroupsRequest) decode(pd packetDecoder, version int16) (err error) {
- return nil
-}
-
-func (r *ListGroupsRequest) key() int16 {
- return 16
-}
-
-func (r *ListGroupsRequest) version() int16 {
- return 0
-}
-
-func (r *ListGroupsRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/list_groups_response.go b/vendor/github.com/Shopify/sarama/list_groups_response.go
deleted file mode 100644
index 3a84f9b6c18..00000000000
--- a/vendor/github.com/Shopify/sarama/list_groups_response.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package sarama
-
-type ListGroupsResponse struct {
- Err KError
- Groups map[string]string
-}
-
-func (r *ListGroupsResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
-
- if err := pe.putArrayLength(len(r.Groups)); err != nil {
- return err
- }
- for groupId, protocolType := range r.Groups {
- if err := pe.putString(groupId); err != nil {
- return err
- }
- if err := pe.putString(protocolType); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *ListGroupsResponse) decode(pd packetDecoder, version int16) error {
- if kerr, err := pd.getInt16(); err != nil {
- return err
- } else {
- r.Err = KError(kerr)
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.Groups = make(map[string]string)
- for i := 0; i < n; i++ {
- groupId, err := pd.getString()
- if err != nil {
- return err
- }
- protocolType, err := pd.getString()
- if err != nil {
- return err
- }
-
- r.Groups[groupId] = protocolType
- }
-
- return nil
-}
-
-func (r *ListGroupsResponse) key() int16 {
- return 16
-}
-
-func (r *ListGroupsResponse) version() int16 {
- return 0
-}
-
-func (r *ListGroupsResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/message.go b/vendor/github.com/Shopify/sarama/message.go
deleted file mode 100644
index 327c5fa2add..00000000000
--- a/vendor/github.com/Shopify/sarama/message.go
+++ /dev/null
@@ -1,196 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "compress/gzip"
- "fmt"
- "io/ioutil"
- "time"
-
- "github.com/eapache/go-xerial-snappy"
- "github.com/pierrec/lz4"
-)
-
-// CompressionCodec represents the various compression codecs recognized by Kafka in messages.
-type CompressionCodec int8
-
-// only the last two bits are really used
-const compressionCodecMask int8 = 0x03
-
-const (
- CompressionNone CompressionCodec = 0
- CompressionGZIP CompressionCodec = 1
- CompressionSnappy CompressionCodec = 2
- CompressionLZ4 CompressionCodec = 3
-)
-
-type Message struct {
- Codec CompressionCodec // codec used to compress the message contents
- Key []byte // the message key, may be nil
- Value []byte // the message contents
- Set *MessageSet // the message set a message might wrap
- Version int8 // v1 requires Kafka 0.10
- Timestamp time.Time // the timestamp of the message (version 1+ only)
-
- compressedCache []byte
- compressedSize int // used for computing the compression ratio metrics
-}
-
-func (m *Message) encode(pe packetEncoder) error {
- pe.push(&crc32Field{})
-
- pe.putInt8(m.Version)
-
- attributes := int8(m.Codec) & compressionCodecMask
- pe.putInt8(attributes)
-
- if m.Version >= 1 {
- pe.putInt64(m.Timestamp.UnixNano() / int64(time.Millisecond))
- }
-
- err := pe.putBytes(m.Key)
- if err != nil {
- return err
- }
-
- var payload []byte
-
- if m.compressedCache != nil {
- payload = m.compressedCache
- m.compressedCache = nil
- } else if m.Value != nil {
- switch m.Codec {
- case CompressionNone:
- payload = m.Value
- case CompressionGZIP:
- var buf bytes.Buffer
- writer := gzip.NewWriter(&buf)
- if _, err = writer.Write(m.Value); err != nil {
- return err
- }
- if err = writer.Close(); err != nil {
- return err
- }
- m.compressedCache = buf.Bytes()
- payload = m.compressedCache
- case CompressionSnappy:
- tmp := snappy.Encode(m.Value)
- m.compressedCache = tmp
- payload = m.compressedCache
- case CompressionLZ4:
- var buf bytes.Buffer
- writer := lz4.NewWriter(&buf)
- if _, err = writer.Write(m.Value); err != nil {
- return err
- }
- if err = writer.Close(); err != nil {
- return err
- }
- m.compressedCache = buf.Bytes()
- payload = m.compressedCache
-
- default:
- return PacketEncodingError{fmt.Sprintf("unsupported compression codec (%d)", m.Codec)}
- }
- // Keep in mind the compressed payload size for metric gathering
- m.compressedSize = len(payload)
- }
-
- if err = pe.putBytes(payload); err != nil {
- return err
- }
-
- return pe.pop()
-}
-
-func (m *Message) decode(pd packetDecoder) (err error) {
- err = pd.push(&crc32Field{})
- if err != nil {
- return err
- }
-
- m.Version, err = pd.getInt8()
- if err != nil {
- return err
- }
-
- attribute, err := pd.getInt8()
- if err != nil {
- return err
- }
- m.Codec = CompressionCodec(attribute & compressionCodecMask)
-
- if m.Version >= 1 {
- millis, err := pd.getInt64()
- if err != nil {
- return err
- }
- m.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
- }
-
- m.Key, err = pd.getBytes()
- if err != nil {
- return err
- }
-
- m.Value, err = pd.getBytes()
- if err != nil {
- return err
- }
-
- // Required for deep equal assertion during tests but might be useful
- // for future metrics about the compression ratio in fetch requests
- m.compressedSize = len(m.Value)
-
- switch m.Codec {
- case CompressionNone:
- // nothing to do
- case CompressionGZIP:
- if m.Value == nil {
- break
- }
- reader, err := gzip.NewReader(bytes.NewReader(m.Value))
- if err != nil {
- return err
- }
- if m.Value, err = ioutil.ReadAll(reader); err != nil {
- return err
- }
- if err := m.decodeSet(); err != nil {
- return err
- }
- case CompressionSnappy:
- if m.Value == nil {
- break
- }
- if m.Value, err = snappy.Decode(m.Value); err != nil {
- return err
- }
- if err := m.decodeSet(); err != nil {
- return err
- }
- case CompressionLZ4:
- if m.Value == nil {
- break
- }
- reader := lz4.NewReader(bytes.NewReader(m.Value))
- if m.Value, err = ioutil.ReadAll(reader); err != nil {
- return err
- }
- if err := m.decodeSet(); err != nil {
- return err
- }
-
- default:
- return PacketDecodingError{fmt.Sprintf("invalid compression specified (%d)", m.Codec)}
- }
-
- return pd.pop()
-}
-
-// decodes a message set from a previously encoded bulk-message
-func (m *Message) decodeSet() (err error) {
- pd := realDecoder{raw: m.Value}
- m.Set = &MessageSet{}
- return m.Set.decode(&pd)
-}
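A sketch of two wire-format details from Message.encode/decode above that are easy to miss: the compression codec travels in the low two bits of the attributes byte, and v1 timestamps travel as millisecond Unix times:

package main

import (
	"fmt"
	"time"
)

func main() {
	const compressionCodecMask int8 = 0x03
	const codecSnappy int8 = 2
	attributes := codecSnappy & compressionCodecMask // pack on encode
	fmt.Println(attributes & compressionCodecMask)   // unpack on decode: 2

	ts := time.Date(2017, 1, 2, 3, 4, 5, 678000000, time.UTC)
	millis := ts.UnixNano() / int64(time.Millisecond) // encode side
	back := time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
	fmt.Println(back.UTC().Equal(ts)) // decode side round-trips: true
}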
diff --git a/vendor/github.com/Shopify/sarama/message_set.go b/vendor/github.com/Shopify/sarama/message_set.go
deleted file mode 100644
index f028784e51a..00000000000
--- a/vendor/github.com/Shopify/sarama/message_set.go
+++ /dev/null
@@ -1,89 +0,0 @@
-package sarama
-
-type MessageBlock struct {
- Offset int64
- Msg *Message
-}
-
-// Messages is a convenience helper which returns either all the messages
-// wrapped in this block, or the block itself when it wraps no nested set.
-func (msb *MessageBlock) Messages() []*MessageBlock {
- if msb.Msg.Set != nil {
- return msb.Msg.Set.Messages
- }
- return []*MessageBlock{msb}
-}
-
-func (msb *MessageBlock) encode(pe packetEncoder) error {
- pe.putInt64(msb.Offset)
- pe.push(&lengthField{})
- err := msb.Msg.encode(pe)
- if err != nil {
- return err
- }
- return pe.pop()
-}
-
-func (msb *MessageBlock) decode(pd packetDecoder) (err error) {
- if msb.Offset, err = pd.getInt64(); err != nil {
- return err
- }
-
- if err = pd.push(&lengthField{}); err != nil {
- return err
- }
-
- msb.Msg = new(Message)
- if err = msb.Msg.decode(pd); err != nil {
- return err
- }
-
- if err = pd.pop(); err != nil {
- return err
- }
-
- return nil
-}
-
-type MessageSet struct {
- PartialTrailingMessage bool // whether the set on the wire contained an incomplete trailing MessageBlock
- Messages []*MessageBlock
-}
-
-func (ms *MessageSet) encode(pe packetEncoder) error {
- for i := range ms.Messages {
- err := ms.Messages[i].encode(pe)
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (ms *MessageSet) decode(pd packetDecoder) (err error) {
- ms.Messages = nil
-
- for pd.remaining() > 0 {
- msb := new(MessageBlock)
- err = msb.decode(pd)
- switch err {
- case nil:
- ms.Messages = append(ms.Messages, msb)
- case ErrInsufficientData:
-   // As an optimization the server is allowed to return a partial message at
-   // the end of the message set; clients should handle this case. We record
-   // it and stop decoding.
- ms.PartialTrailingMessage = true
- return nil
- default:
- return err
- }
- }
-
- return nil
-}
-
-func (ms *MessageSet) addMessage(msg *Message) {
- block := new(MessageBlock)
- block.Msg = msg
- ms.Messages = append(ms.Messages, block)
-}
diff --git a/vendor/github.com/Shopify/sarama/metadata_request.go b/vendor/github.com/Shopify/sarama/metadata_request.go
deleted file mode 100644
index 9a26b55fd03..00000000000
--- a/vendor/github.com/Shopify/sarama/metadata_request.go
+++ /dev/null
@@ -1,52 +0,0 @@
-package sarama
-
-type MetadataRequest struct {
- Topics []string
-}
-
-func (r *MetadataRequest) encode(pe packetEncoder) error {
- err := pe.putArrayLength(len(r.Topics))
- if err != nil {
- return err
- }
-
- for i := range r.Topics {
- err = pe.putString(r.Topics[i])
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *MetadataRequest) decode(pd packetDecoder, version int16) error {
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
-
- r.Topics = make([]string, topicCount)
- for i := range r.Topics {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- r.Topics[i] = topic
- }
- return nil
-}
-
-func (r *MetadataRequest) key() int16 {
- return 3
-}
-
-func (r *MetadataRequest) version() int16 {
- return 0
-}
-
-func (r *MetadataRequest) requiredVersion() KafkaVersion {
- return minVersion
-}
diff --git a/vendor/github.com/Shopify/sarama/metadata_response.go b/vendor/github.com/Shopify/sarama/metadata_response.go
deleted file mode 100644
index f9d6a4271ed..00000000000
--- a/vendor/github.com/Shopify/sarama/metadata_response.go
+++ /dev/null
@@ -1,239 +0,0 @@
-package sarama
-
-type PartitionMetadata struct {
- Err KError
- ID int32
- Leader int32
- Replicas []int32
- Isr []int32
-}
-
-func (pm *PartitionMetadata) decode(pd packetDecoder) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- pm.Err = KError(tmp)
-
- pm.ID, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- pm.Leader, err = pd.getInt32()
- if err != nil {
- return err
- }
-
- pm.Replicas, err = pd.getInt32Array()
- if err != nil {
- return err
- }
-
- pm.Isr, err = pd.getInt32Array()
- if err != nil {
- return err
- }
-
- return nil
-}
-
-func (pm *PartitionMetadata) encode(pe packetEncoder) (err error) {
- pe.putInt16(int16(pm.Err))
- pe.putInt32(pm.ID)
- pe.putInt32(pm.Leader)
-
- err = pe.putInt32Array(pm.Replicas)
- if err != nil {
- return err
- }
-
- err = pe.putInt32Array(pm.Isr)
- if err != nil {
- return err
- }
-
- return nil
-}
-
-type TopicMetadata struct {
- Err KError
- Name string
- Partitions []*PartitionMetadata
-}
-
-func (tm *TopicMetadata) decode(pd packetDecoder) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- tm.Err = KError(tmp)
-
- tm.Name, err = pd.getString()
- if err != nil {
- return err
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- tm.Partitions = make([]*PartitionMetadata, n)
- for i := 0; i < n; i++ {
- tm.Partitions[i] = new(PartitionMetadata)
- err = tm.Partitions[i].decode(pd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (tm *TopicMetadata) encode(pe packetEncoder) (err error) {
- pe.putInt16(int16(tm.Err))
-
- err = pe.putString(tm.Name)
- if err != nil {
- return err
- }
-
- err = pe.putArrayLength(len(tm.Partitions))
- if err != nil {
- return err
- }
-
- for _, pm := range tm.Partitions {
- err = pm.encode(pe)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-type MetadataResponse struct {
- Brokers []*Broker
- Topics []*TopicMetadata
-}
-
-func (r *MetadataResponse) decode(pd packetDecoder, version int16) (err error) {
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Brokers = make([]*Broker, n)
- for i := 0; i < n; i++ {
- r.Brokers[i] = new(Broker)
- err = r.Brokers[i].decode(pd)
- if err != nil {
- return err
- }
- }
-
- n, err = pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Topics = make([]*TopicMetadata, n)
- for i := 0; i < n; i++ {
- r.Topics[i] = new(TopicMetadata)
- err = r.Topics[i].decode(pd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *MetadataResponse) encode(pe packetEncoder) error {
- err := pe.putArrayLength(len(r.Brokers))
- if err != nil {
- return err
- }
- for _, broker := range r.Brokers {
- err = broker.encode(pe)
- if err != nil {
- return err
- }
- }
-
- err = pe.putArrayLength(len(r.Topics))
- if err != nil {
- return err
- }
- for _, tm := range r.Topics {
- err = tm.encode(pe)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *MetadataResponse) key() int16 {
- return 3
-}
-
-func (r *MetadataResponse) version() int16 {
- return 0
-}
-
-func (r *MetadataResponse) requiredVersion() KafkaVersion {
- return minVersion
-}
-
-// testing API
-
-func (r *MetadataResponse) AddBroker(addr string, id int32) {
- r.Brokers = append(r.Brokers, &Broker{id: id, addr: addr})
-}
-
-func (r *MetadataResponse) AddTopic(topic string, err KError) *TopicMetadata {
- var tmatch *TopicMetadata
-
- for _, tm := range r.Topics {
- if tm.Name == topic {
- tmatch = tm
- goto foundTopic
- }
- }
-
- tmatch = new(TopicMetadata)
- tmatch.Name = topic
- r.Topics = append(r.Topics, tmatch)
-
-foundTopic:
-
- tmatch.Err = err
- return tmatch
-}
-
-func (r *MetadataResponse) AddTopicPartition(topic string, partition, brokerID int32, replicas, isr []int32, err KError) {
- tmatch := r.AddTopic(topic, ErrNoError)
- var pmatch *PartitionMetadata
-
- for _, pm := range tmatch.Partitions {
- if pm.ID == partition {
- pmatch = pm
- goto foundPartition
- }
- }
-
- pmatch = new(PartitionMetadata)
- pmatch.ID = partition
- tmatch.Partitions = append(tmatch.Partitions, pmatch)
-
-foundPartition:
-
- pmatch.Leader = brokerID
- pmatch.Replicas = replicas
- pmatch.Isr = isr
- pmatch.Err = err
-
-}
diff --git a/vendor/github.com/Shopify/sarama/metrics.go b/vendor/github.com/Shopify/sarama/metrics.go
deleted file mode 100644
index 4869708e944..00000000000
--- a/vendor/github.com/Shopify/sarama/metrics.go
+++ /dev/null
@@ -1,51 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "strings"
-
- "github.com/rcrowley/go-metrics"
-)
-
-// Use exponentially decaying reservoir for sampling histograms with the same defaults as the Java library:
-// 1028 elements, which offers a 99.9% confidence level with a 5% margin of error assuming a normal distribution,
-// and an alpha factor of 0.015, which heavily biases the reservoir to the past 5 minutes of measurements.
-// See https://github.com/dropwizard/metrics/blob/v3.1.0/metrics-core/src/main/java/com/codahale/metrics/ExponentiallyDecayingReservoir.java#L38
-const (
- metricsReservoirSize = 1028
- metricsAlphaFactor = 0.015
-)
-
-func getOrRegisterHistogram(name string, r metrics.Registry) metrics.Histogram {
- return r.GetOrRegister(name, func() metrics.Histogram {
- return metrics.NewHistogram(metrics.NewExpDecaySample(metricsReservoirSize, metricsAlphaFactor))
- }).(metrics.Histogram)
-}
-
-func getMetricNameForBroker(name string, broker *Broker) string {
- // Use the broker ID, like the Java client, as it does not contain '.' or ':'
- // characters that can be interpreted as special characters by monitoring tools (e.g. Graphite)
- return fmt.Sprintf(name+"-for-broker-%d", broker.ID())
-}
-
-func getOrRegisterBrokerMeter(name string, broker *Broker, r metrics.Registry) metrics.Meter {
- return metrics.GetOrRegisterMeter(getMetricNameForBroker(name, broker), r)
-}
-
-func getOrRegisterBrokerHistogram(name string, broker *Broker, r metrics.Registry) metrics.Histogram {
- return getOrRegisterHistogram(getMetricNameForBroker(name, broker), r)
-}
-
-func getMetricNameForTopic(name string, topic string) string {
- // Convert dot to _ since reporters like Graphite typically use dot to represent hierarchy
- // cf. KAFKA-1902 and KAFKA-2337
- return fmt.Sprintf(name+"-for-topic-%s", strings.Replace(topic, ".", "_", -1))
-}
-
-func getOrRegisterTopicMeter(name string, topic string, r metrics.Registry) metrics.Meter {
- return metrics.GetOrRegisterMeter(getMetricNameForTopic(name, topic), r)
-}
-
-func getOrRegisterTopicHistogram(name string, topic string, r metrics.Registry) metrics.Histogram {
- return getOrRegisterHistogram(getMetricNameForTopic(name, topic), r)
-}
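These helpers are thin wrappers over the github.com/rcrowley/go-metrics registry. A minimal sketch of the same registration done directly, assuming that library's public API as vendored at this revision:

package main

import (
	"fmt"

	"github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	// Same reservoir parameters as getOrRegisterHistogram above.
	h := r.GetOrRegister("request-size-for-broker-1", func() metrics.Histogram {
		return metrics.NewHistogram(metrics.NewExpDecaySample(1028, 0.015))
	}).(metrics.Histogram)
	h.Update(512)
	fmt.Println(h.Count(), h.Max())
}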
diff --git a/vendor/github.com/Shopify/sarama/mockbroker.go b/vendor/github.com/Shopify/sarama/mockbroker.go
deleted file mode 100644
index 0734d34f6cb..00000000000
--- a/vendor/github.com/Shopify/sarama/mockbroker.go
+++ /dev/null
@@ -1,324 +0,0 @@
-package sarama
-
-import (
- "bytes"
- "encoding/binary"
- "fmt"
- "io"
- "net"
- "reflect"
- "strconv"
- "sync"
- "time"
-
- "github.com/davecgh/go-spew/spew"
-)
-
-const (
- expectationTimeout = 500 * time.Millisecond
-)
-
-type requestHandlerFunc func(req *request) (res encoder)
-
-// RequestNotifierFunc is invoked when a mock broker processes a request successfully
-// and provides the number of bytes read and written.
-type RequestNotifierFunc func(bytesRead, bytesWritten int)
-
-// MockBroker is a mock Kafka broker that is used in unit tests. It is exposed
-// to facilitate testing of higher level or specialized consumers and producers
-// built on top of Sarama. Note that it does not 'mimic' the Kafka API protocol,
-// but rather provides a facility to do that. It takes care of the TCP
-// transport, request unmarshaling, and response marshaling, and makes it the
-// test writer's responsibility to program MockBroker behaviour that is correct
-// according to the Kafka API protocol.
-//
-// MockBroker is implemented as a TCP server listening on a kernel-selected
-// localhost port that can accept many connections. It reads Kafka requests
-// from that connection and returns responses programmed by the SetHandlerByMap
-// function. If a MockBroker receives a request that it has no programmed
-// response for, then it returns nothing and the request times out.
-//
-// A set of MockRequest builders to define mappings used by MockBroker is
-// provided by Sarama. But users can develop MockRequests of their own and use
-// them along with or instead of the standard ones.
-//
-// When running tests with MockBroker it is strongly recommended to specify
-// a timeout to `go test` so that if the broker hangs waiting for a response,
-// the test panics.
-//
-// It is not necessary to prefix message length or correlation ID to your
-// response bytes; the server does that automatically as a convenience.
-type MockBroker struct {
- brokerID int32
- port int32
- closing chan none
- stopper chan none
- expectations chan encoder
- listener net.Listener
- t TestReporter
- latency time.Duration
- handler requestHandlerFunc
- notifier RequestNotifierFunc
- history []RequestResponse
- lock sync.Mutex
-}
-
-// RequestResponse represents a Request/Response pair processed by MockBroker.
-type RequestResponse struct {
- Request protocolBody
- Response encoder
-}
-
-// SetLatency makes broker pause for the specified period every time before
-// replying.
-func (b *MockBroker) SetLatency(latency time.Duration) {
- b.latency = latency
-}
-
-// SetHandlerByMap defines mapping of Request types to MockResponses. When a
-// request is received by the broker, it looks up the request type in the map
-// and uses the found MockResponse instance to generate an appropriate reply.
-// If the request type is not found in the map then nothing is sent.
-func (b *MockBroker) SetHandlerByMap(handlerMap map[string]MockResponse) {
- b.setHandler(func(req *request) (res encoder) {
- reqTypeName := reflect.TypeOf(req.body).Elem().Name()
- mockResponse := handlerMap[reqTypeName]
- if mockResponse == nil {
- return nil
- }
- return mockResponse.For(req.body)
- })
-}
-
-// SetNotifier sets a function that will get invoked whenever a request has been
-// processed successfully, providing the number of bytes read and written.
-func (b *MockBroker) SetNotifier(notifier RequestNotifierFunc) {
- b.lock.Lock()
- b.notifier = notifier
- b.lock.Unlock()
-}
-
-// BrokerID returns the broker ID assigned to the broker.
-func (b *MockBroker) BrokerID() int32 {
- return b.brokerID
-}
-
-// History returns a slice of RequestResponse pairs in the order they were
-// processed by the broker. Note that in case of multiple connections to the
-// broker the order expected by a test can be different from the order recorded
-// in the history, unless some synchronization is implemented in the test.
-func (b *MockBroker) History() []RequestResponse {
- b.lock.Lock()
- history := make([]RequestResponse, len(b.history))
- copy(history, b.history)
- b.lock.Unlock()
- return history
-}
-
-// Port returns the TCP port number the broker is listening for requests on.
-func (b *MockBroker) Port() int32 {
- return b.port
-}
-
-// Addr returns the broker connection string in the form "<address>:<port>".
-func (b *MockBroker) Addr() string {
- return b.listener.Addr().String()
-}
-
-// Close terminates the broker blocking until it stops internal goroutines and
-// releases all resources.
-func (b *MockBroker) Close() {
- close(b.expectations)
- if len(b.expectations) > 0 {
- buf := bytes.NewBufferString(fmt.Sprintf("mockbroker/%d: not all expectations were satisfied! Still waiting on:\n", b.BrokerID()))
- for e := range b.expectations {
- _, _ = buf.WriteString(spew.Sdump(e))
- }
- b.t.Error(buf.String())
- }
- close(b.closing)
- <-b.stopper
-}
-
-// setHandler sets the specified function as the request handler. Whenever
-// a mock broker reads a request from the wire it passes the request to the
-// function and sends back whatever the handler function returns.
-func (b *MockBroker) setHandler(handler requestHandlerFunc) {
- b.lock.Lock()
- b.handler = handler
- b.lock.Unlock()
-}
-
-func (b *MockBroker) serverLoop() {
- defer close(b.stopper)
- var err error
- var conn net.Conn
-
- go func() {
- <-b.closing
- err := b.listener.Close()
- if err != nil {
- b.t.Error(err)
- }
- }()
-
- wg := &sync.WaitGroup{}
- i := 0
- for conn, err = b.listener.Accept(); err == nil; conn, err = b.listener.Accept() {
- wg.Add(1)
- go b.handleRequests(conn, i, wg)
- i++
- }
- wg.Wait()
- Logger.Printf("*** mockbroker/%d: listener closed, err=%v", b.BrokerID(), err)
-}
-
-func (b *MockBroker) handleRequests(conn net.Conn, idx int, wg *sync.WaitGroup) {
- defer wg.Done()
- defer func() {
- _ = conn.Close()
- }()
- Logger.Printf("*** mockbroker/%d/%d: connection opened", b.BrokerID(), idx)
- var err error
-
- abort := make(chan none)
- defer close(abort)
- go func() {
- select {
- case <-b.closing:
- _ = conn.Close()
- case <-abort:
- }
- }()
-
- resHeader := make([]byte, 8)
- for {
- req, bytesRead, err := decodeRequest(conn)
- if err != nil {
- Logger.Printf("*** mockbroker/%d/%d: invalid request: err=%+v, %+v", b.brokerID, idx, err, spew.Sdump(req))
- b.serverError(err)
- break
- }
-
- if b.latency > 0 {
- time.Sleep(b.latency)
- }
-
- b.lock.Lock()
- res := b.handler(req)
- b.history = append(b.history, RequestResponse{req.body, res})
- b.lock.Unlock()
-
- if res == nil {
- Logger.Printf("*** mockbroker/%d/%d: ignored %v", b.brokerID, idx, spew.Sdump(req))
- continue
- }
- Logger.Printf("*** mockbroker/%d/%d: served %v -> %v", b.brokerID, idx, req, res)
-
- encodedRes, err := encode(res, nil)
- if err != nil {
- b.serverError(err)
- break
- }
- if len(encodedRes) == 0 {
- b.lock.Lock()
- if b.notifier != nil {
- b.notifier(bytesRead, 0)
- }
- b.lock.Unlock()
- continue
- }
-
- binary.BigEndian.PutUint32(resHeader, uint32(len(encodedRes)+4))
- binary.BigEndian.PutUint32(resHeader[4:], uint32(req.correlationID))
- if _, err = conn.Write(resHeader); err != nil {
- b.serverError(err)
- break
- }
- if _, err = conn.Write(encodedRes); err != nil {
- b.serverError(err)
- break
- }
-
- b.lock.Lock()
- if b.notifier != nil {
- b.notifier(bytesRead, len(resHeader)+len(encodedRes))
- }
- b.lock.Unlock()
- }
- Logger.Printf("*** mockbroker/%d/%d: connection closed, err=%v", b.BrokerID(), idx, err)
-}
-
-func (b *MockBroker) defaultRequestHandler(req *request) (res encoder) {
- select {
- case res, ok := <-b.expectations:
- if !ok {
- return nil
- }
- return res
- case <-time.After(expectationTimeout):
- return nil
- }
-}
-
-func (b *MockBroker) serverError(err error) {
- isConnectionClosedError := false
- if _, ok := err.(*net.OpError); ok {
- isConnectionClosedError = true
- } else if err == io.EOF {
- isConnectionClosedError = true
- } else if err.Error() == "use of closed network connection" {
- isConnectionClosedError = true
- }
-
- if isConnectionClosedError {
- return
- }
-
- b.t.Errorf(err.Error())
-}
-
-// NewMockBroker launches a fake Kafka broker. It takes a TestReporter as provided by the
-// test framework. If an error occurs it is
-// simply logged to the TestReporter and the broker exits.
-func NewMockBroker(t TestReporter, brokerID int32) *MockBroker {
- return NewMockBrokerAddr(t, brokerID, "localhost:0")
-}
-
-// NewMockBrokerAddr behaves like NewMockBroker but listens on the address you give
-// it rather than just some ephemeral port.
-func NewMockBrokerAddr(t TestReporter, brokerID int32, addr string) *MockBroker {
- var err error
-
- broker := &MockBroker{
- closing: make(chan none),
- stopper: make(chan none),
- t: t,
- brokerID: brokerID,
- expectations: make(chan encoder, 512),
- }
- broker.handler = broker.defaultRequestHandler
-
- broker.listener, err = net.Listen("tcp", addr)
- if err != nil {
- t.Fatal(err)
- }
- Logger.Printf("*** mockbroker/%d listening on %s\n", brokerID, broker.listener.Addr().String())
- _, portStr, err := net.SplitHostPort(broker.listener.Addr().String())
- if err != nil {
- t.Fatal(err)
- }
- tmp, err := strconv.ParseInt(portStr, 10, 32)
- if err != nil {
- t.Fatal(err)
- }
- broker.port = int32(tmp)
-
- go broker.serverLoop()
-
- return broker
-}
-
-func (b *MockBroker) Returns(e encoder) {
- b.expectations <- e
-}
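A sketch of the expectation-driven usage the comments above describe: queue responses with Returns, then point a real client at Addr(). The topic name is hypothetical:

package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestWithMockBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	// Queue the metadata the client will request on startup.
	meta := new(sarama.MetadataResponse)
	meta.AddBroker(broker.Addr(), broker.BrokerID())
	meta.AddTopicPartition("logs", 0, broker.BrokerID(), nil, nil, sarama.ErrNoError)
	broker.Returns(meta)

	client, err := sarama.NewClient([]string{broker.Addr()}, nil)
	if err != nil {
		t.Fatal(err)
	}
	defer client.Close()
}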
diff --git a/vendor/github.com/Shopify/sarama/mockresponses.go b/vendor/github.com/Shopify/sarama/mockresponses.go
deleted file mode 100644
index a203142094d..00000000000
--- a/vendor/github.com/Shopify/sarama/mockresponses.go
+++ /dev/null
@@ -1,455 +0,0 @@
-package sarama
-
-import (
- "fmt"
-)
-
-// TestReporter has methods matching Go's testing.T to avoid importing
-// `testing` in the main part of the library.
-type TestReporter interface {
- Error(...interface{})
- Errorf(string, ...interface{})
- Fatal(...interface{})
- Fatalf(string, ...interface{})
-}
-
-// MockResponse is a response builder interface; it defines one method that
-// allows generating a response based on a request body. MockResponses are used
-// to program behavior of MockBroker in tests.
-type MockResponse interface {
- For(reqBody versionedDecoder) (res encoder)
-}
-
-// MockWrapper is a mock response builder that returns a particular concrete
-// response regardless of the actual request passed to the `For` method.
-type MockWrapper struct {
- res encoder
-}
-
-func (mw *MockWrapper) For(reqBody versionedDecoder) (res encoder) {
- return mw.res
-}
-
-func NewMockWrapper(res encoder) *MockWrapper {
- return &MockWrapper{res: res}
-}
-
-// MockSequence is a mock response builder that is created from a sequence of
-// concrete responses. Each time a `MockBroker` calls its `For` method
-// the next response from the sequence is returned. When the end of the
-// sequence is reached the last element from the sequence is returned.
-type MockSequence struct {
- responses []MockResponse
-}
-
-func NewMockSequence(responses ...interface{}) *MockSequence {
- ms := &MockSequence{}
- ms.responses = make([]MockResponse, len(responses))
- for i, res := range responses {
- switch res := res.(type) {
- case MockResponse:
- ms.responses[i] = res
- case encoder:
- ms.responses[i] = NewMockWrapper(res)
- default:
- panic(fmt.Sprintf("Unexpected response type: %T", res))
- }
- }
- return ms
-}
-
-func (mc *MockSequence) For(reqBody versionedDecoder) (res encoder) {
- res = mc.responses[0].For(reqBody)
- if len(mc.responses) > 1 {
- mc.responses = mc.responses[1:]
- }
- return res
-}
-
-// MockMetadataResponse is a `MetadataResponse` builder.
-type MockMetadataResponse struct {
- leaders map[string]map[int32]int32
- brokers map[string]int32
- t TestReporter
-}
-
-func NewMockMetadataResponse(t TestReporter) *MockMetadataResponse {
- return &MockMetadataResponse{
- leaders: make(map[string]map[int32]int32),
- brokers: make(map[string]int32),
- t: t,
- }
-}
-
-func (mmr *MockMetadataResponse) SetLeader(topic string, partition, brokerID int32) *MockMetadataResponse {
- partitions := mmr.leaders[topic]
- if partitions == nil {
- partitions = make(map[int32]int32)
- mmr.leaders[topic] = partitions
- }
- partitions[partition] = brokerID
- return mmr
-}
-
-func (mmr *MockMetadataResponse) SetBroker(addr string, brokerID int32) *MockMetadataResponse {
- mmr.brokers[addr] = brokerID
- return mmr
-}
-
-func (mmr *MockMetadataResponse) For(reqBody versionedDecoder) encoder {
- metadataRequest := reqBody.(*MetadataRequest)
- metadataResponse := &MetadataResponse{}
- for addr, brokerID := range mmr.brokers {
- metadataResponse.AddBroker(addr, brokerID)
- }
- if len(metadataRequest.Topics) == 0 {
- for topic, partitions := range mmr.leaders {
- for partition, brokerID := range partitions {
- metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
- }
- }
- return metadataResponse
- }
- for _, topic := range metadataRequest.Topics {
- for partition, brokerID := range mmr.leaders[topic] {
- metadataResponse.AddTopicPartition(topic, partition, brokerID, nil, nil, ErrNoError)
- }
- }
- return metadataResponse
-}
-
-// MockOffsetResponse is an `OffsetResponse` builder.
-type MockOffsetResponse struct {
- offsets map[string]map[int32]map[int64]int64
- t TestReporter
-}
-
-func NewMockOffsetResponse(t TestReporter) *MockOffsetResponse {
- return &MockOffsetResponse{
- offsets: make(map[string]map[int32]map[int64]int64),
- t: t,
- }
-}
-
-func (mor *MockOffsetResponse) SetOffset(topic string, partition int32, time, offset int64) *MockOffsetResponse {
- partitions := mor.offsets[topic]
- if partitions == nil {
- partitions = make(map[int32]map[int64]int64)
- mor.offsets[topic] = partitions
- }
- times := partitions[partition]
- if times == nil {
- times = make(map[int64]int64)
- partitions[partition] = times
- }
- times[time] = offset
- return mor
-}
-
-func (mor *MockOffsetResponse) For(reqBody versionedDecoder) encoder {
- offsetRequest := reqBody.(*OffsetRequest)
- offsetResponse := &OffsetResponse{}
- for topic, partitions := range offsetRequest.blocks {
- for partition, block := range partitions {
- offset := mor.getOffset(topic, partition, block.time)
- offsetResponse.AddTopicPartition(topic, partition, offset)
- }
- }
- return offsetResponse
-}
-
-func (mor *MockOffsetResponse) getOffset(topic string, partition int32, time int64) int64 {
- partitions := mor.offsets[topic]
- if partitions == nil {
- mor.t.Errorf("missing topic: %s", topic)
- }
- times := partitions[partition]
- if times == nil {
- mor.t.Errorf("missing partition: %d", partition)
- }
- offset, ok := times[time]
- if !ok {
- mor.t.Errorf("missing time: %d", time)
- }
- return offset
-}
-
-// MockFetchResponse is a `FetchResponse` builder.
-type MockFetchResponse struct {
- messages map[string]map[int32]map[int64]Encoder
- highWaterMarks map[string]map[int32]int64
- t TestReporter
- batchSize int
-}
-
-func NewMockFetchResponse(t TestReporter, batchSize int) *MockFetchResponse {
- return &MockFetchResponse{
- messages: make(map[string]map[int32]map[int64]Encoder),
- highWaterMarks: make(map[string]map[int32]int64),
- t: t,
- batchSize: batchSize,
- }
-}
-
-func (mfr *MockFetchResponse) SetMessage(topic string, partition int32, offset int64, msg Encoder) *MockFetchResponse {
- partitions := mfr.messages[topic]
- if partitions == nil {
- partitions = make(map[int32]map[int64]Encoder)
- mfr.messages[topic] = partitions
- }
- messages := partitions[partition]
- if messages == nil {
- messages = make(map[int64]Encoder)
- partitions[partition] = messages
- }
- messages[offset] = msg
- return mfr
-}
-
-func (mfr *MockFetchResponse) SetHighWaterMark(topic string, partition int32, offset int64) *MockFetchResponse {
- partitions := mfr.highWaterMarks[topic]
- if partitions == nil {
- partitions = make(map[int32]int64)
- mfr.highWaterMarks[topic] = partitions
- }
- partitions[partition] = offset
- return mfr
-}
-
-func (mfr *MockFetchResponse) For(reqBody versionedDecoder) encoder {
- fetchRequest := reqBody.(*FetchRequest)
- res := &FetchResponse{}
- for topic, partitions := range fetchRequest.blocks {
- for partition, block := range partitions {
- initialOffset := block.fetchOffset
- offset := initialOffset
- maxOffset := initialOffset + int64(mfr.getMessageCount(topic, partition))
- for i := 0; i < mfr.batchSize && offset < maxOffset; {
- msg := mfr.getMessage(topic, partition, offset)
- if msg != nil {
- res.AddMessage(topic, partition, nil, msg, offset)
- i++
- }
- offset++
- }
- fb := res.GetBlock(topic, partition)
- if fb == nil {
- res.AddError(topic, partition, ErrNoError)
- fb = res.GetBlock(topic, partition)
- }
- fb.HighWaterMarkOffset = mfr.getHighWaterMark(topic, partition)
- }
- }
- return res
-}
-
-func (mfr *MockFetchResponse) getMessage(topic string, partition int32, offset int64) Encoder {
- partitions := mfr.messages[topic]
- if partitions == nil {
- return nil
- }
- messages := partitions[partition]
- if messages == nil {
- return nil
- }
- return messages[offset]
-}
-
-func (mfr *MockFetchResponse) getMessageCount(topic string, partition int32) int {
- partitions := mfr.messages[topic]
- if partitions == nil {
- return 0
- }
- messages := partitions[partition]
- if messages == nil {
- return 0
- }
- return len(messages)
-}
-
-func (mfr *MockFetchResponse) getHighWaterMark(topic string, partition int32) int64 {
- partitions := mfr.highWaterMarks[topic]
- if partitions == nil {
- return 0
- }
- return partitions[partition]
-}
-
-// MockConsumerMetadataResponse is a `ConsumerMetadataResponse` builder.
-type MockConsumerMetadataResponse struct {
- coordinators map[string]interface{}
- t TestReporter
-}
-
-func NewMockConsumerMetadataResponse(t TestReporter) *MockConsumerMetadataResponse {
- return &MockConsumerMetadataResponse{
- coordinators: make(map[string]interface{}),
- t: t,
- }
-}
-
-func (mr *MockConsumerMetadataResponse) SetCoordinator(group string, broker *MockBroker) *MockConsumerMetadataResponse {
- mr.coordinators[group] = broker
- return mr
-}
-
-func (mr *MockConsumerMetadataResponse) SetError(group string, kerror KError) *MockConsumerMetadataResponse {
- mr.coordinators[group] = kerror
- return mr
-}
-
-func (mr *MockConsumerMetadataResponse) For(reqBody versionedDecoder) encoder {
- req := reqBody.(*ConsumerMetadataRequest)
- group := req.ConsumerGroup
- res := &ConsumerMetadataResponse{}
- v := mr.coordinators[group]
- switch v := v.(type) {
- case *MockBroker:
- res.Coordinator = &Broker{id: v.BrokerID(), addr: v.Addr()}
- case KError:
- res.Err = v
- }
- return res
-}
-
-// MockOffsetCommitResponse is an `OffsetCommitResponse` builder.
-type MockOffsetCommitResponse struct {
- errors map[string]map[string]map[int32]KError
- t TestReporter
-}
-
-func NewMockOffsetCommitResponse(t TestReporter) *MockOffsetCommitResponse {
- return &MockOffsetCommitResponse{t: t}
-}
-
-func (mr *MockOffsetCommitResponse) SetError(group, topic string, partition int32, kerror KError) *MockOffsetCommitResponse {
- if mr.errors == nil {
- mr.errors = make(map[string]map[string]map[int32]KError)
- }
- topics := mr.errors[group]
- if topics == nil {
- topics = make(map[string]map[int32]KError)
- mr.errors[group] = topics
- }
- partitions := topics[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- topics[topic] = partitions
- }
- partitions[partition] = kerror
- return mr
-}
-
-func (mr *MockOffsetCommitResponse) For(reqBody versionedDecoder) encoder {
- req := reqBody.(*OffsetCommitRequest)
- group := req.ConsumerGroup
- res := &OffsetCommitResponse{}
- for topic, partitions := range req.blocks {
- for partition := range partitions {
- res.AddError(topic, partition, mr.getError(group, topic, partition))
- }
- }
- return res
-}
-
-func (mr *MockOffsetCommitResponse) getError(group, topic string, partition int32) KError {
- topics := mr.errors[group]
- if topics == nil {
- return ErrNoError
- }
- partitions := topics[topic]
- if partitions == nil {
- return ErrNoError
- }
- kerror, ok := partitions[partition]
- if !ok {
- return ErrNoError
- }
- return kerror
-}
-
-// MockProduceResponse is a `ProduceResponse` builder.
-type MockProduceResponse struct {
- errors map[string]map[int32]KError
- t TestReporter
-}
-
-func NewMockProduceResponse(t TestReporter) *MockProduceResponse {
- return &MockProduceResponse{t: t}
-}
-
-func (mr *MockProduceResponse) SetError(topic string, partition int32, kerror KError) *MockProduceResponse {
- if mr.errors == nil {
- mr.errors = make(map[string]map[int32]KError)
- }
- partitions := mr.errors[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- mr.errors[topic] = partitions
- }
- partitions[partition] = kerror
- return mr
-}
-
-func (mr *MockProduceResponse) For(reqBody versionedDecoder) encoder {
- req := reqBody.(*ProduceRequest)
- res := &ProduceResponse{}
- for topic, partitions := range req.msgSets {
- for partition := range partitions {
- res.AddTopicPartition(topic, partition, mr.getError(topic, partition))
- }
- }
- return res
-}
-
-func (mr *MockProduceResponse) getError(topic string, partition int32) KError {
- partitions := mr.errors[topic]
- if partitions == nil {
- return ErrNoError
- }
- kerror, ok := partitions[partition]
- if !ok {
- return ErrNoError
- }
- return kerror
-}
-
-// MockOffsetFetchResponse is an `OffsetFetchResponse` builder.
-type MockOffsetFetchResponse struct {
- offsets map[string]map[string]map[int32]*OffsetFetchResponseBlock
- t TestReporter
-}
-
-func NewMockOffsetFetchResponse(t TestReporter) *MockOffsetFetchResponse {
- return &MockOffsetFetchResponse{t: t}
-}
-
-func (mr *MockOffsetFetchResponse) SetOffset(group, topic string, partition int32, offset int64, metadata string, kerror KError) *MockOffsetFetchResponse {
- if mr.offsets == nil {
- mr.offsets = make(map[string]map[string]map[int32]*OffsetFetchResponseBlock)
- }
- topics := mr.offsets[group]
- if topics == nil {
- topics = make(map[string]map[int32]*OffsetFetchResponseBlock)
- mr.offsets[group] = topics
- }
- partitions := topics[topic]
- if partitions == nil {
- partitions = make(map[int32]*OffsetFetchResponseBlock)
- topics[topic] = partitions
- }
- partitions[partition] = &OffsetFetchResponseBlock{offset, metadata, kerror}
- return mr
-}
-
-func (mr *MockOffsetFetchResponse) For(reqBody versionedDecoder) encoder {
- req := reqBody.(*OffsetFetchRequest)
- group := req.ConsumerGroup
- res := &OffsetFetchResponse{}
- for topic, partitions := range mr.offsets[group] {
- for partition, block := range partitions {
- res.AddBlock(topic, partition, block)
- }
- }
- return res
-}
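And a sketch of the map-driven alternative via SetHandlerByMap: keys are request type names, values are the builders defined above. The topic, offsets, and broker ID are hypothetical:

package sarama_test

import (
	"testing"

	"github.com/Shopify/sarama"
)

func TestMapDrivenBroker(t *testing.T) {
	broker := sarama.NewMockBroker(t, 1)
	defer broker.Close()

	broker.SetHandlerByMap(map[string]sarama.MockResponse{
		"MetadataRequest": sarama.NewMockMetadataResponse(t).
			SetBroker(broker.Addr(), broker.BrokerID()).
			SetLeader("logs", 0, broker.BrokerID()),
		"OffsetRequest": sarama.NewMockOffsetResponse(t).
			SetOffset("logs", 0, sarama.OffsetOldest, 0).
			SetOffset("logs", 0, sarama.OffsetNewest, 1),
		"FetchRequest": sarama.NewMockFetchResponse(t, 1).
			SetMessage("logs", 0, 0, sarama.StringEncoder("hello")),
	})
	// A real consumer can now be pointed at broker.Addr().
}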
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_request.go b/vendor/github.com/Shopify/sarama/offset_commit_request.go
deleted file mode 100644
index b21ea634b02..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_request.go
+++ /dev/null
@@ -1,190 +0,0 @@
-package sarama
-
-// ReceiveTime is a special value for the timestamp field of Offset Commit Requests which
-// tells the broker to set the timestamp to the time at which the request was received.
-// The timestamp is only used if message version 1 is used, which requires kafka 0.8.2.
-const ReceiveTime int64 = -1
-
-// GroupGenerationUndefined is a special value for the group generation field of
-// Offset Commit Requests that should be used when a consumer group does not rely
-// on Kafka for partition management.
-const GroupGenerationUndefined = -1
-
-type offsetCommitRequestBlock struct {
- offset int64
- timestamp int64
- metadata string
-}
-
-func (b *offsetCommitRequestBlock) encode(pe packetEncoder, version int16) error {
- pe.putInt64(b.offset)
- if version == 1 {
- pe.putInt64(b.timestamp)
- } else if b.timestamp != 0 {
- Logger.Println("Non-zero timestamp specified for OffsetCommitRequest not v1, it will be ignored")
- }
-
- return pe.putString(b.metadata)
-}
-
-func (b *offsetCommitRequestBlock) decode(pd packetDecoder, version int16) (err error) {
- if b.offset, err = pd.getInt64(); err != nil {
- return err
- }
- if version == 1 {
- if b.timestamp, err = pd.getInt64(); err != nil {
- return err
- }
- }
- b.metadata, err = pd.getString()
- return err
-}
-
-type OffsetCommitRequest struct {
- ConsumerGroup string
- ConsumerGroupGeneration int32 // v1 or later
- ConsumerID string // v1 or later
- RetentionTime int64 // v2 or later
-
- // Version can be:
- // - 0 (kafka 0.8.1 and later)
- // - 1 (kafka 0.8.2 and later)
- // - 2 (kafka 0.9.0 and later)
- Version int16
- blocks map[string]map[int32]*offsetCommitRequestBlock
-}
-
-func (r *OffsetCommitRequest) encode(pe packetEncoder) error {
- if r.Version < 0 || r.Version > 2 {
- return PacketEncodingError{"invalid or unsupported OffsetCommitRequest version field"}
- }
-
- if err := pe.putString(r.ConsumerGroup); err != nil {
- return err
- }
-
- if r.Version >= 1 {
- pe.putInt32(r.ConsumerGroupGeneration)
- if err := pe.putString(r.ConsumerID); err != nil {
- return err
- }
- } else {
- if r.ConsumerGroupGeneration != 0 {
- Logger.Println("Non-zero ConsumerGroupGeneration specified for OffsetCommitRequest v0, it will be ignored")
- }
- if r.ConsumerID != "" {
- Logger.Println("Non-empty ConsumerID specified for OffsetCommitRequest v0, it will be ignored")
- }
- }
-
- if r.Version >= 2 {
- pe.putInt64(r.RetentionTime)
- } else if r.RetentionTime != 0 {
- Logger.Println("Non-zero RetentionTime specified for OffsetCommitRequest version <2, it will be ignored")
- }
-
- if err := pe.putArrayLength(len(r.blocks)); err != nil {
- return err
- }
- for topic, partitions := range r.blocks {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err := block.encode(pe, r.Version); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *OffsetCommitRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- if r.ConsumerGroup, err = pd.getString(); err != nil {
- return err
- }
-
- if r.Version >= 1 {
- if r.ConsumerGroupGeneration, err = pd.getInt32(); err != nil {
- return err
- }
- if r.ConsumerID, err = pd.getString(); err != nil {
- return err
- }
- }
-
- if r.Version >= 2 {
- if r.RetentionTime, err = pd.getInt64(); err != nil {
- return err
- }
- }
-
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &offsetCommitRequestBlock{}
- if err := block.decode(pd, r.Version); err != nil {
- return err
- }
- r.blocks[topic][partition] = block
- }
- }
- return nil
-}
-
-func (r *OffsetCommitRequest) key() int16 {
- return 8
-}
-
-func (r *OffsetCommitRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetCommitRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- case 2:
- return V0_9_0_0
- default:
- return minVersion
- }
-}
-
-func (r *OffsetCommitRequest) AddBlock(topic string, partitionID int32, offset int64, timestamp int64, metadata string) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*offsetCommitRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*offsetCommitRequestBlock)
- }
-
- r.blocks[topic][partitionID] = &offsetCommitRequestBlock{offset, timestamp, metadata}
-}
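A sketch tying the two special constants above to AddBlock; the group and topic names are hypothetical:

package main

import "github.com/Shopify/sarama"

func main() {
	req := &sarama.OffsetCommitRequest{
		Version:                 1,               // v1 adds the generation/consumer fields (Kafka 0.8.2+)
		ConsumerGroup:           "example-group", // hypothetical
		ConsumerGroupGeneration: sarama.GroupGenerationUndefined,
	}
	// ReceiveTime asks the broker to stamp the commit with its own clock;
	// as encode above shows, the timestamp is only honoured by v1 requests.
	req.AddBlock("logs", 0, 42, sarama.ReceiveTime, "")
	_ = req
}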
diff --git a/vendor/github.com/Shopify/sarama/offset_commit_response.go b/vendor/github.com/Shopify/sarama/offset_commit_response.go
deleted file mode 100644
index 7f277e7753a..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_commit_response.go
+++ /dev/null
@@ -1,85 +0,0 @@
-package sarama
-
-type OffsetCommitResponse struct {
- Errors map[string]map[int32]KError
-}
-
-func (r *OffsetCommitResponse) AddError(topic string, partition int32, kerror KError) {
- if r.Errors == nil {
- r.Errors = make(map[string]map[int32]KError)
- }
- partitions := r.Errors[topic]
- if partitions == nil {
- partitions = make(map[int32]KError)
- r.Errors[topic] = partitions
- }
- partitions[partition] = kerror
-}
-
-func (r *OffsetCommitResponse) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Errors)); err != nil {
- return err
- }
- for topic, partitions := range r.Errors {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, kerror := range partitions {
- pe.putInt32(partition)
- pe.putInt16(int16(kerror))
- }
- }
- return nil
-}
-
-func (r *OffsetCommitResponse) decode(pd packetDecoder, version int16) (err error) {
- numTopics, err := pd.getArrayLength()
- if err != nil || numTopics == 0 {
- return err
- }
-
- r.Errors = make(map[string]map[int32]KError, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numErrors, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Errors[name] = make(map[int32]KError, numErrors)
-
- for j := 0; j < numErrors; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.Errors[name][id] = KError(tmp)
- }
- }
-
- return nil
-}
-
-func (r *OffsetCommitResponse) key() int16 {
- return 8
-}
-
-func (r *OffsetCommitResponse) version() int16 {
- return 0
-}
-
-func (r *OffsetCommitResponse) requiredVersion() KafkaVersion {
- return minVersion
-}
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_request.go b/vendor/github.com/Shopify/sarama/offset_fetch_request.go
deleted file mode 100644
index b19fe79ba7a..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_request.go
+++ /dev/null
@@ -1,81 +0,0 @@
-package sarama
-
-type OffsetFetchRequest struct {
- ConsumerGroup string
- Version int16
- partitions map[string][]int32
-}
-
-func (r *OffsetFetchRequest) encode(pe packetEncoder) (err error) {
- if r.Version < 0 || r.Version > 1 {
- return PacketEncodingError{"invalid or unsupported OffsetFetchRequest version field"}
- }
-
- if err = pe.putString(r.ConsumerGroup); err != nil {
- return err
- }
- if err = pe.putArrayLength(len(r.partitions)); err != nil {
- return err
- }
- for topic, partitions := range r.partitions {
- if err = pe.putString(topic); err != nil {
- return err
- }
- if err = pe.putInt32Array(partitions); err != nil {
- return err
- }
- }
- return nil
-}
-
-func (r *OffsetFetchRequest) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
- if r.ConsumerGroup, err = pd.getString(); err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if partitionCount == 0 {
- return nil
- }
- r.partitions = make(map[string][]int32)
- for i := 0; i < partitionCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitions, err := pd.getInt32Array()
- if err != nil {
- return err
- }
- r.partitions[topic] = partitions
- }
- return nil
-}
-
-func (r *OffsetFetchRequest) key() int16 {
- return 9
-}
-
-func (r *OffsetFetchRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetFetchRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_8_2_0
- default:
- return minVersion
- }
-}
-
-func (r *OffsetFetchRequest) AddPartition(topic string, partitionID int32) {
- if r.partitions == nil {
- r.partitions = make(map[string][]int32)
- }
-
- r.partitions[topic] = append(r.partitions[topic], partitionID)
-}
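
For reference, this request (API key 9) is what the offset manager below sends through Broker.FetchOffset; Version 1 — gated on V0_8_2_0 above — fetches offsets from Kafka's own storage rather than ZooKeeper. A compile-level sketch against the sarama v1.11 API; group and topic names are placeholders:

package main

import "github.com/Shopify/sarama"

// newOffsetFetch builds a v1 OffsetFetchRequest for one topic/partition,
// mirroring what the deleted offset manager does in fetchInitialOffset.
func newOffsetFetch(group, topic string, partition int32) *sarama.OffsetFetchRequest {
	req := &sarama.OffsetFetchRequest{
		ConsumerGroup: group,
		Version:       1, // requires Kafka >= 0.8.2 per requiredVersion above
	}
	req.AddPartition(topic, partition)
	return req
}

func main() {
	_ = newOffsetFetch("my-group", "events", 0)
}
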
diff --git a/vendor/github.com/Shopify/sarama/offset_fetch_response.go b/vendor/github.com/Shopify/sarama/offset_fetch_response.go
deleted file mode 100644
index 323220eac97..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_fetch_response.go
+++ /dev/null
@@ -1,143 +0,0 @@
-package sarama
-
-type OffsetFetchResponseBlock struct {
- Offset int64
- Metadata string
- Err KError
-}
-
-func (b *OffsetFetchResponseBlock) decode(pd packetDecoder) (err error) {
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- b.Metadata, err = pd.getString()
- if err != nil {
- return err
- }
-
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- return nil
-}
-
-func (b *OffsetFetchResponseBlock) encode(pe packetEncoder) (err error) {
- pe.putInt64(b.Offset)
-
- err = pe.putString(b.Metadata)
- if err != nil {
- return err
- }
-
- pe.putInt16(int16(b.Err))
-
- return nil
-}
-
-type OffsetFetchResponse struct {
- Blocks map[string]map[int32]*OffsetFetchResponseBlock
-}
-
-func (r *OffsetFetchResponse) encode(pe packetEncoder) error {
- if err := pe.putArrayLength(len(r.Blocks)); err != nil {
- return err
- }
- for topic, partitions := range r.Blocks {
- if err := pe.putString(topic); err != nil {
- return err
- }
- if err := pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err := block.encode(pe); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *OffsetFetchResponse) decode(pd packetDecoder, version int16) (err error) {
- numTopics, err := pd.getArrayLength()
- if err != nil || numTopics == 0 {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- if numBlocks == 0 {
- r.Blocks[name] = nil
- continue
- }
- r.Blocks[name] = make(map[int32]*OffsetFetchResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(OffsetFetchResponseBlock)
- err = block.decode(pd)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *OffsetFetchResponse) key() int16 {
- return 9
-}
-
-func (r *OffsetFetchResponse) version() int16 {
- return 0
-}
-
-func (r *OffsetFetchResponse) requiredVersion() KafkaVersion {
- return minVersion
-}
-
-func (r *OffsetFetchResponse) GetBlock(topic string, partition int32) *OffsetFetchResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-func (r *OffsetFetchResponse) AddBlock(topic string, partition int32, block *OffsetFetchResponseBlock) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*OffsetFetchResponseBlock)
- }
- partitions := r.Blocks[topic]
- if partitions == nil {
- partitions = make(map[int32]*OffsetFetchResponseBlock)
- r.Blocks[topic] = partitions
- }
- partitions[partition] = block
-}
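
On the read side, GetBlock above returns nil when either the topic or the partition is absent, so callers must treat nil as an incomplete response — exactly what fetchInitialOffset does in the offset manager below. A small sketch (names illustrative):

package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

// committedOffset extracts one partition's committed offset from an
// OffsetFetchResponse, e.g. the result of Broker.FetchOffset.
func committedOffset(resp *sarama.OffsetFetchResponse, topic string, partition int32) (int64, string, error) {
	block := resp.GetBlock(topic, partition)
	if block == nil {
		return 0, "", sarama.ErrIncompleteResponse
	}
	if block.Err != sarama.ErrNoError {
		return 0, "", block.Err
	}
	return block.Offset, block.Metadata, nil
}

func main() {
	resp := new(sarama.OffsetFetchResponse)
	resp.AddBlock("events", 0, &sarama.OffsetFetchResponseBlock{Offset: 42})
	fmt.Println(committedOffset(resp, "events", 0))
}
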
diff --git a/vendor/github.com/Shopify/sarama/offset_manager.go b/vendor/github.com/Shopify/sarama/offset_manager.go
deleted file mode 100644
index 5e15cdafe3a..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_manager.go
+++ /dev/null
@@ -1,542 +0,0 @@
-package sarama
-
-import (
- "sync"
- "time"
-)
-
-// Offset Manager
-
-// OffsetManager uses Kafka to store and fetch consumed partition offsets.
-type OffsetManager interface {
- // ManagePartition creates a PartitionOffsetManager on the given topic/partition.
- // It will return an error if this OffsetManager is already managing the given
- // topic/partition.
- ManagePartition(topic string, partition int32) (PartitionOffsetManager, error)
-
- // Close stops the OffsetManager from managing offsets. It is required to call
- // this function before an OffsetManager object passes out of scope, as it
- // will otherwise leak memory. You must call this after all the
- // PartitionOffsetManagers are closed.
- Close() error
-}
-
-type offsetManager struct {
- client Client
- conf *Config
- group string
-
- lock sync.Mutex
- poms map[string]map[int32]*partitionOffsetManager
- boms map[*Broker]*brokerOffsetManager
-}
-
-// NewOffsetManagerFromClient creates a new OffsetManager from the given client.
-// It is still necessary to call Close() on the underlying client when finished with the offset manager.
-func NewOffsetManagerFromClient(group string, client Client) (OffsetManager, error) {
- // Check that we are not dealing with a closed Client before processing any other arguments
- if client.Closed() {
- return nil, ErrClosedClient
- }
-
- om := &offsetManager{
- client: client,
- conf: client.Config(),
- group: group,
- poms: make(map[string]map[int32]*partitionOffsetManager),
- boms: make(map[*Broker]*brokerOffsetManager),
- }
-
- return om, nil
-}
-
-func (om *offsetManager) ManagePartition(topic string, partition int32) (PartitionOffsetManager, error) {
- pom, err := om.newPartitionOffsetManager(topic, partition)
- if err != nil {
- return nil, err
- }
-
- om.lock.Lock()
- defer om.lock.Unlock()
-
- topicManagers := om.poms[topic]
- if topicManagers == nil {
- topicManagers = make(map[int32]*partitionOffsetManager)
- om.poms[topic] = topicManagers
- }
-
- if topicManagers[partition] != nil {
- return nil, ConfigurationError("That topic/partition is already being managed")
- }
-
- topicManagers[partition] = pom
- return pom, nil
-}
-
-func (om *offsetManager) Close() error {
- return nil
-}
-
-func (om *offsetManager) refBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
- om.lock.Lock()
- defer om.lock.Unlock()
-
- bom := om.boms[broker]
- if bom == nil {
- bom = om.newBrokerOffsetManager(broker)
- om.boms[broker] = bom
- }
-
- bom.refs++
-
- return bom
-}
-
-func (om *offsetManager) unrefBrokerOffsetManager(bom *brokerOffsetManager) {
- om.lock.Lock()
- defer om.lock.Unlock()
-
- bom.refs--
-
- if bom.refs == 0 {
- close(bom.updateSubscriptions)
- if om.boms[bom.broker] == bom {
- delete(om.boms, bom.broker)
- }
- }
-}
-
-func (om *offsetManager) abandonBroker(bom *brokerOffsetManager) {
- om.lock.Lock()
- defer om.lock.Unlock()
-
- delete(om.boms, bom.broker)
-}
-
-func (om *offsetManager) abandonPartitionOffsetManager(pom *partitionOffsetManager) {
- om.lock.Lock()
- defer om.lock.Unlock()
-
- delete(om.poms[pom.topic], pom.partition)
- if len(om.poms[pom.topic]) == 0 {
- delete(om.poms, pom.topic)
- }
-}
-
-// Partition Offset Manager
-
-// PartitionOffsetManager uses Kafka to store and fetch consumed partition offsets. You MUST call Close()
-// on a partition offset manager to avoid leaks; it will not be garbage-collected automatically when it passes
-// out of scope.
-type PartitionOffsetManager interface {
- // NextOffset returns the next offset that should be consumed for the managed
- // partition, accompanied by metadata which can be used to reconstruct the state
- // of the partition consumer when it resumes. NextOffset() will return
- // `config.Consumer.Offsets.Initial` and an empty metadata string if no offset
- // was committed for this partition yet.
- NextOffset() (int64, string)
-
- // MarkOffset marks the provided offset, alongside a metadata string
- // that represents the state of the partition consumer at that point in time. The
- // metadata string can be used by another consumer to restore that state, so it
- // can resume consumption.
- //
- // To follow upstream conventions, you are expected to mark the offset of the
- // next message to read, not the last message read. Thus, when calling `MarkOffset`
- // you should typically add one to the offset of the last consumed message.
- //
- // Note: calling MarkOffset does not necessarily commit the offset to the backend
- // store immediately for efficiency reasons, and it may never be committed if
- // your application crashes. This means that you may end up processing the same
- // message twice, and your processing should ideally be idempotent.
- MarkOffset(offset int64, metadata string)
-
- // Errors returns a read channel of errors that occur during offset management, if
- // enabled. By default, errors are logged and not returned over this channel. If
- // you want to implement any custom error handling, set your config's
- // Consumer.Return.Errors setting to true, and read from this channel.
- Errors() <-chan *ConsumerError
-
- // AsyncClose initiates a shutdown of the PartitionOffsetManager. This method will
- // return immediately, after which you should wait until the 'errors' channel has
- // been drained and closed. It is required to call this function (or Close) before
- // a consumer object passes out of scope, as it will otherwise leak memory. You
- // must call this before calling Close on the underlying client.
- AsyncClose()
-
- // Close stops the PartitionOffsetManager from managing offsets. It is required to
- // call this function (or AsyncClose) before a PartitionOffsetManager object
- // passes out of scope, as it will otherwise leak memory. You must call this
- // before calling Close on the underlying client.
- Close() error
-}
-
-type partitionOffsetManager struct {
- parent *offsetManager
- topic string
- partition int32
-
- lock sync.Mutex
- offset int64
- metadata string
- dirty bool
- clean sync.Cond
- broker *brokerOffsetManager
-
- errors chan *ConsumerError
- rebalance chan none
- dying chan none
-}
-
-func (om *offsetManager) newPartitionOffsetManager(topic string, partition int32) (*partitionOffsetManager, error) {
- pom := &partitionOffsetManager{
- parent: om,
- topic: topic,
- partition: partition,
- errors: make(chan *ConsumerError, om.conf.ChannelBufferSize),
- rebalance: make(chan none, 1),
- dying: make(chan none),
- }
- pom.clean.L = &pom.lock
-
- if err := pom.selectBroker(); err != nil {
- return nil, err
- }
-
- if err := pom.fetchInitialOffset(om.conf.Metadata.Retry.Max); err != nil {
- return nil, err
- }
-
- pom.broker.updateSubscriptions <- pom
-
- go withRecover(pom.mainLoop)
-
- return pom, nil
-}
-
-func (pom *partitionOffsetManager) mainLoop() {
- for {
- select {
- case <-pom.rebalance:
- if err := pom.selectBroker(); err != nil {
- pom.handleError(err)
- pom.rebalance <- none{}
- } else {
- pom.broker.updateSubscriptions <- pom
- }
- case <-pom.dying:
- if pom.broker != nil {
- select {
- case <-pom.rebalance:
- case pom.broker.updateSubscriptions <- pom:
- }
- pom.parent.unrefBrokerOffsetManager(pom.broker)
- }
- pom.parent.abandonPartitionOffsetManager(pom)
- close(pom.errors)
- return
- }
- }
-}
-
-func (pom *partitionOffsetManager) selectBroker() error {
- if pom.broker != nil {
- pom.parent.unrefBrokerOffsetManager(pom.broker)
- pom.broker = nil
- }
-
- var broker *Broker
- var err error
-
- if err = pom.parent.client.RefreshCoordinator(pom.parent.group); err != nil {
- return err
- }
-
- if broker, err = pom.parent.client.Coordinator(pom.parent.group); err != nil {
- return err
- }
-
- pom.broker = pom.parent.refBrokerOffsetManager(broker)
- return nil
-}
-
-func (pom *partitionOffsetManager) fetchInitialOffset(retries int) error {
- request := new(OffsetFetchRequest)
- request.Version = 1
- request.ConsumerGroup = pom.parent.group
- request.AddPartition(pom.topic, pom.partition)
-
- response, err := pom.broker.broker.FetchOffset(request)
- if err != nil {
- return err
- }
-
- block := response.GetBlock(pom.topic, pom.partition)
- if block == nil {
- return ErrIncompleteResponse
- }
-
- switch block.Err {
- case ErrNoError:
- pom.offset = block.Offset
- pom.metadata = block.Metadata
- return nil
- case ErrNotCoordinatorForConsumer:
- if retries <= 0 {
- return block.Err
- }
- if err := pom.selectBroker(); err != nil {
- return err
- }
- return pom.fetchInitialOffset(retries - 1)
- case ErrOffsetsLoadInProgress:
- if retries <= 0 {
- return block.Err
- }
- time.Sleep(pom.parent.conf.Metadata.Retry.Backoff)
- return pom.fetchInitialOffset(retries - 1)
- default:
- return block.Err
- }
-}
-
-func (pom *partitionOffsetManager) handleError(err error) {
- cErr := &ConsumerError{
- Topic: pom.topic,
- Partition: pom.partition,
- Err: err,
- }
-
- if pom.parent.conf.Consumer.Return.Errors {
- pom.errors <- cErr
- } else {
- Logger.Println(cErr)
- }
-}
-
-func (pom *partitionOffsetManager) Errors() <-chan *ConsumerError {
- return pom.errors
-}
-
-func (pom *partitionOffsetManager) MarkOffset(offset int64, metadata string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if offset > pom.offset {
- pom.offset = offset
- pom.metadata = metadata
- pom.dirty = true
- }
-}
-
-func (pom *partitionOffsetManager) updateCommitted(offset int64, metadata string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if pom.offset == offset && pom.metadata == metadata {
- pom.dirty = false
- pom.clean.Signal()
- }
-}
-
-func (pom *partitionOffsetManager) NextOffset() (int64, string) {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- if pom.offset >= 0 {
- return pom.offset, pom.metadata
- }
-
- return pom.parent.conf.Consumer.Offsets.Initial, ""
-}
-
-func (pom *partitionOffsetManager) AsyncClose() {
- go func() {
- pom.lock.Lock()
- defer pom.lock.Unlock()
-
- for pom.dirty {
- pom.clean.Wait()
- }
-
- close(pom.dying)
- }()
-}
-
-func (pom *partitionOffsetManager) Close() error {
- pom.AsyncClose()
-
- var errors ConsumerErrors
- for err := range pom.errors {
- errors = append(errors, err)
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-// Broker Offset Manager
-
-type brokerOffsetManager struct {
- parent *offsetManager
- broker *Broker
- timer *time.Ticker
- updateSubscriptions chan *partitionOffsetManager
- subscriptions map[*partitionOffsetManager]none
- refs int
-}
-
-func (om *offsetManager) newBrokerOffsetManager(broker *Broker) *brokerOffsetManager {
- bom := &brokerOffsetManager{
- parent: om,
- broker: broker,
- timer: time.NewTicker(om.conf.Consumer.Offsets.CommitInterval),
- updateSubscriptions: make(chan *partitionOffsetManager),
- subscriptions: make(map[*partitionOffsetManager]none),
- }
-
- go withRecover(bom.mainLoop)
-
- return bom
-}
-
-func (bom *brokerOffsetManager) mainLoop() {
- for {
- select {
- case <-bom.timer.C:
- if len(bom.subscriptions) > 0 {
- bom.flushToBroker()
- }
- case s, ok := <-bom.updateSubscriptions:
- if !ok {
- bom.timer.Stop()
- return
- }
- if _, ok := bom.subscriptions[s]; ok {
- delete(bom.subscriptions, s)
- } else {
- bom.subscriptions[s] = none{}
- }
- }
- }
-}
-
-func (bom *brokerOffsetManager) flushToBroker() {
- request := bom.constructRequest()
- if request == nil {
- return
- }
-
- response, err := bom.broker.CommitOffset(request)
-
- if err != nil {
- bom.abort(err)
- return
- }
-
- for s := range bom.subscriptions {
- if request.blocks[s.topic] == nil || request.blocks[s.topic][s.partition] == nil {
- continue
- }
-
- var err KError
- var ok bool
-
- if response.Errors[s.topic] == nil {
- s.handleError(ErrIncompleteResponse)
- delete(bom.subscriptions, s)
- s.rebalance <- none{}
- continue
- }
- if err, ok = response.Errors[s.topic][s.partition]; !ok {
- s.handleError(ErrIncompleteResponse)
- delete(bom.subscriptions, s)
- s.rebalance <- none{}
- continue
- }
-
- switch err {
- case ErrNoError:
- block := request.blocks[s.topic][s.partition]
- s.updateCommitted(block.offset, block.metadata)
- case ErrNotLeaderForPartition, ErrLeaderNotAvailable,
- ErrConsumerCoordinatorNotAvailable, ErrNotCoordinatorForConsumer:
- // not a critical error, we just need to redispatch
- delete(bom.subscriptions, s)
- s.rebalance <- none{}
- case ErrOffsetMetadataTooLarge, ErrInvalidCommitOffsetSize:
- // nothing we can do about this, just tell the user and carry on
- s.handleError(err)
- case ErrOffsetsLoadInProgress:
- // nothing wrong but we didn't commit, we'll get it next time round
- break
- case ErrUnknownTopicOrPartition:
- // let the user know *and* try redispatching - if topic-auto-create is
- // enabled, redispatching should trigger a metadata request and create the
- // topic; if not then re-dispatching won't help, but we've let the user
- // know and it shouldn't hurt either (see https://github.com/Shopify/sarama/issues/706)
- fallthrough
- default:
- // dunno, tell the user and try redispatching
- s.handleError(err)
- delete(bom.subscriptions, s)
- s.rebalance <- none{}
- }
- }
-}
-
-func (bom *brokerOffsetManager) constructRequest() *OffsetCommitRequest {
- var r *OffsetCommitRequest
- var perPartitionTimestamp int64
- if bom.parent.conf.Consumer.Offsets.Retention == 0 {
- perPartitionTimestamp = ReceiveTime
- r = &OffsetCommitRequest{
- Version: 1,
- ConsumerGroup: bom.parent.group,
- ConsumerGroupGeneration: GroupGenerationUndefined,
- }
- } else {
- r = &OffsetCommitRequest{
- Version: 2,
- RetentionTime: int64(bom.parent.conf.Consumer.Offsets.Retention / time.Millisecond),
- ConsumerGroup: bom.parent.group,
- ConsumerGroupGeneration: GroupGenerationUndefined,
- }
-
- }
-
- for s := range bom.subscriptions {
- s.lock.Lock()
- if s.dirty {
- r.AddBlock(s.topic, s.partition, s.offset, perPartitionTimestamp, s.metadata)
- }
- s.lock.Unlock()
- }
-
- if len(r.blocks) > 0 {
- return r
- }
-
- return nil
-}
-
-func (bom *brokerOffsetManager) abort(err error) {
- _ = bom.broker.Close() // we don't care about the error this might return, we already have one
- bom.parent.abandonBroker(bom)
-
- for pom := range bom.subscriptions {
- pom.handleError(err)
- pom.rebalance <- none{}
- }
-
- for s := range bom.updateSubscriptions {
- if _, ok := bom.subscriptions[s]; !ok {
- s.handleError(err)
- s.rebalance <- none{}
- }
- }
-
- bom.subscriptions = make(map[*partitionOffsetManager]none)
-}
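
Putting the deleted manager together: it is created from a Client, hands out one PartitionOffsetManager per topic/partition, and must be torn down in the documented order — partition managers first, then the OffsetManager, then the client. A hedged usage sketch against the sarama v1.11 API; the broker address, group, and topic are placeholders:

package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	client, err := sarama.NewClient([]string{"localhost:9092"}, sarama.NewConfig())
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close() // deferred first, so it runs last per the comments above

	om, err := sarama.NewOffsetManagerFromClient("my-group", client)
	if err != nil {
		log.Fatal(err)
	}
	defer om.Close()

	pom, err := om.ManagePartition("events", 0)
	if err != nil {
		log.Fatal(err)
	}
	defer pom.Close()

	next, meta := pom.NextOffset()
	log.Printf("resuming at %d (metadata %q)", next, meta)

	// ... consume the message at offset `next` ...
	pom.MarkOffset(next+1, "") // mark the next offset to read, not the last read
}
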
diff --git a/vendor/github.com/Shopify/sarama/offset_request.go b/vendor/github.com/Shopify/sarama/offset_request.go
deleted file mode 100644
index 6c269601647..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_request.go
+++ /dev/null
@@ -1,132 +0,0 @@
-package sarama
-
-type offsetRequestBlock struct {
- time int64
- maxOffsets int32 // Only used in version 0
-}
-
-func (b *offsetRequestBlock) encode(pe packetEncoder, version int16) error {
- pe.putInt64(int64(b.time))
- if version == 0 {
- pe.putInt32(b.maxOffsets)
- }
-
- return nil
-}
-
-func (b *offsetRequestBlock) decode(pd packetDecoder, version int16) (err error) {
- if b.time, err = pd.getInt64(); err != nil {
- return err
- }
- if version == 0 {
- if b.maxOffsets, err = pd.getInt32(); err != nil {
- return err
- }
- }
- return nil
-}
-
-type OffsetRequest struct {
- Version int16
- blocks map[string]map[int32]*offsetRequestBlock
-}
-
-func (r *OffsetRequest) encode(pe packetEncoder) error {
- pe.putInt32(-1) // replica ID is always -1 for clients
- err := pe.putArrayLength(len(r.blocks))
- if err != nil {
- return err
- }
- for topic, partitions := range r.blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err = block.encode(pe, r.Version); err != nil {
- return err
- }
- }
- }
- return nil
-}
-
-func (r *OffsetRequest) decode(pd packetDecoder, version int16) error {
- r.Version = version
-
- // Ignore replica ID
- if _, err := pd.getInt32(); err != nil {
- return err
- }
- blockCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if blockCount == 0 {
- return nil
- }
- r.blocks = make(map[string]map[int32]*offsetRequestBlock)
- for i := 0; i < blockCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.blocks[topic] = make(map[int32]*offsetRequestBlock)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- block := &offsetRequestBlock{}
- if err := block.decode(pd, version); err != nil {
- return err
- }
- r.blocks[topic][partition] = block
- }
- }
- return nil
-}
-
-func (r *OffsetRequest) key() int16 {
- return 2
-}
-
-func (r *OffsetRequest) version() int16 {
- return r.Version
-}
-
-func (r *OffsetRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_1_0
- default:
- return minVersion
- }
-}
-
-func (r *OffsetRequest) AddBlock(topic string, partitionID int32, time int64, maxOffsets int32) {
- if r.blocks == nil {
- r.blocks = make(map[string]map[int32]*offsetRequestBlock)
- }
-
- if r.blocks[topic] == nil {
- r.blocks[topic] = make(map[int32]*offsetRequestBlock)
- }
-
- tmp := new(offsetRequestBlock)
- tmp.time = time
- if r.Version == 0 {
- tmp.maxOffsets = maxOffsets
- }
-
- r.blocks[topic][partitionID] = tmp
-}
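
OffsetRequest (API key 2) is the "list offsets" call. The magic time values are exposed by sarama as OffsetNewest (-1) and OffsetOldest (-2), and most callers reach this request through Client.GetOffset rather than building it by hand; both forms are sketched below, assuming the v1.11 API:

package main

import "github.com/Shopify/sarama"

// newestOffset asks for the next offset that will be produced to a partition.
func newestOffset(client sarama.Client, topic string, partition int32) (int64, error) {
	return client.GetOffset(topic, partition, sarama.OffsetNewest)
}

// The equivalent hand-built request; AddBlock's maxOffsets argument is only
// honoured by version 0, per the encoder above.
func newestOffsetRequest(topic string, partition int32) *sarama.OffsetRequest {
	req := &sarama.OffsetRequest{}
	req.AddBlock(topic, partition, sarama.OffsetNewest, 1)
	return req
}

func main() {
	_ = newestOffsetRequest("events", 0)
}
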
diff --git a/vendor/github.com/Shopify/sarama/offset_response.go b/vendor/github.com/Shopify/sarama/offset_response.go
deleted file mode 100644
index 9a9cfe96f3b..00000000000
--- a/vendor/github.com/Shopify/sarama/offset_response.go
+++ /dev/null
@@ -1,174 +0,0 @@
-package sarama
-
-type OffsetResponseBlock struct {
- Err KError
- Offsets []int64 // Version 0
- Offset int64 // Version 1
- Timestamp int64 // Version 1
-}
-
-func (b *OffsetResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- if version == 0 {
- b.Offsets, err = pd.getInt64Array()
-
- return err
- }
-
- b.Timestamp, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- // For backwards compatibility put the offset in the offsets array too
- b.Offsets = []int64{b.Offset}
-
- return nil
-}
-
-func (b *OffsetResponseBlock) encode(pe packetEncoder, version int16) (err error) {
- pe.putInt16(int16(b.Err))
-
- if version == 0 {
- return pe.putInt64Array(b.Offsets)
- }
-
- pe.putInt64(b.Timestamp)
- pe.putInt64(b.Offset)
-
- return nil
-}
-
-type OffsetResponse struct {
- Version int16
- Blocks map[string]map[int32]*OffsetResponseBlock
-}
-
-func (r *OffsetResponse) decode(pd packetDecoder, version int16) (err error) {
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*OffsetResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(OffsetResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- return nil
-}
-
-func (r *OffsetResponse) GetBlock(topic string, partition int32) *OffsetResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-
-func (r *OffsetResponse) encode(pe packetEncoder) (err error) {
- if err = pe.putArrayLength(len(r.Blocks)); err != nil {
- return err
- }
-
- for topic, partitions := range r.Blocks {
- if err = pe.putString(topic); err != nil {
- return err
- }
- if err = pe.putArrayLength(len(partitions)); err != nil {
- return err
- }
- for partition, block := range partitions {
- pe.putInt32(partition)
- if err = block.encode(pe, r.version()); err != nil {
- return err
- }
- }
- }
-
- return nil
-}
-
-func (r *OffsetResponse) key() int16 {
- return 2
-}
-
-func (r *OffsetResponse) version() int16 {
- return r.Version
-}
-
-func (r *OffsetResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_10_1_0
- default:
- return minVersion
- }
-}
-
-// testing API
-
-func (r *OffsetResponse) AddTopicPartition(topic string, partition int32, offset int64) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*OffsetResponseBlock)
- }
- byTopic, ok := r.Blocks[topic]
- if !ok {
- byTopic = make(map[int32]*OffsetResponseBlock)
- r.Blocks[topic] = byTopic
- }
- byTopic[partition] = &OffsetResponseBlock{Offsets: []int64{offset}, Offset: offset}
-}
diff --git a/vendor/github.com/Shopify/sarama/packet_decoder.go b/vendor/github.com/Shopify/sarama/packet_decoder.go
deleted file mode 100644
index 28670c0e625..00000000000
--- a/vendor/github.com/Shopify/sarama/packet_decoder.go
+++ /dev/null
@@ -1,45 +0,0 @@
-package sarama
-
-// PacketDecoder is the interface providing helpers for reading with Kafka's encoding rules.
-// Types implementing Decoder only need to worry about calling methods like GetString,
-// not about how a string is represented in Kafka.
-type packetDecoder interface {
- // Primitives
- getInt8() (int8, error)
- getInt16() (int16, error)
- getInt32() (int32, error)
- getInt64() (int64, error)
- getArrayLength() (int, error)
-
- // Collections
- getBytes() ([]byte, error)
- getString() (string, error)
- getInt32Array() ([]int32, error)
- getInt64Array() ([]int64, error)
- getStringArray() ([]string, error)
-
- // Subsets
- remaining() int
- getSubset(length int) (packetDecoder, error)
-
- // Stacks, see PushDecoder
- push(in pushDecoder) error
- pop() error
-}
-
-// PushDecoder is the interface for decoding fields like CRCs and lengths where the validity
-// of the field depends on what is after it in the packet. Start them with PacketDecoder.Push() where
-// the actual value is located in the packet, then PacketDecoder.Pop() them when all the bytes they
-// depend upon have been decoded.
-type pushDecoder interface {
- // Saves the offset into the input buffer as the location to actually read the calculated value when able.
- saveOffset(in int)
-
- // Returns the length of data to reserve for the input of this decoder (e.g. 4 bytes for a CRC32).
- reserveLength() int
-
- // Indicates that all required data is now available to calculate and check the field.
- // SaveOffset is guaranteed to have been called first. The implementation should read ReserveLength() bytes
- // of data from the saved offset, and verify it based on the data between the saved offset and curOffset.
- check(curOffset int, buf []byte) error
-}
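
The push/pop stack above is how sarama decodes fields whose validity depends on later bytes: push reserves the field's slot and records its position, pop re-derives the value from everything decoded since and verifies it. The real implementation (crc32Field) is unexported, so the following is only an illustrative pushDecoder for a big-endian CRC32, written as if inside the package:

package sarama

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

// crcCheck is an illustrative pushDecoder: it reserves 4 bytes for a CRC32
// and, on pop, checks them against the CRC of the bytes decoded afterwards.
type crcCheck struct {
	startOffset int
}

func (c *crcCheck) saveOffset(in int) { c.startOffset = in }

func (c *crcCheck) reserveLength() int { return 4 }

func (c *crcCheck) check(curOffset int, buf []byte) error {
	expected := binary.BigEndian.Uint32(buf[c.startOffset:])
	actual := crc32.ChecksumIEEE(buf[c.startOffset+4 : curOffset])
	if expected != actual {
		return PacketDecodingError{fmt.Sprintf("CRC didn't match (%08x != %08x)", expected, actual)}
	}
	return nil
}
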
diff --git a/vendor/github.com/Shopify/sarama/packet_encoder.go b/vendor/github.com/Shopify/sarama/packet_encoder.go
deleted file mode 100644
index 27a10f6d44b..00000000000
--- a/vendor/github.com/Shopify/sarama/packet_encoder.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// PacketEncoder is the interface providing helpers for writing with Kafka's encoding rules.
-// Types implementing Encoder only need to worry about calling methods like PutString,
-// not about how a string is represented in Kafka.
-type packetEncoder interface {
- // Primitives
- putInt8(in int8)
- putInt16(in int16)
- putInt32(in int32)
- putInt64(in int64)
- putArrayLength(in int) error
-
- // Collections
- putBytes(in []byte) error
- putRawBytes(in []byte) error
- putString(in string) error
- putStringArray(in []string) error
- putInt32Array(in []int32) error
- putInt64Array(in []int64) error
-
- // Provide the current offset to record the batch size metric
- offset() int
-
- // Stacks, see PushEncoder
- push(in pushEncoder)
- pop() error
-
- // To record metrics when provided
- metricRegistry() metrics.Registry
-}
-
-// PushEncoder is the interface for encoding fields like CRCs and lengths where the value
-// of the field depends on what is encoded after it in the packet. Start them with PacketEncoder.Push() where
-// the actual value is located in the packet, then PacketEncoder.Pop() them when all the bytes they
-// depend upon have been written.
-type pushEncoder interface {
- // Saves the offset into the output buffer as the location to actually write the calculated value when able.
- saveOffset(in int)
-
- // Returns the length of data to reserve for the output of this encoder (e.g. 4 bytes for a CRC32).
- reserveLength() int
-
- // Indicates that all required data is now available to calculate and write the field.
- // SaveOffset is guaranteed to have been called first. The implementation should write ReserveLength() bytes
- // of data to the saved offset, based on the data between the saved offset and curOffset.
- run(curOffset int, buf []byte) error
-}
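
The encoding twin works the same way in reverse; the lengthField pushed by produce_request.go and request.go below is the canonical case: reserve four bytes, then back-fill them on pop with the size of everything written since. The real type is unexported, so this sketch of such a pushEncoder uses assumed names:

package sarama

import "encoding/binary"

// lengthPrefix is an illustrative pushEncoder: it reserves an int32 slot and,
// on pop, back-fills it with the number of bytes written after the slot.
type lengthPrefix struct {
	startOffset int
}

func (l *lengthPrefix) saveOffset(in int) { l.startOffset = in }

func (l *lengthPrefix) reserveLength() int { return 4 }

func (l *lengthPrefix) run(curOffset int, buf []byte) error {
	binary.BigEndian.PutUint32(buf[l.startOffset:], uint32(curOffset-l.startOffset-4))
	return nil
}
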
diff --git a/vendor/github.com/Shopify/sarama/partitioner.go b/vendor/github.com/Shopify/sarama/partitioner.go
deleted file mode 100644
index d24199da9c2..00000000000
--- a/vendor/github.com/Shopify/sarama/partitioner.go
+++ /dev/null
@@ -1,123 +0,0 @@
-package sarama
-
-import (
- "hash"
- "hash/fnv"
- "math/rand"
- "time"
-)
-
-// Partitioner is anything that, given a Kafka message and a number of partitions indexed [0...numPartitions-1],
-// decides to which partition to send the message. RandomPartitioner, RoundRobinPartitioner and HashPartitioner are provided
-// as simple default implementations.
-type Partitioner interface {
- // Partition takes a message and partition count and chooses a partition
- Partition(message *ProducerMessage, numPartitions int32) (int32, error)
-
- // RequiresConsistency indicates to the user of the partitioner whether the
- // mapping of key->partition is consistent or not. Specifically, if a
- // partitioner requires consistency then it must be allowed to choose from all
- // partitions (even ones known to be unavailable), and its choice must be
- // respected by the caller. The obvious example is the HashPartitioner.
- RequiresConsistency() bool
-}
-
-// PartitionerConstructor is the type for a function capable of constructing new Partitioners.
-type PartitionerConstructor func(topic string) Partitioner
-
-type manualPartitioner struct{}
-
-// NewManualPartitioner returns a Partitioner which uses the partition manually set in the provided
-// ProducerMessage's Partition field as the partition to produce to.
-func NewManualPartitioner(topic string) Partitioner {
- return new(manualPartitioner)
-}
-
-func (p *manualPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- return message.Partition, nil
-}
-
-func (p *manualPartitioner) RequiresConsistency() bool {
- return true
-}
-
-type randomPartitioner struct {
- generator *rand.Rand
-}
-
-// NewRandomPartitioner returns a Partitioner which chooses a random partition each time.
-func NewRandomPartitioner(topic string) Partitioner {
- p := new(randomPartitioner)
- p.generator = rand.New(rand.NewSource(time.Now().UTC().UnixNano()))
- return p
-}
-
-func (p *randomPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- return int32(p.generator.Intn(int(numPartitions))), nil
-}
-
-func (p *randomPartitioner) RequiresConsistency() bool {
- return false
-}
-
-type roundRobinPartitioner struct {
- partition int32
-}
-
-// NewRoundRobinPartitioner returns a Partitioner which walks through the available partitions one at a time.
-func NewRoundRobinPartitioner(topic string) Partitioner {
- return &roundRobinPartitioner{}
-}
-
-func (p *roundRobinPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- if p.partition >= numPartitions {
- p.partition = 0
- }
- ret := p.partition
- p.partition++
- return ret, nil
-}
-
-func (p *roundRobinPartitioner) RequiresConsistency() bool {
- return false
-}
-
-type hashPartitioner struct {
- random Partitioner
- hasher hash.Hash32
-}
-
-// NewHashPartitioner returns a Partitioner which behaves as follows. If the message's key is nil then a
-// random partition is chosen. Otherwise the FNV-1a hash of the encoded bytes of the message key is used,
-// modulo the number of partitions. This ensures that messages with the same key always end up on the
-// same partition.
-func NewHashPartitioner(topic string) Partitioner {
- p := new(hashPartitioner)
- p.random = NewRandomPartitioner(topic)
- p.hasher = fnv.New32a()
- return p
-}
-
-func (p *hashPartitioner) Partition(message *ProducerMessage, numPartitions int32) (int32, error) {
- if message.Key == nil {
- return p.random.Partition(message, numPartitions)
- }
- bytes, err := message.Key.Encode()
- if err != nil {
- return -1, err
- }
- p.hasher.Reset()
- _, err = p.hasher.Write(bytes)
- if err != nil {
- return -1, err
- }
- partition := int32(p.hasher.Sum32()) % numPartitions
- if partition < 0 {
- partition = -partition
- }
- return partition, nil
-}
-
-func (p *hashPartitioner) RequiresConsistency() bool {
- return true
-}
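
In client code the partitioner is chosen per producer through the config (Producer.Partitioner takes one of the constructors above), and the manual strategy pairs with the ProducerMessage.Partition field. A brief sketch:

package main

import "github.com/Shopify/sarama"

func main() {
	config := sarama.NewConfig()

	// Keyed partitioning: the same key always lands on the same partition.
	config.Producer.Partitioner = sarama.NewHashPartitioner

	// Or spread unkeyed load evenly:
	// config.Producer.Partitioner = sarama.NewRoundRobinPartitioner

	// Or route explicitly; the partitioner then just echoes this field:
	config.Producer.Partitioner = sarama.NewManualPartitioner
	msg := &sarama.ProducerMessage{Topic: "events", Partition: 3}
	_ = msg
}
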
diff --git a/vendor/github.com/Shopify/sarama/prep_encoder.go b/vendor/github.com/Shopify/sarama/prep_encoder.go
deleted file mode 100644
index fd5ea0f9194..00000000000
--- a/vendor/github.com/Shopify/sarama/prep_encoder.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package sarama
-
-import (
- "fmt"
- "math"
-
- "github.com/rcrowley/go-metrics"
-)
-
-type prepEncoder struct {
- length int
-}
-
-// primitives
-
-func (pe *prepEncoder) putInt8(in int8) {
- pe.length++
-}
-
-func (pe *prepEncoder) putInt16(in int16) {
- pe.length += 2
-}
-
-func (pe *prepEncoder) putInt32(in int32) {
- pe.length += 4
-}
-
-func (pe *prepEncoder) putInt64(in int64) {
- pe.length += 8
-}
-
-func (pe *prepEncoder) putArrayLength(in int) error {
- if in > math.MaxInt32 {
- return PacketEncodingError{fmt.Sprintf("array too long (%d)", in)}
- }
- pe.length += 4
- return nil
-}
-
-// arrays
-
-func (pe *prepEncoder) putBytes(in []byte) error {
- pe.length += 4
- if in == nil {
- return nil
- }
- if len(in) > math.MaxInt32 {
- return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
- }
- pe.length += len(in)
- return nil
-}
-
-func (pe *prepEncoder) putRawBytes(in []byte) error {
- if len(in) > math.MaxInt32 {
- return PacketEncodingError{fmt.Sprintf("byteslice too long (%d)", len(in))}
- }
- pe.length += len(in)
- return nil
-}
-
-func (pe *prepEncoder) putString(in string) error {
- pe.length += 2
- if len(in) > math.MaxInt16 {
- return PacketEncodingError{fmt.Sprintf("string too long (%d)", len(in))}
- }
- pe.length += len(in)
- return nil
-}
-
-func (pe *prepEncoder) putStringArray(in []string) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
-
- for _, str := range in {
- if err := pe.putString(str); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (pe *prepEncoder) putInt32Array(in []int32) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
- pe.length += 4 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) putInt64Array(in []int64) error {
- err := pe.putArrayLength(len(in))
- if err != nil {
- return err
- }
- pe.length += 8 * len(in)
- return nil
-}
-
-func (pe *prepEncoder) offset() int {
- return pe.length
-}
-
-// stackable
-
-func (pe *prepEncoder) push(in pushEncoder) {
- pe.length += in.reserveLength()
-}
-
-func (pe *prepEncoder) pop() error {
- return nil
-}
-
-// we do not record metrics during the prep encoder pass
-func (pe *prepEncoder) metricRegistry() metrics.Registry {
- return nil
-}
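
prepEncoder is the first half of sarama's two-pass encoding: every put* above merely advances pe.length, so pass one computes the exact wire size, after which a buffer of that size is allocated and the realEncoder (further below) writes into it. A sketch shaped like the unexported encode helper that produce_set.go calls later; the function name here is an assumption:

package sarama

import "github.com/rcrowley/go-metrics"

// twoPassEncode sizes obj with a prepEncoder, then renders it with a
// realEncoder into a single exact-size allocation (an illustrative stand-in
// for sarama's unexported encode helper).
func twoPassEncode(obj encoder, registry metrics.Registry) ([]byte, error) {
	if obj == nil {
		return nil, nil
	}

	var prep prepEncoder
	if err := obj.encode(&prep); err != nil { // pass 1: only count bytes
		return nil, err
	}

	enc := realEncoder{raw: make([]byte, prep.length), registry: registry}
	if err := obj.encode(&enc); err != nil { // pass 2: actually write them
		return nil, err
	}
	return enc.raw, nil
}
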
diff --git a/vendor/github.com/Shopify/sarama/produce_request.go b/vendor/github.com/Shopify/sarama/produce_request.go
deleted file mode 100644
index 40dc8015148..00000000000
--- a/vendor/github.com/Shopify/sarama/produce_request.go
+++ /dev/null
@@ -1,209 +0,0 @@
-package sarama
-
-import "github.com/rcrowley/go-metrics"
-
-// RequiredAcks is used in Produce Requests to tell the broker how many replica acknowledgements
-// it must see before responding. Any of the constants defined here are valid. On broker versions
-// prior to 0.8.2.0 any other positive int16 is also valid (the broker will wait for that many
-// acknowledgements) but in 0.8.2.0 and later this will raise an exception (it has been replaced
-// by setting the `min.insync.replicas` value in the broker's configuration).
-type RequiredAcks int16
-
-const (
- // NoResponse doesn't send any response; the TCP ACK is all you get.
- NoResponse RequiredAcks = 0
- // WaitForLocal waits for only the local commit to succeed before responding.
- WaitForLocal RequiredAcks = 1
- // WaitForAll waits for all in-sync replicas to commit before responding.
- // The minimum number of in-sync replicas is configured on the broker via
- // the `min.insync.replicas` configuration key.
- WaitForAll RequiredAcks = -1
-)
-
-type ProduceRequest struct {
- RequiredAcks RequiredAcks
- Timeout int32
- Version int16 // v1 requires Kafka 0.9, v2 requires Kafka 0.10
- msgSets map[string]map[int32]*MessageSet
-}
-
-func (r *ProduceRequest) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.RequiredAcks))
- pe.putInt32(r.Timeout)
- err := pe.putArrayLength(len(r.msgSets))
- if err != nil {
- return err
- }
- metricRegistry := pe.metricRegistry()
- var batchSizeMetric metrics.Histogram
- var compressionRatioMetric metrics.Histogram
- if metricRegistry != nil {
- batchSizeMetric = getOrRegisterHistogram("batch-size", metricRegistry)
- compressionRatioMetric = getOrRegisterHistogram("compression-ratio", metricRegistry)
- }
-
- totalRecordCount := int64(0)
- for topic, partitions := range r.msgSets {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- topicRecordCount := int64(0)
- var topicCompressionRatioMetric metrics.Histogram
- if metricRegistry != nil {
- topicCompressionRatioMetric = getOrRegisterTopicHistogram("compression-ratio", topic, metricRegistry)
- }
- for id, msgSet := range partitions {
- startOffset := pe.offset()
- pe.putInt32(id)
- pe.push(&lengthField{})
- err = msgSet.encode(pe)
- if err != nil {
- return err
- }
- err = pe.pop()
- if err != nil {
- return err
- }
- if metricRegistry != nil {
- for _, messageBlock := range msgSet.Messages {
- // Is this a fake "message" wrapping real messages?
- if messageBlock.Msg.Set != nil {
- topicRecordCount += int64(len(messageBlock.Msg.Set.Messages))
- } else {
- // A single uncompressed message
- topicRecordCount++
- }
- // Better safe than sorry when computing the compression ratio
- if messageBlock.Msg.compressedSize != 0 {
- compressionRatio := float64(len(messageBlock.Msg.Value)) /
- float64(messageBlock.Msg.compressedSize)
- // Histograms do not support decimal values, so multiply by 100 for better precision
- intCompressionRatio := int64(100 * compressionRatio)
- compressionRatioMetric.Update(intCompressionRatio)
- topicCompressionRatioMetric.Update(intCompressionRatio)
- }
- }
- batchSize := int64(pe.offset() - startOffset)
- batchSizeMetric.Update(batchSize)
- getOrRegisterTopicHistogram("batch-size", topic, metricRegistry).Update(batchSize)
- }
- }
- if topicRecordCount > 0 {
- getOrRegisterTopicMeter("record-send-rate", topic, metricRegistry).Mark(topicRecordCount)
- getOrRegisterTopicHistogram("records-per-request", topic, metricRegistry).Update(topicRecordCount)
- totalRecordCount += topicRecordCount
- }
- }
- if totalRecordCount > 0 {
- metrics.GetOrRegisterMeter("record-send-rate", metricRegistry).Mark(totalRecordCount)
- getOrRegisterHistogram("records-per-request", metricRegistry).Update(totalRecordCount)
- }
-
- return nil
-}
-
-func (r *ProduceRequest) decode(pd packetDecoder, version int16) error {
- requiredAcks, err := pd.getInt16()
- if err != nil {
- return err
- }
- r.RequiredAcks = RequiredAcks(requiredAcks)
- if r.Timeout, err = pd.getInt32(); err != nil {
- return err
- }
- topicCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if topicCount == 0 {
- return nil
- }
- r.msgSets = make(map[string]map[int32]*MessageSet)
- for i := 0; i < topicCount; i++ {
- topic, err := pd.getString()
- if err != nil {
- return err
- }
- partitionCount, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- r.msgSets[topic] = make(map[int32]*MessageSet)
- for j := 0; j < partitionCount; j++ {
- partition, err := pd.getInt32()
- if err != nil {
- return err
- }
- messageSetSize, err := pd.getInt32()
- if err != nil {
- return err
- }
- msgSetDecoder, err := pd.getSubset(int(messageSetSize))
- if err != nil {
- return err
- }
- msgSet := &MessageSet{}
- err = msgSet.decode(msgSetDecoder)
- if err != nil {
- return err
- }
- r.msgSets[topic][partition] = msgSet
- }
- }
- return nil
-}
-
-func (r *ProduceRequest) key() int16 {
- return 0
-}
-
-func (r *ProduceRequest) version() int16 {
- return r.Version
-}
-
-func (r *ProduceRequest) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- default:
- return minVersion
- }
-}
-
-func (r *ProduceRequest) AddMessage(topic string, partition int32, msg *Message) {
- if r.msgSets == nil {
- r.msgSets = make(map[string]map[int32]*MessageSet)
- }
-
- if r.msgSets[topic] == nil {
- r.msgSets[topic] = make(map[int32]*MessageSet)
- }
-
- set := r.msgSets[topic][partition]
-
- if set == nil {
- set = new(MessageSet)
- r.msgSets[topic][partition] = set
- }
-
- set.addMessage(msg)
-}
-
-func (r *ProduceRequest) AddSet(topic string, partition int32, set *MessageSet) {
- if r.msgSets == nil {
- r.msgSets = make(map[string]map[int32]*MessageSet)
- }
-
- if r.msgSets[topic] == nil {
- r.msgSets[topic] = make(map[int32]*MessageSet)
- }
-
- r.msgSets[topic][partition] = set
-}
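
The acknowledgement level above is normally set once on the producer config rather than per request; a short sketch trading latency for durability:

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	config.Producer.RequiredAcks = sarama.WaitForAll // all in-sync replicas must ack
	config.Producer.Timeout = 10 * time.Second       // broker-side ack timeout
	_ = config
}
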
diff --git a/vendor/github.com/Shopify/sarama/produce_response.go b/vendor/github.com/Shopify/sarama/produce_response.go
deleted file mode 100644
index 195abcb812e..00000000000
--- a/vendor/github.com/Shopify/sarama/produce_response.go
+++ /dev/null
@@ -1,158 +0,0 @@
-package sarama
-
-import "time"
-
-type ProduceResponseBlock struct {
- Err KError
- Offset int64
- // only provided if Version >= 2 and the broker is configured with `LogAppendTime`
- Timestamp time.Time
-}
-
-func (b *ProduceResponseBlock) decode(pd packetDecoder, version int16) (err error) {
- tmp, err := pd.getInt16()
- if err != nil {
- return err
- }
- b.Err = KError(tmp)
-
- b.Offset, err = pd.getInt64()
- if err != nil {
- return err
- }
-
- if version >= 2 {
- if millis, err := pd.getInt64(); err != nil {
- return err
- } else if millis != -1 {
- b.Timestamp = time.Unix(millis/1000, (millis%1000)*int64(time.Millisecond))
- }
- }
-
- return nil
-}
-
-type ProduceResponse struct {
- Blocks map[string]map[int32]*ProduceResponseBlock
- Version int16
- ThrottleTime time.Duration // only provided if Version >= 1
-}
-
-func (r *ProduceResponse) decode(pd packetDecoder, version int16) (err error) {
- r.Version = version
-
- numTopics, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks = make(map[string]map[int32]*ProduceResponseBlock, numTopics)
- for i := 0; i < numTopics; i++ {
- name, err := pd.getString()
- if err != nil {
- return err
- }
-
- numBlocks, err := pd.getArrayLength()
- if err != nil {
- return err
- }
-
- r.Blocks[name] = make(map[int32]*ProduceResponseBlock, numBlocks)
-
- for j := 0; j < numBlocks; j++ {
- id, err := pd.getInt32()
- if err != nil {
- return err
- }
-
- block := new(ProduceResponseBlock)
- err = block.decode(pd, version)
- if err != nil {
- return err
- }
- r.Blocks[name][id] = block
- }
- }
-
- if r.Version >= 1 {
- if millis, err := pd.getInt32(); err != nil {
- return err
- } else {
- r.ThrottleTime = time.Duration(millis) * time.Millisecond
- }
- }
-
- return nil
-}
-
-func (r *ProduceResponse) encode(pe packetEncoder) error {
- err := pe.putArrayLength(len(r.Blocks))
- if err != nil {
- return err
- }
- for topic, partitions := range r.Blocks {
- err = pe.putString(topic)
- if err != nil {
- return err
- }
- err = pe.putArrayLength(len(partitions))
- if err != nil {
- return err
- }
- for id, prb := range partitions {
- pe.putInt32(id)
- pe.putInt16(int16(prb.Err))
- pe.putInt64(prb.Offset)
- }
- }
- if r.Version >= 1 {
- pe.putInt32(int32(r.ThrottleTime / time.Millisecond))
- }
- return nil
-}
-
-func (r *ProduceResponse) key() int16 {
- return 0
-}
-
-func (r *ProduceResponse) version() int16 {
- return r.Version
-}
-
-func (r *ProduceResponse) requiredVersion() KafkaVersion {
- switch r.Version {
- case 1:
- return V0_9_0_0
- case 2:
- return V0_10_0_0
- default:
- return minVersion
- }
-}
-
-func (r *ProduceResponse) GetBlock(topic string, partition int32) *ProduceResponseBlock {
- if r.Blocks == nil {
- return nil
- }
-
- if r.Blocks[topic] == nil {
- return nil
- }
-
- return r.Blocks[topic][partition]
-}
-
-// Testing API
-
-func (r *ProduceResponse) AddTopicPartition(topic string, partition int32, err KError) {
- if r.Blocks == nil {
- r.Blocks = make(map[string]map[int32]*ProduceResponseBlock)
- }
- byTopic, ok := r.Blocks[topic]
- if !ok {
- byTopic = make(map[int32]*ProduceResponseBlock)
- r.Blocks[topic] = byTopic
- }
- byTopic[partition] = &ProduceResponseBlock{Err: err}
-}
diff --git a/vendor/github.com/Shopify/sarama/produce_set.go b/vendor/github.com/Shopify/sarama/produce_set.go
deleted file mode 100644
index 158d9c4754c..00000000000
--- a/vendor/github.com/Shopify/sarama/produce_set.go
+++ /dev/null
@@ -1,176 +0,0 @@
-package sarama
-
-import "time"
-
-type partitionSet struct {
- msgs []*ProducerMessage
- setToSend *MessageSet
- bufferBytes int
-}
-
-type produceSet struct {
- parent *asyncProducer
- msgs map[string]map[int32]*partitionSet
-
- bufferBytes int
- bufferCount int
-}
-
-func newProduceSet(parent *asyncProducer) *produceSet {
- return &produceSet{
- msgs: make(map[string]map[int32]*partitionSet),
- parent: parent,
- }
-}
-
-func (ps *produceSet) add(msg *ProducerMessage) error {
- var err error
- var key, val []byte
-
- if msg.Key != nil {
- if key, err = msg.Key.Encode(); err != nil {
- return err
- }
- }
-
- if msg.Value != nil {
- if val, err = msg.Value.Encode(); err != nil {
- return err
- }
- }
-
- partitions := ps.msgs[msg.Topic]
- if partitions == nil {
- partitions = make(map[int32]*partitionSet)
- ps.msgs[msg.Topic] = partitions
- }
-
- set := partitions[msg.Partition]
- if set == nil {
- set = &partitionSet{setToSend: new(MessageSet)}
- partitions[msg.Partition] = set
- }
-
- set.msgs = append(set.msgs, msg)
- msgToSend := &Message{Codec: CompressionNone, Key: key, Value: val}
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- if msg.Timestamp.IsZero() {
- msgToSend.Timestamp = time.Now()
- } else {
- msgToSend.Timestamp = msg.Timestamp
- }
- msgToSend.Version = 1
- }
- set.setToSend.addMessage(msgToSend)
-
- size := producerMessageOverhead + len(key) + len(val)
- set.bufferBytes += size
- ps.bufferBytes += size
- ps.bufferCount++
-
- return nil
-}
-
-func (ps *produceSet) buildRequest() *ProduceRequest {
- req := &ProduceRequest{
- RequiredAcks: ps.parent.conf.Producer.RequiredAcks,
- Timeout: int32(ps.parent.conf.Producer.Timeout / time.Millisecond),
- }
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- req.Version = 2
- }
-
- for topic, partitionSet := range ps.msgs {
- for partition, set := range partitionSet {
- if ps.parent.conf.Producer.Compression == CompressionNone {
- req.AddSet(topic, partition, set.setToSend)
- } else {
- // When compression is enabled, the entire set for each partition is compressed
- // and sent as the payload of a single fake "message" with the appropriate codec
- // set and no key. When the server sees a message with a compression codec, it
- // decompresses the payload and treats the result as its message set.
- payload, err := encode(set.setToSend, ps.parent.conf.MetricRegistry)
- if err != nil {
- Logger.Println(err) // if this happens, it's basically our fault.
- panic(err)
- }
- compMsg := &Message{
- Codec: ps.parent.conf.Producer.Compression,
- Key: nil,
- Value: payload,
- Set: set.setToSend, // Provide the underlying message set for accurate metrics
- }
- if ps.parent.conf.Version.IsAtLeast(V0_10_0_0) {
- compMsg.Version = 1
- compMsg.Timestamp = set.setToSend.Messages[0].Msg.Timestamp
- }
- req.AddMessage(topic, partition, compMsg)
- }
- }
- }
-
- return req
-}
-
-func (ps *produceSet) eachPartition(cb func(topic string, partition int32, msgs []*ProducerMessage)) {
- for topic, partitionSet := range ps.msgs {
- for partition, set := range partitionSet {
- cb(topic, partition, set.msgs)
- }
- }
-}
-
-func (ps *produceSet) dropPartition(topic string, partition int32) []*ProducerMessage {
- if ps.msgs[topic] == nil {
- return nil
- }
- set := ps.msgs[topic][partition]
- if set == nil {
- return nil
- }
- ps.bufferBytes -= set.bufferBytes
- ps.bufferCount -= len(set.msgs)
- delete(ps.msgs[topic], partition)
- return set.msgs
-}
-
-func (ps *produceSet) wouldOverflow(msg *ProducerMessage) bool {
- switch {
- // Would we overflow our maximum possible size-on-the-wire? 10KiB is arbitrary overhead for safety.
- case ps.bufferBytes+msg.byteSize() >= int(MaxRequestSize-(10*1024)):
- return true
- // Would we overflow the size-limit of a compressed message-batch for this partition?
- case ps.parent.conf.Producer.Compression != CompressionNone &&
- ps.msgs[msg.Topic] != nil && ps.msgs[msg.Topic][msg.Partition] != nil &&
- ps.msgs[msg.Topic][msg.Partition].bufferBytes+msg.byteSize() >= ps.parent.conf.Producer.MaxMessageBytes:
- return true
- // Would we overflow simply in number of messages?
- case ps.parent.conf.Producer.Flush.MaxMessages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.MaxMessages:
- return true
- default:
- return false
- }
-}
-
-func (ps *produceSet) readyToFlush() bool {
- switch {
- // If we don't have any messages, nothing else matters
- case ps.empty():
- return false
- // If all three config values are 0, we always flush as-fast-as-possible
- case ps.parent.conf.Producer.Flush.Frequency == 0 && ps.parent.conf.Producer.Flush.Bytes == 0 && ps.parent.conf.Producer.Flush.Messages == 0:
- return true
- // If we've passed the message trigger-point
- case ps.parent.conf.Producer.Flush.Messages > 0 && ps.bufferCount >= ps.parent.conf.Producer.Flush.Messages:
- return true
- // If we've passed the byte trigger-point
- case ps.parent.conf.Producer.Flush.Bytes > 0 && ps.bufferBytes >= ps.parent.conf.Producer.Flush.Bytes:
- return true
- default:
- return false
- }
-}
-
-func (ps *produceSet) empty() bool {
- return ps.bufferCount == 0
-}
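
wouldOverflow and readyToFlush above are driven entirely by the producer's flush configuration: the async producer flushes as soon as any trigger fires, and as fast as possible when all three are zero. A sketch of the knobs (values are illustrative):

package main

import (
	"time"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()

	// Any one of these firing makes readyToFlush() true:
	config.Producer.Flush.Frequency = 500 * time.Millisecond // time trigger
	config.Producer.Flush.Bytes = 64 * 1024                  // byte trigger
	config.Producer.Flush.Messages = 1000                    // message trigger

	// Hard cap checked by wouldOverflow(); 0 means unlimited.
	config.Producer.Flush.MaxMessages = 5000
	_ = config
}
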
diff --git a/vendor/github.com/Shopify/sarama/real_decoder.go b/vendor/github.com/Shopify/sarama/real_decoder.go
deleted file mode 100644
index a0141af079c..00000000000
--- a/vendor/github.com/Shopify/sarama/real_decoder.go
+++ /dev/null
@@ -1,259 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "math"
-)
-
-var errInvalidArrayLength = PacketDecodingError{"invalid array length"}
-var errInvalidByteSliceLength = PacketDecodingError{"invalid byteslice length"}
-var errInvalidStringLength = PacketDecodingError{"invalid string length"}
-var errInvalidSubsetSize = PacketDecodingError{"invalid subset size"}
-
-type realDecoder struct {
- raw []byte
- off int
- stack []pushDecoder
-}
-
-// primitives
-
-func (rd *realDecoder) getInt8() (int8, error) {
- if rd.remaining() < 1 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int8(rd.raw[rd.off])
- rd.off++
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt16() (int16, error) {
- if rd.remaining() < 2 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int16(binary.BigEndian.Uint16(rd.raw[rd.off:]))
- rd.off += 2
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt32() (int32, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- return tmp, nil
-}
-
-func (rd *realDecoder) getInt64() (int64, error) {
- if rd.remaining() < 8 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
- rd.off += 8
- return tmp, nil
-}
-
-func (rd *realDecoder) getArrayLength() (int, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- }
- tmp := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- if tmp > rd.remaining() {
- rd.off = len(rd.raw)
- return -1, ErrInsufficientData
- } else if tmp > 2*math.MaxUint16 {
- return -1, errInvalidArrayLength
- }
- return tmp, nil
-}
-
-// collections
-
-func (rd *realDecoder) getBytes() ([]byte, error) {
- tmp, err := rd.getInt32()
-
- if err != nil {
- return nil, err
- }
-
- n := int(tmp)
-
- switch {
- case n < -1:
- return nil, errInvalidByteSliceLength
- case n == -1:
- return nil, nil
- case n == 0:
- return make([]byte, 0), nil
- case n > rd.remaining():
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- tmpStr := rd.raw[rd.off : rd.off+n]
- rd.off += n
- return tmpStr, nil
-}
-
-func (rd *realDecoder) getString() (string, error) {
- tmp, err := rd.getInt16()
-
- if err != nil {
- return "", err
- }
-
- n := int(tmp)
-
- switch {
- case n < -1:
- return "", errInvalidStringLength
- case n == -1:
- return "", nil
- case n == 0:
- return "", nil
- case n > rd.remaining():
- rd.off = len(rd.raw)
- return "", ErrInsufficientData
- }
-
- tmpStr := string(rd.raw[rd.off : rd.off+n])
- rd.off += n
- return tmpStr, nil
-}
-
-func (rd *realDecoder) getInt32Array() ([]int32, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if rd.remaining() < 4*n {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]int32, n)
- for i := range ret {
- ret[i] = int32(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
- }
- return ret, nil
-}
-
-func (rd *realDecoder) getInt64Array() ([]int64, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if rd.remaining() < 8*n {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]int64, n)
- for i := range ret {
- ret[i] = int64(binary.BigEndian.Uint64(rd.raw[rd.off:]))
- rd.off += 8
- }
- return ret, nil
-}
-
-func (rd *realDecoder) getStringArray() ([]string, error) {
- if rd.remaining() < 4 {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
- n := int(binary.BigEndian.Uint32(rd.raw[rd.off:]))
- rd.off += 4
-
- if n == 0 {
- return nil, nil
- }
-
- if n < 0 {
- return nil, errInvalidArrayLength
- }
-
- ret := make([]string, n)
- for i := range ret {
- if str, err := rd.getString(); err != nil {
- return nil, err
- } else {
- ret[i] = str
- }
- }
- return ret, nil
-}
-
-// subsets
-
-func (rd *realDecoder) remaining() int {
- return len(rd.raw) - rd.off
-}
-
-func (rd *realDecoder) getSubset(length int) (packetDecoder, error) {
- if length < 0 {
- return nil, errInvalidSubsetSize
- } else if length > rd.remaining() {
- rd.off = len(rd.raw)
- return nil, ErrInsufficientData
- }
-
- start := rd.off
- rd.off += length
- return &realDecoder{raw: rd.raw[start:rd.off]}, nil
-}
-
-// stacks
-
-func (rd *realDecoder) push(in pushDecoder) error {
- in.saveOffset(rd.off)
-
- reserve := in.reserveLength()
- if rd.remaining() < reserve {
- rd.off = len(rd.raw)
- return ErrInsufficientData
- }
-
- rd.stack = append(rd.stack, in)
-
- rd.off += reserve
-
- return nil
-}
-
-func (rd *realDecoder) pop() error {
- // this is Go's ugly pop pattern (the inverse of append)
- in := rd.stack[len(rd.stack)-1]
- rd.stack = rd.stack[:len(rd.stack)-1]
-
- return in.check(rd.off, rd.raw)
-}
diff --git a/vendor/github.com/Shopify/sarama/real_encoder.go b/vendor/github.com/Shopify/sarama/real_encoder.go
deleted file mode 100644
index ced4267c39e..00000000000
--- a/vendor/github.com/Shopify/sarama/real_encoder.go
+++ /dev/null
@@ -1,129 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
-
- "github.com/rcrowley/go-metrics"
-)
-
-type realEncoder struct {
- raw []byte
- off int
- stack []pushEncoder
- registry metrics.Registry
-}
-
-// primitives
-
-func (re *realEncoder) putInt8(in int8) {
- re.raw[re.off] = byte(in)
- re.off++
-}
-
-func (re *realEncoder) putInt16(in int16) {
- binary.BigEndian.PutUint16(re.raw[re.off:], uint16(in))
- re.off += 2
-}
-
-func (re *realEncoder) putInt32(in int32) {
- binary.BigEndian.PutUint32(re.raw[re.off:], uint32(in))
- re.off += 4
-}
-
-func (re *realEncoder) putInt64(in int64) {
- binary.BigEndian.PutUint64(re.raw[re.off:], uint64(in))
- re.off += 8
-}
-
-func (re *realEncoder) putArrayLength(in int) error {
- re.putInt32(int32(in))
- return nil
-}
-
-// collection
-
-func (re *realEncoder) putRawBytes(in []byte) error {
- copy(re.raw[re.off:], in)
- re.off += len(in)
- return nil
-}
-
-func (re *realEncoder) putBytes(in []byte) error {
- if in == nil {
- re.putInt32(-1)
- return nil
- }
- re.putInt32(int32(len(in)))
- copy(re.raw[re.off:], in)
- re.off += len(in)
- return nil
-}
-
-func (re *realEncoder) putString(in string) error {
- re.putInt16(int16(len(in)))
- copy(re.raw[re.off:], in)
- re.off += len(in)
- return nil
-}
-
-func (re *realEncoder) putStringArray(in []string) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
-
- for _, val := range in {
- if err := re.putString(val); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (re *realEncoder) putInt32Array(in []int32) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
- for _, val := range in {
- re.putInt32(val)
- }
- return nil
-}
-
-func (re *realEncoder) putInt64Array(in []int64) error {
- err := re.putArrayLength(len(in))
- if err != nil {
- return err
- }
- for _, val := range in {
- re.putInt64(val)
- }
- return nil
-}
-
-func (re *realEncoder) offset() int {
- return re.off
-}
-
-// stacks
-
-func (re *realEncoder) push(in pushEncoder) {
- in.saveOffset(re.off)
- re.off += in.reserveLength()
- re.stack = append(re.stack, in)
-}
-
-func (re *realEncoder) pop() error {
- // this is go's ugly pop pattern (the inverse of append)
- in := re.stack[len(re.stack)-1]
- re.stack = re.stack[:len(re.stack)-1]
-
- return in.run(re.off, re.raw)
-}
-
-// we do record metrics during the real encoder pass
-func (re *realEncoder) metricRegistry() metrics.Registry {
- return re.registry
-}
diff --git a/vendor/github.com/Shopify/sarama/request.go b/vendor/github.com/Shopify/sarama/request.go
deleted file mode 100644
index 73310ca8705..00000000000
--- a/vendor/github.com/Shopify/sarama/request.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package sarama
-
-import (
- "encoding/binary"
- "fmt"
- "io"
-)
-
-type protocolBody interface {
- encoder
- versionedDecoder
- key() int16
- version() int16
- requiredVersion() KafkaVersion
-}
-
-type request struct {
- correlationID int32
- clientID string
- body protocolBody
-}
-
-func (r *request) encode(pe packetEncoder) (err error) {
- pe.push(&lengthField{})
- pe.putInt16(r.body.key())
- pe.putInt16(r.body.version())
- pe.putInt32(r.correlationID)
- err = pe.putString(r.clientID)
- if err != nil {
- return err
- }
- err = r.body.encode(pe)
- if err != nil {
- return err
- }
- return pe.pop()
-}
-
-func (r *request) decode(pd packetDecoder) (err error) {
- var key int16
- if key, err = pd.getInt16(); err != nil {
- return err
- }
- var version int16
- if version, err = pd.getInt16(); err != nil {
- return err
- }
- if r.correlationID, err = pd.getInt32(); err != nil {
- return err
- }
-	if r.clientID, err = pd.getString(); err != nil {
-		return err
-	}
-
- r.body = allocateBody(key, version)
- if r.body == nil {
- return PacketDecodingError{fmt.Sprintf("unknown request key (%d)", key)}
- }
- return r.body.decode(pd, version)
-}
-
-func decodeRequest(r io.Reader) (req *request, bytesRead int, err error) {
- lengthBytes := make([]byte, 4)
- if _, err := io.ReadFull(r, lengthBytes); err != nil {
- return nil, bytesRead, err
- }
- bytesRead += len(lengthBytes)
-
- length := int32(binary.BigEndian.Uint32(lengthBytes))
- if length <= 4 || length > MaxRequestSize {
- return nil, bytesRead, PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", length)}
- }
-
- encodedReq := make([]byte, length)
- if _, err := io.ReadFull(r, encodedReq); err != nil {
- return nil, bytesRead, err
- }
- bytesRead += len(encodedReq)
-
- req = &request{}
- if err := decode(encodedReq, req); err != nil {
- return nil, bytesRead, err
- }
- return req, bytesRead, nil
-}
-
-func allocateBody(key, version int16) protocolBody {
- switch key {
- case 0:
- return &ProduceRequest{}
- case 1:
- return &FetchRequest{}
- case 2:
- return &OffsetRequest{Version: version}
- case 3:
- return &MetadataRequest{}
- case 8:
- return &OffsetCommitRequest{Version: version}
- case 9:
- return &OffsetFetchRequest{}
- case 10:
- return &ConsumerMetadataRequest{}
- case 11:
- return &JoinGroupRequest{}
- case 12:
- return &HeartbeatRequest{}
- case 13:
- return &LeaveGroupRequest{}
- case 14:
- return &SyncGroupRequest{}
- case 15:
- return &DescribeGroupsRequest{}
- case 16:
- return &ListGroupsRequest{}
- case 17:
- return &SaslHandshakeRequest{}
- case 18:
- return &ApiVersionsRequest{}
- }
- return nil
-}
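`decodeRequest` is standard length-prefixed framing: a big-endian int32 length, then that many bytes of header and body. A self-contained sketch of just the framing step, treating the payload as opaque bytes:

```go
package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"io"
)

// writeFrame prefixes payload with its big-endian int32 length,
// the same framing decodeRequest reads back above.
func writeFrame(w io.Writer, payload []byte) error {
	var length [4]byte
	binary.BigEndian.PutUint32(length[:], uint32(len(payload)))
	if _, err := w.Write(length[:]); err != nil {
		return err
	}
	_, err := w.Write(payload)
	return err
}

// readFrame reads one length-prefixed frame.
func readFrame(r io.Reader) ([]byte, error) {
	var length [4]byte
	if _, err := io.ReadFull(r, length[:]); err != nil {
		return nil, err
	}
	payload := make([]byte, binary.BigEndian.Uint32(length[:]))
	if _, err := io.ReadFull(r, payload); err != nil {
		return nil, err
	}
	return payload, nil
}

func main() {
	var buf bytes.Buffer
	if err := writeFrame(&buf, []byte("hello")); err != nil {
		panic(err)
	}
	payload, err := readFrame(&buf)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%s\n", payload) // hello
}
```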
diff --git a/vendor/github.com/Shopify/sarama/response_header.go b/vendor/github.com/Shopify/sarama/response_header.go
deleted file mode 100644
index f3f4d27d6c4..00000000000
--- a/vendor/github.com/Shopify/sarama/response_header.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package sarama
-
-import "fmt"
-
-type responseHeader struct {
- length int32
- correlationID int32
-}
-
-func (r *responseHeader) decode(pd packetDecoder) (err error) {
- r.length, err = pd.getInt32()
- if err != nil {
- return err
- }
- if r.length <= 4 || r.length > MaxResponseSize {
- return PacketDecodingError{fmt.Sprintf("message of length %d too large or too small", r.length)}
- }
-
- r.correlationID, err = pd.getInt32()
- return err
-}
diff --git a/vendor/github.com/Shopify/sarama/sarama.go b/vendor/github.com/Shopify/sarama/sarama.go
deleted file mode 100644
index 7d5dc60d3e2..00000000000
--- a/vendor/github.com/Shopify/sarama/sarama.go
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
-Package sarama is a pure Go client library for dealing with Apache Kafka (versions 0.8 and later). It includes a high-level
-API for easily producing and consuming messages, and a low-level API for controlling bytes on the wire when the high-level
-API is insufficient. Usage examples for the high-level APIs are provided inline with their full documentation.
-
-To produce messages, use either the AsyncProducer or the SyncProducer. The AsyncProducer accepts messages on a channel
-and produces them asynchronously in the background as efficiently as possible; it is preferred in most cases.
-The SyncProducer provides a method which will block until Kafka acknowledges the message as produced. This can be
-useful but comes with two caveats: it will generally be less efficient, and the actual durability guarantees
-depend on the configured value of `Producer.RequiredAcks`. There are configurations where a message acknowledged by the
-SyncProducer can still sometimes be lost.
-
-To consume messages, use the Consumer. Note that Sarama's Consumer implementation does not currently support automatic
-consumer-group rebalancing and offset tracking. For Zookeeper-based tracking (Kafka 0.8.2 and earlier), the
-https://github.com/wvanbergen/kafka library builds on Sarama to add this support. For Kafka-based tracking (Kafka 0.9
-and later), the https://github.com/bsm/sarama-cluster library builds on Sarama to add this support.
-
-For lower-level needs, the Broker and Request/Response objects permit precise control over each connection
-and message sent on the wire; the Client provides higher-level metadata management that is shared between
-the producers and the consumer. The Request/Response objects and properties are mostly undocumented, as they line up
-exactly with the protocol fields documented by Kafka at
-https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol
-
-Metrics are exposed through https://github.com/rcrowley/go-metrics library in a local registry.
-
-Broker related metrics:
-
- +----------------------------------------------+------------+---------------------------------------------------------------+
- | Name | Type | Description |
- +----------------------------------------------+------------+---------------------------------------------------------------+
- | incoming-byte-rate | meter | Bytes/second read off all brokers |
- | incoming-byte-rate-for-broker- | meter | Bytes/second read off a given broker |
- | outgoing-byte-rate | meter | Bytes/second written off all brokers |
- | outgoing-byte-rate-for-broker- | meter | Bytes/second written off a given broker |
- | request-rate | meter | Requests/second sent to all brokers |
- | request-rate-for-broker- | meter | Requests/second sent to a given broker |
- | request-size | histogram | Distribution of the request size in bytes for all brokers |
- | request-size-for-broker- | histogram | Distribution of the request size in bytes for a given broker |
- | request-latency-in-ms | histogram | Distribution of the request latency in ms for all brokers |
- | request-latency-in-ms-for-broker- | histogram | Distribution of the request latency in ms for a given broker |
- | response-rate | meter | Responses/second received from all brokers |
- | response-rate-for-broker- | meter | Responses/second received from a given broker |
- | response-size | histogram | Distribution of the response size in bytes for all brokers |
- | response-size-for-broker- | histogram | Distribution of the response size in bytes for a given broker |
- +----------------------------------------------+------------+---------------------------------------------------------------+
-
-Note that we do not gather specific metrics for seed brokers but they are part of the "all brokers" metrics.
-
-Producer related metrics:
-
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | Name | Type | Description |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
- | batch-size | histogram | Distribution of the number of bytes sent per partition per request for all topics |
- | batch-size-for-topic- | histogram | Distribution of the number of bytes sent per partition per request for a given topic |
- | record-send-rate | meter | Records/second sent to all topics |
- | record-send-rate-for-topic- | meter | Records/second sent to a given topic |
- | records-per-request | histogram | Distribution of the number of records sent per request for all topics |
- | records-per-request-for-topic- | histogram | Distribution of the number of records sent per request for a given topic |
- | compression-ratio | histogram | Distribution of the compression ratio times 100 of record batches for all topics |
- | compression-ratio-for-topic- | histogram | Distribution of the compression ratio times 100 of record batches for a given topic |
- +-------------------------------------------+------------+--------------------------------------------------------------------------------------+
-
-*/
-package sarama
-
-import (
- "io/ioutil"
- "log"
-)
-
-// Logger is the instance of a StdLogger interface that Sarama writes connection
-// management events to. By default it is set to discard all log messages via ioutil.Discard,
-// but you can set it to redirect wherever you want.
-var Logger StdLogger = log.New(ioutil.Discard, "[Sarama] ", log.LstdFlags)
-
-// StdLogger is used to log error messages.
-type StdLogger interface {
- Print(v ...interface{})
- Printf(format string, v ...interface{})
- Println(v ...interface{})
-}
-
-// PanicHandler is called for recovering from panics spawned internally to the library (and thus
-// not recoverable by the caller's goroutine). Defaults to nil, which means panics are not recovered.
-var PanicHandler func(interface{})
-
-// MaxRequestSize is the maximum size (in bytes) of any request that Sarama will attempt to send. Trying
-// to send a request larger than this will result in a PacketEncodingError. The default of 100 MiB is aligned
-// with Kafka's default `socket.request.max.bytes`, which is the largest request the broker will attempt
-// to process.
-var MaxRequestSize int32 = 100 * 1024 * 1024
-
-// MaxResponseSize is the maximum size (in bytes) of any response that Sarama will attempt to parse. If
-// a broker returns a response message larger than this value, Sarama will return a PacketDecodingError to
-// protect the client from running out of memory. Please note that brokers do not have any natural limit on
-// the size of responses they send. In particular, they can send arbitrarily large fetch responses to consumers
-// (see https://issues.apache.org/jira/browse/KAFKA-2063).
-var MaxResponseSize int32 = 100 * 1024 * 1024
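Both `Logger` and `PanicHandler` are plain package variables, so wiring them up is a one-liner each. A short sketch against this vendored version of the package:

```go
package main

import (
	"log"
	"os"

	"github.com/Shopify/sarama"
)

func main() {
	// Replace the default ioutil.Discard logger so connection
	// management events become visible.
	sarama.Logger = log.New(os.Stdout, "[Sarama] ", log.LstdFlags)

	// Log library-internal panics instead of letting them kill the
	// process (the default nil handler does not recover them).
	sarama.PanicHandler = func(v interface{}) {
		log.Printf("recovered sarama panic: %v", v)
	}
}
```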
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go b/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
deleted file mode 100644
index fbbc8947b2e..00000000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_request.go
+++ /dev/null
@@ -1,33 +0,0 @@
-package sarama
-
-type SaslHandshakeRequest struct {
- Mechanism string
-}
-
-func (r *SaslHandshakeRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.Mechanism); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.Mechanism, err = pd.getString(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeRequest) key() int16 {
- return 17
-}
-
-func (r *SaslHandshakeRequest) version() int16 {
- return 0
-}
-
-func (r *SaslHandshakeRequest) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go b/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
deleted file mode 100644
index 8379bbb269b..00000000000
--- a/vendor/github.com/Shopify/sarama/sasl_handshake_response.go
+++ /dev/null
@@ -1,38 +0,0 @@
-package sarama
-
-type SaslHandshakeResponse struct {
- Err KError
- EnabledMechanisms []string
-}
-
-func (r *SaslHandshakeResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return pe.putStringArray(r.EnabledMechanisms)
-}
-
-func (r *SaslHandshakeResponse) decode(pd packetDecoder, version int16) error {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
-	if r.EnabledMechanisms, err = pd.getStringArray(); err != nil {
- return err
- }
-
- return nil
-}
-
-func (r *SaslHandshakeResponse) key() int16 {
- return 17
-}
-
-func (r *SaslHandshakeResponse) version() int16 {
- return 0
-}
-
-func (r *SaslHandshakeResponse) requiredVersion() KafkaVersion {
- return V0_10_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_request.go b/vendor/github.com/Shopify/sarama/sync_group_request.go
deleted file mode 100644
index fe207080e03..00000000000
--- a/vendor/github.com/Shopify/sarama/sync_group_request.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package sarama
-
-type SyncGroupRequest struct {
- GroupId string
- GenerationId int32
- MemberId string
- GroupAssignments map[string][]byte
-}
-
-func (r *SyncGroupRequest) encode(pe packetEncoder) error {
- if err := pe.putString(r.GroupId); err != nil {
- return err
- }
-
- pe.putInt32(r.GenerationId)
-
- if err := pe.putString(r.MemberId); err != nil {
- return err
- }
-
- if err := pe.putArrayLength(len(r.GroupAssignments)); err != nil {
- return err
- }
- for memberId, memberAssignment := range r.GroupAssignments {
- if err := pe.putString(memberId); err != nil {
- return err
- }
- if err := pe.putBytes(memberAssignment); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (r *SyncGroupRequest) decode(pd packetDecoder, version int16) (err error) {
- if r.GroupId, err = pd.getString(); err != nil {
- return
- }
- if r.GenerationId, err = pd.getInt32(); err != nil {
- return
- }
- if r.MemberId, err = pd.getString(); err != nil {
- return
- }
-
- n, err := pd.getArrayLength()
- if err != nil {
- return err
- }
- if n == 0 {
- return nil
- }
-
- r.GroupAssignments = make(map[string][]byte)
- for i := 0; i < n; i++ {
- memberId, err := pd.getString()
- if err != nil {
- return err
- }
- memberAssignment, err := pd.getBytes()
- if err != nil {
- return err
- }
-
- r.GroupAssignments[memberId] = memberAssignment
- }
-
- return nil
-}
-
-func (r *SyncGroupRequest) key() int16 {
- return 14
-}
-
-func (r *SyncGroupRequest) version() int16 {
- return 0
-}
-
-func (r *SyncGroupRequest) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
-
-func (r *SyncGroupRequest) AddGroupAssignment(memberId string, memberAssignment []byte) {
- if r.GroupAssignments == nil {
- r.GroupAssignments = make(map[string][]byte)
- }
-
- r.GroupAssignments[memberId] = memberAssignment
-}
-
-func (r *SyncGroupRequest) AddGroupAssignmentMember(memberId string, memberAssignment *ConsumerGroupMemberAssignment) error {
- bin, err := encode(memberAssignment, nil)
- if err != nil {
- return err
- }
-
- r.AddGroupAssignment(memberId, bin)
- return nil
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_group_response.go b/vendor/github.com/Shopify/sarama/sync_group_response.go
deleted file mode 100644
index 12aef673034..00000000000
--- a/vendor/github.com/Shopify/sarama/sync_group_response.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package sarama
-
-type SyncGroupResponse struct {
- Err KError
- MemberAssignment []byte
-}
-
-func (r *SyncGroupResponse) GetMemberAssignment() (*ConsumerGroupMemberAssignment, error) {
- assignment := new(ConsumerGroupMemberAssignment)
- err := decode(r.MemberAssignment, assignment)
- return assignment, err
-}
-
-func (r *SyncGroupResponse) encode(pe packetEncoder) error {
- pe.putInt16(int16(r.Err))
- return pe.putBytes(r.MemberAssignment)
-}
-
-func (r *SyncGroupResponse) decode(pd packetDecoder, version int16) (err error) {
-	kerr, err := pd.getInt16()
-	if err != nil {
-		return err
-	}
-	r.Err = KError(kerr)
-
- r.MemberAssignment, err = pd.getBytes()
- return
-}
-
-func (r *SyncGroupResponse) key() int16 {
- return 14
-}
-
-func (r *SyncGroupResponse) version() int16 {
- return 0
-}
-
-func (r *SyncGroupResponse) requiredVersion() KafkaVersion {
- return V0_9_0_0
-}
diff --git a/vendor/github.com/Shopify/sarama/sync_producer.go b/vendor/github.com/Shopify/sarama/sync_producer.go
deleted file mode 100644
index c77ae314077..00000000000
--- a/vendor/github.com/Shopify/sarama/sync_producer.go
+++ /dev/null
@@ -1,164 +0,0 @@
-package sarama
-
-import "sync"
-
-// SyncProducer publishes Kafka messages, blocking until they have been acknowledged. It routes messages to the correct
-// broker, refreshing metadata as appropriate, and parses responses for errors. You must call Close() on a producer
-// to avoid leaks; it may not be garbage-collected automatically when it passes out of scope.
-//
-// The SyncProducer comes with two caveats: it will generally be less efficient than the AsyncProducer, and the actual
-// durability guarantee provided when a message is acknowledged depends on the configured value of `Producer.RequiredAcks`.
-// There are configurations where a message acknowledged by the SyncProducer can still sometimes be lost.
-//
-// For implementation reasons, the SyncProducer requires `Producer.Return.Errors` and `Producer.Return.Successes` to
-// be set to true in its configuration.
-type SyncProducer interface {
-
- // SendMessage produces a given message, and returns only when it either has
- // succeeded or failed to produce. It will return the partition and the offset
- // of the produced message, or an error if the message failed to produce.
- SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error)
-
- // SendMessages produces a given set of messages, and returns only when all
- // messages in the set have either succeeded or failed. Note that messages
- // can succeed and fail individually; if some succeed and some fail,
- // SendMessages will return an error.
- SendMessages(msgs []*ProducerMessage) error
-
- // Close shuts down the producer and flushes any messages it may have buffered.
- // You must call this function before a producer object passes out of scope, as
- // it may otherwise leak memory. You must call this before calling Close on the
- // underlying client.
- Close() error
-}
-
-type syncProducer struct {
- producer *asyncProducer
- wg sync.WaitGroup
-}
-
-// NewSyncProducer creates a new SyncProducer using the given broker addresses and configuration.
-func NewSyncProducer(addrs []string, config *Config) (SyncProducer, error) {
- if config == nil {
- config = NewConfig()
- config.Producer.Return.Successes = true
- }
-
- if err := verifyProducerConfig(config); err != nil {
- return nil, err
- }
-
- p, err := NewAsyncProducer(addrs, config)
- if err != nil {
- return nil, err
- }
- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-// NewSyncProducerFromClient creates a new SyncProducer using the given client. It is still
-// necessary to call Close() on the underlying client when shutting down this producer.
-func NewSyncProducerFromClient(client Client) (SyncProducer, error) {
- if err := verifyProducerConfig(client.Config()); err != nil {
- return nil, err
- }
-
- p, err := NewAsyncProducerFromClient(client)
- if err != nil {
- return nil, err
- }
- return newSyncProducerFromAsyncProducer(p.(*asyncProducer)), nil
-}
-
-func newSyncProducerFromAsyncProducer(p *asyncProducer) *syncProducer {
- sp := &syncProducer{producer: p}
-
- sp.wg.Add(2)
- go withRecover(sp.handleSuccesses)
- go withRecover(sp.handleErrors)
-
- return sp
-}
-
-func verifyProducerConfig(config *Config) error {
- if !config.Producer.Return.Errors {
- return ConfigurationError("Producer.Return.Errors must be true to be used in a SyncProducer")
- }
- if !config.Producer.Return.Successes {
- return ConfigurationError("Producer.Return.Successes must be true to be used in a SyncProducer")
- }
- return nil
-}
-
-func (sp *syncProducer) SendMessage(msg *ProducerMessage) (partition int32, offset int64, err error) {
- oldMetadata := msg.Metadata
- defer func() {
- msg.Metadata = oldMetadata
- }()
-
- expectation := make(chan *ProducerError, 1)
- msg.Metadata = expectation
- sp.producer.Input() <- msg
-
- if err := <-expectation; err != nil {
- return -1, -1, err.Err
- }
-
- return msg.Partition, msg.Offset, nil
-}
-
-func (sp *syncProducer) SendMessages(msgs []*ProducerMessage) error {
- savedMetadata := make([]interface{}, len(msgs))
- for i := range msgs {
- savedMetadata[i] = msgs[i].Metadata
- }
- defer func() {
- for i := range msgs {
- msgs[i].Metadata = savedMetadata[i]
- }
- }()
-
- expectations := make(chan chan *ProducerError, len(msgs))
- go func() {
- for _, msg := range msgs {
- expectation := make(chan *ProducerError, 1)
- msg.Metadata = expectation
- sp.producer.Input() <- msg
- expectations <- expectation
- }
- close(expectations)
- }()
-
- var errors ProducerErrors
- for expectation := range expectations {
- if err := <-expectation; err != nil {
- errors = append(errors, err)
- }
- }
-
- if len(errors) > 0 {
- return errors
- }
- return nil
-}
-
-func (sp *syncProducer) handleSuccesses() {
- defer sp.wg.Done()
- for msg := range sp.producer.Successes() {
- expectation := msg.Metadata.(chan *ProducerError)
- expectation <- nil
- }
-}
-
-func (sp *syncProducer) handleErrors() {
- defer sp.wg.Done()
- for err := range sp.producer.Errors() {
- expectation := err.Msg.Metadata.(chan *ProducerError)
- expectation <- err
- }
-}
-
-func (sp *syncProducer) Close() error {
- sp.producer.AsyncClose()
- sp.wg.Wait()
- return nil
-}
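A minimal SyncProducer round-trip, assuming a broker at the placeholder address `localhost:9092` and a topic named `example-topic`:

```go
package main

import (
	"log"

	"github.com/Shopify/sarama"
)

func main() {
	config := sarama.NewConfig()
	// verifyProducerConfig requires both of these for a SyncProducer.
	config.Producer.Return.Successes = true
	config.Producer.Return.Errors = true

	producer, err := sarama.NewSyncProducer([]string{"localhost:9092"}, config)
	if err != nil {
		log.Fatal(err)
	}
	defer producer.Close() // flushes buffered messages; required to avoid leaks

	partition, offset, err := producer.SendMessage(&sarama.ProducerMessage{
		Topic: "example-topic",
		Value: sarama.StringEncoder("hello"),
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Printf("stored at partition %d, offset %d", partition, offset)
}
```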
diff --git a/vendor/github.com/Shopify/sarama/utils.go b/vendor/github.com/Shopify/sarama/utils.go
deleted file mode 100644
index 3cbab2d92b1..00000000000
--- a/vendor/github.com/Shopify/sarama/utils.go
+++ /dev/null
@@ -1,152 +0,0 @@
-package sarama
-
-import (
- "bufio"
- "net"
- "sort"
-)
-
-type none struct{}
-
-// make []int32 sortable so we can sort partition numbers
-type int32Slice []int32
-
-func (slice int32Slice) Len() int {
- return len(slice)
-}
-
-func (slice int32Slice) Less(i, j int) bool {
- return slice[i] < slice[j]
-}
-
-func (slice int32Slice) Swap(i, j int) {
- slice[i], slice[j] = slice[j], slice[i]
-}
-
-func dupeAndSort(input []int32) []int32 {
-	ret := make([]int32, len(input))
-	copy(ret, input)
-
- sort.Sort(int32Slice(ret))
- return ret
-}
-
-func withRecover(fn func()) {
- defer func() {
- handler := PanicHandler
- if handler != nil {
- if err := recover(); err != nil {
- handler(err)
- }
- }
- }()
-
- fn()
-}
-
-func safeAsyncClose(b *Broker) {
- tmp := b // local var prevents clobbering in goroutine
- go withRecover(func() {
- if connected, _ := tmp.Connected(); connected {
- if err := tmp.Close(); err != nil {
- Logger.Println("Error closing broker", tmp.ID(), ":", err)
- }
- }
- })
-}
-
-// Encoder is a simple interface for any type that can be encoded as an array of bytes
-// in order to be sent as the key or value of a Kafka message. Length() is provided as an
-// optimization, and must return the same as len() on the result of Encode().
-type Encoder interface {
- Encode() ([]byte, error)
- Length() int
-}
-
-// make strings and byte slices encodable for convenience so they can be used as keys
-// and/or values in kafka messages
-
-// StringEncoder implements the Encoder interface for Go strings so that they can be used
-// as the Key or Value in a ProducerMessage.
-type StringEncoder string
-
-func (s StringEncoder) Encode() ([]byte, error) {
- return []byte(s), nil
-}
-
-func (s StringEncoder) Length() int {
- return len(s)
-}
-
-// ByteEncoder implements the Encoder interface for Go byte slices so that they can be used
-// as the Key or Value in a ProducerMessage.
-type ByteEncoder []byte
-
-func (b ByteEncoder) Encode() ([]byte, error) {
- return b, nil
-}
-
-func (b ByteEncoder) Length() int {
- return len(b)
-}
-
-// bufConn wraps a net.Conn with a buffer for reads to reduce the number of
-// reads that trigger syscalls.
-type bufConn struct {
- net.Conn
- buf *bufio.Reader
-}
-
-func newBufConn(conn net.Conn) *bufConn {
- return &bufConn{
- Conn: conn,
- buf: bufio.NewReader(conn),
- }
-}
-
-func (bc *bufConn) Read(b []byte) (n int, err error) {
- return bc.buf.Read(b)
-}
-
-// KafkaVersion instances represent versions of the upstream Kafka broker.
-type KafkaVersion struct {
- // it's a struct rather than just typing the array directly to make it opaque and stop people
- // generating their own arbitrary versions
- version [4]uint
-}
-
-func newKafkaVersion(major, minor, veryMinor, patch uint) KafkaVersion {
- return KafkaVersion{
- version: [4]uint{major, minor, veryMinor, patch},
- }
-}
-
-// IsAtLeast returns true if and only if the version it is called on is
-// greater than or equal to the version passed in:
-// V1.IsAtLeast(V2) // false
-// V2.IsAtLeast(V1) // true
-func (v KafkaVersion) IsAtLeast(other KafkaVersion) bool {
- for i := range v.version {
- if v.version[i] > other.version[i] {
- return true
- } else if v.version[i] < other.version[i] {
- return false
- }
- }
- return true
-}
-
-// Effective constants defining the supported Kafka versions.
-var (
- V0_8_2_0 = newKafkaVersion(0, 8, 2, 0)
- V0_8_2_1 = newKafkaVersion(0, 8, 2, 1)
- V0_8_2_2 = newKafkaVersion(0, 8, 2, 2)
- V0_9_0_0 = newKafkaVersion(0, 9, 0, 0)
- V0_9_0_1 = newKafkaVersion(0, 9, 0, 1)
- V0_10_0_0 = newKafkaVersion(0, 10, 0, 0)
- V0_10_0_1 = newKafkaVersion(0, 10, 0, 1)
- V0_10_1_0 = newKafkaVersion(0, 10, 1, 0)
- minVersion = V0_8_2_0
-)
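The two encoders and the version type are small enough to exercise in a few lines; this sketch uses only what is declared above:

```go
package main

import (
	"fmt"

	"github.com/Shopify/sarama"
)

func main() {
	// StringEncoder and ByteEncoder both satisfy Encoder, so either can
	// serve as a message key or value; Length() avoids an early Encode().
	var key sarama.Encoder = sarama.StringEncoder("user-42")
	var value sarama.Encoder = sarama.ByteEncoder([]byte{0x01, 0x02})
	fmt.Println(key.Length(), value.Length()) // 7 2

	// Feature gates compare versions per the IsAtLeast contract.
	if sarama.V0_10_0_0.IsAtLeast(sarama.V0_9_0_0) {
		fmt.Println("0.10.0.0 >= 0.9.0.0")
	}
}
```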
diff --git a/vendor/github.com/eapache/go-resiliency/LICENSE b/vendor/github.com/eapache/go-resiliency/LICENSE
deleted file mode 100644
index 698a3f51397..00000000000
--- a/vendor/github.com/eapache/go-resiliency/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/README.md b/vendor/github.com/eapache/go-resiliency/breaker/README.md
deleted file mode 100644
index 7262bfc2825..00000000000
--- a/vendor/github.com/eapache/go-resiliency/breaker/README.md
+++ /dev/null
@@ -1,33 +0,0 @@
-circuit-breaker
-===============
-
-[Build Status](https://travis-ci.org/eapache/go-resiliency)
-[GoDoc](https://godoc.org/github.com/eapache/go-resiliency/breaker)
-
-The circuit-breaker resiliency pattern for golang.
-
-Creating a breaker takes three parameters:
-- error threshold (for opening the breaker)
-- success threshold (for closing the breaker)
-- timeout (how long to keep the breaker open)
-
-```go
-b := breaker.New(3, 1, 5*time.Second)
-
-for {
- result := b.Run(func() error {
- // communicate with some external service and
- // return an error if the communication failed
- return nil
- })
-
- switch result {
- case nil:
- // success!
- case breaker.ErrBreakerOpen:
- // our function wasn't run because the breaker was open
- default:
- // some other error
- }
-}
-```
diff --git a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go b/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
deleted file mode 100644
index f88ca7248b0..00000000000
--- a/vendor/github.com/eapache/go-resiliency/breaker/breaker.go
+++ /dev/null
@@ -1,161 +0,0 @@
-// Package breaker implements the circuit-breaker resiliency pattern for Go.
-package breaker
-
-import (
- "errors"
- "sync"
- "sync/atomic"
- "time"
-)
-
-// ErrBreakerOpen is the error returned from Run() when the function is not executed
-// because the breaker is currently open.
-var ErrBreakerOpen = errors.New("circuit breaker is open")
-
-const (
- closed uint32 = iota
- open
- halfOpen
-)
-
-// Breaker implements the circuit-breaker resiliency pattern
-type Breaker struct {
- errorThreshold, successThreshold int
- timeout time.Duration
-
- lock sync.Mutex
- state uint32
- errors, successes int
- lastError time.Time
-}
-
-// New constructs a new circuit-breaker that starts closed.
-// From closed, the breaker opens if "errorThreshold" errors are seen
-// without an error-free period of at least "timeout". From open, the
-// breaker half-closes after "timeout". From half-open, the breaker closes
-// after "successThreshold" consecutive successes, or opens on a single error.
-func New(errorThreshold, successThreshold int, timeout time.Duration) *Breaker {
- return &Breaker{
- errorThreshold: errorThreshold,
- successThreshold: successThreshold,
- timeout: timeout,
- }
-}
-
-// Run will either return ErrBreakerOpen immediately if the circuit-breaker is
-// already open, or it will run the given function and pass along its return
-// value. It is safe to call Run concurrently on the same Breaker.
-func (b *Breaker) Run(work func() error) error {
- state := atomic.LoadUint32(&b.state)
-
- if state == open {
- return ErrBreakerOpen
- }
-
- return b.doWork(state, work)
-}
-
-// Go will either return ErrBreakerOpen immediately if the circuit-breaker is
-// already open, or it will run the given function in a separate goroutine.
-// If the function is run, Go will return nil immediately, and will *not* return
-// the return value of the function. It is safe to call Go concurrently on the
-// same Breaker.
-func (b *Breaker) Go(work func() error) error {
- state := atomic.LoadUint32(&b.state)
-
- if state == open {
- return ErrBreakerOpen
- }
-
- // errcheck complains about ignoring the error return value, but
- // that's on purpose; if you want an error from a goroutine you have to
- // get it over a channel or something
- go b.doWork(state, work)
-
- return nil
-}
-
-func (b *Breaker) doWork(state uint32, work func() error) error {
- var panicValue interface{}
-
- result := func() error {
- defer func() {
- panicValue = recover()
- }()
- return work()
- }()
-
- if result == nil && panicValue == nil && state == closed {
- // short-circuit the normal, success path without contending
- // on the lock
- return nil
- }
-
- // oh well, I guess we have to contend on the lock
- b.processResult(result, panicValue)
-
- if panicValue != nil {
- // as close as Go lets us come to a "rethrow" although unfortunately
-	// we lose the original panicking location
- panic(panicValue)
- }
-
- return result
-}
-
-func (b *Breaker) processResult(result error, panicValue interface{}) {
- b.lock.Lock()
- defer b.lock.Unlock()
-
- if result == nil && panicValue == nil {
- if b.state == halfOpen {
- b.successes++
- if b.successes == b.successThreshold {
- b.closeBreaker()
- }
- }
- } else {
- if b.errors > 0 {
- expiry := b.lastError.Add(b.timeout)
- if time.Now().After(expiry) {
- b.errors = 0
- }
- }
-
- switch b.state {
- case closed:
- b.errors++
- if b.errors == b.errorThreshold {
- b.openBreaker()
- } else {
- b.lastError = time.Now()
- }
- case halfOpen:
- b.openBreaker()
- }
- }
-}
-
-func (b *Breaker) openBreaker() {
- b.changeState(open)
- go b.timer()
-}
-
-func (b *Breaker) closeBreaker() {
- b.changeState(closed)
-}
-
-func (b *Breaker) timer() {
- time.Sleep(b.timeout)
-
- b.lock.Lock()
- defer b.lock.Unlock()
-
- b.changeState(halfOpen)
-}
-
-func (b *Breaker) changeState(newState uint32) {
- b.errors = 0
- b.successes = 0
- atomic.StoreUint32(&b.state, newState)
-}
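Where the README shows `Run`, the fire-and-forget variant looks like this; note that `Go` reports only `ErrBreakerOpen`, never the work function's own error, so failures must be surfaced some other way (a channel here, purely for illustration):

```go
package main

import (
	"errors"
	"fmt"
	"time"

	"github.com/eapache/go-resiliency/breaker"
)

func main() {
	// Open after 3 errors, close after 1 success, stay open for 5s.
	b := breaker.New(3, 1, 5*time.Second)

	results := make(chan error, 1)
	err := b.Go(func() error {
		err := errors.New("simulated failure")
		results <- err // Go discards this return value, so report it ourselves
		return err
	})
	if err == breaker.ErrBreakerOpen {
		fmt.Println("skipped: breaker is open")
		return
	}
	fmt.Println("work result:", <-results)
}
```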
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.gitignore b/vendor/github.com/eapache/go-xerial-snappy/.gitignore
deleted file mode 100644
index daf913b1b34..00000000000
--- a/vendor/github.com/eapache/go-xerial-snappy/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml b/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
deleted file mode 100644
index d6cf4f1fa1b..00000000000
--- a/vendor/github.com/eapache/go-xerial-snappy/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-
-go:
-- 1.5.4
-- 1.6.1
-
-sudo: false
diff --git a/vendor/github.com/eapache/go-xerial-snappy/LICENSE b/vendor/github.com/eapache/go-xerial-snappy/LICENSE
deleted file mode 100644
index 5bf3688d9e4..00000000000
--- a/vendor/github.com/eapache/go-xerial-snappy/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/eapache/go-xerial-snappy/README.md b/vendor/github.com/eapache/go-xerial-snappy/README.md
deleted file mode 100644
index 3f2695c7282..00000000000
--- a/vendor/github.com/eapache/go-xerial-snappy/README.md
+++ /dev/null
@@ -1,13 +0,0 @@
-# go-xerial-snappy
-
-[Build Status](https://travis-ci.org/eapache/go-xerial-snappy)
-
-Xerial-compatible Snappy framing support for golang.
-
-Packages using Xerial for snappy encoding use a framing format incompatible with
-basically everything else in existence. This package wraps Go's built-in snappy
-package to support it.
-
-Apps that use this format include Apache Kafka (see
-https://github.com/dpkp/kafka-python/issues/126#issuecomment-35478921 for
-details).
diff --git a/vendor/github.com/eapache/go-xerial-snappy/snappy.go b/vendor/github.com/eapache/go-xerial-snappy/snappy.go
deleted file mode 100644
index b8f8b51fcef..00000000000
--- a/vendor/github.com/eapache/go-xerial-snappy/snappy.go
+++ /dev/null
@@ -1,43 +0,0 @@
-package snappy
-
-import (
- "bytes"
- "encoding/binary"
-
- master "github.com/golang/snappy"
-)
-
-var xerialHeader = []byte{130, 83, 78, 65, 80, 80, 89, 0}
-
-// Encode encodes data as snappy with no framing header.
-func Encode(src []byte) []byte {
- return master.Encode(nil, src)
-}
-
-// Decode decodes snappy data whether it is traditional unframed
-// or includes the xerial framing format.
-func Decode(src []byte) ([]byte, error) {
-	// Treat inputs shorter than the 8-byte header as unframed; indexing
-	// src[:8] below would panic otherwise.
-	if len(src) < 8 || !bytes.Equal(src[:8], xerialHeader) {
- return master.Decode(nil, src)
- }
-
- var (
- pos = uint32(16)
- max = uint32(len(src))
- dst = make([]byte, 0, len(src))
- chunk []byte
- err error
- )
- for pos < max {
- size := binary.BigEndian.Uint32(src[pos : pos+4])
- pos += 4
-
- chunk, err = master.Decode(chunk, src[pos:pos+size])
- if err != nil {
- return nil, err
- }
- pos += size
- dst = append(dst, chunk...)
- }
- return dst, nil
-}
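A round-trip plus a hand-built xerial frame, to exercise both paths of `Decode`. The 16-byte preamble is the 8-byte magic above followed by two 4-byte version/compatibility fields, which this package skips over (`pos` starts at 16):

```go
package main

import (
	"encoding/binary"
	"fmt"

	snappy "github.com/eapache/go-xerial-snappy"
)

func main() {
	raw := snappy.Encode([]byte("hello, kafka")) // plain unframed snappy

	// Path 1: unframed input falls through to the standard decoder.
	out, err := snappy.Decode(raw)
	if err != nil {
		panic(err)
	}
	fmt.Printf("unframed: %s\n", out)

	// Path 2: wrap the same block in xerial framing:
	// magic(8) + version/compat(8), then [size][block] chunks.
	framed := append([]byte{130, 83, 78, 65, 80, 80, 89, 0}, make([]byte, 8)...)
	var size [4]byte
	binary.BigEndian.PutUint32(size[:], uint32(len(raw)))
	framed = append(framed, size[:]...)
	framed = append(framed, raw...)

	out, err = snappy.Decode(framed)
	if err != nil {
		panic(err)
	}
	fmt.Printf("framed:   %s\n", out)
}
```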
diff --git a/vendor/github.com/eapache/queue/.gitignore b/vendor/github.com/eapache/queue/.gitignore
deleted file mode 100644
index 836562412fe..00000000000
--- a/vendor/github.com/eapache/queue/.gitignore
+++ /dev/null
@@ -1,23 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
diff --git a/vendor/github.com/eapache/queue/.travis.yml b/vendor/github.com/eapache/queue/.travis.yml
deleted file mode 100644
index 235a40a493f..00000000000
--- a/vendor/github.com/eapache/queue/.travis.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-language: go
-sudo: false
-
-go:
- - 1.2
- - 1.3
- - 1.4
diff --git a/vendor/github.com/eapache/queue/LICENSE b/vendor/github.com/eapache/queue/LICENSE
deleted file mode 100644
index d5f36dbcaaf..00000000000
--- a/vendor/github.com/eapache/queue/LICENSE
+++ /dev/null
@@ -1,21 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2014 Evan Huus
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/eapache/queue/README.md b/vendor/github.com/eapache/queue/README.md
deleted file mode 100644
index 8e782335cd7..00000000000
--- a/vendor/github.com/eapache/queue/README.md
+++ /dev/null
@@ -1,16 +0,0 @@
-Queue
-=====
-
-[Build Status](https://travis-ci.org/eapache/queue)
-[GoDoc](https://godoc.org/github.com/eapache/queue)
-[Code of Conduct](https://eapache.github.io/conduct.html)
-
-A fast Golang queue using a ring-buffer, based on the version suggested by Dariusz Górecki.
-Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
-substantial memory and time benefits, and fewer GC pauses.
-
-The queue implemented here is as fast as it is in part because it is *not* thread-safe.
-
-Follows semantic versioning using https://gopkg.in/ - import from
-[`gopkg.in/eapache/queue.v1`](https://gopkg.in/eapache/queue.v1)
-for guaranteed API stability.
diff --git a/vendor/github.com/eapache/queue/queue.go b/vendor/github.com/eapache/queue/queue.go
deleted file mode 100644
index 2dc8d939566..00000000000
--- a/vendor/github.com/eapache/queue/queue.go
+++ /dev/null
@@ -1,88 +0,0 @@
-/*
-Package queue provides a fast, ring-buffer queue based on the version suggested by Dariusz Górecki.
-Using this instead of other, simpler, queue implementations (slice+append or linked list) provides
-substantial memory and time benefits, and fewer GC pauses.
-
-The queue implemented here is as fast as it is for an additional reason: it is *not* thread-safe.
-*/
-package queue
-
-const minQueueLen = 16
-
-// Queue represents a single instance of the queue data structure.
-type Queue struct {
- buf []interface{}
- head, tail, count int
-}
-
-// New constructs and returns a new Queue.
-func New() *Queue {
- return &Queue{
- buf: make([]interface{}, minQueueLen),
- }
-}
-
-// Length returns the number of elements currently stored in the queue.
-func (q *Queue) Length() int {
- return q.count
-}
-
-// resizes the queue to fit exactly twice its current contents
-// this can result in shrinking if the queue is less than half-full
-func (q *Queue) resize() {
- newBuf := make([]interface{}, q.count*2)
-
- if q.tail > q.head {
- copy(newBuf, q.buf[q.head:q.tail])
- } else {
- n := copy(newBuf, q.buf[q.head:])
- copy(newBuf[n:], q.buf[:q.tail])
- }
-
- q.head = 0
- q.tail = q.count
- q.buf = newBuf
-}
-
-// Add puts an element on the end of the queue.
-func (q *Queue) Add(elem interface{}) {
- if q.count == len(q.buf) {
- q.resize()
- }
-
- q.buf[q.tail] = elem
- q.tail = (q.tail + 1) % len(q.buf)
- q.count++
-}
-
-// Peek returns the element at the head of the queue. This call panics
-// if the queue is empty.
-func (q *Queue) Peek() interface{} {
- if q.count <= 0 {
- panic("queue: Peek() called on empty queue")
- }
- return q.buf[q.head]
-}
-
-// Get returns the element at index i in the queue. If the index is
-// invalid, the call will panic.
-func (q *Queue) Get(i int) interface{} {
- if i < 0 || i >= q.count {
- panic("queue: Get() called with index out of range")
- }
- return q.buf[(q.head+i)%len(q.buf)]
-}
-
-// Remove removes the element from the front of the queue. If you actually
-// want the element, call Peek first. This call panics if the queue is empty.
-func (q *Queue) Remove() {
- if q.count <= 0 {
- panic("queue: Remove() called on empty queue")
- }
- q.buf[q.head] = nil
- q.head = (q.head + 1) % len(q.buf)
- q.count--
- if len(q.buf) > minQueueLen && q.count*4 == len(q.buf) {
- q.resize()
- }
-}
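Typical use, sticking to the five methods above; values come back as `interface{}` and need a type assertion:

```go
package main

import (
	"fmt"

	"github.com/eapache/queue"
)

func main() {
	q := queue.New()
	q.Add("a")
	q.Add("b")

	fmt.Println(q.Peek().(string)) // a
	q.Remove()                     // discards "a"; call Peek first if you need it
	fmt.Println(q.Length())        // 1
	fmt.Println(q.Get(0).(string)) // b
}
```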
diff --git a/vendor/github.com/klauspost/crc32/.gitignore b/vendor/github.com/klauspost/crc32/.gitignore
deleted file mode 100644
index daf913b1b34..00000000000
--- a/vendor/github.com/klauspost/crc32/.gitignore
+++ /dev/null
@@ -1,24 +0,0 @@
-# Compiled Object files, Static and Dynamic libs (Shared Objects)
-*.o
-*.a
-*.so
-
-# Folders
-_obj
-_test
-
-# Architecture specific extensions/prefixes
-*.[568vq]
-[568vq].out
-
-*.cgo1.go
-*.cgo2.c
-_cgo_defun.c
-_cgo_gotypes.go
-_cgo_export.*
-
-_testmain.go
-
-*.exe
-*.test
-*.prof
diff --git a/vendor/github.com/klauspost/crc32/.travis.yml b/vendor/github.com/klauspost/crc32/.travis.yml
deleted file mode 100644
index de64ae491f1..00000000000
--- a/vendor/github.com/klauspost/crc32/.travis.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-language: go
-
-go:
- - 1.3
- - 1.4
- - 1.5
- - 1.6
- - 1.7
- - tip
-
-script:
- - go test -v .
- - go test -v -race .
diff --git a/vendor/github.com/klauspost/crc32/LICENSE b/vendor/github.com/klauspost/crc32/LICENSE
deleted file mode 100644
index 4fd5963e39c..00000000000
--- a/vendor/github.com/klauspost/crc32/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012 The Go Authors. All rights reserved.
-Copyright (c) 2015 Klaus Post
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/github.com/klauspost/crc32/README.md b/vendor/github.com/klauspost/crc32/README.md
deleted file mode 100644
index 029625d3609..00000000000
--- a/vendor/github.com/klauspost/crc32/README.md
+++ /dev/null
@@ -1,87 +0,0 @@
-# crc32
-CRC32 hash with x64 optimizations
-
-This package is a drop-in replacement for the standard library `hash/crc32` package, featuring SSE 4.2 optimizations on x64 platforms for a 10x speedup.
-
-[Build Status](https://travis-ci.org/klauspost/crc32)
-
-# usage
-
-Install using `go get github.com/klauspost/crc32`. This library is based on Go 1.5 code and requires Go 1.3 or newer.
-
-Replace `import "hash/crc32"` with `import "github.com/klauspost/crc32"` and you are good to go.
-
-# changes
-* Oct 20, 2016: Changes have been merged to upstream Go. Package updated to match.
-* Dec 4, 2015: Uses the "slice-by-8" trick more extensively, which gives a 1.5 to 2.5x speedup if assembler is unavailable.
-
-
-# performance
-
-For *Go 1.7* performance is equivalent to the standard library. So if you use this package for Go 1.7 you can switch back.
-
-
-For IEEE tables (the most common), there is approximately a factor 10 speedup with "CLMUL" (Carryless multiplication) instruction:
-```
-benchmark old ns/op new ns/op delta
-BenchmarkCrc32KB 99955 10258 -89.74%
-
-benchmark old MB/s new MB/s speedup
-BenchmarkCrc32KB 327.83 3194.20 9.74x
-```
-
-For other tables and "CLMUL" capable machines the performance is the same as the standard library.
-
-Here are some detailed benchmarks, comparing to go 1.5 standard library with and without assembler enabled.
-
-```
-Std: Standard Go 1.5 library
-Crc: Indicates IEEE type CRC.
-40B: Size of each slice encoded.
-NoAsm: Assembler was disabled (ie. not an AMD64 or SSE 4.2+ capable machine).
-Castagnoli: Castagnoli CRC type.
-
-BenchmarkStdCrc40B-4 10000000 158 ns/op 252.88 MB/s
-BenchmarkCrc40BNoAsm-4 20000000 105 ns/op 377.38 MB/s (slice8)
-BenchmarkCrc40B-4 20000000 105 ns/op 378.77 MB/s (slice8)
-
-BenchmarkStdCrc1KB-4 500000 3604 ns/op 284.10 MB/s
-BenchmarkCrc1KBNoAsm-4 1000000 1463 ns/op 699.79 MB/s (slice8)
-BenchmarkCrc1KB-4 3000000 396 ns/op 2583.69 MB/s (asm)
-
-BenchmarkStdCrc8KB-4 200000 11417 ns/op 717.48 MB/s (slice8)
-BenchmarkCrc8KBNoAsm-4 200000 11317 ns/op 723.85 MB/s (slice8)
-BenchmarkCrc8KB-4 500000 2919 ns/op 2805.73 MB/s (asm)
-
-BenchmarkStdCrc32KB-4 30000 45749 ns/op 716.24 MB/s (slice8)
-BenchmarkCrc32KBNoAsm-4 30000 45109 ns/op 726.42 MB/s (slice8)
-BenchmarkCrc32KB-4 100000 11497 ns/op 2850.09 MB/s (asm)
-
-BenchmarkStdNoAsmCastagnol40B-4 10000000 161 ns/op 246.94 MB/s
-BenchmarkStdCastagnoli40B-4 50000000 28.4 ns/op 1410.69 MB/s (asm)
-BenchmarkCastagnoli40BNoAsm-4 20000000 100 ns/op 398.01 MB/s (slice8)
-BenchmarkCastagnoli40B-4 50000000 28.2 ns/op 1419.54 MB/s (asm)
-
-BenchmarkStdNoAsmCastagnoli1KB-4 500000 3622 ns/op 282.67 MB/s
-BenchmarkStdCastagnoli1KB-4 10000000 144 ns/op 7099.78 MB/s (asm)
-BenchmarkCastagnoli1KBNoAsm-4 1000000 1475 ns/op 694.14 MB/s (slice8)
-BenchmarkCastagnoli1KB-4 10000000 146 ns/op 6993.35 MB/s (asm)
-
-BenchmarkStdNoAsmCastagnoli8KB-4 50000 28781 ns/op 284.63 MB/s
-BenchmarkStdCastagnoli8KB-4 1000000 1029 ns/op 7957.89 MB/s (asm)
-BenchmarkCastagnoli8KBNoAsm-4 200000 11410 ns/op 717.94 MB/s (slice8)
-BenchmarkCastagnoli8KB-4 1000000 1000 ns/op 8188.71 MB/s (asm)
-
-BenchmarkStdNoAsmCastagnoli32KB-4 10000 115426 ns/op 283.89 MB/s
-BenchmarkStdCastagnoli32KB-4 300000 4065 ns/op 8059.13 MB/s (asm)
-BenchmarkCastagnoli32KBNoAsm-4 30000 45171 ns/op 725.41 MB/s (slice8)
-BenchmarkCastagnoli32KB-4 500000 4077 ns/op 8035.89 MB/s (asm)
-```
-
-The IEEE assembler optimizations have been submitted and will be part of the Go 1.6 standard library.
-
-However, the improved use of slice-by-8 has not, but will probably be submitted for Go 1.7.
-
-# license
-
-Standard Go license. Changes are Copyright (c) 2015 Klaus Post under same conditions.
diff --git a/vendor/github.com/klauspost/crc32/crc32.go b/vendor/github.com/klauspost/crc32/crc32.go
deleted file mode 100644
index 8aa91b17e90..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32.go
+++ /dev/null
@@ -1,207 +0,0 @@
-// Copyright 2009 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package crc32 implements the 32-bit cyclic redundancy check, or CRC-32,
-// checksum. See http://en.wikipedia.org/wiki/Cyclic_redundancy_check for
-// information.
-//
-// Polynomials are represented in LSB-first form also known as reversed representation.
-//
-// See http://en.wikipedia.org/wiki/Mathematics_of_cyclic_redundancy_checks#Reversed_representations_and_reciprocal_polynomials
-// for information.
-package crc32
-
-import (
- "hash"
- "sync"
-)
-
-// The size of a CRC-32 checksum in bytes.
-const Size = 4
-
-// Predefined polynomials.
-const (
- // IEEE is by far and away the most common CRC-32 polynomial.
- // Used by ethernet (IEEE 802.3), v.42, fddi, gzip, zip, png, ...
- IEEE = 0xedb88320
-
- // Castagnoli's polynomial, used in iSCSI.
- // Has better error detection characteristics than IEEE.
- // http://dx.doi.org/10.1109/26.231911
- Castagnoli = 0x82f63b78
-
- // Koopman's polynomial.
- // Also has better error detection characteristics than IEEE.
- // http://dx.doi.org/10.1109/DSN.2002.1028931
- Koopman = 0xeb31d82e
-)
-
-// Table is a 256-word table representing the polynomial for efficient processing.
-type Table [256]uint32
-
-// This file makes use of functions implemented in architecture-specific files.
-// The interface that they implement is as follows:
-//
-// // archAvailableIEEE reports whether an architecture-specific CRC32-IEEE
-// // algorithm is available.
-// archAvailableIEEE() bool
-//
-// // archInitIEEE initializes the architecture-specific CRC32-IEEE algorithm.
-// // It can only be called if archAvailableIEEE() returns true.
-// archInitIEEE()
-//
-// // archUpdateIEEE updates the given CRC32-IEEE. It can only be called if
-// // archInitIEEE() was previously called.
-// archUpdateIEEE(crc uint32, p []byte) uint32
-//
-// // archAvailableCastagnoli reports whether an architecture-specific
-// // CRC32-C algorithm is available.
-// archAvailableCastagnoli() bool
-//
-// // archInitCastagnoli initializes the architecture-specific CRC32-C
-// // algorithm. It can only be called if archAvailableCastagnoli() returns
-// // true.
-// archInitCastagnoli()
-//
-// // archUpdateCastagnoli updates the given CRC32-C. It can only be called
-// // if archInitCastagnoli() was previously called.
-// archUpdateCastagnoli(crc uint32, p []byte) uint32
-
-// castagnoliTable points to a lazily initialized Table for the Castagnoli
-// polynomial. MakeTable will always return this value when asked to make a
-// Castagnoli table so we can compare against it to find when the caller is
-// using this polynomial.
-var castagnoliTable *Table
-var castagnoliTable8 *slicing8Table
-var castagnoliArchImpl bool
-var updateCastagnoli func(crc uint32, p []byte) uint32
-var castagnoliOnce sync.Once
-
-func castagnoliInit() {
- castagnoliTable = simpleMakeTable(Castagnoli)
- castagnoliArchImpl = archAvailableCastagnoli()
-
- if castagnoliArchImpl {
- archInitCastagnoli()
- updateCastagnoli = archUpdateCastagnoli
- } else {
- // Initialize the slicing-by-8 table.
- castagnoliTable8 = slicingMakeTable(Castagnoli)
- updateCastagnoli = func(crc uint32, p []byte) uint32 {
- return slicingUpdate(crc, castagnoliTable8, p)
- }
- }
-}
-
-// IEEETable is the table for the IEEE polynomial.
-var IEEETable = simpleMakeTable(IEEE)
-
-// ieeeTable8 is the slicing8Table for IEEE
-var ieeeTable8 *slicing8Table
-var ieeeArchImpl bool
-var updateIEEE func(crc uint32, p []byte) uint32
-var ieeeOnce sync.Once
-
-func ieeeInit() {
- ieeeArchImpl = archAvailableIEEE()
-
- if ieeeArchImpl {
- archInitIEEE()
- updateIEEE = archUpdateIEEE
- } else {
- // Initialize the slicing-by-8 table.
- ieeeTable8 = slicingMakeTable(IEEE)
- updateIEEE = func(crc uint32, p []byte) uint32 {
- return slicingUpdate(crc, ieeeTable8, p)
- }
- }
-}
-
-// MakeTable returns a Table constructed from the specified polynomial.
-// The contents of this Table must not be modified.
-func MakeTable(poly uint32) *Table {
- switch poly {
- case IEEE:
- ieeeOnce.Do(ieeeInit)
- return IEEETable
- case Castagnoli:
- castagnoliOnce.Do(castagnoliInit)
- return castagnoliTable
- }
- return simpleMakeTable(poly)
-}
-
-// digest represents the partial evaluation of a checksum.
-type digest struct {
- crc uint32
- tab *Table
-}
-
-// New creates a new hash.Hash32 computing the CRC-32 checksum
-// using the polynomial represented by the Table.
-// Its Sum method will lay the value out in big-endian byte order.
-func New(tab *Table) hash.Hash32 {
- if tab == IEEETable {
- ieeeOnce.Do(ieeeInit)
- }
- return &digest{0, tab}
-}
-
-// NewIEEE creates a new hash.Hash32 computing the CRC-32 checksum
-// using the IEEE polynomial.
-// Its Sum method will lay the value out in big-endian byte order.
-func NewIEEE() hash.Hash32 { return New(IEEETable) }
-
-func (d *digest) Size() int { return Size }
-
-func (d *digest) BlockSize() int { return 1 }
-
-func (d *digest) Reset() { d.crc = 0 }
-
-// Update returns the result of adding the bytes in p to the crc.
-func Update(crc uint32, tab *Table, p []byte) uint32 {
- switch tab {
- case castagnoliTable:
- return updateCastagnoli(crc, p)
- case IEEETable:
- // Unfortunately, because IEEETable is exported, IEEE may be used without a
- // call to MakeTable. We have to make sure it gets initialized in that case.
- ieeeOnce.Do(ieeeInit)
- return updateIEEE(crc, p)
- default:
- return simpleUpdate(crc, tab, p)
- }
-}
-
-func (d *digest) Write(p []byte) (n int, err error) {
- switch d.tab {
- case castagnoliTable:
- d.crc = updateCastagnoli(d.crc, p)
- case IEEETable:
- // We only create digest objects through New() which takes care of
- // initialization in this case.
- d.crc = updateIEEE(d.crc, p)
- default:
- d.crc = simpleUpdate(d.crc, d.tab, p)
- }
- return len(p), nil
-}
-
-func (d *digest) Sum32() uint32 { return d.crc }
-
-func (d *digest) Sum(in []byte) []byte {
- s := d.Sum32()
- return append(in, byte(s>>24), byte(s>>16), byte(s>>8), byte(s))
-}
-
-// Checksum returns the CRC-32 checksum of data
-// using the polynomial represented by the Table.
-func Checksum(data []byte, tab *Table) uint32 { return Update(0, tab, data) }
-
-// ChecksumIEEE returns the CRC-32 checksum of data
-// using the IEEE polynomial.
-func ChecksumIEEE(data []byte) uint32 {
- ieeeOnce.Do(ieeeInit)
- return updateIEEE(0, data)
-}
diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.go b/vendor/github.com/klauspost/crc32/crc32_amd64.go
deleted file mode 100644
index af2a0b844bd..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_amd64.go
+++ /dev/null
@@ -1,230 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine,!gccgo
-
-// AMD64-specific hardware-assisted CRC32 algorithms. See crc32.go for a
-// description of the interface that each architecture-specific file
-// implements.
-
-package crc32
-
-import "unsafe"
-
-// This file contains the code to call the SSE 4.2 version of the Castagnoli
-// and IEEE CRC.
-
-// haveSSE41/haveSSE42/haveCLMUL are defined in crc_amd64.s and use
-// CPUID to test for SSE 4.1, 4.2 and CLMUL support.
-func haveSSE41() bool
-func haveSSE42() bool
-func haveCLMUL() bool
-
-// castagnoliSSE42 is defined in crc32_amd64.s and uses the SSE4.2 CRC32
-// instruction.
-//go:noescape
-func castagnoliSSE42(crc uint32, p []byte) uint32
-
-// castagnoliSSE42Triple is defined in crc32_amd64.s and uses the SSE4.2 CRC32
-// instruction.
-//go:noescape
-func castagnoliSSE42Triple(
- crcA, crcB, crcC uint32,
- a, b, c []byte,
- rounds uint32,
-) (retA uint32, retB uint32, retC uint32)
-
-// ieeeCLMUL is defined in crc_amd64.s and uses the PCLMULQDQ
-// instruction as well as SSE 4.1.
-//go:noescape
-func ieeeCLMUL(crc uint32, p []byte) uint32
-
-var sse42 = haveSSE42()
-var useFastIEEE = haveCLMUL() && haveSSE41()
-
-const castagnoliK1 = 168
-const castagnoliK2 = 1344
-
-type sse42Table [4]Table
-
-var castagnoliSSE42TableK1 *sse42Table
-var castagnoliSSE42TableK2 *sse42Table
-
-func archAvailableCastagnoli() bool {
- return sse42
-}
-
-func archInitCastagnoli() {
- if !sse42 {
- panic("arch-specific Castagnoli not available")
- }
- castagnoliSSE42TableK1 = new(sse42Table)
- castagnoliSSE42TableK2 = new(sse42Table)
- // See description in updateCastagnoli.
- // t[0][i] = CRC(i000, O)
- // t[1][i] = CRC(0i00, O)
- // t[2][i] = CRC(00i0, O)
- // t[3][i] = CRC(000i, O)
- // where O is a sequence of K zeros.
- var tmp [castagnoliK2]byte
- for b := 0; b < 4; b++ {
- for i := 0; i < 256; i++ {
- val := uint32(i) << uint32(b*8)
- castagnoliSSE42TableK1[b][i] = castagnoliSSE42(val, tmp[:castagnoliK1])
- castagnoliSSE42TableK2[b][i] = castagnoliSSE42(val, tmp[:])
- }
- }
-}
-
-// castagnoliShift computes the CRC32-C of K1 or K2 zeroes (depending on the
-// table given) with the given initial crc value. This corresponds to
-// CRC(crc, O) in the description in updateCastagnoli.
-func castagnoliShift(table *sse42Table, crc uint32) uint32 {
- return table[3][crc>>24] ^
- table[2][(crc>>16)&0xFF] ^
- table[1][(crc>>8)&0xFF] ^
- table[0][crc&0xFF]
-}
-
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
- if !sse42 {
- panic("not available")
- }
-
- // This method is inspired from the algorithm in Intel's white paper:
- // "Fast CRC Computation for iSCSI Polynomial Using CRC32 Instruction"
- // The same strategy of splitting the buffer in three is used but the
- // combining calculation is different; the complete derivation is explained
- // below.
- //
- // -- The basic idea --
- //
- // The CRC32 instruction (available in SSE4.2) can process 8 bytes at a
- // time. In recent Intel architectures the instruction takes 3 cycles;
- // however the processor can pipeline up to three instructions if they
- // don't depend on each other.
- //
- // Roughly this means that we can process three buffers in about the same
- // time we can process one buffer.
- //
- // The idea is then to split the buffer in three, CRC the three pieces
- // separately and then combine the results.
- //
- // Combining the results requires precomputed tables, so we must choose a
- // fixed buffer length to optimize. The longer the length, the faster; but
- // only buffers longer than this length will use the optimization. We choose
- // two cutoffs and compute tables for both:
- // - one around 512: 168*3=504
- // - one around 4KB: 1344*3=4032
- //
- // -- The nitty gritty --
- //
- // Let CRC(I, X) be the non-inverted CRC32-C of the sequence X (with
- // initial non-inverted CRC I). This function has the following properties:
- // (a) CRC(I, AB) = CRC(CRC(I, A), B)
- // (b) CRC(I, A xor B) = CRC(I, A) xor CRC(0, B)
- //
- // Say we want to compute CRC(I, ABC) where A, B, C are three sequences of
- // K bytes each, where K is a fixed constant. Let O be the sequence of K zero
- // bytes.
- //
- // CRC(I, ABC) = CRC(I, ABO xor C)
- // = CRC(I, ABO) xor CRC(0, C)
- // = CRC(CRC(I, AB), O) xor CRC(0, C)
- // = CRC(CRC(I, AO xor B), O) xor CRC(0, C)
- // = CRC(CRC(I, AO) xor CRC(0, B), O) xor CRC(0, C)
- // = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C)
- //
- // The castagnoliSSE42Triple function can compute CRC(I, A), CRC(0, B),
- // and CRC(0, C) efficiently. We just need to find a way to quickly compute
- // CRC(uvwx, O) given a 4-byte initial value uvwx. We can precompute these
- // values; since we can't have a 32-bit table, we break it up into four
- // 8-bit tables:
- //
- // CRC(uvwx, O) = CRC(u000, O) xor
- // CRC(0v00, O) xor
- // CRC(00w0, O) xor
- // CRC(000x, O)
- //
- // We can compute tables corresponding to the four terms for all 8-bit
- // values.
-
- crc = ^crc
-
-	// If a buffer is long enough to use the optimization, process the first
-	// few bytes to align the buffer to an 8-byte boundary (if necessary).
- if len(p) >= castagnoliK1*3 {
- delta := int(uintptr(unsafe.Pointer(&p[0])) & 7)
- if delta != 0 {
- delta = 8 - delta
- crc = castagnoliSSE42(crc, p[:delta])
- p = p[delta:]
- }
- }
-
- // Process 3*K2 at a time.
- for len(p) >= castagnoliK2*3 {
- // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
- crcA, crcB, crcC := castagnoliSSE42Triple(
- crc, 0, 0,
- p, p[castagnoliK2:], p[castagnoliK2*2:],
- castagnoliK2/24)
-
- // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
- crcAB := castagnoliShift(castagnoliSSE42TableK2, crcA) ^ crcB
- // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
- crc = castagnoliShift(castagnoliSSE42TableK2, crcAB) ^ crcC
- p = p[castagnoliK2*3:]
- }
-
- // Process 3*K1 at a time.
- for len(p) >= castagnoliK1*3 {
- // Compute CRC(I, A), CRC(0, B), and CRC(0, C).
- crcA, crcB, crcC := castagnoliSSE42Triple(
- crc, 0, 0,
- p, p[castagnoliK1:], p[castagnoliK1*2:],
- castagnoliK1/24)
-
- // CRC(I, AB) = CRC(CRC(I, A), O) xor CRC(0, B)
- crcAB := castagnoliShift(castagnoliSSE42TableK1, crcA) ^ crcB
- // CRC(I, ABC) = CRC(CRC(I, AB), O) xor CRC(0, C)
- crc = castagnoliShift(castagnoliSSE42TableK1, crcAB) ^ crcC
- p = p[castagnoliK1*3:]
- }
-
- // Use the simple implementation for what's left.
- crc = castagnoliSSE42(crc, p)
- return ^crc
-}
-
-func archAvailableIEEE() bool {
- return useFastIEEE
-}
-
-var archIeeeTable8 *slicing8Table
-
-func archInitIEEE() {
- if !useFastIEEE {
- panic("not available")
- }
- // We still use slicing-by-8 for small buffers.
- archIeeeTable8 = slicingMakeTable(IEEE)
-}
-
-func archUpdateIEEE(crc uint32, p []byte) uint32 {
- if !useFastIEEE {
- panic("not available")
- }
-
- if len(p) >= 64 {
- left := len(p) & 15
- do := len(p) - left
- crc = ^ieeeCLMUL(^crc, p[:do])
- p = p[do:]
- }
- if len(p) == 0 {
- return crc
- }
- return slicingUpdate(crc, archIeeeTable8, p)
-}
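
The derivation in the comment above reduces to the identity CRC(I, ABC) = CRC(CRC(CRC(I, A), O) xor CRC(0, B), O) xor CRC(0, C). A self-contained check against hash/crc32, recovering the non-inverted CRC as ^Update(^I, tab, X):

package main

import (
	"fmt"
	"hash/crc32"
)

// crcNI is the non-inverted CRC used in the derivation above:
// CRC(I, X) = ^Update(^I, tab, X).
func crcNI(tab *crc32.Table, i uint32, p []byte) uint32 {
	return ^crc32.Update(^i, tab, p)
}

func main() {
	tab := crc32.MakeTable(crc32.Castagnoli)
	const k = 168 // castagnoliK1
	a, b, c := make([]byte, k), make([]byte, k), make([]byte, k)
	for i := range a {
		a[i], b[i], c[i] = byte(i), byte(i*3), byte(i*7)
	}
	zeros := make([]byte, k) // the sequence O of K zero bytes

	// Direct: CRC(I, ABC) computed as one running CRC.
	start := uint32(0x1234abcd)
	direct := crcNI(tab, crcNI(tab, crcNI(tab, start, a), b), c)

	// Combined: CRC(CRC(CRC(I, A), O) ^ CRC(0, B), O) ^ CRC(0, C).
	crcA := crcNI(tab, start, a)
	crcB := crcNI(tab, 0, b)
	crcC := crcNI(tab, 0, c)
	combined := crcNI(tab, crcNI(tab, crcA, zeros)^crcB, zeros) ^ crcC

	fmt.Println(direct == combined) // true
}
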
diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64.s b/vendor/github.com/klauspost/crc32/crc32_amd64.s
deleted file mode 100644
index e8a7941ce75..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_amd64.s
+++ /dev/null
@@ -1,319 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build gc
-
-#define NOSPLIT 4
-#define RODATA 8
-
-// castagnoliSSE42 updates the (non-inverted) crc with the given buffer.
-//
-// func castagnoliSSE42(crc uint32, p []byte) uint32
-TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
- MOVL crc+0(FP), AX // CRC value
- MOVQ p+8(FP), SI // data pointer
- MOVQ p_len+16(FP), CX // len(p)
-
- // If there are fewer than 8 bytes to process, skip alignment.
- CMPQ CX, $8
- JL less_than_8
-
- MOVQ SI, BX
- ANDQ $7, BX
- JZ aligned
-
- // Process the first few bytes to 8-byte align the input.
-
- // BX = 8 - BX. We need to process this many bytes to align.
- SUBQ $1, BX
- XORQ $7, BX
-
- BTQ $0, BX
- JNC align_2
-
- CRC32B (SI), AX
- DECQ CX
- INCQ SI
-
-align_2:
- BTQ $1, BX
- JNC align_4
-
- // CRC32W (SI), AX
- BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
-
- SUBQ $2, CX
- ADDQ $2, SI
-
-align_4:
- BTQ $2, BX
- JNC aligned
-
- // CRC32L (SI), AX
- BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
-
- SUBQ $4, CX
- ADDQ $4, SI
-
-aligned:
- // The input is now 8-byte aligned and we can process 8-byte chunks.
- CMPQ CX, $8
- JL less_than_8
-
- CRC32Q (SI), AX
- ADDQ $8, SI
- SUBQ $8, CX
- JMP aligned
-
-less_than_8:
- // We may have some bytes left over; process 4 bytes, then 2, then 1.
- BTQ $2, CX
- JNC less_than_4
-
- // CRC32L (SI), AX
- BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
- ADDQ $4, SI
-
-less_than_4:
- BTQ $1, CX
- JNC less_than_2
-
- // CRC32W (SI), AX
- BYTE $0x66; BYTE $0xf2; BYTE $0x0f; BYTE $0x38; BYTE $0xf1; BYTE $0x06
- ADDQ $2, SI
-
-less_than_2:
- BTQ $0, CX
- JNC done
-
- CRC32B (SI), AX
-
-done:
- MOVL AX, ret+32(FP)
- RET
-
-// castagnoliSSE42Triple updates three (non-inverted) crcs with (24*rounds)
-// bytes from each buffer.
-//
-// func castagnoliSSE42Triple(
-// crc1, crc2, crc3 uint32,
-// a, b, c []byte,
-// rounds uint32,
-// ) (retA uint32, retB uint32, retC uint32)
-TEXT ·castagnoliSSE42Triple(SB), NOSPLIT, $0
- MOVL crcA+0(FP), AX
- MOVL crcB+4(FP), CX
- MOVL crcC+8(FP), DX
-
- MOVQ a+16(FP), R8 // data pointer
- MOVQ b+40(FP), R9 // data pointer
- MOVQ c+64(FP), R10 // data pointer
-
- MOVL rounds+88(FP), R11
-
-loop:
- CRC32Q (R8), AX
- CRC32Q (R9), CX
- CRC32Q (R10), DX
-
- CRC32Q 8(R8), AX
- CRC32Q 8(R9), CX
- CRC32Q 8(R10), DX
-
- CRC32Q 16(R8), AX
- CRC32Q 16(R9), CX
- CRC32Q 16(R10), DX
-
- ADDQ $24, R8
- ADDQ $24, R9
- ADDQ $24, R10
-
- DECQ R11
- JNZ loop
-
- MOVL AX, retA+96(FP)
- MOVL CX, retB+100(FP)
- MOVL DX, retC+104(FP)
- RET
-
-// func haveSSE42() bool
-TEXT ·haveSSE42(SB), NOSPLIT, $0
- XORQ AX, AX
- INCL AX
- CPUID
- SHRQ $20, CX
- ANDQ $1, CX
- MOVB CX, ret+0(FP)
- RET
-
-// func haveCLMUL() bool
-TEXT ·haveCLMUL(SB), NOSPLIT, $0
- XORQ AX, AX
- INCL AX
- CPUID
- SHRQ $1, CX
- ANDQ $1, CX
- MOVB CX, ret+0(FP)
- RET
-
-// func haveSSE41() bool
-TEXT ·haveSSE41(SB), NOSPLIT, $0
- XORQ AX, AX
- INCL AX
- CPUID
- SHRQ $19, CX
- ANDQ $1, CX
- MOVB CX, ret+0(FP)
- RET
-
-// CRC32 polynomial data
-//
-// These constants are lifted from the
-// Linux kernel, since they avoid the costly
-// PSHUFB 16 byte reversal proposed in the
-// original Intel paper.
-DATA r2r1kp<>+0(SB)/8, $0x154442bd4
-DATA r2r1kp<>+8(SB)/8, $0x1c6e41596
-DATA r4r3kp<>+0(SB)/8, $0x1751997d0
-DATA r4r3kp<>+8(SB)/8, $0x0ccaa009e
-DATA rupolykp<>+0(SB)/8, $0x1db710641
-DATA rupolykp<>+8(SB)/8, $0x1f7011641
-DATA r5kp<>+0(SB)/8, $0x163cd6124
-
-GLOBL r2r1kp<>(SB), RODATA, $16
-GLOBL r4r3kp<>(SB), RODATA, $16
-GLOBL rupolykp<>(SB), RODATA, $16
-GLOBL r5kp<>(SB), RODATA, $8
-
-// Based on http://www.intel.com/content/dam/www/public/us/en/documents/white-papers/fast-crc-computation-generic-polynomials-pclmulqdq-paper.pdf
-// len(p) must be at least 64, and must be a multiple of 16.
-
-// func ieeeCLMUL(crc uint32, p []byte) uint32
-TEXT ·ieeeCLMUL(SB), NOSPLIT, $0
- MOVL crc+0(FP), X0 // Initial CRC value
- MOVQ p+8(FP), SI // data pointer
- MOVQ p_len+16(FP), CX // len(p)
-
- MOVOU (SI), X1
- MOVOU 16(SI), X2
- MOVOU 32(SI), X3
- MOVOU 48(SI), X4
- PXOR X0, X1
- ADDQ $64, SI // buf+=64
- SUBQ $64, CX // len-=64
- CMPQ CX, $64 // Less than 64 bytes left
- JB remain64
-
- MOVOA r2r1kp<>+0(SB), X0
-
-loopback64:
- MOVOA X1, X5
- MOVOA X2, X6
- MOVOA X3, X7
- MOVOA X4, X8
-
- PCLMULQDQ $0, X0, X1
- PCLMULQDQ $0, X0, X2
- PCLMULQDQ $0, X0, X3
- PCLMULQDQ $0, X0, X4
-
- // Load next early
- MOVOU (SI), X11
- MOVOU 16(SI), X12
- MOVOU 32(SI), X13
- MOVOU 48(SI), X14
-
- PCLMULQDQ $0x11, X0, X5
- PCLMULQDQ $0x11, X0, X6
- PCLMULQDQ $0x11, X0, X7
- PCLMULQDQ $0x11, X0, X8
-
- PXOR X5, X1
- PXOR X6, X2
- PXOR X7, X3
- PXOR X8, X4
-
- PXOR X11, X1
- PXOR X12, X2
- PXOR X13, X3
- PXOR X14, X4
-
- ADDQ $0x40, DI
- ADDQ $64, SI // buf+=64
- SUBQ $64, CX // len-=64
- CMPQ CX, $64 // Less than 64 bytes left?
- JGE loopback64
-
- // Fold result into a single register (X1)
-remain64:
- MOVOA r4r3kp<>+0(SB), X0
-
- MOVOA X1, X5
- PCLMULQDQ $0, X0, X1
- PCLMULQDQ $0x11, X0, X5
- PXOR X5, X1
- PXOR X2, X1
-
- MOVOA X1, X5
- PCLMULQDQ $0, X0, X1
- PCLMULQDQ $0x11, X0, X5
- PXOR X5, X1
- PXOR X3, X1
-
- MOVOA X1, X5
- PCLMULQDQ $0, X0, X1
- PCLMULQDQ $0x11, X0, X5
- PXOR X5, X1
- PXOR X4, X1
-
- // If there is less than 16 bytes left we are done
- CMPQ CX, $16
- JB finish
-
- // Encode 16 bytes
-remain16:
- MOVOU (SI), X10
- MOVOA X1, X5
- PCLMULQDQ $0, X0, X1
- PCLMULQDQ $0x11, X0, X5
- PXOR X5, X1
- PXOR X10, X1
- SUBQ $16, CX
- ADDQ $16, SI
- CMPQ CX, $16
- JGE remain16
-
-finish:
- // Fold final result into 32 bits and return it
- PCMPEQB X3, X3
- PCLMULQDQ $1, X1, X0
- PSRLDQ $8, X1
- PXOR X0, X1
-
- MOVOA X1, X2
- MOVQ r5kp<>+0(SB), X0
-
- // Creates 32 bit mask. Note that we don't care about upper half.
- PSRLQ $32, X3
-
- PSRLDQ $4, X2
- PAND X3, X1
- PCLMULQDQ $0, X0, X1
- PXOR X2, X1
-
- MOVOA rupolykp<>+0(SB), X0
-
- MOVOA X1, X2
- PAND X3, X1
- PCLMULQDQ $0x10, X0, X1
- PAND X3, X1
- PCLMULQDQ $0, X0, X1
- PXOR X2, X1
-
- // PEXTRD $1, X1, AX (SSE 4.1)
- BYTE $0x66; BYTE $0x0f; BYTE $0x3a
- BYTE $0x16; BYTE $0xc8; BYTE $0x01
- MOVL AX, ret+32(FP)
-
- RET
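
The haveSSE41/haveSSE42/haveCLMUL CPUID stubs deleted above have a portable equivalent today in golang.org/x/sys/cpu (an assumption of this sketch; the vendored code predates that package):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	// Equivalent checks to the hand-written CPUID stubs:
	// SSE4.2 provides the CRC32 instruction (Castagnoli fast path);
	// SSE4.1 plus PCLMULQDQ enables the carry-less-multiply IEEE path.
	fmt.Println("castagnoli fast path:", cpu.X86.HasSSE42)
	fmt.Println("ieee fast path:      ", cpu.X86.HasSSE41 && cpu.X86.HasPCLMULQDQ)
}
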
diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go b/vendor/github.com/klauspost/crc32/crc32_amd64p32.go
deleted file mode 100644
index 3222b06a5a0..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.go
+++ /dev/null
@@ -1,43 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !appengine,!gccgo
-
-package crc32
-
-// This file contains the code to call the SSE 4.2 version of the Castagnoli
-// CRC.
-
-// haveSSE42 is defined in crc32_amd64p32.s and uses CPUID to test for SSE 4.2
-// support.
-func haveSSE42() bool
-
-// castagnoliSSE42 is defined in crc32_amd64p32.s and uses the SSE4.2 CRC32
-// instruction.
-//go:noescape
-func castagnoliSSE42(crc uint32, p []byte) uint32
-
-var sse42 = haveSSE42()
-
-func archAvailableCastagnoli() bool {
- return sse42
-}
-
-func archInitCastagnoli() {
- if !sse42 {
- panic("not available")
- }
- // No initialization necessary.
-}
-
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
- if !sse42 {
- panic("not available")
- }
- return castagnoliSSE42(crc, p)
-}
-
-func archAvailableIEEE() bool { return false }
-func archInitIEEE() { panic("not available") }
-func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
diff --git a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s b/vendor/github.com/klauspost/crc32/crc32_amd64p32.s
deleted file mode 100644
index a578d685cc5..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_amd64p32.s
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build gc
-
-#define NOSPLIT 4
-#define RODATA 8
-
-// func castagnoliSSE42(crc uint32, p []byte) uint32
-TEXT ·castagnoliSSE42(SB), NOSPLIT, $0
- MOVL crc+0(FP), AX // CRC value
- MOVL p+4(FP), SI // data pointer
- MOVL p_len+8(FP), CX // len(p)
-
- NOTL AX
-
-	// If there are fewer than 8 bytes to process, do it byte-by-byte.
- CMPQ CX, $8
- JL cleanup
-
- // Process individual bytes until the input is 8-byte aligned.
-startup:
- MOVQ SI, BX
- ANDQ $7, BX
- JZ aligned
-
- CRC32B (SI), AX
- DECQ CX
- INCQ SI
- JMP startup
-
-aligned:
- // The input is now 8-byte aligned and we can process 8-byte chunks.
- CMPQ CX, $8
- JL cleanup
-
- CRC32Q (SI), AX
- ADDQ $8, SI
- SUBQ $8, CX
- JMP aligned
-
-cleanup:
- // We may have some bytes left over that we process one at a time.
- CMPQ CX, $0
- JE done
-
- CRC32B (SI), AX
- INCQ SI
- DECQ CX
- JMP cleanup
-
-done:
- NOTL AX
- MOVL AX, ret+16(FP)
- RET
-
-// func haveSSE42() bool
-TEXT ·haveSSE42(SB), NOSPLIT, $0
- XORQ AX, AX
- INCL AX
- CPUID
- SHRQ $20, CX
- ANDQ $1, CX
- MOVB CX, ret+0(FP)
- RET
-
diff --git a/vendor/github.com/klauspost/crc32/crc32_generic.go b/vendor/github.com/klauspost/crc32/crc32_generic.go
deleted file mode 100644
index abacbb663d4..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_generic.go
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// This file contains CRC32 algorithms that are not specific to any architecture
-// and don't use hardware acceleration.
-//
-// The simple (and slow) CRC32 implementation uses only a 256*4-byte table.
-//
-// The slicing-by-8 algorithm is a faster implementation that uses a bigger
-// table (8*256*4 bytes).
-
-package crc32
-
-// simpleMakeTable allocates and constructs a Table for the specified
-// polynomial. The table is suitable for use with the simple algorithm
-// (simpleUpdate).
-func simpleMakeTable(poly uint32) *Table {
- t := new(Table)
- simplePopulateTable(poly, t)
- return t
-}
-
-// simplePopulateTable constructs a Table for the specified polynomial, suitable
-// for use with simpleUpdate.
-func simplePopulateTable(poly uint32, t *Table) {
- for i := 0; i < 256; i++ {
- crc := uint32(i)
- for j := 0; j < 8; j++ {
- if crc&1 == 1 {
- crc = (crc >> 1) ^ poly
- } else {
- crc >>= 1
- }
- }
- t[i] = crc
- }
-}
-
-// simpleUpdate uses the simple algorithm to update the CRC, given a table that
-// was previously computed using simpleMakeTable.
-func simpleUpdate(crc uint32, tab *Table, p []byte) uint32 {
- crc = ^crc
- for _, v := range p {
- crc = tab[byte(crc)^v] ^ (crc >> 8)
- }
- return ^crc
-}
-
-// Use slicing-by-8 when payload >= this value.
-const slicing8Cutoff = 16
-
-// slicing8Table is an array of 8 Tables, used by the slicing-by-8 algorithm.
-type slicing8Table [8]Table
-
-// slicingMakeTable constructs a slicing8Table for the specified polynomial. The
-// table is suitable for use with the slicing-by-8 algorithm (slicingUpdate).
-func slicingMakeTable(poly uint32) *slicing8Table {
- t := new(slicing8Table)
- simplePopulateTable(poly, &t[0])
- for i := 0; i < 256; i++ {
- crc := t[0][i]
- for j := 1; j < 8; j++ {
- crc = t[0][crc&0xFF] ^ (crc >> 8)
- t[j][i] = crc
- }
- }
- return t
-}
-
-// slicingUpdate uses the slicing-by-8 algorithm to update the CRC, given a
-// table that was previously computed using slicingMakeTable.
-func slicingUpdate(crc uint32, tab *slicing8Table, p []byte) uint32 {
- if len(p) >= slicing8Cutoff {
- crc = ^crc
- for len(p) > 8 {
- crc ^= uint32(p[0]) | uint32(p[1])<<8 | uint32(p[2])<<16 | uint32(p[3])<<24
- crc = tab[0][p[7]] ^ tab[1][p[6]] ^ tab[2][p[5]] ^ tab[3][p[4]] ^
- tab[4][crc>>24] ^ tab[5][(crc>>16)&0xFF] ^
- tab[6][(crc>>8)&0xFF] ^ tab[7][crc&0xFF]
- p = p[8:]
- }
- crc = ^crc
- }
- if len(p) == 0 {
- return crc
- }
- return simpleUpdate(crc, &tab[0], p)
-}
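
simplePopulateTable is the tabulated form of the textbook bit-at-a-time reflected CRC loop, which makes the table easy to sanity-check:

package main

import (
	"fmt"
	"hash/crc32"
)

// bitwiseCRC is the bit-at-a-time reflected CRC-32 that
// simplePopulateTable/simpleUpdate tabulate byte-by-byte.
func bitwiseCRC(poly uint32, p []byte) uint32 {
	crc := ^uint32(0)
	for _, v := range p {
		crc ^= uint32(v)
		for i := 0; i < 8; i++ {
			if crc&1 == 1 {
				crc = (crc >> 1) ^ poly
			} else {
				crc >>= 1
			}
		}
	}
	return ^crc
}

func main() {
	data := []byte("hello, crc32")
	fmt.Printf("bitwise: %08x\n", bitwiseCRC(crc32.IEEE, data))
	fmt.Printf("table:   %08x\n", crc32.ChecksumIEEE(data)) // identical
}
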
diff --git a/vendor/github.com/klauspost/crc32/crc32_otherarch.go b/vendor/github.com/klauspost/crc32/crc32_otherarch.go
deleted file mode 100644
index cc960764bce..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_otherarch.go
+++ /dev/null
@@ -1,15 +0,0 @@
-// Copyright 2011 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build !amd64,!amd64p32,!s390x
-
-package crc32
-
-func archAvailableIEEE() bool { return false }
-func archInitIEEE() { panic("not available") }
-func archUpdateIEEE(crc uint32, p []byte) uint32 { panic("not available") }
-
-func archAvailableCastagnoli() bool { return false }
-func archInitCastagnoli() { panic("not available") }
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 { panic("not available") }
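
These stubs complete the contract every architecture file must satisfy: either provide a real implementation or report unavailability so the init-time dispatch falls back to the generic path. A stripped-down sketch of the pattern (names illustrative, not from the package):

package main

import "fmt"

type updater func(crc uint32, p []byte) uint32

// pick mirrors the init-time selection in the deleted package: use the
// architecture-specific updater when available, else the generic one.
func pick(archAvailable bool, arch, generic updater) updater {
	if archAvailable {
		return arch
	}
	return generic
}

func main() {
	// Stand-in body, not a real CRC; it only demonstrates the dispatch.
	generic := func(crc uint32, p []byte) uint32 { return crc ^ uint32(len(p)) }
	update := pick(false, nil, generic)
	fmt.Printf("%08x\n", update(0, []byte("abc")))
}
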
diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.go b/vendor/github.com/klauspost/crc32/crc32_s390x.go
deleted file mode 100644
index ce96f032819..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_s390x.go
+++ /dev/null
@@ -1,91 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build s390x
-
-package crc32
-
-const (
- vxMinLen = 64
- vxAlignMask = 15 // align to 16 bytes
-)
-
-// hasVectorFacility reports whether the machine has the z/Architecture
-// vector facility installed and enabled.
-func hasVectorFacility() bool
-
-var hasVX = hasVectorFacility()
-
-// vectorizedCastagnoli implements CRC32 using vector instructions.
-// It is defined in crc32_s390x.s.
-//go:noescape
-func vectorizedCastagnoli(crc uint32, p []byte) uint32
-
-// vectorizedIEEE implements CRC32 using vector instructions.
-// It is defined in crc32_s390x.s.
-//go:noescape
-func vectorizedIEEE(crc uint32, p []byte) uint32
-
-func archAvailableCastagnoli() bool {
- return hasVX
-}
-
-var archCastagnoliTable8 *slicing8Table
-
-func archInitCastagnoli() {
- if !hasVX {
- panic("not available")
- }
- // We still use slicing-by-8 for small buffers.
- archCastagnoliTable8 = slicingMakeTable(Castagnoli)
-}
-
-// archUpdateCastagnoli calculates the checksum of p using
-// vectorizedCastagnoli.
-func archUpdateCastagnoli(crc uint32, p []byte) uint32 {
- if !hasVX {
- panic("not available")
- }
- // Use vectorized function if data length is above threshold.
- if len(p) >= vxMinLen {
- aligned := len(p) & ^vxAlignMask
- crc = vectorizedCastagnoli(crc, p[:aligned])
- p = p[aligned:]
- }
- if len(p) == 0 {
- return crc
- }
- return slicingUpdate(crc, archCastagnoliTable8, p)
-}
-
-func archAvailableIEEE() bool {
- return hasVX
-}
-
-var archIeeeTable8 *slicing8Table
-
-func archInitIEEE() {
- if !hasVX {
- panic("not available")
- }
- // We still use slicing-by-8 for small buffers.
- archIeeeTable8 = slicingMakeTable(IEEE)
-}
-
-// archUpdateIEEE calculates the checksum of p using vectorizedIEEE.
-func archUpdateIEEE(crc uint32, p []byte) uint32 {
- if !hasVX {
- panic("not available")
- }
- // Use vectorized function if data length is above threshold.
- if len(p) >= vxMinLen {
- aligned := len(p) & ^vxAlignMask
- crc = vectorizedIEEE(crc, p[:aligned])
- p = p[aligned:]
- }
- if len(p) == 0 {
- return crc
- }
- return slicingUpdate(crc, archIeeeTable8, p)
-}
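
The `& ^vxAlignMask` arithmetic above rounds the vectorized portion down to a multiple of 16 bytes, with the tail handled by slicing-by-8. A quick illustration of the split for buffers at or above vxMinLen:

package main

import "fmt"

func main() {
	const vxAlignMask = 15 // align to 16 bytes, as in the deleted file
	for _, n := range []int{64, 65, 100, 4096} {
		aligned := n & ^vxAlignMask // largest multiple of 16 <= n
		fmt.Printf("len=%4d -> vectorized=%4d bytes, slicing-by-8 tail=%2d bytes\n",
			n, aligned, n-aligned)
	}
}
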
diff --git a/vendor/github.com/klauspost/crc32/crc32_s390x.s b/vendor/github.com/klauspost/crc32/crc32_s390x.s
deleted file mode 100644
index e980ca29d6d..00000000000
--- a/vendor/github.com/klauspost/crc32/crc32_s390x.s
+++ /dev/null
@@ -1,249 +0,0 @@
-// Copyright 2016 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// +build s390x
-
-#include "textflag.h"
-
-// Vector register range containing CRC-32 constants
-
-#define CONST_PERM_LE2BE V9
-#define CONST_R2R1 V10
-#define CONST_R4R3 V11
-#define CONST_R5 V12
-#define CONST_RU_POLY V13
-#define CONST_CRC_POLY V14
-
-// The CRC-32 constant block contains reduction constants to fold and
-// process particular chunks of the input data stream in parallel.
-//
-// Note that the constant definitions below are extended in order to compute
-// intermediate results with a single VECTOR GALOIS FIELD MULTIPLY instruction.
-// The rightmost doubleword can be 0 to prevent contribution to the result or
-// can be multiplied by 1 to perform an XOR without the need for a separate
-// VECTOR EXCLUSIVE OR instruction.
-//
-// The polynomials used are bit-reflected:
-//
-// IEEE: P'(x) = 0x0edb88320
-// Castagnoli: P'(x) = 0x082f63b78
-
-// IEEE polynomial constants
-DATA ·crcleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
-DATA ·crcleconskp+8(SB)/8, $0x0706050403020100
-DATA ·crcleconskp+16(SB)/8, $0x00000001c6e41596 // R2
-DATA ·crcleconskp+24(SB)/8, $0x0000000154442bd4 // R1
-DATA ·crcleconskp+32(SB)/8, $0x00000000ccaa009e // R4
-DATA ·crcleconskp+40(SB)/8, $0x00000001751997d0 // R3
-DATA ·crcleconskp+48(SB)/8, $0x0000000000000000
-DATA ·crcleconskp+56(SB)/8, $0x0000000163cd6124 // R5
-DATA ·crcleconskp+64(SB)/8, $0x0000000000000000
-DATA ·crcleconskp+72(SB)/8, $0x00000001F7011641 // u'
-DATA ·crcleconskp+80(SB)/8, $0x0000000000000000
-DATA ·crcleconskp+88(SB)/8, $0x00000001DB710641 // P'(x) << 1
-
-GLOBL ·crcleconskp(SB), RODATA, $144
-
-// Castagnoli polynomial constants
-DATA ·crccleconskp+0(SB)/8, $0x0F0E0D0C0B0A0908 // LE-to-BE mask
-DATA ·crccleconskp+8(SB)/8, $0x0706050403020100
-DATA ·crccleconskp+16(SB)/8, $0x000000009e4addf8 // R2
-DATA ·crccleconskp+24(SB)/8, $0x00000000740eef02 // R1
-DATA ·crccleconskp+32(SB)/8, $0x000000014cd00bd6 // R4
-DATA ·crccleconskp+40(SB)/8, $0x00000000f20c0dfe // R3
-DATA ·crccleconskp+48(SB)/8, $0x0000000000000000
-DATA ·crccleconskp+56(SB)/8, $0x00000000dd45aab8 // R5
-DATA ·crccleconskp+64(SB)/8, $0x0000000000000000
-DATA ·crccleconskp+72(SB)/8, $0x00000000dea713f1 // u'
-DATA ·crccleconskp+80(SB)/8, $0x0000000000000000
-DATA ·crccleconskp+88(SB)/8, $0x0000000105ec76f0 // P'(x) << 1
-
-GLOBL ·crccleconskp(SB), RODATA, $144
-
-// func hasVectorFacility() bool
-TEXT ·hasVectorFacility(SB), NOSPLIT, $24-1
- MOVD $x-24(SP), R1
- XC $24, 0(R1), 0(R1) // clear the storage
- MOVD $2, R0 // R0 is the number of double words stored -1
- WORD $0xB2B01000 // STFLE 0(R1)
- XOR R0, R0 // reset the value of R0
- MOVBZ z-8(SP), R1
- AND $0x40, R1
- BEQ novector
-
-vectorinstalled:
- // check if the vector instruction has been enabled
- VLEIB $0, $0xF, V16
- VLGVB $0, V16, R1
- CMPBNE R1, $0xF, novector
- MOVB $1, ret+0(FP) // have vx
- RET
-
-novector:
- MOVB $0, ret+0(FP) // no vx
- RET
-
-// The CRC-32 function(s) use these calling conventions:
-//
-// Parameters:
-//
-// R2: Initial CRC value, typically ~0; and final CRC (return) value.
-// R3: Input buffer pointer, performance might be improved if the
-// buffer is on a doubleword boundary.
-// R4: Length of the buffer, must be 64 bytes or greater.
-//
-// Register usage:
-//
-// R5: CRC-32 constant pool base pointer.
-// V0: Initial CRC value and intermediate constants and results.
-// V1..V4: Data for CRC computation.
-// V5..V8: Next data chunks that are fetched from the input buffer.
-//
-// V9..V14: CRC-32 constants.
-
-// func vectorizedIEEE(crc uint32, p []byte) uint32
-TEXT ·vectorizedIEEE(SB), NOSPLIT, $0
- MOVWZ crc+0(FP), R2 // R2 stores the CRC value
- MOVD p+8(FP), R3 // data pointer
- MOVD p_len+16(FP), R4 // len(p)
-
- MOVD $·crcleconskp(SB), R5
- BR vectorizedBody<>(SB)
-
-// func vectorizedCastagnoli(crc uint32, p []byte) uint32
-TEXT ·vectorizedCastagnoli(SB), NOSPLIT, $0
- MOVWZ crc+0(FP), R2 // R2 stores the CRC value
- MOVD p+8(FP), R3 // data pointer
- MOVD p_len+16(FP), R4 // len(p)
-
-	// R5: CRC-32 constant pool base pointer; the constants are used to reduce the CRC
- MOVD $·crccleconskp(SB), R5
- BR vectorizedBody<>(SB)
-
-TEXT vectorizedBody<>(SB), NOSPLIT, $0
- XOR $0xffffffff, R2 // NOTW R2
- VLM 0(R5), CONST_PERM_LE2BE, CONST_CRC_POLY
-
- // Load the initial CRC value into the rightmost word of V0
- VZERO V0
- VLVGF $3, R2, V0
-
-	// Crash if the input size is less than 64 bytes.
- CMP R4, $64
- BLT crash
-
- // Load a 64-byte data chunk and XOR with CRC
- VLM 0(R3), V1, V4 // 64-bytes into V1..V4
-
- // Reflect the data if the CRC operation is in the bit-reflected domain
- VPERM V1, V1, CONST_PERM_LE2BE, V1
- VPERM V2, V2, CONST_PERM_LE2BE, V2
- VPERM V3, V3, CONST_PERM_LE2BE, V3
- VPERM V4, V4, CONST_PERM_LE2BE, V4
-
- VX V0, V1, V1 // V1 ^= CRC
- ADD $64, R3 // BUF = BUF + 64
- ADD $(-64), R4
-
- // Check remaining buffer size and jump to proper folding method
- CMP R4, $64
- BLT less_than_64bytes
-
-fold_64bytes_loop:
- // Load the next 64-byte data chunk into V5 to V8
- VLM 0(R3), V5, V8
- VPERM V5, V5, CONST_PERM_LE2BE, V5
- VPERM V6, V6, CONST_PERM_LE2BE, V6
- VPERM V7, V7, CONST_PERM_LE2BE, V7
- VPERM V8, V8, CONST_PERM_LE2BE, V8
-
- // Perform a GF(2) multiplication of the doublewords in V1 with
- // the reduction constants in V0. The intermediate result is
- // then folded (accumulated) with the next data chunk in V5 and
- // stored in V1. Repeat this step for the register contents
- // in V2, V3, and V4 respectively.
-
- VGFMAG CONST_R2R1, V1, V5, V1
- VGFMAG CONST_R2R1, V2, V6, V2
- VGFMAG CONST_R2R1, V3, V7, V3
- VGFMAG CONST_R2R1, V4, V8, V4
-
- // Adjust buffer pointer and length for next loop
- ADD $64, R3 // BUF = BUF + 64
- ADD $(-64), R4 // LEN = LEN - 64
-
- CMP R4, $64
- BGE fold_64bytes_loop
-
-less_than_64bytes:
- // Fold V1 to V4 into a single 128-bit value in V1
- VGFMAG CONST_R4R3, V1, V2, V1
- VGFMAG CONST_R4R3, V1, V3, V1
- VGFMAG CONST_R4R3, V1, V4, V1
-
- // Check whether to continue with 64-bit folding
- CMP R4, $16
- BLT final_fold
-
-fold_16bytes_loop:
- VL 0(R3), V2 // Load next data chunk
- VPERM V2, V2, CONST_PERM_LE2BE, V2
-
- VGFMAG CONST_R4R3, V1, V2, V1 // Fold next data chunk
-
- // Adjust buffer pointer and size for folding next data chunk
- ADD $16, R3
- ADD $-16, R4
-
- // Process remaining data chunks
- CMP R4, $16
- BGE fold_16bytes_loop
-
-final_fold:
- VLEIB $7, $0x40, V9
- VSRLB V9, CONST_R4R3, V0
- VLEIG $0, $1, V0
-
- VGFMG V0, V1, V1
-
- VLEIB $7, $0x20, V9 // Shift by words
- VSRLB V9, V1, V2 // Store remaining bits in V2
- VUPLLF V1, V1 // Split rightmost doubleword
- VGFMAG CONST_R5, V1, V2, V1 // V1 = (V1 * R5) XOR V2
-
-	// The input values to the Barrett reduction are the degree-63 polynomial
-	// in V1 (R(x)), the degree-32 generator polynomial, and the reduction
-	// constant u. The Barrett reduction result is the CRC value of R(x) mod
-	// P(x).
-	//
-	// The Barrett reduction algorithm is defined as:
-	//
-	//    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
-	//    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
-	//    3. C(x) = R(x) XOR T2(x) mod x^32
-	//
-	// Note: To compensate for the division by x^32, use the vector unpack
-	//       instruction to move the leftmost word into the leftmost doubleword
-	//       of the vector register. The rightmost doubleword is multiplied
-	//       with zero so it does not contribute to the intermediate results.
-
- // T1(x) = floor( R(x) / x^32 ) GF2MUL u
- VUPLLF V1, V2
- VGFMG CONST_RU_POLY, V2, V2
-
- // Compute the GF(2) product of the CRC polynomial in VO with T1(x) in
- // V2 and XOR the intermediate result, T2(x), with the value in V1.
- // The final result is in the rightmost word of V2.
-
- VUPLLF V2, V2
- VGFMAG CONST_CRC_POLY, V2, V1, V2
-
-done:
- VLGVF $2, V2, R2
- XOR $0xffffffff, R2 // NOTW R2
-	MOVWZ R2, ret+32(FP)
- RET
-
-crash:
-	MOVD $0, (R0) // input size is less than 64 bytes
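
VECTOR GALOIS FIELD MULTIPLY (like PCLMULQDQ on amd64) is a carry-less multiply: a polynomial product over GF(2) where additions are XORs. A software model of the primitive the folding steps above are built on:

package main

import "fmt"

// clmul64 models a 64x64 -> 128 bit carry-less multiply: the GF(2)[x]
// polynomial product computed in hardware by PCLMULQDQ / VGFMG.
func clmul64(a, b uint64) (hi, lo uint64) {
	for i := uint(0); i < 64; i++ {
		if b&(1<<i) != 0 {
			lo ^= a << i
			if i > 0 {
				hi ^= a >> (64 - i)
			}
		}
	}
	return
}

func main() {
	// (x^3 + x + 1) * (x + 1) = x^4 + x^3 + x^2 + 1 in GF(2)[x]:
	_, lo := clmul64(0b1011, 0b11)
	fmt.Printf("%b\n", lo) // prints 11101
}
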
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore b/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore
deleted file mode 100644
index 37721f69f4e..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/.gitignore
+++ /dev/null
@@ -1,4 +0,0 @@
-build/*
-.idea/
-.project
-examples/**/build
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml b/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml
deleted file mode 100644
index 4f8b33ca5da..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/.travis.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-language: go
-
-go:
- - 1.6
- - 1.7
- - tip
-
-install:
- - go get -d -t ./...
- - go get -u github.com/golang/lint/...
-script:
- - make test vet lint bench
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE b/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE
deleted file mode 100644
index 66fff971dea..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/LICENSE
+++ /dev/null
@@ -1,22 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2016 The OpenTracing Authors
-Copyright (c) 2016 Bas van Beek
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile b/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile
deleted file mode 100644
index 3ca00422324..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/Makefile
+++ /dev/null
@@ -1,23 +0,0 @@
-
-.DEFAULT_GOAL := test
-
-.PHONY: test
-test:
- go test -v -race -cover ./...
-
-.PHONY: bench
-bench:
- go test -v -run - -bench . -benchmem ./...
-
-.PHONY: lint
-lint:
- # Ignore grep's exit code since no match returns 1.
- -golint ./... | grep --invert-match -E '^.*\.pb\.go'
- @
- @! (golint ./... |grep --invert-match -E '^.*\.pb\.go' | read dummy)
-
-.PHONY: vet
-vet:
- go vet ./...
-
-.PHONY: example
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md b/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md
deleted file mode 100644
index 220e32b9e0c..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/README.md
+++ /dev/null
@@ -1,29 +0,0 @@
-# zipkin-go-opentracing
-
-[Travis CI](https://travis-ci.org/openzipkin/zipkin-go-opentracing)
-[CircleCI](https://circleci.com/gh/openzipkin/zipkin-go-opentracing)
-[GoDoc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing)
-[Go Report Card](https://goreportcard.com/report/github.com/openzipkin/zipkin-go-opentracing)
-
-
-[OpenTracing](http://opentracing.io) Tracer implementation for [Zipkin](http://zipkin.io) in Go.
-
-### Notes
-
-This package is a low level tracing "driver" to allow OpenTracing API consumers
-to use Zipkin as their tracing backend. For details on how to work with spans
-and traces we suggest looking at the documentation and README from the
-[OpenTracing API](https://github.com/opentracing/opentracing-go).
-
-For developers interested in adding Zipkin tracing to their Go services we
-suggest looking at [Go kit](https://gokit.io) which is an excellent toolkit to
-instrument your distributed system with Zipkin and much more with clean
-separation of domains like transport, middleware / instrumentation and
-business logic.
-
-### Examples
-
-For more information on zipkin-go-opentracing, please see the
-[examples](https://github.com/openzipkin/zipkin-go-opentracing/tree/master/examples)
-directory for usage examples as well as documentation at
-[go doc](https://godoc.org/github.com/openzipkin/zipkin-go-opentracing).
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/constants.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/constants.go
deleted file mode 100644
index 45fee5a9c7c..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/constants.go
+++ /dev/null
@@ -1,18 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package scribe
-
-import (
- "bytes"
- "fmt"
- "github.com/apache/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-func init() {
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/scribe.go
deleted file mode 100644
index c0de88e436f..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/scribe.go
+++ /dev/null
@@ -1,431 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package scribe
-
-import (
- "bytes"
- "fmt"
- "github.com/apache/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-type Scribe interface {
- // Parameters:
- // - Messages
- Log(messages []*LogEntry) (r ResultCode, err error)
-}
-
-type ScribeClient struct {
- Transport thrift.TTransport
- ProtocolFactory thrift.TProtocolFactory
- InputProtocol thrift.TProtocol
- OutputProtocol thrift.TProtocol
- SeqId int32
-}
-
-func NewScribeClientFactory(t thrift.TTransport, f thrift.TProtocolFactory) *ScribeClient {
- return &ScribeClient{Transport: t,
- ProtocolFactory: f,
- InputProtocol: f.GetProtocol(t),
- OutputProtocol: f.GetProtocol(t),
- SeqId: 0,
- }
-}
-
-func NewScribeClientProtocol(t thrift.TTransport, iprot thrift.TProtocol, oprot thrift.TProtocol) *ScribeClient {
- return &ScribeClient{Transport: t,
- ProtocolFactory: nil,
- InputProtocol: iprot,
- OutputProtocol: oprot,
- SeqId: 0,
- }
-}
-
-// Parameters:
-// - Messages
-func (p *ScribeClient) Log(messages []*LogEntry) (r ResultCode, err error) {
- if err = p.sendLog(messages); err != nil {
- return
- }
- return p.recvLog()
-}
-
-func (p *ScribeClient) sendLog(messages []*LogEntry) (err error) {
- oprot := p.OutputProtocol
- if oprot == nil {
- oprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.OutputProtocol = oprot
- }
- p.SeqId++
- if err = oprot.WriteMessageBegin("Log", thrift.CALL, p.SeqId); err != nil {
- return
- }
- args := ScribeLogArgs{
- Messages: messages,
- }
- if err = args.Write(oprot); err != nil {
- return
- }
- if err = oprot.WriteMessageEnd(); err != nil {
- return
- }
- return oprot.Flush()
-}
-
-func (p *ScribeClient) recvLog() (value ResultCode, err error) {
- iprot := p.InputProtocol
- if iprot == nil {
- iprot = p.ProtocolFactory.GetProtocol(p.Transport)
- p.InputProtocol = iprot
- }
- method, mTypeId, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return
- }
- if method != "Log" {
- err = thrift.NewTApplicationException(thrift.WRONG_METHOD_NAME, "Log failed: wrong method name")
- return
- }
- if p.SeqId != seqId {
- err = thrift.NewTApplicationException(thrift.BAD_SEQUENCE_ID, "Log failed: out of sequence response")
- return
- }
- if mTypeId == thrift.EXCEPTION {
- error0 := thrift.NewTApplicationException(thrift.UNKNOWN_APPLICATION_EXCEPTION, "Unknown Exception")
- var error1 error
- error1, err = error0.Read(iprot)
- if err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- err = error1
- return
- }
- if mTypeId != thrift.REPLY {
- err = thrift.NewTApplicationException(thrift.INVALID_MESSAGE_TYPE_EXCEPTION, "Log failed: invalid message type")
- return
- }
- result := ScribeLogResult{}
- if err = result.Read(iprot); err != nil {
- return
- }
- if err = iprot.ReadMessageEnd(); err != nil {
- return
- }
- value = result.GetSuccess()
- return
-}
-
-type ScribeProcessor struct {
- processorMap map[string]thrift.TProcessorFunction
- handler Scribe
-}
-
-func (p *ScribeProcessor) AddToProcessorMap(key string, processor thrift.TProcessorFunction) {
- p.processorMap[key] = processor
-}
-
-func (p *ScribeProcessor) GetProcessorFunction(key string) (processor thrift.TProcessorFunction, ok bool) {
- processor, ok = p.processorMap[key]
- return processor, ok
-}
-
-func (p *ScribeProcessor) ProcessorMap() map[string]thrift.TProcessorFunction {
- return p.processorMap
-}
-
-func NewScribeProcessor(handler Scribe) *ScribeProcessor {
-
- self2 := &ScribeProcessor{handler: handler, processorMap: make(map[string]thrift.TProcessorFunction)}
- self2.processorMap["Log"] = &scribeProcessorLog{handler: handler}
- return self2
-}
-
-func (p *ScribeProcessor) Process(iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- name, _, seqId, err := iprot.ReadMessageBegin()
- if err != nil {
- return false, err
- }
- if processor, ok := p.GetProcessorFunction(name); ok {
- return processor.Process(seqId, iprot, oprot)
- }
- iprot.Skip(thrift.STRUCT)
- iprot.ReadMessageEnd()
- x3 := thrift.NewTApplicationException(thrift.UNKNOWN_METHOD, "Unknown function "+name)
- oprot.WriteMessageBegin(name, thrift.EXCEPTION, seqId)
- x3.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, x3
-
-}
-
-type scribeProcessorLog struct {
- handler Scribe
-}
-
-func (p *scribeProcessorLog) Process(seqId int32, iprot, oprot thrift.TProtocol) (success bool, err thrift.TException) {
- args := ScribeLogArgs{}
- if err = args.Read(iprot); err != nil {
- iprot.ReadMessageEnd()
- x := thrift.NewTApplicationException(thrift.PROTOCOL_ERROR, err.Error())
- oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return false, err
- }
-
- iprot.ReadMessageEnd()
- result := ScribeLogResult{}
- var retval ResultCode
- var err2 error
- if retval, err2 = p.handler.Log(args.Messages); err2 != nil {
- x := thrift.NewTApplicationException(thrift.INTERNAL_ERROR, "Internal error processing Log: "+err2.Error())
- oprot.WriteMessageBegin("Log", thrift.EXCEPTION, seqId)
- x.Write(oprot)
- oprot.WriteMessageEnd()
- oprot.Flush()
- return true, err2
- } else {
- result.Success = &retval
- }
- if err2 = oprot.WriteMessageBegin("Log", thrift.REPLY, seqId); err2 != nil {
- err = err2
- }
- if err2 = result.Write(oprot); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.WriteMessageEnd(); err == nil && err2 != nil {
- err = err2
- }
- if err2 = oprot.Flush(); err == nil && err2 != nil {
- err = err2
- }
- if err != nil {
- return
- }
- return true, err
-}
-
-// HELPER FUNCTIONS AND STRUCTURES
-
-// Attributes:
-// - Messages
-type ScribeLogArgs struct {
- Messages []*LogEntry `thrift:"messages,1" json:"messages"`
-}
-
-func NewScribeLogArgs() *ScribeLogArgs {
- return &ScribeLogArgs{}
-}
-
-func (p *ScribeLogArgs) GetMessages() []*LogEntry {
- return p.Messages
-}
-func (p *ScribeLogArgs) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ScribeLogArgs) readField1(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*LogEntry, 0, size)
- p.Messages = tSlice
- for i := 0; i < size; i++ {
- _elem4 := &LogEntry{}
- if err := _elem4.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem4), err)
- }
- p.Messages = append(p.Messages, _elem4)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *ScribeLogArgs) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Log_args"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ScribeLogArgs) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("messages", thrift.LIST, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:messages: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Messages)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Messages {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:messages: ", p), err)
- }
- return err
-}
-
-func (p *ScribeLogArgs) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ScribeLogArgs(%+v)", *p)
-}
-
-// Attributes:
-// - Success
-type ScribeLogResult struct {
- Success *ResultCode `thrift:"success,0" json:"success,omitempty"`
-}
-
-func NewScribeLogResult() *ScribeLogResult {
- return &ScribeLogResult{}
-}
-
-var ScribeLogResult_Success_DEFAULT ResultCode
-
-func (p *ScribeLogResult) GetSuccess() ResultCode {
- if !p.IsSetSuccess() {
- return ScribeLogResult_Success_DEFAULT
- }
- return *p.Success
-}
-func (p *ScribeLogResult) IsSetSuccess() bool {
- return p.Success != nil
-}
-
-func (p *ScribeLogResult) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 0:
- if err := p.readField0(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *ScribeLogResult) readField0(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 0: ", err)
- } else {
- temp := ResultCode(v)
- p.Success = &temp
- }
- return nil
-}
-
-func (p *ScribeLogResult) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Log_result"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField0(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *ScribeLogResult) writeField0(oprot thrift.TProtocol) (err error) {
- if p.IsSetSuccess() {
- if err := oprot.WriteFieldBegin("success", thrift.I32, 0); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 0:success: ", p), err)
- }
- if err := oprot.WriteI32(int32(*p.Success)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.success (0) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 0:success: ", p), err)
- }
- }
- return err
-}
-
-func (p *ScribeLogResult) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("ScribeLogResult(%+v)", *p)
-}
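
For context, the generated client deleted above is driven like any Thrift 0.9.x client. A hypothetical wiring sketch — the collector address, framed transport, and binary protocol are assumptions, and the import of the underscore-prefixed vendored path is shown only for illustration:

package main

import (
	"fmt"
	"log"

	"github.com/apache/thrift/lib/go/thrift"

	// Illustrative import of the generated package deleted above.
	"github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe"
)

func main() {
	sock, err := thrift.NewTSocket("localhost:9410") // assumed collector address
	if err != nil {
		log.Fatal(err)
	}
	trans := thrift.NewTFramedTransport(sock)
	client := scribe.NewScribeClientFactory(trans, thrift.NewTBinaryProtocolFactoryDefault())
	if err := trans.Open(); err != nil {
		log.Fatal(err)
	}
	defer trans.Close()

	// Log sends a batch of entries; the server answers ResultCode_OK or
	// ResultCode_TRY_LATER for backpressure.
	rc, err := client.Log([]*scribe.LogEntry{
		{Category: "zipkin", Message: "base64-encoded span bytes"},
	})
	fmt.Println(rc, err)
}
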
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/ttypes.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/ttypes.go
deleted file mode 100644
index 1e8bed6d359..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe/ttypes.go
+++ /dev/null
@@ -1,185 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package scribe
-
-import (
- "bytes"
- "fmt"
- "github.com/apache/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-type ResultCode int64
-
-const (
- ResultCode_OK ResultCode = 0
- ResultCode_TRY_LATER ResultCode = 1
-)
-
-func (p ResultCode) String() string {
- switch p {
- case ResultCode_OK:
- return "OK"
- case ResultCode_TRY_LATER:
- return "TRY_LATER"
- }
- return ""
-}
-
-func ResultCodeFromString(s string) (ResultCode, error) {
- switch s {
- case "OK":
- return ResultCode_OK, nil
- case "TRY_LATER":
- return ResultCode_TRY_LATER, nil
- }
- return ResultCode(0), fmt.Errorf("not a valid ResultCode string")
-}
-
-func ResultCodePtr(v ResultCode) *ResultCode { return &v }
-
-func (p ResultCode) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *ResultCode) UnmarshalText(text []byte) error {
- q, err := ResultCodeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
-
-// Attributes:
-// - Category
-// - Message
-type LogEntry struct {
- Category string `thrift:"category,1" json:"category"`
- Message string `thrift:"message,2" json:"message"`
-}
-
-func NewLogEntry() *LogEntry {
- return &LogEntry{}
-}
-
-func (p *LogEntry) GetCategory() string {
- return p.Category
-}
-
-func (p *LogEntry) GetMessage() string {
- return p.Message
-}
-func (p *LogEntry) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *LogEntry) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Category = v
- }
- return nil
-}
-
-func (p *LogEntry) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Message = v
- }
- return nil
-}
-
-func (p *LogEntry) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("LogEntry"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *LogEntry) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("category", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:category: ", p), err)
- }
- if err := oprot.WriteString(string(p.Category)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.category (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:category: ", p), err)
- }
- return err
-}
-
-func (p *LogEntry) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("message", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:message: ", p), err)
- }
- if err := oprot.WriteString(string(p.Message)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.message (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:message: ", p), err)
- }
- return err
-}
-
-func (p *LogEntry) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("LogEntry(%+v)", *p)
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/constants.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/constants.go
deleted file mode 100644
index 1ee50641719..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/constants.go
+++ /dev/null
@@ -1,40 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
- "bytes"
- "fmt"
- "github.com/apache/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-const CLIENT_SEND = "cs"
-const CLIENT_RECV = "cr"
-const SERVER_SEND = "ss"
-const SERVER_RECV = "sr"
-const WIRE_SEND = "ws"
-const WIRE_RECV = "wr"
-const CLIENT_SEND_FRAGMENT = "csf"
-const CLIENT_RECV_FRAGMENT = "crf"
-const SERVER_SEND_FRAGMENT = "ssf"
-const SERVER_RECV_FRAGMENT = "srf"
-const HTTP_HOST = "http.host"
-const HTTP_METHOD = "http.method"
-const HTTP_PATH = "http.path"
-const HTTP_URL = "http.url"
-const HTTP_STATUS_CODE = "http.status_code"
-const HTTP_REQUEST_SIZE = "http.request.size"
-const HTTP_RESPONSE_SIZE = "http.response.size"
-const LOCAL_COMPONENT = "lc"
-const ERROR = "error"
-const CLIENT_ADDR = "ca"
-const SERVER_ADDR = "sa"
-
-func init() {
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/ttypes.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/ttypes.go
deleted file mode 100644
index 2852d368cc7..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore/ttypes.go
+++ /dev/null
@@ -1,1272 +0,0 @@
-// Autogenerated by Thrift Compiler (0.9.3)
-// DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
-
-package zipkincore
-
-import (
- "bytes"
- "fmt"
- "github.com/apache/thrift/lib/go/thrift"
-)
-
-// (needed to ensure safety because of naive import list construction.)
-var _ = thrift.ZERO
-var _ = fmt.Printf
-var _ = bytes.Equal
-
-var GoUnusedProtection__ int
-
-// A subset of thrift base types, except BYTES.
-type AnnotationType int64
-
-const (
- AnnotationType_BOOL AnnotationType = 0
- AnnotationType_BYTES AnnotationType = 1
- AnnotationType_I16 AnnotationType = 2
- AnnotationType_I32 AnnotationType = 3
- AnnotationType_I64 AnnotationType = 4
- AnnotationType_DOUBLE AnnotationType = 5
- AnnotationType_STRING AnnotationType = 6
-)
-
-func (p AnnotationType) String() string {
- switch p {
- case AnnotationType_BOOL:
- return "BOOL"
- case AnnotationType_BYTES:
- return "BYTES"
- case AnnotationType_I16:
- return "I16"
- case AnnotationType_I32:
- return "I32"
- case AnnotationType_I64:
- return "I64"
- case AnnotationType_DOUBLE:
- return "DOUBLE"
- case AnnotationType_STRING:
- return "STRING"
- }
- return ""
-}
-
-func AnnotationTypeFromString(s string) (AnnotationType, error) {
- switch s {
- case "BOOL":
- return AnnotationType_BOOL, nil
- case "BYTES":
- return AnnotationType_BYTES, nil
- case "I16":
- return AnnotationType_I16, nil
- case "I32":
- return AnnotationType_I32, nil
- case "I64":
- return AnnotationType_I64, nil
- case "DOUBLE":
- return AnnotationType_DOUBLE, nil
- case "STRING":
- return AnnotationType_STRING, nil
- }
- return AnnotationType(0), fmt.Errorf("not a valid AnnotationType string")
-}
-
-func AnnotationTypePtr(v AnnotationType) *AnnotationType { return &v }
-
-func (p AnnotationType) MarshalText() ([]byte, error) {
- return []byte(p.String()), nil
-}
-
-func (p *AnnotationType) UnmarshalText(text []byte) error {
- q, err := AnnotationTypeFromString(string(text))
- if err != nil {
- return err
- }
- *p = q
- return nil
-}
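Worth noting as this generated file goes away: AnnotationType implements encoding.TextMarshaler and encoding.TextUnmarshaler above, so the enum round-trips through its string name (and therefore through encoding/json). A minimal sketch of that round-trip; it assumes a tree that still vendors this package:

    package main

    import (
        "fmt"

        "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    )

    func main() {
        var at zipkincore.AnnotationType
        if err := at.UnmarshalText([]byte("I64")); err != nil {
            panic(err)
        }
        b, _ := at.MarshalText() // the MarshalText above never returns a non-nil error
        fmt.Println(at == zipkincore.AnnotationType_I64, string(b)) // true I64
    }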
-
-// Indicates the network context of a service recording an annotation with two
-// exceptions.
-//
-// When applied to a BinaryAnnotation whose key is CLIENT_ADDR or SERVER_ADDR,
-// the endpoint indicates the source or destination of an RPC. This exception
-// allows zipkin to display network context of uninstrumented services, or
-// clients such as web browsers.
-//
-// Attributes:
-// - Ipv4: IPv4 host address packed into 4 bytes.
-//
-// E.g. for the IP 1.2.3.4, it would be (1 << 24) | (2 << 16) | (3 << 8) | 4
-// - Port: IPv4 port or 0, if unknown.
-//
-// Note: this is to be treated as an unsigned integer, so watch for negatives.
-// - ServiceName: Classifier of a source or destination in lowercase, such as "zipkin-web".
-//
-// This is the primary parameter for trace lookup, so it should be as intuitive
-// as possible, for example matching names in service discovery.
-//
-// Conventionally, when the service name isn't known, service_name = "unknown".
-// However, it is also permissible to set service_name = "" (empty string).
-// The difference in the latter usage is that the span will not be queryable
-// by service name unless more information is added to the span with non-empty
-// service name, e.g. an additional annotation from the server.
-//
-// In particular, clients may not have a reliable service name at ingest. One
-// approach is to set service_name to "" at ingest, and later assign a
-// better label based on binary annotations, such as user agent.
-// - Ipv6: IPv6 host address packed into 16 bytes, e.g. Inet6Address.getBytes()
-type Endpoint struct {
- Ipv4 int32 `thrift:"ipv4,1" json:"ipv4"`
- Port int16 `thrift:"port,2" json:"port"`
- ServiceName string `thrift:"service_name,3" json:"service_name"`
- Ipv6 []byte `thrift:"ipv6,4" json:"ipv6,omitempty"`
-}
-
-func NewEndpoint() *Endpoint {
- return &Endpoint{}
-}
-
-func (p *Endpoint) GetIpv4() int32 {
- return p.Ipv4
-}
-
-func (p *Endpoint) GetPort() int16 {
- return p.Port
-}
-
-func (p *Endpoint) GetServiceName() string {
- return p.ServiceName
-}
-
-var Endpoint_Ipv6_DEFAULT []byte
-
-func (p *Endpoint) GetIpv6() []byte {
- return p.Ipv6
-}
-func (p *Endpoint) IsSetIpv6() bool {
- return p.Ipv6 != nil
-}
-
-func (p *Endpoint) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Endpoint) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Ipv4 = v
- }
- return nil
-}
-
-func (p *Endpoint) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI16(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Port = v
- }
- return nil
-}
-
-func (p *Endpoint) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.ServiceName = v
- }
- return nil
-}
-
-func (p *Endpoint) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.Ipv6 = v
- }
- return nil
-}
-
-func (p *Endpoint) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Endpoint"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Endpoint) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("ipv4", thrift.I32, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:ipv4: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.Ipv4)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv4 (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:ipv4: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("port", thrift.I16, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:port: ", p), err)
- }
- if err := oprot.WriteI16(int16(p.Port)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.port (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:port: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("service_name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:service_name: ", p), err)
- }
- if err := oprot.WriteString(string(p.ServiceName)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.service_name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:service_name: ", p), err)
- }
- return err
-}
-
-func (p *Endpoint) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetIpv6() {
- if err := oprot.WriteFieldBegin("ipv6", thrift.STRING, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:ipv6: ", p), err)
- }
- if err := oprot.WriteBinary(p.Ipv6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.ipv6 (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:ipv6: ", p), err)
- }
- }
- return err
-}
-
-func (p *Endpoint) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Endpoint(%+v)", *p)
-}
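To make the Ipv4 packing described in the doc comment concrete: the dotted quad goes big-endian into the i32 field, so addresses above 127.x.x.x come out negative, as the port note warns. A small sketch; packIPv4 is our hypothetical helper, not part of the vendored code:

    package main

    import (
        "fmt"
        "net"
    )

    // packIPv4 packs a dotted-quad address using the Endpoint.Ipv4 convention:
    // (1 << 24) | (2 << 16) | (3 << 8) | 4 for "1.2.3.4".
    func packIPv4(s string) (int32, error) {
        ip := net.ParseIP(s).To4()
        if ip == nil {
            return 0, fmt.Errorf("not an IPv4 address: %q", s)
        }
        return int32(uint32(ip[0])<<24 | uint32(ip[1])<<16 | uint32(ip[2])<<8 | uint32(ip[3])), nil
    }

    func main() {
        v, _ := packIPv4("1.2.3.4")
        fmt.Printf("0x%08x\n", v) // 0x01020304
    }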
-
-// Associates an event that explains latency with a timestamp.
-//
-// Unlike log statements, annotations are often codes: for example "sr".
-//
-// Attributes:
-// - Timestamp: Microseconds from epoch.
-//
-// This value should use the most precise value possible. For example,
-// gettimeofday or syncing nanoTime against a tick of currentTimeMillis.
-// - Value: Usually a short tag indicating an event, like "sr" or "finagle.retry".
-// - Host: The host that recorded the value, primarily for query by service name.
-type Annotation struct {
- Timestamp int64 `thrift:"timestamp,1" json:"timestamp"`
- Value string `thrift:"value,2" json:"value"`
- Host *Endpoint `thrift:"host,3" json:"host,omitempty"`
-}
-
-func NewAnnotation() *Annotation {
- return &Annotation{}
-}
-
-func (p *Annotation) GetTimestamp() int64 {
- return p.Timestamp
-}
-
-func (p *Annotation) GetValue() string {
- return p.Value
-}
-
-var Annotation_Host_DEFAULT *Endpoint
-
-func (p *Annotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return Annotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *Annotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *Annotation) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Annotation) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Timestamp = v
- }
- return nil
-}
-
-func (p *Annotation) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *Annotation) readField3(iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *Annotation) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Annotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Annotation) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:timestamp: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteString(string(p.Value)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *Annotation) writeField3(oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:host: ", p), err)
- }
- if err := p.Host.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *Annotation) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Annotation(%+v)", *p)
-}
-
-// Binary annotations are tags applied to a Span to give it context. For
-// example, a binary annotation of HTTP_PATH ("http.path") could be the path
-// to a resource in an RPC call.
-//
-// Binary annotations of type STRING are always queryable, though this is more
-// a historical implementation detail than a structural concern.
-//
-// Binary annotations can repeat, and vary on the host. Similar to Annotation,
-// the host indicates who logged the event. This allows you to tell the
-// difference between the client and server side of the same key. For example,
-// the key "http.path" might be different on the client and server side due to
-// rewriting, like "/api/v1/myresource" vs "/myresource". Via the host field,
-// you can see the different points of view, which often help in debugging.
-//
-// Attributes:
-// - Key: Name used to lookup spans, such as "http.path" or "finagle.version".
-// - Value: Serialized thrift bytes, in TBinaryProtocol format.
-//
-// For legacy reasons, byte order is big-endian. See THRIFT-3217.
-// - AnnotationType: The thrift type of value, most often STRING.
-//
-// annotation_type shouldn't vary for the same key.
-// - Host: The host that recorded the value, allowing query by service name or address.
-//
-// There are two exceptions: when key is "ca" or "sa", this is the source or
-// destination of an RPC. This exception allows zipkin to display network
-// context of uninstrumented services, such as browsers or databases.
-type BinaryAnnotation struct {
- Key string `thrift:"key,1" json:"key"`
- Value []byte `thrift:"value,2" json:"value"`
- AnnotationType AnnotationType `thrift:"annotation_type,3" json:"annotation_type"`
- Host *Endpoint `thrift:"host,4" json:"host,omitempty"`
-}
-
-func NewBinaryAnnotation() *BinaryAnnotation {
- return &BinaryAnnotation{}
-}
-
-func (p *BinaryAnnotation) GetKey() string {
- return p.Key
-}
-
-func (p *BinaryAnnotation) GetValue() []byte {
- return p.Value
-}
-
-func (p *BinaryAnnotation) GetAnnotationType() AnnotationType {
- return p.AnnotationType
-}
-
-var BinaryAnnotation_Host_DEFAULT *Endpoint
-
-func (p *BinaryAnnotation) GetHost() *Endpoint {
- if !p.IsSetHost() {
- return BinaryAnnotation_Host_DEFAULT
- }
- return p.Host
-}
-func (p *BinaryAnnotation) IsSetHost() bool {
- return p.Host != nil
-}
-
-func (p *BinaryAnnotation) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 2:
- if err := p.readField2(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.Key = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField2(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBinary(); err != nil {
- return thrift.PrependError("error reading field 2: ", err)
- } else {
- p.Value = v
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI32(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- temp := AnnotationType(v)
- p.AnnotationType = temp
- }
- return nil
-}
-
-func (p *BinaryAnnotation) readField4(iprot thrift.TProtocol) error {
- p.Host = &Endpoint{}
- if err := p.Host.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", p.Host), err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("BinaryAnnotation"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField2(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *BinaryAnnotation) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("key", thrift.STRING, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:key: ", p), err)
- }
- if err := oprot.WriteString(string(p.Key)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.key (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:key: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField2(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("value", thrift.STRING, 2); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 2:value: ", p), err)
- }
- if err := oprot.WriteBinary(p.Value); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.value (2) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 2:value: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("annotation_type", thrift.I32, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:annotation_type: ", p), err)
- }
- if err := oprot.WriteI32(int32(p.AnnotationType)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.annotation_type (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:annotation_type: ", p), err)
- }
- return err
-}
-
-func (p *BinaryAnnotation) writeField4(oprot thrift.TProtocol) (err error) {
- if p.IsSetHost() {
- if err := oprot.WriteFieldBegin("host", thrift.STRUCT, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:host: ", p), err)
- }
- if err := p.Host.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", p.Host), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:host: ", p), err)
- }
- }
- return err
-}
-
-func (p *BinaryAnnotation) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("BinaryAnnotation(%+v)", *p)
-}
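For reference, a hedged sketch of how a tag was typically constructed against this struct before the removal; the service name and path are illustrative, and HTTP_PATH comes from the constants.go deleted above:

    package main

    import (
        "fmt"

        "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    )

    func main() {
        host := zipkincore.NewEndpoint()
        host.ServiceName = "zipkin-web" // illustrative
        ba := &zipkincore.BinaryAnnotation{
            Key:            zipkincore.HTTP_PATH, // "http.path"
            Value:          []byte("/api/v1/myresource"),
            AnnotationType: zipkincore.AnnotationType_STRING,
            Host:           host,
        }
        fmt.Println(ba)
    }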
-
-// A trace is a series of spans (often RPC calls) which form a latency tree.
-//
-// Spans are usually created by instrumentation in RPC clients or servers, but
-// can also represent in-process activity. Annotations in spans are similar to
-// log statements, and are sometimes created directly by application developers
-// to indicate events of interest, such as a cache miss.
-//
-// The root span is where parent_id = Nil; it usually has the longest duration
-// in the trace.
-//
-// Span identifiers are packed into i64s, but should be treated opaquely.
-// String encoding is fixed-width lower-hex, to avoid signed interpretation.
-//
-// Attributes:
-// - TraceID: Unique 8-byte identifier for a trace, set on all spans within it.
-// - Name: Span name in lowercase, rpc method for example. Conventionally, when the
-// span name isn't known, name = "unknown".
-// - ID: Unique 8-byte identifier of this span within a trace. A span is uniquely
-// identified in storage by (trace_id, id).
-// - ParentID: The parent's Span.id; absent if this is the root span in a trace.
-// - Annotations: Associates events that explain latency with a timestamp. Unlike log
-// statements, annotations are often codes: for example SERVER_RECV("sr").
-// Annotations are sorted ascending by timestamp.
-// - BinaryAnnotations: Tags a span with context, usually to support query or aggregation. For
-// example, a binary annotation key could be "http.path".
-// - Debug: True denotes a request to store this span even if it overrides the sampling policy.
-// - Timestamp: Epoch microseconds of the start of this span, absent if this
-// is an incomplete span.
-//
-// This value should be set directly by instrumentation, using the most
-// precise value possible. For example, gettimeofday or syncing nanoTime
-// against a tick of currentTimeMillis.
-//
-// For compatibility with instrumentation that predates this field, collectors
-// or span stores can derive this via Annotation.timestamp.
-// For example, SERVER_RECV.timestamp or CLIENT_SEND.timestamp.
-//
-// Timestamp is nullable for input only. Spans without a timestamp cannot be
-// presented in a timeline: Span stores should not output spans missing a
-// timestamp.
-//
-// There are two known edge-cases where this could be absent: both cases
-// exist when a collector receives a span in parts and a binary annotation
-// precedes a timestamp. This is possible when:
-// - The span is in-flight (e.g. it has not yet received a timestamp)
-// - The span's start event was lost
-// - Duration: Measurement in microseconds of the critical path, if known. Durations of
-// less than one microsecond must be rounded up to 1 microsecond.
-//
-// This value should be set directly, as opposed to implicitly via annotation
-// timestamps. Doing so encourages precision decoupled from problems of
-// clocks, such as skew or NTP updates causing time to move backwards.
-//
-// For compatibility with instrumentation that predates this field, collectors
-// or span stores can derive this by subtracting Annotation.timestamp.
-// For example, SERVER_SEND.timestamp - SERVER_RECV.timestamp.
-//
-// If this field is persisted as unset, zipkin will continue to work, except
-// duration query support will be implementation-specific. Similarly, setting
-// this field non-atomically is implementation-specific.
-//
-// This field is i64 vs i32 to support spans longer than 35 minutes.
-// - TraceIDHigh: Optional unique 8-byte additional identifier for a trace. If non-zero, this
-// means the trace uses 128-bit trace IDs instead of 64-bit ones.
-type Span struct {
- TraceID int64 `thrift:"trace_id,1" json:"trace_id"`
- // unused field # 2
- Name string `thrift:"name,3" json:"name"`
- ID int64 `thrift:"id,4" json:"id"`
- ParentID *int64 `thrift:"parent_id,5" json:"parent_id,omitempty"`
- Annotations []*Annotation `thrift:"annotations,6" json:"annotations"`
- // unused field # 7
- BinaryAnnotations []*BinaryAnnotation `thrift:"binary_annotations,8" json:"binary_annotations"`
- Debug bool `thrift:"debug,9" json:"debug,omitempty"`
- Timestamp *int64 `thrift:"timestamp,10" json:"timestamp,omitempty"`
- Duration *int64 `thrift:"duration,11" json:"duration,omitempty"`
- TraceIDHigh *int64 `thrift:"trace_id_high,12" json:"trace_id_high,omitempty"`
-}
-
-func NewSpan() *Span {
- return &Span{}
-}
-
-func (p *Span) GetTraceID() int64 {
- return p.TraceID
-}
-
-func (p *Span) GetName() string {
- return p.Name
-}
-
-func (p *Span) GetID() int64 {
- return p.ID
-}
-
-var Span_ParentID_DEFAULT int64
-
-func (p *Span) GetParentID() int64 {
- if !p.IsSetParentID() {
- return Span_ParentID_DEFAULT
- }
- return *p.ParentID
-}
-
-func (p *Span) GetAnnotations() []*Annotation {
- return p.Annotations
-}
-
-func (p *Span) GetBinaryAnnotations() []*BinaryAnnotation {
- return p.BinaryAnnotations
-}
-
-var Span_Debug_DEFAULT bool = false
-
-func (p *Span) GetDebug() bool {
- return p.Debug
-}
-
-var Span_Timestamp_DEFAULT int64
-
-func (p *Span) GetTimestamp() int64 {
- if !p.IsSetTimestamp() {
- return Span_Timestamp_DEFAULT
- }
- return *p.Timestamp
-}
-
-var Span_Duration_DEFAULT int64
-
-func (p *Span) GetDuration() int64 {
- if !p.IsSetDuration() {
- return Span_Duration_DEFAULT
- }
- return *p.Duration
-}
-
-var Span_TraceIDHigh_DEFAULT int64
-
-func (p *Span) GetTraceIDHigh() int64 {
- if !p.IsSetTraceIDHigh() {
- return Span_TraceIDHigh_DEFAULT
- }
- return *p.TraceIDHigh
-}
-func (p *Span) IsSetParentID() bool {
- return p.ParentID != nil
-}
-
-func (p *Span) IsSetDebug() bool {
- return p.Debug != Span_Debug_DEFAULT
-}
-
-func (p *Span) IsSetTimestamp() bool {
- return p.Timestamp != nil
-}
-
-func (p *Span) IsSetDuration() bool {
- return p.Duration != nil
-}
-
-func (p *Span) IsSetTraceIDHigh() bool {
- return p.TraceIDHigh != nil
-}
-
-func (p *Span) Read(iprot thrift.TProtocol) error {
- if _, err := iprot.ReadStructBegin(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read error: ", p), err)
- }
-
- for {
- _, fieldTypeId, fieldId, err := iprot.ReadFieldBegin()
- if err != nil {
- return thrift.PrependError(fmt.Sprintf("%T field %d read error: ", p, fieldId), err)
- }
- if fieldTypeId == thrift.STOP {
- break
- }
- switch fieldId {
- case 1:
- if err := p.readField1(iprot); err != nil {
- return err
- }
- case 3:
- if err := p.readField3(iprot); err != nil {
- return err
- }
- case 4:
- if err := p.readField4(iprot); err != nil {
- return err
- }
- case 5:
- if err := p.readField5(iprot); err != nil {
- return err
- }
- case 6:
- if err := p.readField6(iprot); err != nil {
- return err
- }
- case 8:
- if err := p.readField8(iprot); err != nil {
- return err
- }
- case 9:
- if err := p.readField9(iprot); err != nil {
- return err
- }
- case 10:
- if err := p.readField10(iprot); err != nil {
- return err
- }
- case 11:
- if err := p.readField11(iprot); err != nil {
- return err
- }
- case 12:
- if err := p.readField12(iprot); err != nil {
- return err
- }
- default:
- if err := iprot.Skip(fieldTypeId); err != nil {
- return err
- }
- }
- if err := iprot.ReadFieldEnd(); err != nil {
- return err
- }
- }
- if err := iprot.ReadStructEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T read struct end error: ", p), err)
- }
- return nil
-}
-
-func (p *Span) readField1(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 1: ", err)
- } else {
- p.TraceID = v
- }
- return nil
-}
-
-func (p *Span) readField3(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadString(); err != nil {
- return thrift.PrependError("error reading field 3: ", err)
- } else {
- p.Name = v
- }
- return nil
-}
-
-func (p *Span) readField4(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 4: ", err)
- } else {
- p.ID = v
- }
- return nil
-}
-
-func (p *Span) readField5(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 5: ", err)
- } else {
- p.ParentID = &v
- }
- return nil
-}
-
-func (p *Span) readField6(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*Annotation, 0, size)
- p.Annotations = tSlice
- for i := 0; i < size; i++ {
- _elem0 := &Annotation{}
- if err := _elem0.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem0), err)
- }
- p.Annotations = append(p.Annotations, _elem0)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField8(iprot thrift.TProtocol) error {
- _, size, err := iprot.ReadListBegin()
- if err != nil {
- return thrift.PrependError("error reading list begin: ", err)
- }
- tSlice := make([]*BinaryAnnotation, 0, size)
- p.BinaryAnnotations = tSlice
- for i := 0; i < size; i++ {
- _elem1 := &BinaryAnnotation{}
- if err := _elem1.Read(iprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error reading struct: ", _elem1), err)
- }
- p.BinaryAnnotations = append(p.BinaryAnnotations, _elem1)
- }
- if err := iprot.ReadListEnd(); err != nil {
- return thrift.PrependError("error reading list end: ", err)
- }
- return nil
-}
-
-func (p *Span) readField9(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadBool(); err != nil {
- return thrift.PrependError("error reading field 9: ", err)
- } else {
- p.Debug = v
- }
- return nil
-}
-
-func (p *Span) readField10(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 10: ", err)
- } else {
- p.Timestamp = &v
- }
- return nil
-}
-
-func (p *Span) readField11(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 11: ", err)
- } else {
- p.Duration = &v
- }
- return nil
-}
-
-func (p *Span) readField12(iprot thrift.TProtocol) error {
- if v, err := iprot.ReadI64(); err != nil {
- return thrift.PrependError("error reading field 12: ", err)
- } else {
- p.TraceIDHigh = &v
- }
- return nil
-}
-
-func (p *Span) Write(oprot thrift.TProtocol) error {
- if err := oprot.WriteStructBegin("Span"); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write struct begin error: ", p), err)
- }
- if err := p.writeField1(oprot); err != nil {
- return err
- }
- if err := p.writeField3(oprot); err != nil {
- return err
- }
- if err := p.writeField4(oprot); err != nil {
- return err
- }
- if err := p.writeField5(oprot); err != nil {
- return err
- }
- if err := p.writeField6(oprot); err != nil {
- return err
- }
- if err := p.writeField8(oprot); err != nil {
- return err
- }
- if err := p.writeField9(oprot); err != nil {
- return err
- }
- if err := p.writeField10(oprot); err != nil {
- return err
- }
- if err := p.writeField11(oprot); err != nil {
- return err
- }
- if err := p.writeField12(oprot); err != nil {
- return err
- }
- if err := oprot.WriteFieldStop(); err != nil {
- return thrift.PrependError("write field stop error: ", err)
- }
- if err := oprot.WriteStructEnd(); err != nil {
- return thrift.PrependError("write struct stop error: ", err)
- }
- return nil
-}
-
-func (p *Span) writeField1(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("trace_id", thrift.I64, 1); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 1:trace_id: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.TraceID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id (1) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 1:trace_id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField3(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("name", thrift.STRING, 3); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 3:name: ", p), err)
- }
- if err := oprot.WriteString(string(p.Name)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.name (3) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 3:name: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField4(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("id", thrift.I64, 4); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 4:id: ", p), err)
- }
- if err := oprot.WriteI64(int64(p.ID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.id (4) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 4:id: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField5(oprot thrift.TProtocol) (err error) {
- if p.IsSetParentID() {
- if err := oprot.WriteFieldBegin("parent_id", thrift.I64, 5); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 5:parent_id: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.ParentID)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.parent_id (5) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 5:parent_id: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField6(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("annotations", thrift.LIST, 6); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 6:annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.Annotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.Annotations {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 6:annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField8(oprot thrift.TProtocol) (err error) {
- if err := oprot.WriteFieldBegin("binary_annotations", thrift.LIST, 8); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 8:binary_annotations: ", p), err)
- }
- if err := oprot.WriteListBegin(thrift.STRUCT, len(p.BinaryAnnotations)); err != nil {
- return thrift.PrependError("error writing list begin: ", err)
- }
- for _, v := range p.BinaryAnnotations {
- if err := v.Write(oprot); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T error writing struct: ", v), err)
- }
- }
- if err := oprot.WriteListEnd(); err != nil {
- return thrift.PrependError("error writing list end: ", err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 8:binary_annotations: ", p), err)
- }
- return err
-}
-
-func (p *Span) writeField9(oprot thrift.TProtocol) (err error) {
- if p.IsSetDebug() {
- if err := oprot.WriteFieldBegin("debug", thrift.BOOL, 9); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 9:debug: ", p), err)
- }
- if err := oprot.WriteBool(bool(p.Debug)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.debug (9) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 9:debug: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField10(oprot thrift.TProtocol) (err error) {
- if p.IsSetTimestamp() {
- if err := oprot.WriteFieldBegin("timestamp", thrift.I64, 10); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 10:timestamp: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.Timestamp)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.timestamp (10) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 10:timestamp: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField11(oprot thrift.TProtocol) (err error) {
- if p.IsSetDuration() {
- if err := oprot.WriteFieldBegin("duration", thrift.I64, 11); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 11:duration: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.Duration)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.duration (11) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 11:duration: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) writeField12(oprot thrift.TProtocol) (err error) {
- if p.IsSetTraceIDHigh() {
- if err := oprot.WriteFieldBegin("trace_id_high", thrift.I64, 12); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field begin error 12:trace_id_high: ", p), err)
- }
- if err := oprot.WriteI64(int64(*p.TraceIDHigh)); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T.trace_id_high (12) field write error: ", p), err)
- }
- if err := oprot.WriteFieldEnd(); err != nil {
- return thrift.PrependError(fmt.Sprintf("%T write field end error 12:trace_id_high: ", p), err)
- }
- }
- return err
-}
-
-func (p *Span) String() string {
- if p == nil {
- return ""
- }
- return fmt.Sprintf("Span(%+v)", *p)
-}
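The Span doc comment above says collectors may derive a missing duration by subtracting annotation timestamps (e.g. SERVER_SEND.timestamp - SERVER_RECV.timestamp), rounding sub-microsecond results up to 1µs. A sketch of that fallback; deriveDuration is our illustrative helper, not vendored code, and it assumes the zipkincore package is still importable:

    package main

    import (
        "fmt"

        "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
    )

    // deriveDuration derives a span duration from a pair of annotations when the
    // Duration field was never set, per the fallback in the Span doc comment.
    func deriveDuration(s *zipkincore.Span, start, end string) (int64, bool) {
        var t0, t1 int64
        var ok0, ok1 bool
        for _, a := range s.Annotations {
            switch a.Value {
            case start:
                t0, ok0 = a.Timestamp, true
            case end:
                t1, ok1 = a.Timestamp, true
            }
        }
        if !ok0 || !ok1 || t1 < t0 {
            return 0, false
        }
        if d := t1 - t0; d > 0 {
            return d, true
        }
        return 1, true // sub-microsecond durations round up to 1µs
    }

    func main() {
        s := zipkincore.NewSpan()
        s.Annotations = []*zipkincore.Annotation{
            {Timestamp: 1000, Value: zipkincore.SERVER_RECV},
            {Timestamp: 1500, Value: zipkincore.SERVER_SEND},
        }
        d, ok := deriveDuration(s, zipkincore.SERVER_RECV, zipkincore.SERVER_SEND)
        fmt.Println(d, ok) // 500 true
    }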
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go
deleted file mode 100644
index 8a4918475a2..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-http.go
+++ /dev/null
@@ -1,208 +0,0 @@
-package zipkintracer
-
-import (
- "bytes"
- "net/http"
- "sync"
- "time"
-
- "github.com/apache/thrift/lib/go/thrift"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// defaultHTTPTimeout is the default timeout for HTTP requests.
-const defaultHTTPTimeout = time.Second * 5
-
-// defaultHTTPBatchInterval in seconds
-const defaultHTTPBatchInterval = 1
-
-const defaultHTTPBatchSize = 100
-
-const defaultHTTPMaxBacklog = 1000
-
-// HTTPCollector implements Collector by forwarding spans to an HTTP server.
-type HTTPCollector struct {
- logger Logger
- url string
- client *http.Client
- batchInterval time.Duration
- batchSize int
- maxBacklog int
- batch []*zipkincore.Span
- spanc chan *zipkincore.Span
- quit chan struct{}
- shutdown chan error
- sendMutex *sync.Mutex
- batchMutex *sync.Mutex
-}
-
-// HTTPOption sets a parameter for the HTTPCollector.
-type HTTPOption func(c *HTTPCollector)
-
-// HTTPLogger sets the logger used to report errors in the collection
-// process. By default, a no-op logger is used, i.e. no errors are logged
-// anywhere. It's important to set this option in a production service.
-func HTTPLogger(logger Logger) HTTPOption {
- return func(c *HTTPCollector) { c.logger = logger }
-}
-
-// HTTPTimeout sets maximum timeout for http request.
-func HTTPTimeout(duration time.Duration) HTTPOption {
- return func(c *HTTPCollector) { c.client.Timeout = duration }
-}
-
-// HTTPBatchSize sets the maximum batch size, after which a collect will be
-// triggered. The default batch size is 100 traces.
-func HTTPBatchSize(n int) HTTPOption {
- return func(c *HTTPCollector) { c.batchSize = n }
-}
-
-// HTTPMaxBacklog sets the maximum backlog size; when the batch
-// reaches this threshold, spans from the beginning of the batch
-// will be disposed of.
-func HTTPMaxBacklog(n int) HTTPOption {
- return func(c *HTTPCollector) { c.maxBacklog = n }
-}
-
-// HTTPBatchInterval sets the maximum duration we will buffer traces before
-// emitting them to the collector. The default batch interval is 1 second.
-func HTTPBatchInterval(d time.Duration) HTTPOption {
- return func(c *HTTPCollector) { c.batchInterval = d }
-}
-
-// NewHTTPCollector returns a new HTTP-backed Collector. url should be an HTTP
-// URL that accepts POST requests. The request timeout and the async queue's
-// batch and backlog sizes are controlled via HTTPOptions. The logger is used
-// to log errors, such as send failures.
-func NewHTTPCollector(url string, options ...HTTPOption) (Collector, error) {
- c := &HTTPCollector{
- logger: NewNopLogger(),
- url: url,
- client: &http.Client{Timeout: defaultHTTPTimeout},
- batchInterval: defaultHTTPBatchInterval * time.Second,
- batchSize: defaultHTTPBatchSize,
- maxBacklog: defaultHTTPMaxBacklog,
- batch: []*zipkincore.Span{},
- spanc: make(chan *zipkincore.Span),
- quit: make(chan struct{}, 1),
- shutdown: make(chan error, 1),
- sendMutex: &sync.Mutex{},
- batchMutex: &sync.Mutex{},
- }
-
- for _, option := range options {
- option(c)
- }
-
- go c.loop()
- return c, nil
-}
-
-// Collect implements Collector.
-func (c *HTTPCollector) Collect(s *zipkincore.Span) error {
- c.spanc <- s
- return nil
-}
-
-// Close implements Collector.
-func (c *HTTPCollector) Close() error {
- close(c.quit)
- return <-c.shutdown
-}
-
-func httpSerialize(spans []*zipkincore.Span) *bytes.Buffer {
- t := thrift.NewTMemoryBuffer()
- p := thrift.NewTBinaryProtocolTransport(t)
- if err := p.WriteListBegin(thrift.STRUCT, len(spans)); err != nil {
- panic(err)
- }
- for _, s := range spans {
- if err := s.Write(p); err != nil {
- panic(err)
- }
- }
- if err := p.WriteListEnd(); err != nil {
- panic(err)
- }
- return t.Buffer
-}
-
-func (c *HTTPCollector) loop() {
- var (
- nextSend = time.Now().Add(c.batchInterval)
- ticker = time.NewTicker(c.batchInterval / 10)
- tickc = ticker.C
- )
- defer ticker.Stop()
-
- for {
- select {
- case span := <-c.spanc:
- currentBatchSize := c.append(span)
- if currentBatchSize >= c.batchSize {
- nextSend = time.Now().Add(c.batchInterval)
- go c.send()
- }
- case <-tickc:
- if time.Now().After(nextSend) {
- nextSend = time.Now().Add(c.batchInterval)
- go c.send()
- }
- case <-c.quit:
- c.shutdown <- c.send()
- return
- }
- }
-}
-
-func (c *HTTPCollector) append(span *zipkincore.Span) (newBatchSize int) {
- c.batchMutex.Lock()
- defer c.batchMutex.Unlock()
-
- c.batch = append(c.batch, span)
- if len(c.batch) > c.maxBacklog {
- dispose := len(c.batch) - c.maxBacklog
- c.logger.Log("Backlog too long, disposing spans.", "count", dispose)
- c.batch = c.batch[dispose:]
- }
- newBatchSize = len(c.batch)
- return
-}
-
-func (c *HTTPCollector) send() error {
- // in order to prevent sending the same batch twice
- c.sendMutex.Lock()
- defer c.sendMutex.Unlock()
-
- // Select all current spans in the batch to be sent
- c.batchMutex.Lock()
- sendBatch := c.batch[:]
- c.batchMutex.Unlock()
-
- // Do not send an empty batch
- if len(sendBatch) == 0 {
- return nil
- }
-
- req, err := http.NewRequest(
- "POST",
- c.url,
- httpSerialize(sendBatch))
- if err != nil {
- c.logger.Log("err", err.Error())
- return err
- }
- req.Header.Set("Content-Type", "application/x-thrift")
- if _, err = c.client.Do(req); err != nil {
- c.logger.Log("err", err.Error())
- return err
- }
-
- // Remove sent spans from the batch
- c.batchMutex.Lock()
- c.batch = c.batch[len(sendBatch):]
- c.batchMutex.Unlock()
-
- return nil
-}
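Before this file was deleted, a typical construction combined the functional options above; the URL below is the conventional Zipkin v1 thrift ingest endpoint and, like the option values, purely illustrative:

    package main

    import (
        "log"
        "time"

        zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
    )

    func main() {
        c, err := zipkintracer.NewHTTPCollector(
            "http://zipkin.example.com:9411/api/v1/spans", // illustrative endpoint
            zipkintracer.HTTPBatchSize(50),
            zipkintracer.HTTPBatchInterval(2*time.Second),
            zipkintracer.HTTPMaxBacklog(500),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
    }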
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go
deleted file mode 100644
index daeb4a3dce0..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-kafka.go
+++ /dev/null
@@ -1,95 +0,0 @@
-package zipkintracer
-
-import (
- "github.com/Shopify/sarama"
- "github.com/apache/thrift/lib/go/thrift"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// defaultKafkaTopic sets the standard Kafka topic our Collector will publish
-// on. The default topic for zipkin-receiver-kafka is "zipkin", see:
-// https://github.com/openzipkin/zipkin/tree/master/zipkin-receiver-kafka
-const defaultKafkaTopic = "zipkin"
-
-// KafkaCollector implements Collector by publishing spans to a Kafka
-// broker.
-type KafkaCollector struct {
- producer sarama.AsyncProducer
- logger Logger
- topic string
-}
-
-// KafkaOption sets a parameter for the KafkaCollector
-type KafkaOption func(c *KafkaCollector)
-
-// KafkaLogger sets the logger used to report errors in the collection
-// process. By default, a no-op logger is used, i.e. no errors are logged
-// anywhere. It's important to set this option.
-func KafkaLogger(logger Logger) KafkaOption {
- return func(c *KafkaCollector) { c.logger = logger }
-}
-
-// KafkaProducer sets the producer used to produce to Kafka.
-func KafkaProducer(p sarama.AsyncProducer) KafkaOption {
- return func(c *KafkaCollector) { c.producer = p }
-}
-
-// KafkaTopic sets the Kafka topic the collector's producer publishes to.
-func KafkaTopic(t string) KafkaOption {
- return func(c *KafkaCollector) { c.topic = t }
-}
-
-// NewKafkaCollector returns a new Kafka-backed Collector. addrs should be a
-// slice of TCP endpoints of the form "host:port".
-func NewKafkaCollector(addrs []string, options ...KafkaOption) (Collector, error) {
- c := &KafkaCollector{
- logger: NewNopLogger(),
- topic: defaultKafkaTopic,
- }
-
- for _, option := range options {
- option(c)
- }
- if c.producer == nil {
- p, err := sarama.NewAsyncProducer(addrs, nil)
- if err != nil {
- return nil, err
- }
- c.producer = p
- }
-
- go c.logErrors()
-
- return c, nil
-}
-
-func (c *KafkaCollector) logErrors() {
- for pe := range c.producer.Errors() {
- _ = c.logger.Log("msg", pe.Msg, "err", pe.Err, "result", "failed to produce msg")
- }
-}
-
-// Collect implements Collector.
-func (c *KafkaCollector) Collect(s *zipkincore.Span) error {
- c.producer.Input() <- &sarama.ProducerMessage{
- Topic: c.topic,
- Key: nil,
- Value: sarama.ByteEncoder(kafkaSerialize(s)),
- }
- return nil
-}
-
-// Close implements Collector.
-func (c *KafkaCollector) Close() error {
- return c.producer.Close()
-}
-
-func kafkaSerialize(s *zipkincore.Span) []byte {
- t := thrift.NewTMemoryBuffer()
- p := thrift.NewTBinaryProtocolTransport(t)
- if err := s.Write(p); err != nil {
- panic(err)
- }
- return t.Buffer.Bytes()
-}
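Equivalent construction for the Kafka collector being removed; the broker addresses are illustrative, and the topic option is redundant here since "zipkin" is already the default:

    package main

    import (
        "log"

        zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
    )

    func main() {
        c, err := zipkintracer.NewKafkaCollector(
            []string{"kafka-1:9092", "kafka-2:9092"}, // illustrative brokers
            zipkintracer.KafkaTopic("zipkin"),
        )
        if err != nil {
            log.Fatal(err)
        }
        defer c.Close()
    }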
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go
deleted file mode 100644
index 3c912ae3481..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector-scribe.go
+++ /dev/null
@@ -1,234 +0,0 @@
-package zipkintracer
-
-import (
- "encoding/base64"
- "fmt"
- "net"
- "sync"
- "time"
-
- "github.com/apache/thrift/lib/go/thrift"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/scribe"
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-const defaultScribeCategory = "zipkin"
-
-// defaultScribeBatchInterval in seconds
-const defaultScribeBatchInterval = 1
-
-const defaultScribeBatchSize = 100
-
-const defaultScribeMaxBacklog = 1000
-
-// ScribeCollector implements Collector by forwarding spans to a Scribe
-// service, in batches.
-type ScribeCollector struct {
- logger Logger
- category string
- factory func() (scribe.Scribe, error)
- client scribe.Scribe
- batchInterval time.Duration
- batchSize int
- maxBacklog int
- batch []*scribe.LogEntry
- spanc chan *zipkincore.Span
- quit chan struct{}
- shutdown chan error
- sendMutex *sync.Mutex
- batchMutex *sync.Mutex
-}
-
-// ScribeOption sets a parameter for the ScribeCollector.
-type ScribeOption func(s *ScribeCollector)
-
-// ScribeLogger sets the logger used to report errors in the collection
-// process. By default, a no-op logger is used, i.e. no errors are logged
-// anywhere. It's important to set this option in a production service.
-func ScribeLogger(logger Logger) ScribeOption {
- return func(s *ScribeCollector) { s.logger = logger }
-}
-
-// ScribeBatchSize sets the maximum batch size, after which a collect will be
-// triggered. The default batch size is 100 traces.
-func ScribeBatchSize(n int) ScribeOption {
- return func(s *ScribeCollector) { s.batchSize = n }
-}
-
-// ScribeMaxBacklog sets the maximum backlog size; when the batch
-// reaches this threshold, spans from the beginning of the batch
-// will be disposed of.
-func ScribeMaxBacklog(n int) ScribeOption {
- return func(c *ScribeCollector) { c.maxBacklog = n }
-}
-
-// ScribeBatchInterval sets the maximum duration we will buffer traces before
-// emitting them to the collector. The default batch interval is 1 second.
-func ScribeBatchInterval(d time.Duration) ScribeOption {
- return func(s *ScribeCollector) { s.batchInterval = d }
-}
-
-// ScribeCategory sets the Scribe category used to transmit the spans.
-func ScribeCategory(category string) ScribeOption {
- return func(s *ScribeCollector) { s.category = category }
-}
-
-// NewScribeCollector returns a new Scribe-backed Collector. addr should be a
-// TCP endpoint of the form "host:port". timeout is passed to the Thrift dial
-// function NewTSocketFromAddrTimeout. batchSize and batchInterval control the
-// maximum size and interval of a batch of spans; as soon as either limit is
-// reached, the batch is sent. The logger is used to log errors, such as batch
-// send failures; users should provide an appropriate context, if desired.
-func NewScribeCollector(addr string, timeout time.Duration, options ...ScribeOption) (Collector, error) {
- factory := scribeClientFactory(addr, timeout)
- client, err := factory()
- if err != nil {
- return nil, err
- }
- c := &ScribeCollector{
- logger: NewNopLogger(),
- category: defaultScribeCategory,
- factory: factory,
- client: client,
- batchInterval: defaultScribeBatchInterval * time.Second,
- batchSize: defaultScribeBatchSize,
- maxBacklog: defaultScribeMaxBacklog,
- batch: []*scribe.LogEntry{},
- spanc: make(chan *zipkincore.Span),
- quit: make(chan struct{}),
- shutdown: make(chan error, 1),
- sendMutex: &sync.Mutex{},
- batchMutex: &sync.Mutex{},
- }
-
- for _, option := range options {
- option(c)
- }
-
- go c.loop()
- return c, nil
-}
-
-// Collect implements Collector.
-func (c *ScribeCollector) Collect(s *zipkincore.Span) error {
- c.spanc <- s
- return nil // accepted
-}
-
-// Close implements Collector.
-func (c *ScribeCollector) Close() error {
- close(c.quit)
- return <-c.shutdown
-}
-
-func scribeSerialize(s *zipkincore.Span) string {
- t := thrift.NewTMemoryBuffer()
- p := thrift.NewTBinaryProtocolTransport(t)
- if err := s.Write(p); err != nil {
- panic(err)
- }
- return base64.StdEncoding.EncodeToString(t.Buffer.Bytes())
-}
-
-func (c *ScribeCollector) loop() {
- var (
- nextSend = time.Now().Add(c.batchInterval)
- ticker = time.NewTicker(c.batchInterval / 10)
- tickc = ticker.C
- )
- defer ticker.Stop()
-
- for {
- select {
- case span := <-c.spanc:
- currentBatchSize := c.append(span)
- if currentBatchSize >= c.batchSize {
- nextSend = time.Now().Add(c.batchInterval)
- go c.send()
- }
- case <-tickc:
- if time.Now().After(nextSend) {
- nextSend = time.Now().Add(c.batchInterval)
- go c.send()
- }
- case <-c.quit:
- c.shutdown <- c.send()
- return
- }
- }
-}
-
-func (c *ScribeCollector) append(span *zipkincore.Span) (newBatchSize int) {
- c.batchMutex.Lock()
- defer c.batchMutex.Unlock()
-
- c.batch = append(c.batch, &scribe.LogEntry{
- Category: c.category,
- Message: scribeSerialize(span),
- })
- if len(c.batch) > c.maxBacklog {
- dispose := len(c.batch) - c.maxBacklog
- c.logger.Log("Backlog too long, disposing spans.", "count", dispose)
- c.batch = c.batch[dispose:]
- }
- newBatchSize = len(c.batch)
- return
-}
-
-func (c *ScribeCollector) send() error {
- // in order to prevent sending the same batch twice
- c.sendMutex.Lock()
- defer c.sendMutex.Unlock()
-
- // Select all current spans in the batch to be sent
- c.batchMutex.Lock()
- sendBatch := c.batch[:]
- c.batchMutex.Unlock()
-
- // Do not send an empty batch
- if len(sendBatch) == 0 {
- return nil
- }
-
- if c.client == nil {
- var err error
- if c.client, err = c.factory(); err != nil {
- _ = c.logger.Log("err", fmt.Sprintf("during reconnect: %v", err))
- return err
- }
- }
- if rc, err := c.client.Log(sendBatch); err != nil {
- c.client = nil
- _ = c.logger.Log("err", fmt.Sprintf("during Log: %v", err))
- return err
- } else if rc != scribe.ResultCode_OK {
- // probably transient error; don't reset client
- _ = c.logger.Log("err", fmt.Sprintf("remote returned %s", rc))
- }
-
- // Remove sent spans from the batch
- c.batchMutex.Lock()
- c.batch = c.batch[len(sendBatch):]
- c.batchMutex.Unlock()
-
- return nil
-}
-
-func scribeClientFactory(addr string, timeout time.Duration) func() (scribe.Scribe, error) {
- return func() (scribe.Scribe, error) {
- a, err := net.ResolveTCPAddr("tcp", addr)
- if err != nil {
- return nil, err
- }
- socket := thrift.NewTSocketFromAddrTimeout(a, timeout)
- transport := thrift.NewTFramedTransport(socket)
- if err := transport.Open(); err != nil {
- _ = socket.Close()
- return nil, err
- }
- proto := thrift.NewTBinaryProtocolTransport(transport)
- client := scribe.NewScribeClientProtocol(transport, proto, proto)
- return client, nil
- }
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go
deleted file mode 100644
index 30979694b1d..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/collector.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package zipkintracer
-
-import (
- "strings"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// Collector represents a Zipkin trace collector, which is typically a set of
-// remote endpoints.
-type Collector interface {
- Collect(*zipkincore.Span) error
- Close() error
-}
-
-// NopCollector implements Collector but performs no work.
-type NopCollector struct{}
-
-// Collect implements Collector.
-func (NopCollector) Collect(*zipkincore.Span) error { return nil }
-
-// Close implements Collector.
-func (NopCollector) Close() error { return nil }
-
-// MultiCollector implements Collector by sending spans to all collectors.
-type MultiCollector []Collector
-
-// Collect implements Collector.
-func (c MultiCollector) Collect(s *zipkincore.Span) error {
- return c.aggregateErrors(func(coll Collector) error { return coll.Collect(s) })
-}
-
-// Close implements Collector.
-func (c MultiCollector) Close() error {
- return c.aggregateErrors(func(coll Collector) error { return coll.Close() })
-}
-
-func (c MultiCollector) aggregateErrors(f func(c Collector) error) error {
- var e *collectionError
- for i, collector := range c {
- if err := f(collector); err != nil {
- if e == nil {
- e = &collectionError{
- errs: make([]error, len(c)),
- }
- }
- e.errs[i] = err
- }
- }
- return e
-}
-
-// CollectionError represents an array of errors returned by one or more
-// failed Collector methods.
-type CollectionError interface {
- Error() string
- GetErrors() []error
-}
-
-type collectionError struct {
- errs []error
-}
-
-func (c *collectionError) Error() string {
- errs := []string{}
- for _, err := range c.errs {
- if err != nil {
- errs = append(errs, err.Error())
- }
- }
- return strings.Join(errs, "; ")
-}
-
-// GetErrors implements CollectionError
-func (c *collectionError) GetErrors() []error {
- return c.errs
-}
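MultiCollector ties the deleted pieces together: every span fans out to all member collectors, and per-member failures are aggregated into a single CollectionError. A sketch combining the HTTP and Scribe collectors removed above (addresses illustrative):

    package main

    import (
        "log"
        "time"

        zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
    )

    func main() {
        httpC, err := zipkintracer.NewHTTPCollector("http://zipkin.example.com:9411/api/v1/spans")
        if err != nil {
            log.Fatal(err)
        }
        scribeC, err := zipkintracer.NewScribeCollector("scribe.example.com:1463", 5*time.Second)
        if err != nil {
            log.Fatal(err)
        }
        // Close() on the MultiCollector closes every member and joins their errors.
        var c zipkintracer.Collector = zipkintracer.MultiCollector{httpC, scribeC}
        defer c.Close()
    }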
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go
deleted file mode 100644
index e9fe299118f..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/context.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package zipkintracer
-
-import (
- "github.com/openzipkin/zipkin-go-opentracing/flag"
- "github.com/openzipkin/zipkin-go-opentracing/types"
-)
-
-// SpanContext holds the basic Span metadata.
-type SpanContext struct {
- // A probabilistically unique identifier for a [multi-span] trace.
- TraceID types.TraceID
-
- // A probabilistically unique identifier for a span.
- SpanID uint64
-
- // Whether the trace is sampled.
- Sampled bool
-
- // The span's associated baggage.
- Baggage map[string]string // initialized on first use
-
- // The SpanID of this Context's parent, or nil if there is no parent.
- ParentSpanID *uint64
-
- // Flags provides the ability to create and communicate feature flags.
- Flags flag.Flags
-
- // Whether the span is owned by the current process
- Owner bool
-}
-
-// ForeachBaggageItem belongs to the opentracing.SpanContext interface
-func (c SpanContext) ForeachBaggageItem(handler func(k, v string) bool) {
- for k, v := range c.Baggage {
- if !handler(k, v) {
- break
- }
- }
-}
-
-// WithBaggageItem returns an entirely new basictracer SpanContext with the
-// given key:value baggage pair set.
-func (c SpanContext) WithBaggageItem(key, val string) SpanContext {
- var newBaggage map[string]string
- if c.Baggage == nil {
- newBaggage = map[string]string{key: val}
- } else {
- newBaggage = make(map[string]string, len(c.Baggage)+1)
- for k, v := range c.Baggage {
- newBaggage[k] = v
- }
- newBaggage[key] = val
- }
- var parentSpanID *uint64
- if c.ParentSpanID != nil {
- parentSpanID = new(uint64)
- *parentSpanID = *c.ParentSpanID
- }
- // Use positional parameters so the compiler will help catch new fields.
- return SpanContext{c.TraceID, c.SpanID, c.Sampled, newBaggage, parentSpanID, c.Flags, c.Owner}
-}
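WithBaggageItem above is strictly copy-on-write: the receiver's baggage map and parent-span pointer are cloned, never mutated, so contexts can be shared across goroutines without locks. A standalone sketch of the same discipline (illustrative types, not the vendored ones):

package main

import "fmt"

type ctx struct{ baggage map[string]string }

// withItem clones the map so the original ctx is never mutated.
func (c ctx) withItem(k, v string) ctx {
	nb := make(map[string]string, len(c.baggage)+1)
	for key, val := range c.baggage {
		nb[key] = val
	}
	nb[k] = v
	return ctx{baggage: nb}
}

func main() {
	parent := ctx{}
	child := parent.withItem("user", "alice")
	fmt.Println(len(parent.baggage), len(child.baggage)) // 0 1: parent untouched
}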
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go
deleted file mode 100644
index 1ee00c8a689..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/debug.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package zipkintracer
-
-import (
- "bytes"
- "fmt"
- "runtime"
- "strconv"
- "sync"
-)
-
-const debugGoroutineIDTag = "_initial_goroutine"
-
-type errAssertionFailed struct {
- span *spanImpl
- msg string
-}
-
-// Error implements the error interface.
-func (err *errAssertionFailed) Error() string {
- return fmt.Sprintf("%s:\n%+v", err.msg, err.span)
-}
-
-func (s *spanImpl) Lock() {
- s.Mutex.Lock()
- s.maybeAssertSanityLocked()
-}
-
-func (s *spanImpl) maybeAssertSanityLocked() {
- if s.tracer == nil {
- s.Mutex.Unlock()
- panic(&errAssertionFailed{span: s, msg: "span used after call to Finish()"})
- }
- if s.tracer.options.debugAssertSingleGoroutine {
- startID := curGoroutineID()
- curID, ok := s.raw.Tags[debugGoroutineIDTag].(uint64)
- if !ok {
- // This is likely invoked in the context of the SetTag which sets
- // debugGoroutineTag.
- return
- }
- if startID != curID {
- s.Mutex.Unlock()
- panic(&errAssertionFailed{
- span: s,
- msg: fmt.Sprintf("span started on goroutine %d, but now running on %d", startID, curID),
- })
- }
- }
-}
-
-var goroutineSpace = []byte("goroutine ")
-var littleBuf = sync.Pool{
- New: func() interface{} {
- buf := make([]byte, 64)
- return &buf
- },
-}
-
-// Credit to @bradfitz:
-// https://github.com/golang/net/blob/master/http2/gotrack.go#L51
-func curGoroutineID() uint64 {
- bp := littleBuf.Get().(*[]byte)
- defer littleBuf.Put(bp)
- b := *bp
- b = b[:runtime.Stack(b, false)]
- // Parse the 4707 out of "goroutine 4707 ["
- b = bytes.TrimPrefix(b, goroutineSpace)
- i := bytes.IndexByte(b, ' ')
- if i < 0 {
- panic(fmt.Sprintf("No space found in %q", b))
- }
- b = b[:i]
- n, err := strconv.ParseUint(string(b), 10, 64)
- if err != nil {
- panic(fmt.Sprintf("Failed to parse goroutine ID out of %q: %v", b, err))
- }
- return n
-}
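curGoroutineID relies on the stable first line of runtime.Stack output, "goroutine N [status]:". A self-contained sketch of the same parsing trick, runnable as-is:

package main

import (
	"bytes"
	"fmt"
	"runtime"
	"strconv"
)

func goroutineID() uint64 {
	buf := make([]byte, 64)
	buf = buf[:runtime.Stack(buf, false)]
	// The first line looks like "goroutine 4707 [running]:".
	buf = bytes.TrimPrefix(buf, []byte("goroutine "))
	buf = buf[:bytes.IndexByte(buf, ' ')]
	id, _ := strconv.ParseUint(string(buf), 10, 64)
	return id
}

func main() {
	fmt.Println(goroutineID()) // the main goroutine usually prints 1
}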
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go
deleted file mode 100644
index 31b6a009edd..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/event.go
+++ /dev/null
@@ -1,62 +0,0 @@
-package zipkintracer
-
-import "github.com/opentracing/opentracing-go"
-
-// A SpanEvent is emitted when a mutating command is called on a Span.
-type SpanEvent interface{}
-
-// EventCreate is emitted when a Span is created.
-type EventCreate struct{ OperationName string }
-
-// EventTag is received when SetTag is called.
-type EventTag struct {
- Key string
- Value interface{}
-}
-
-// EventBaggage is received when SetBaggageItem is called.
-type EventBaggage struct {
- Key, Value string
-}
-
-// EventLogFields is received when LogFields or LogKV is called.
-type EventLogFields opentracing.LogRecord
-
-// EventLog is received when Log (or one of its derivatives) is called.
-//
-// DEPRECATED
-type EventLog opentracing.LogData
-
-// EventFinish is received when Finish is called.
-type EventFinish RawSpan
-
-func (s *spanImpl) onCreate(opName string) {
- if s.event != nil {
- s.event(EventCreate{OperationName: opName})
- }
-}
-func (s *spanImpl) onTag(key string, value interface{}) {
- if s.event != nil {
- s.event(EventTag{Key: key, Value: value})
- }
-}
-func (s *spanImpl) onLog(ld opentracing.LogData) {
- if s.event != nil {
- s.event(EventLog(ld))
- }
-}
-func (s *spanImpl) onLogFields(lr opentracing.LogRecord) {
- if s.event != nil {
- s.event(EventLogFields(lr))
- }
-}
-func (s *spanImpl) onBaggage(key, value string) {
- if s.event != nil {
- s.event(EventBaggage{Key: key, Value: value})
- }
-}
-func (s *spanImpl) onFinish(sp RawSpan) {
- if s.event != nil {
- s.event(EventFinish(sp))
- }
-}
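These on* hooks only fire when a listener was installed via the NewSpanEventListener option (see tracer.go further down in this diff). A sketch that counts span creations, assuming the package were still importable at its upstream path:

package main

import (
	"fmt"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	created := 0
	listener := func() func(zipkintracer.SpanEvent) {
		return func(e zipkintracer.SpanEvent) {
			if _, ok := e.(zipkintracer.EventCreate); ok {
				created++ // fired from spanImpl.onCreate
			}
		}
	}
	tracer, _ := zipkintracer.NewTracer(
		zipkintracer.NewInMemoryRecorder(),
		zipkintracer.NewSpanEventListener(listener),
	)
	tracer.StartSpan("a").Finish()
	tracer.StartSpan("b").Finish()
	fmt.Println(created) // 2
}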
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go
deleted file mode 100644
index 05cb10ea391..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/flag/flags.go
+++ /dev/null
@@ -1,39 +0,0 @@
-package flag
-
-// Flags provides the ability to create and communicate feature flags.
-type Flags uint64
-
-// Flags is a bitset
-const (
- Debug Flags = 1 << 0
-
- // All flags below deal with binaryPropagators. They will be discarded in the
- // textMapPropagator (not read and not set)
-
- // SamplingSet and Sampled handle Sampled tribool logic for interop with
- // instrumenting libraries / propagation channels not using a separate Sampled
- // header and potentially encoding this in flags.
- //
- // When we receive a flag we do this:
- // 1. Sampled bit is set => true
- // 2. Sampled bit is not set => inspect SamplingSet bit.
- // 2a. SamplingSet bit is set => false
- // 2b. SamplingSet bit is not set => null
- // Note on 2b.: depending on the propagator having a separate Sampled header
- // we either assume Sampling is false or unknown. In the latter case we will
- // run our sampler even though we are not the root of the trace.
- //
- // When propagating to a downstream service we will always be explicit and
- // will provide a set SamplingSet bit, encoding the sampling decision in the
- // flags of our binary propagator.
- SamplingSet Flags = 1 << 1
- Sampled Flags = 1 << 2
- // When set, we can ignore the value of the parentId. This is used for binary
- // fixed width transports or transports like proto3 that return a default
- // value if a value has not been set (thus not enabling you to distinguish
- // between the value being set to the default or not set at all).
- //
- // While many zipkin systems re-use a trace id as the root span id, we know
- // that some don't. With this flag, we can tell for sure if the span is root
- // as opposed to the convention trace id == span id == parent id.
- IsRoot Flags = 1 << 3
-)
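The tribool rules in the comment above (1, 2a, 2b) translate directly into a small decoder. A standalone sketch; the constants mirror the bit layout of this file, and sampledState is my name, not part of the package:

package main

import "fmt"

type Flags uint64

const (
	Debug       Flags = 1 << 0
	SamplingSet Flags = 1 << 1
	Sampled     Flags = 1 << 2
	IsRoot      Flags = 1 << 3
)

// sampledState decodes the Sampled tribool: true, false, or unknown (nil).
func sampledState(f Flags) *bool {
	t, fa := true, false
	switch {
	case f&Sampled != 0: // 1. Sampled bit set => true
		return &t
	case f&SamplingSet != 0: // 2a. SamplingSet set without Sampled => false
		return &fa
	default: // 2b. neither set => unknown; the tracer may run its own sampler
		return nil
	}
}

func main() {
	fmt.Println(*sampledState(SamplingSet | Sampled)) // true
	fmt.Println(*sampledState(SamplingSet))           // false
	fmt.Println(sampledState(0) == nil)               // true
}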
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go
deleted file mode 100644
index f5695e0e27c..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/log-materializers.go
+++ /dev/null
@@ -1,113 +0,0 @@
-// Copyright (c) 2016 Uber Technologies, Inc.
-// Copyright (c) 2016 Bas van Beek
-
-// Permission is hereby granted, free of charge, to any person obtaining a copy
-// of this software and associated documentation files (the "Software"), to deal
-// in the Software without restriction, including without limitation the rights
-// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-// copies of the Software, and to permit persons to whom the Software is
-// furnished to do so, subject to the following conditions:
-//
-// The above copyright notice and this permission notice shall be included in
-// all copies or substantial portions of the Software.
-//
-// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
-// THE SOFTWARE.
-
-package zipkintracer
-
-import (
- "bytes"
- "encoding/json"
- "errors"
- "fmt"
-
- "github.com/go-logfmt/logfmt"
- "github.com/opentracing/opentracing-go/log"
-)
-
-var errEventLogNotFound = errors.New("event log field not found")
-
-type fieldsAsMap map[string]string
-
-// MaterializeWithJSON converts log Fields into JSON string
-func MaterializeWithJSON(logFields []log.Field) ([]byte, error) {
- fields := fieldsAsMap(make(map[string]string, len(logFields)))
- for _, field := range logFields {
- field.Marshal(fields)
- }
- return json.Marshal(fields)
-}
-
-// MaterializeWithLogFmt converts log Fields into LogFmt string
-func MaterializeWithLogFmt(logFields []log.Field) ([]byte, error) {
- var (
- buffer = bytes.NewBuffer(nil)
- encoder = logfmt.NewEncoder(buffer)
- )
- for _, field := range logFields {
- if err := encoder.EncodeKeyval(field.Key(), field.Value()); err != nil {
- encoder.EncodeKeyval(field.Key(), err.Error())
- }
- }
- return buffer.Bytes(), nil
-}
-
-// StrictZipkinMaterializer will only record a log.Field of type "event".
-func StrictZipkinMaterializer(logFields []log.Field) ([]byte, error) {
- for _, field := range logFields {
- if field.Key() == "event" {
- return []byte(fmt.Sprintf("%+v", field.Value())), nil
- }
- }
- return nil, errEventLogNotFound
-}
-
-func (ml fieldsAsMap) EmitString(key, value string) {
- ml[key] = value
-}
-
-func (ml fieldsAsMap) EmitBool(key string, value bool) {
- ml[key] = fmt.Sprintf("%t", value)
-}
-
-func (ml fieldsAsMap) EmitInt(key string, value int) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt32(key string, value int32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitInt64(key string, value int64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint32(key string, value uint32) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitUint64(key string, value uint64) {
- ml[key] = fmt.Sprintf("%d", value)
-}
-
-func (ml fieldsAsMap) EmitFloat32(key string, value float32) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitFloat64(key string, value float64) {
- ml[key] = fmt.Sprintf("%f", value)
-}
-
-func (ml fieldsAsMap) EmitObject(key string, value interface{}) {
- ml[key] = fmt.Sprintf("%+v", value)
-}
-
-func (ml fieldsAsMap) EmitLazyLogger(value log.LazyLogger) {
- value(ml)
-}
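For a feel of what the three materializers emit, a short sketch, assuming opentracing-go and this package are importable:

package main

import (
	"fmt"

	"github.com/opentracing/opentracing-go/log"
	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	fields := []log.Field{
		log.String("event", "cache miss"),
		log.Int("attempt", 2),
	}

	j, _ := zipkintracer.MaterializeWithJSON(fields)
	fmt.Println(string(j)) // {"attempt":"2","event":"cache miss"}

	l, _ := zipkintracer.MaterializeWithLogFmt(fields)
	fmt.Println(string(l)) // event="cache miss" attempt=2

	s, _ := zipkintracer.StrictZipkinMaterializer(fields)
	fmt.Println(string(s)) // cache miss
}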
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go
deleted file mode 100644
index c6f223a9785..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/logger.go
+++ /dev/null
@@ -1,63 +0,0 @@
-package zipkintracer
-
-import (
- "errors"
- "fmt"
- "log"
- "strings"
-)
-
-// ErrMissingValue is appended as the value when Log is called with an odd
-// number of parameters.
-var ErrMissingValue = errors.New("(MISSING)")
-
-// Logger interface used by this package.
-// This means that we accept Go kit Log compatible loggers
-type Logger interface {
- Log(keyvals ...interface{}) error
-}
-
-// NewNopLogger provides a Logger that discards all Log data sent to it.
-func NewNopLogger() Logger {
- return &nopLogger{}
-}
-
-// LogWrapper wraps a standard library logger into a Logger compatible with this
-// package.
-func LogWrapper(l *log.Logger) Logger {
- return &wrappedLogger{l: l}
-}
-
-// wrappedLogger implements Logger
-type wrappedLogger struct {
- l *log.Logger
-}
-
-// Log implements Logger
-func (l *wrappedLogger) Log(k ...interface{}) error {
- if len(k)%2 == 1 {
- k = append(k, ErrMissingValue)
- }
- o := make([]string, len(k)/2)
- for i := 0; i < len(k); i += 2 {
- o[i/2] = fmt.Sprintf("%s=%q", k[i], k[i+1])
- }
- l.l.Println(strings.Join(o, " "))
- return nil
-}
-
-// nopLogger implements Logger
-type nopLogger struct{}
-
-// Log implements Logger
-func (*nopLogger) Log(_ ...interface{}) error { return nil }
-
-// LoggerFunc is an adapter to allow use of ordinary functions as Loggers. If
-// f is a function with the appropriate signature, LoggerFunc(f) is a Logger
-// object that calls f.
-type LoggerFunc func(...interface{}) error
-
-// Log implements Logger by calling f(keyvals...).
-func (f LoggerFunc) Log(keyvals ...interface{}) error {
- return f(keyvals...)
-}
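A quick usage sketch for the two adapters; output format follows wrappedLogger.Log above (string values, since %q quotes them):

package main

import (
	"log"
	"os"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	// Wrap the standard library logger; pairs are rendered as k="v".
	wrapped := zipkintracer.LogWrapper(log.New(os.Stderr, "tracer: ", 0))
	_ = wrapped.Log("msg", "collector unreachable", "retries", "3")
	// stderr: tracer: msg="collector unreachable" retries="3"

	// Or satisfy Logger with any ordinary function via LoggerFunc.
	fn := zipkintracer.LoggerFunc(func(kv ...interface{}) error {
		log.Println(kv...)
		return nil
	})
	_ = fn.Log("msg", "spans flushed")
}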
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go
deleted file mode 100644
index 56d2d5aa3db..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation.go
+++ /dev/null
@@ -1,68 +0,0 @@
-package zipkintracer
-
-import (
- opentracing "github.com/opentracing/opentracing-go"
-
- "github.com/openzipkin/zipkin-go-opentracing/flag"
- "github.com/openzipkin/zipkin-go-opentracing/types"
-)
-
-type accessorPropagator struct {
- tracer *tracerImpl
-}
-
-// DelegatingCarrier is a flexible carrier interface which can be implemented
-// by types which have a means of storing the trace metadata and already know
-// how to serialize themselves (for example, protocol buffers).
-type DelegatingCarrier interface {
- SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
- State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags)
- SetBaggageItem(key, value string)
- GetBaggage(func(key, value string))
-}
-
-func (p *accessorPropagator) Inject(
- spanContext opentracing.SpanContext,
- carrier interface{},
-) error {
- dc, ok := carrier.(DelegatingCarrier)
- if !ok || dc == nil {
- return opentracing.ErrInvalidCarrier
- }
- sc, ok := spanContext.(SpanContext)
- if !ok {
- return opentracing.ErrInvalidSpanContext
- }
- dc.SetState(sc.TraceID, sc.SpanID, sc.ParentSpanID, sc.Sampled, sc.Flags)
- for k, v := range sc.Baggage {
- dc.SetBaggageItem(k, v)
- }
- return nil
-}
-
-func (p *accessorPropagator) Extract(
- carrier interface{},
-) (opentracing.SpanContext, error) {
- dc, ok := carrier.(DelegatingCarrier)
- if !ok || dc == nil {
- return nil, opentracing.ErrInvalidCarrier
- }
-
- traceID, spanID, parentSpanID, sampled, flags := dc.State()
- sc := SpanContext{
- TraceID: traceID,
- SpanID: spanID,
- Sampled: sampled,
- Baggage: nil,
- ParentSpanID: parentSpanID,
- Flags: flags,
- }
- dc.GetBaggage(func(k, v string) {
- if sc.Baggage == nil {
- sc.Baggage = map[string]string{}
- }
- sc.Baggage[k] = v
- })
-
- return sc, nil
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go
deleted file mode 100644
index 2142d5e3241..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/propagation_ot.go
+++ /dev/null
@@ -1,252 +0,0 @@
-package zipkintracer
-
-import (
- "encoding/binary"
- "io"
- "strconv"
- "strings"
-
- "github.com/gogo/protobuf/proto"
- opentracing "github.com/opentracing/opentracing-go"
-
- "github.com/openzipkin/zipkin-go-opentracing/flag"
- "github.com/openzipkin/zipkin-go-opentracing/types"
- "github.com/openzipkin/zipkin-go-opentracing/wire"
-)
-
-type textMapPropagator struct {
- tracer *tracerImpl
-}
-type binaryPropagator struct {
- tracer *tracerImpl
-}
-
-const (
- prefixTracerState = "x-b3-" // we default to interop with non-opentracing zipkin tracers
- prefixBaggage = "ot-baggage-"
-
- tracerStateFieldCount = 3 // not 5, X-B3-ParentSpanId is optional and we allow optional Sampled header
- zipkinTraceID = prefixTracerState + "traceid"
- zipkinSpanID = prefixTracerState + "spanid"
- zipkinParentSpanID = prefixTracerState + "parentspanid"
- zipkinSampled = prefixTracerState + "sampled"
- zipkinFlags = prefixTracerState + "flags"
-)
-
-func (p *textMapPropagator) Inject(
- spanContext opentracing.SpanContext,
- opaqueCarrier interface{},
-) error {
- sc, ok := spanContext.(SpanContext)
- if !ok {
- return opentracing.ErrInvalidSpanContext
- }
- carrier, ok := opaqueCarrier.(opentracing.TextMapWriter)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
- carrier.Set(zipkinTraceID, sc.TraceID.ToHex())
- carrier.Set(zipkinSpanID, strconv.FormatUint(sc.SpanID, 16))
- carrier.Set(zipkinSampled, strconv.FormatBool(sc.Sampled))
-
- if sc.ParentSpanID != nil {
- // we only set ParentSpanID header if there is a parent span
- carrier.Set(zipkinParentSpanID, strconv.FormatUint(*sc.ParentSpanID, 16))
- }
- // we only need to inject the debug flag if set. see flag package for details.
- flags := sc.Flags & flag.Debug
- carrier.Set(zipkinFlags, strconv.FormatUint(uint64(flags), 10))
-
- for k, v := range sc.Baggage {
- carrier.Set(prefixBaggage+k, v)
- }
- return nil
-}
-
-func (p *textMapPropagator) Extract(
- opaqueCarrier interface{},
-) (opentracing.SpanContext, error) {
- carrier, ok := opaqueCarrier.(opentracing.TextMapReader)
- if !ok {
- return nil, opentracing.ErrInvalidCarrier
- }
- requiredFieldCount := 0
- var (
- traceID types.TraceID
- spanID uint64
- sampled bool
- parentSpanID *uint64
- flags flag.Flags
- err error
- )
- decodedBaggage := make(map[string]string)
- err = carrier.ForeachKey(func(k, v string) error {
- switch strings.ToLower(k) {
- case zipkinTraceID:
- traceID, err = types.TraceIDFromHex(v)
- if err != nil {
- return opentracing.ErrSpanContextCorrupted
- }
- case zipkinSpanID:
- spanID, err = strconv.ParseUint(v, 16, 64)
- if err != nil {
- return opentracing.ErrSpanContextCorrupted
- }
- case zipkinParentSpanID:
- var id uint64
- id, err = strconv.ParseUint(v, 16, 64)
- if err != nil {
- return opentracing.ErrSpanContextCorrupted
- }
- parentSpanID = &id
- case zipkinSampled:
- sampled, err = strconv.ParseBool(v)
- if err != nil {
- return opentracing.ErrSpanContextCorrupted
- }
- // Sampled header was explicitly set
- flags |= flag.SamplingSet
- case zipkinFlags:
- var f uint64
- f, err = strconv.ParseUint(v, 10, 64)
- if err != nil {
- return opentracing.ErrSpanContextCorrupted
- }
- if flag.Flags(f)&flag.Debug == flag.Debug {
- flags |= flag.Debug
- }
- default:
- lowercaseK := strings.ToLower(k)
- if strings.HasPrefix(lowercaseK, prefixBaggage) {
- decodedBaggage[strings.TrimPrefix(lowercaseK, prefixBaggage)] = v
- }
- // Balance off the requiredFieldCount++ just below...
- requiredFieldCount--
- }
- requiredFieldCount++
- return nil
- })
- if err != nil {
- return nil, err
- }
- if requiredFieldCount < tracerStateFieldCount {
- if requiredFieldCount == 0 {
- return nil, opentracing.ErrSpanContextNotFound
- }
- return nil, opentracing.ErrSpanContextCorrupted
- }
-
- // check if Sample state was communicated through the Flags bitset
- if !sampled && flags&flag.Sampled == flag.Sampled {
- sampled = true
- }
-
- return SpanContext{
- TraceID: traceID,
- SpanID: spanID,
- Sampled: sampled,
- Baggage: decodedBaggage,
- ParentSpanID: parentSpanID,
- Flags: flags,
- }, nil
-}
-
-func (p *binaryPropagator) Inject(
- spanContext opentracing.SpanContext,
- opaqueCarrier interface{},
-) error {
- sc, ok := spanContext.(SpanContext)
- if !ok {
- return opentracing.ErrInvalidSpanContext
- }
- carrier, ok := opaqueCarrier.(io.Writer)
- if !ok {
- return opentracing.ErrInvalidCarrier
- }
-
- state := wire.TracerState{}
- state.TraceId = sc.TraceID.Low
- state.TraceIdHigh = sc.TraceID.High
- state.SpanId = sc.SpanID
- state.Sampled = sc.Sampled
- state.BaggageItems = sc.Baggage
-
- // encode the debug bit
- flags := sc.Flags & flag.Debug
- if sc.ParentSpanID != nil {
- state.ParentSpanId = *sc.ParentSpanID
- } else {
- // root span...
- state.ParentSpanId = 0
- flags |= flag.IsRoot
- }
-
- // we explicitly inform our sampling state downstream
- flags |= flag.SamplingSet
- if sc.Sampled {
- flags |= flag.Sampled
- }
- state.Flags = uint64(flags)
-
- b, err := proto.Marshal(&state)
- if err != nil {
- return err
- }
-
- // Write the length of the marshalled binary to the writer.
- length := uint32(len(b))
- if err = binary.Write(carrier, binary.BigEndian, &length); err != nil {
- return err
- }
-
- _, err = carrier.Write(b)
- return err
-}
-
-func (p *binaryPropagator) Extract(
- opaqueCarrier interface{},
-) (opentracing.SpanContext, error) {
- carrier, ok := opaqueCarrier.(io.Reader)
- if !ok {
- return nil, opentracing.ErrInvalidCarrier
- }
-
- // Read the length of marshalled binary. io.ReadAll isn't that performant
- // since it keeps resizing the underlying buffer as it encounters more bytes
- // to read. By reading the length, we can allocate a fixed sized buf and read
- // the exact amount of bytes into it.
- var length uint32
- if err := binary.Read(carrier, binary.BigEndian, &length); err != nil {
- return nil, opentracing.ErrSpanContextCorrupted
- }
- buf := make([]byte, length)
- if n, err := carrier.Read(buf); err != nil {
- if n > 0 {
- return nil, opentracing.ErrSpanContextCorrupted
- }
- return nil, opentracing.ErrSpanContextNotFound
- }
-
- ctx := wire.TracerState{}
- if err := proto.Unmarshal(buf, &ctx); err != nil {
- return nil, opentracing.ErrSpanContextCorrupted
- }
-
- flags := flag.Flags(ctx.Flags)
- if flags&flag.Sampled == flag.Sampled {
- ctx.Sampled = true
- }
-	// This propagator expects sampling state to be explicitly propagated by the
-	// upstream service, so we set this flag to tell the tracer it should not
-	// run its sampler in case it is not the root of the trace.
- flags |= flag.SamplingSet
-
- return SpanContext{
- TraceID: types.TraceID{Low: ctx.TraceId, High: ctx.TraceIdHigh},
- SpanID: ctx.SpanId,
- Sampled: ctx.Sampled,
- Baggage: ctx.BaggageItems,
- ParentSpanID: &ctx.ParentSpanId,
- Flags: flags,
- }, nil
-}
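End to end, the text-map propagator round-trips the B3 headers it names above. A sketch using opentracing-go's HTTPHeadersCarrier, assuming both packages are importable:

package main

import (
	"fmt"
	"net/http"

	opentracing "github.com/opentracing/opentracing-go"
	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	tracer, _ := zipkintracer.NewTracer(zipkintracer.NewInMemoryRecorder())
	span := tracer.StartSpan("client-call")
	defer span.Finish()

	// Inject writes x-b3-traceid, x-b3-spanid, x-b3-sampled and x-b3-flags.
	hdr := http.Header{}
	_ = tracer.Inject(span.Context(), opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(hdr))
	fmt.Println(hdr.Get("x-b3-traceid") != "") // true

	// Extract on the receiving side rebuilds an equivalent SpanContext.
	sc, err := tracer.Extract(opentracing.HTTPHeaders,
		opentracing.HTTPHeadersCarrier(hdr))
	fmt.Println(err == nil, sc != nil) // true true
}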
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
deleted file mode 100644
index 03bc15b237d..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/raw.go
+++ /dev/null
@@ -1,30 +0,0 @@
-package zipkintracer
-
-import (
- "time"
-
- opentracing "github.com/opentracing/opentracing-go"
-)
-
-// RawSpan encapsulates all state associated with a (finished) Span.
-type RawSpan struct {
- // Those recording the RawSpan should also record the contents of its
- // SpanContext.
- Context SpanContext
-
- // The name of the "operation" this span is an instance of. (Called a "span
- // name" in some implementations)
- Operation string
-
-	// We store start time and duration rather than start and end timestamps so
-	// that only one of the timestamps has global clock uncertainty issues.
- Start time.Time
- Duration time.Duration
-
- // Essentially an extension mechanism. Can be used for many purposes,
- // not to be enumerated here.
- Tags opentracing.Tags
-
- // The span's "microlog".
- Logs []opentracing.LogRecord
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
deleted file mode 100644
index 0b8eeb7fc5f..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/recorder.go
+++ /dev/null
@@ -1,60 +0,0 @@
-package zipkintracer
-
-import "sync"
-
-// A SpanRecorder handles all of the `RawSpan` data generated via an
-// associated `Tracer` (see `NewTracer`) instance.
-type SpanRecorder interface {
- // Implementations must determine whether and where to store `span`.
- RecordSpan(span RawSpan)
-}
-
-// InMemorySpanRecorder is a simple thread-safe implementation of
-// SpanRecorder that stores all reported spans in memory, accessible
-// via reporter.GetSpans(). It is primarily intended for testing purposes.
-type InMemorySpanRecorder struct {
- sync.RWMutex
- spans []RawSpan
-}
-
-// NewInMemoryRecorder creates a new InMemorySpanRecorder.
-func NewInMemoryRecorder() *InMemorySpanRecorder {
- return new(InMemorySpanRecorder)
-}
-
-// RecordSpan implements the respective method of SpanRecorder.
-func (r *InMemorySpanRecorder) RecordSpan(span RawSpan) {
- r.Lock()
- defer r.Unlock()
- r.spans = append(r.spans, span)
-}
-
-// GetSpans returns a copy of the array of spans accumulated so far.
-func (r *InMemorySpanRecorder) GetSpans() []RawSpan {
- r.RLock()
- defer r.RUnlock()
- spans := make([]RawSpan, len(r.spans))
- copy(spans, r.spans)
- return spans
-}
-
-// GetSampledSpans returns a slice of spans accumulated so far which were sampled.
-func (r *InMemorySpanRecorder) GetSampledSpans() []RawSpan {
- r.RLock()
- defer r.RUnlock()
- spans := make([]RawSpan, 0, len(r.spans))
- for _, span := range r.spans {
- if span.Context.Sampled {
- spans = append(spans, span)
- }
- }
- return spans
-}
-
-// Reset clears the internal array of spans.
-func (r *InMemorySpanRecorder) Reset() {
- r.Lock()
- defer r.Unlock()
- r.spans = nil
-}
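InMemorySpanRecorder is the natural seam for tests: plug it into the tracer, exercise some spans, then assert on what was recorded. A sketch:

package main

import (
	"fmt"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	rec := zipkintracer.NewInMemoryRecorder()
	tracer, _ := zipkintracer.NewTracer(rec) // the default sampler keeps everything

	tracer.StartSpan("op-a").Finish()
	tracer.StartSpan("op-b").Finish()

	for _, s := range rec.GetSpans() {
		fmt.Println(s.Operation) // op-a, then op-b
	}
	rec.Reset()
	fmt.Println(len(rec.GetSpans())) // 0
}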
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
deleted file mode 100644
index bb7ff0a5361..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/sample.go
+++ /dev/null
@@ -1,99 +0,0 @@
-package zipkintracer
-
-import (
- "math"
- "math/rand"
- "sync"
- "time"
-)
-
-// Sampler functions return whether a Zipkin span should be sampled, based on
-// its traceID.
-type Sampler func(id uint64) bool
-
-func neverSample(_ uint64) bool { return false }
-
-func alwaysSample(_ uint64) bool { return true }
-
-// ModuloSampler provides a typical OpenTracing type Sampler.
-func ModuloSampler(mod uint64) Sampler {
- if mod < 2 {
- return alwaysSample
- }
- return func(id uint64) bool {
- return (id % mod) == 0
- }
-}
-
-// NewBoundarySampler is appropriate for high-traffic instrumentation that
-// provisions random trace ids and makes the sampling decision only once.
-// It defends against nodes in the cluster selecting exactly the same ids.
-func NewBoundarySampler(rate float64, salt int64) Sampler {
- if rate <= 0 {
- return neverSample
- }
- if rate >= 1.0 {
- return alwaysSample
- }
- var (
- boundary = int64(rate * 10000)
- usalt = uint64(salt)
- )
- return func(id uint64) bool {
- return int64(math.Abs(float64(id^usalt)))%10000 < boundary
- }
-}
-
-// NewCountingSampler is appropriate for low-traffic instrumentation or
-// those who do not provision random trace ids. It is not appropriate for
-// collectors as the sampling decision isn't idempotent (consistent based
-// on trace id).
-func NewCountingSampler(rate float64) Sampler {
- if rate <= 0 {
- return neverSample
- }
- if rate >= 1.0 {
- return alwaysSample
- }
- var (
- i = 0
- outOf100 = int(rate*100 + math.Copysign(0.5, rate*100)) // for rounding float to int conversion instead of truncation
- decisions = randomBitSet(100, outOf100, rand.New(rand.NewSource(time.Now().UnixNano())))
- mtx = &sync.Mutex{}
- )
-
- return func(_ uint64) bool {
- mtx.Lock()
- defer mtx.Unlock()
- result := decisions[i]
- i++
- if i == 100 {
- i = 0
- }
- return result
- }
-}
-
-/**
- * Reservoir sampling algorithm borrowed from Stack Overflow.
- *
- * http://stackoverflow.com/questions/12817946/generate-a-random-bitset-with-n-1s
- */
-func randomBitSet(size int, cardinality int, rnd *rand.Rand) []bool {
- result := make([]bool, size)
- chosen := make([]int, cardinality)
- var i int
- for i = 0; i < cardinality; i++ {
- chosen[i] = i
- result[i] = true
- }
- for ; i < size; i++ {
- j := rnd.Intn(i + 1)
- if j < cardinality {
- result[chosen[j]] = false
- result[i] = true
- chosen[j] = i
- }
- }
- return result
-}
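The practical difference between the two samplers: boundary decisions are a pure function of the trace id, while counting decisions depend on call order. A sketch; the rates are illustrative:

package main

import (
	"fmt"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	// Idempotent: the same id always yields the same decision, so multiple
	// nodes observing one trace agree without coordination.
	boundary := zipkintracer.NewBoundarySampler(0.25, 42)
	fmt.Println(boundary(12345) == boundary(12345)) // true

	// Exact-rate but stateful: 25 of every 100 calls sample, regardless of id.
	counting := zipkintracer.NewCountingSampler(0.25)
	hits := 0
	for i := 0; i < 100; i++ {
		if counting(0) {
			hits++
		}
	}
	fmt.Println(hits) // 25
}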
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go
deleted file mode 100644
index 7fa78a9d623..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/span.go
+++ /dev/null
@@ -1,277 +0,0 @@
-package zipkintracer
-
-import (
- "sync"
- "time"
-
- opentracing "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
- "github.com/opentracing/opentracing-go/log"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// Span provides access to the essential details of the span, for use
-// by zipkintracer consumers. These methods may only be called prior
-// to (*opentracing.Span).Finish().
-type Span interface {
- opentracing.Span
-
- // Operation names the work done by this span instance
- Operation() string
-
- // Start indicates when the span began
- Start() time.Time
-}
-
-// Implements the `Span` interface. Created via tracerImpl (see
-// `zipkintracer.NewTracer()`).
-type spanImpl struct {
- tracer *tracerImpl
- event func(SpanEvent)
- sync.Mutex // protects the fields below
- raw RawSpan
- // The number of logs dropped because of MaxLogsPerSpan.
- numDroppedLogs int
- Endpoint *zipkincore.Endpoint
-}
-
-var spanPool = &sync.Pool{New: func() interface{} {
- return &spanImpl{}
-}}
-
-func (s *spanImpl) reset() {
- s.tracer, s.event = nil, nil
- // Note: Would like to do the following, but then the consumer of RawSpan
- // (the recorder) needs to make sure that they're not holding on to the
- // baggage or logs when they return (i.e. they need to copy if they care):
- //
- // logs, baggage := s.raw.Logs[:0], s.raw.Baggage
- // for k := range baggage {
- // delete(baggage, k)
- // }
- // s.raw.Logs, s.raw.Baggage = logs, baggage
- //
- // That's likely too much to ask for. But there is some magic we should
- // be able to do with `runtime.SetFinalizer` to reclaim that memory into
- // a buffer pool when GC considers them unreachable, which should ease
- // some of the load. Hard to say how quickly that would be in practice
- // though.
- s.raw = RawSpan{
- Context: SpanContext{},
- }
-}
-
-func (s *spanImpl) SetOperationName(operationName string) opentracing.Span {
- s.Lock()
- defer s.Unlock()
- s.raw.Operation = operationName
- return s
-}
-
-func (s *spanImpl) trim() bool {
- return !s.raw.Context.Sampled && s.tracer.options.trimUnsampledSpans
-}
-
-func (s *spanImpl) SetTag(key string, value interface{}) opentracing.Span {
- defer s.onTag(key, value)
- s.Lock()
- defer s.Unlock()
- if key == string(ext.SamplingPriority) {
- if v, ok := value.(uint16); ok {
- s.raw.Context.Sampled = v != 0
- return s
- }
- }
- if s.trim() {
- return s
- }
-
- if s.raw.Tags == nil {
- s.raw.Tags = opentracing.Tags{}
- }
- s.raw.Tags[key] = value
- return s
-}
-
-func (s *spanImpl) LogKV(keyValues ...interface{}) {
- fields, err := log.InterleavedKVToFields(keyValues...)
- if err != nil {
- s.LogFields(log.Error(err), log.String("function", "LogKV"))
- return
- }
- s.LogFields(fields...)
-}
-
-func (s *spanImpl) appendLog(lr opentracing.LogRecord) {
- maxLogs := s.tracer.options.maxLogsPerSpan
- if maxLogs == 0 || len(s.raw.Logs) < maxLogs {
- s.raw.Logs = append(s.raw.Logs, lr)
- return
- }
-
- // We have too many logs. We don't touch the first numOld logs; we treat the
- // rest as a circular buffer and overwrite the oldest log among those.
- numOld := (maxLogs - 1) / 2
- numNew := maxLogs - numOld
- s.raw.Logs[numOld+s.numDroppedLogs%numNew] = lr
- s.numDroppedLogs++
-}
-
-func (s *spanImpl) LogFields(fields ...log.Field) {
- lr := opentracing.LogRecord{
- Fields: fields,
- }
- defer s.onLogFields(lr)
- s.Lock()
- defer s.Unlock()
- if s.trim() || s.tracer.options.dropAllLogs {
- return
- }
- if lr.Timestamp.IsZero() {
- lr.Timestamp = time.Now()
- }
- s.appendLog(lr)
-}
-
-func (s *spanImpl) LogEvent(event string) {
- s.Log(opentracing.LogData{
- Event: event,
- })
-}
-
-func (s *spanImpl) LogEventWithPayload(event string, payload interface{}) {
- s.Log(opentracing.LogData{
- Event: event,
- Payload: payload,
- })
-}
-
-func (s *spanImpl) Log(ld opentracing.LogData) {
- defer s.onLog(ld)
- s.Lock()
- defer s.Unlock()
- if s.trim() || s.tracer.options.dropAllLogs {
- return
- }
-
- if ld.Timestamp.IsZero() {
- ld.Timestamp = time.Now()
- }
-
- s.appendLog(ld.ToLogRecord())
-}
-
-func (s *spanImpl) Finish() {
- s.FinishWithOptions(opentracing.FinishOptions{})
-}
-
-// rotateLogBuffer rotates the records in the buffer: records 0 to pos-1 move
-// to the end (i.e. pos circular left shifts).
-func rotateLogBuffer(buf []opentracing.LogRecord, pos int) {
- // This algorithm is described in:
- // http://www.cplusplus.com/reference/algorithm/rotate
- for first, middle, next := 0, pos, pos; first != middle; {
- buf[first], buf[next] = buf[next], buf[first]
- first++
- next++
- if next == len(buf) {
- next = middle
- } else if first == middle {
- middle = next
- }
- }
-}
-
-func (s *spanImpl) FinishWithOptions(opts opentracing.FinishOptions) {
- finishTime := opts.FinishTime
- if finishTime.IsZero() {
- finishTime = time.Now()
- }
- duration := finishTime.Sub(s.raw.Start)
-
- s.Lock()
- defer s.Unlock()
-
- for _, lr := range opts.LogRecords {
- s.appendLog(lr)
- }
- for _, ld := range opts.BulkLogData {
- s.appendLog(ld.ToLogRecord())
- }
-
- if s.numDroppedLogs > 0 {
- // We dropped some log events, which means that we used part of Logs as a
- // circular buffer (see appendLog). De-circularize it.
- numOld := (len(s.raw.Logs) - 1) / 2
- numNew := len(s.raw.Logs) - numOld
- rotateLogBuffer(s.raw.Logs[numOld:], s.numDroppedLogs%numNew)
-
- // Replace the log in the middle (the oldest "new" log) with information
- // about the dropped logs. This means that we are effectively dropping one
- // more "new" log.
- numDropped := s.numDroppedLogs + 1
- s.raw.Logs[numOld] = opentracing.LogRecord{
- // Keep the timestamp of the last dropped event.
- Timestamp: s.raw.Logs[numOld].Timestamp,
- Fields: []log.Field{
- log.String("event", "dropped Span logs"),
- log.Int("dropped_log_count", numDropped),
- log.String("component", "zipkintracer"),
- },
- }
- }
-
- s.raw.Duration = duration
-
- s.onFinish(s.raw)
- s.tracer.options.recorder.RecordSpan(s.raw)
-
- // Last chance to get options before the span is possibly reset.
- poolEnabled := s.tracer.options.enableSpanPool
- if s.tracer.options.debugAssertUseAfterFinish {
- // This makes it much more likely to catch a panic on any subsequent
- // operation since s.tracer is accessed on every call to `Lock`.
- // We don't call `reset()` here to preserve the logs in the Span
- // which are printed when the assertion triggers.
- s.tracer = nil
- }
-
- if poolEnabled {
- spanPool.Put(s)
- }
-}
-
-func (s *spanImpl) Tracer() opentracing.Tracer {
- return s.tracer
-}
-
-func (s *spanImpl) Context() opentracing.SpanContext {
- return s.raw.Context
-}
-
-func (s *spanImpl) SetBaggageItem(key, val string) opentracing.Span {
- s.onBaggage(key, val)
- if s.trim() {
- return s
- }
-
- s.Lock()
- defer s.Unlock()
- s.raw.Context = s.raw.Context.WithBaggageItem(key, val)
- return s
-}
-
-func (s *spanImpl) BaggageItem(key string) string {
- s.Lock()
- defer s.Unlock()
- return s.raw.Context.Baggage[key]
-}
-
-func (s *spanImpl) Operation() string {
- return s.raw.Operation
-}
-
-func (s *spanImpl) Start() time.Time {
- return s.raw.Start
-}
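The log-capping scheme above keeps roughly the oldest half and the newest half of the logs and replaces the middle with a drop marker. A sketch of the observable effect, using WithMaxLogsPerSpan from tracer.go below (the option rejects limits under 5):

package main

import (
	"fmt"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	rec := zipkintracer.NewInMemoryRecorder()
	tracer, _ := zipkintracer.NewTracer(rec, zipkintracer.WithMaxLogsPerSpan(5))

	span := tracer.StartSpan("noisy")
	for i := 0; i < 20; i++ {
		span.LogKV("i", i)
	}
	span.Finish()

	logs := rec.GetSpans()[0].Logs
	// 5 records survive: i=0, i=1, a "dropped Span logs" marker, i=18, i=19.
	fmt.Println(len(logs)) // 5
}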
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go
deleted file mode 100644
index e9c4f0cc6a5..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/tracer.go
+++ /dev/null
@@ -1,419 +0,0 @@
-package zipkintracer
-
-import (
- "errors"
- "time"
-
- opentracing "github.com/opentracing/opentracing-go"
- "github.com/opentracing/opentracing-go/ext"
-
- "github.com/openzipkin/zipkin-go-opentracing/flag"
-)
-
-// ErrInvalidEndpoint is returned if the hostPort parameter is corrupted or the
-// host can't be resolved.
-var ErrInvalidEndpoint = errors.New("Invalid Endpoint. Please check hostPort parameter")
-
-// Tracer extends the opentracing.Tracer interface with methods to
-// probe implementation state, for use by zipkintracer consumers.
-type Tracer interface {
- opentracing.Tracer
-
- // Options gets the Options used in New() or NewWithOptions().
- Options() TracerOptions
-}
-
-// TracerOptions allows creating a customized Tracer.
-type TracerOptions struct {
- // shouldSample is a function which is called when creating a new Span and
- // determines whether that Span is sampled. The randomized TraceID is supplied
- // to allow deterministic sampling decisions to be made across different nodes.
- shouldSample func(traceID uint64) bool
- // trimUnsampledSpans turns potentially expensive operations on unsampled
- // Spans into no-ops. More precisely, tags and log events are silently
- // discarded. If NewSpanEventListener is set, the callbacks will still fire.
- trimUnsampledSpans bool
- // recorder receives Spans which have been finished.
- recorder SpanRecorder
- // newSpanEventListener can be used to enhance the tracer by effectively
- // attaching external code to trace events. See NetTraceIntegrator for a
- // practical example, and event.go for the list of possible events.
- newSpanEventListener func() func(SpanEvent)
- // dropAllLogs turns log events on all Spans into no-ops.
- // If NewSpanEventListener is set, the callbacks will still fire.
- dropAllLogs bool
- // MaxLogsPerSpan limits the number of Logs in a span (if set to a nonzero
- // value). If a span has more logs than this value, logs are dropped as
- // necessary (and replaced with a log describing how many were dropped).
- //
-	// About half of the MaxLogsPerSpan logs kept are the oldest logs, and about
- // half are the newest logs.
- //
- // If NewSpanEventListener is set, the callbacks will still fire for all log
- // events. This value is ignored if DropAllLogs is true.
- maxLogsPerSpan int
- // debugAssertSingleGoroutine internally records the ID of the goroutine
- // creating each Span and verifies that no operation is carried out on
- // it on a different goroutine.
- // Provided strictly for development purposes.
-	// Passing Spans between goroutines without proper synchronization often
- // results in use-after-Finish() errors. For a simple example, consider the
- // following pseudocode:
- //
- // func (s *Server) Handle(req http.Request) error {
- // sp := s.StartSpan("server")
- // defer sp.Finish()
- // wait := s.queueProcessing(opentracing.ContextWithSpan(context.Background(), sp), req)
- // select {
- // case resp := <-wait:
- // return resp.Error
- // case <-time.After(10*time.Second):
- // sp.LogEvent("timed out waiting for processing")
- // return ErrTimedOut
- // }
- // }
- //
- // This looks reasonable at first, but a request which spends more than ten
- // seconds in the queue is abandoned by the main goroutine and its trace
- // finished, leading to use-after-finish when the request is finally
- // processed. Note also that even joining on to a finished Span via
- // StartSpanWithOptions constitutes an illegal operation.
- //
- // Code bases which do not require (or decide they do not want) Spans to
- // be passed across goroutine boundaries can run with this flag enabled in
- // tests to increase their chances of spotting wrong-doers.
- debugAssertSingleGoroutine bool
- // debugAssertUseAfterFinish is provided strictly for development purposes.
- // When set, it attempts to exacerbate issues emanating from use of Spans
- // after calling Finish by running additional assertions.
- debugAssertUseAfterFinish bool
- // enableSpanPool enables the use of a pool, so that the tracer reuses spans
- // after Finish has been called on it. Adds a slight performance gain as it
- // reduces allocations. However, if you have any use-after-finish race
- // conditions the code may panic.
- enableSpanPool bool
-	// logger is used for internal error reporting (see the WithLogger option).
- logger Logger
-	// clientServerSameSpan allows for a Zipkin V1 style span per RPC. This places
-	// both the client end and the server end of an RPC call into the same span.
- clientServerSameSpan bool
- // debugMode activates Zipkin's debug request allowing for all Spans originating
- // from this tracer to pass through and bypass sampling. Use with extreme care
- // as it might flood your system if you have many traces starting from the
- // service you are instrumenting.
- debugMode bool
- // traceID128Bit enables the generation of 128 bit traceIDs in case the tracer
- // needs to create a root span. By default regular 64 bit traceIDs are used.
- // Regardless of this setting, the library will propagate and support both
- // 64 and 128 bit incoming traces from upstream sources.
- traceID128Bit bool
-}
-
-// TracerOption allows for functional options.
-// See: http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis
-type TracerOption func(opts *TracerOptions) error
-
-// WithSampler allows one to add a Sampler function
-func WithSampler(sampler Sampler) TracerOption {
- return func(opts *TracerOptions) error {
- opts.shouldSample = sampler
- return nil
- }
-}
-
-// TrimUnsampledSpans option
-func TrimUnsampledSpans(trim bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.trimUnsampledSpans = trim
- return nil
- }
-}
-
-// DropAllLogs option
-func DropAllLogs(dropAllLogs bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.dropAllLogs = dropAllLogs
- return nil
- }
-}
-
-// WithLogger option
-func WithLogger(logger Logger) TracerOption {
- return func(opts *TracerOptions) error {
- opts.logger = logger
- return nil
- }
-}
-
-// DebugAssertSingleGoroutine option
-func DebugAssertSingleGoroutine(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.debugAssertSingleGoroutine = val
- return nil
- }
-}
-
-// DebugAssertUseAfterFinish option
-func DebugAssertUseAfterFinish(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.debugAssertUseAfterFinish = val
- return nil
- }
-}
-
-// TraceID128Bit option
-func TraceID128Bit(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.traceID128Bit = val
- return nil
- }
-}
-
-// ClientServerSameSpan allows placing client-side and server-side annotations
-// for an RPC call in the same span (Zipkin V1 behavior). By default this Tracer
-// uses single host spans (so client-side and server-side in separate spans).
-func ClientServerSameSpan(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.clientServerSameSpan = val
- return nil
- }
-}
-
-// DebugMode allows setting the tracer to Zipkin debug mode.
-func DebugMode(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.debugMode = val
- return nil
- }
-}
-
-// EnableSpanPool enables reuse of finished spans via an internal pool.
-func EnableSpanPool(val bool) TracerOption {
- return func(opts *TracerOptions) error {
- opts.enableSpanPool = val
- return nil
- }
-}
-
-// NewSpanEventListener option
-func NewSpanEventListener(f func() func(SpanEvent)) TracerOption {
- return func(opts *TracerOptions) error {
- opts.newSpanEventListener = f
- return nil
- }
-}
-
-// WithMaxLogsPerSpan option
-func WithMaxLogsPerSpan(limit int) TracerOption {
- return func(opts *TracerOptions) error {
- if limit < 5 || limit > 10000 {
- return errors.New("invalid MaxLogsPerSpan limit. Should be between 5 and 10000")
- }
- opts.maxLogsPerSpan = limit
- return nil
- }
-}
-
-// NewTracer creates a new OpenTracing compatible Zipkin Tracer.
-func NewTracer(recorder SpanRecorder, options ...TracerOption) (opentracing.Tracer, error) {
- opts := &TracerOptions{
- recorder: recorder,
- shouldSample: alwaysSample,
- trimUnsampledSpans: false,
- newSpanEventListener: func() func(SpanEvent) { return nil },
- logger: &nopLogger{},
- debugAssertSingleGoroutine: false,
- debugAssertUseAfterFinish: false,
- clientServerSameSpan: false,
- debugMode: false,
- traceID128Bit: false,
- maxLogsPerSpan: 10000,
- }
- for _, o := range options {
- err := o(opts)
- if err != nil {
- return nil, err
- }
- }
- rval := &tracerImpl{options: *opts}
- rval.textPropagator = &textMapPropagator{rval}
- rval.binaryPropagator = &binaryPropagator{rval}
- rval.accessorPropagator = &accessorPropagator{rval}
- return rval, nil
-}
-
-// Implements the `Tracer` interface.
-type tracerImpl struct {
- options TracerOptions
- textPropagator *textMapPropagator
- binaryPropagator *binaryPropagator
- accessorPropagator *accessorPropagator
-}
-
-func (t *tracerImpl) StartSpan(
- operationName string,
- opts ...opentracing.StartSpanOption,
-) opentracing.Span {
- sso := opentracing.StartSpanOptions{}
- for _, o := range opts {
- o.Apply(&sso)
- }
- return t.startSpanWithOptions(operationName, sso)
-}
-
-func (t *tracerImpl) getSpan() *spanImpl {
- if t.options.enableSpanPool {
- sp := spanPool.Get().(*spanImpl)
- sp.reset()
- return sp
- }
- return &spanImpl{}
-}
-
-func (t *tracerImpl) startSpanWithOptions(
- operationName string,
- opts opentracing.StartSpanOptions,
-) opentracing.Span {
- // Start time.
- startTime := opts.StartTime
- if startTime.IsZero() {
- startTime = time.Now()
- }
-
- // Tags.
- tags := opts.Tags
-
- // Build the new span. This is the only allocation: We'll return this as
- // an opentracing.Span.
- sp := t.getSpan()
- // Look for a parent in the list of References.
- //
- // TODO: would be nice if basictracer did something with all
- // References, not just the first one.
-ReferencesLoop:
- for _, ref := range opts.References {
- switch ref.Type {
- case opentracing.ChildOfRef:
- refCtx := ref.ReferencedContext.(SpanContext)
- sp.raw.Context.TraceID = refCtx.TraceID
- sp.raw.Context.ParentSpanID = &refCtx.SpanID
- sp.raw.Context.Sampled = refCtx.Sampled
- sp.raw.Context.Flags = refCtx.Flags
- sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
-
- if t.options.clientServerSameSpan &&
- tags[string(ext.SpanKind)] == ext.SpanKindRPCServer.Value {
- sp.raw.Context.SpanID = refCtx.SpanID
- sp.raw.Context.ParentSpanID = refCtx.ParentSpanID
- sp.raw.Context.Owner = false
- } else {
- sp.raw.Context.SpanID = randomID()
- sp.raw.Context.ParentSpanID = &refCtx.SpanID
- sp.raw.Context.Owner = true
- }
-
- if l := len(refCtx.Baggage); l > 0 {
- sp.raw.Context.Baggage = make(map[string]string, l)
- for k, v := range refCtx.Baggage {
- sp.raw.Context.Baggage[k] = v
- }
- }
- break ReferencesLoop
- case opentracing.FollowsFromRef:
- refCtx := ref.ReferencedContext.(SpanContext)
- sp.raw.Context.TraceID = refCtx.TraceID
- sp.raw.Context.ParentSpanID = &refCtx.SpanID
- sp.raw.Context.Sampled = refCtx.Sampled
- sp.raw.Context.Flags = refCtx.Flags
- sp.raw.Context.Flags &^= flag.IsRoot // unset IsRoot flag if needed
-
- sp.raw.Context.SpanID = randomID()
- sp.raw.Context.ParentSpanID = &refCtx.SpanID
- sp.raw.Context.Owner = true
-
- if l := len(refCtx.Baggage); l > 0 {
- sp.raw.Context.Baggage = make(map[string]string, l)
- for k, v := range refCtx.Baggage {
- sp.raw.Context.Baggage[k] = v
- }
- }
- break ReferencesLoop
- }
- }
- if sp.raw.Context.TraceID.Empty() {
- // No parent Span found; allocate new trace and span ids and determine
- // the Sampled status.
- if t.options.traceID128Bit {
- sp.raw.Context.TraceID.High = randomID()
- }
- sp.raw.Context.TraceID.Low, sp.raw.Context.SpanID = randomID2()
- sp.raw.Context.Sampled = t.options.shouldSample(sp.raw.Context.TraceID.Low)
- sp.raw.Context.Flags = flag.IsRoot
- sp.raw.Context.Owner = true
- }
- if t.options.debugMode {
- sp.raw.Context.Flags |= flag.Debug
- }
- return t.startSpanInternal(
- sp,
- operationName,
- startTime,
- tags,
- )
-}
-
-func (t *tracerImpl) startSpanInternal(
- sp *spanImpl,
- operationName string,
- startTime time.Time,
- tags opentracing.Tags,
-) opentracing.Span {
- sp.tracer = t
- if t.options.newSpanEventListener != nil {
- sp.event = t.options.newSpanEventListener()
- }
- sp.raw.Operation = operationName
- sp.raw.Start = startTime
- sp.raw.Duration = -1
- sp.raw.Tags = tags
- if t.options.debugAssertSingleGoroutine {
- sp.SetTag(debugGoroutineIDTag, curGoroutineID())
- }
- defer sp.onCreate(operationName)
- return sp
-}
-
-type delegatorType struct{}
-
-// Delegator is the format to use for DelegatingCarrier.
-var Delegator delegatorType
-
-func (t *tracerImpl) Inject(sc opentracing.SpanContext, format interface{}, carrier interface{}) error {
- switch format {
- case opentracing.TextMap, opentracing.HTTPHeaders:
- return t.textPropagator.Inject(sc, carrier)
- case opentracing.Binary:
- return t.binaryPropagator.Inject(sc, carrier)
- }
- if _, ok := format.(delegatorType); ok {
- return t.accessorPropagator.Inject(sc, carrier)
- }
- return opentracing.ErrUnsupportedFormat
-}
-
-func (t *tracerImpl) Extract(format interface{}, carrier interface{}) (opentracing.SpanContext, error) {
- switch format {
- case opentracing.TextMap, opentracing.HTTPHeaders:
- return t.textPropagator.Extract(carrier)
- case opentracing.Binary:
- return t.binaryPropagator.Extract(carrier)
- }
- if _, ok := format.(delegatorType); ok {
- return t.accessorPropagator.Extract(carrier)
- }
- return nil, opentracing.ErrUnsupportedFormat
-}
-
-func (t *tracerImpl) Options() TracerOptions {
- return t.options
-}
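Putting the functional options together; a typical construction sketch (the in-memory recorder stands in for a real collector-backed one):

package main

import (
	"log"
	"os"

	opentracing "github.com/opentracing/opentracing-go"
	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
)

func main() {
	tracer, err := zipkintracer.NewTracer(
		zipkintracer.NewInMemoryRecorder(), // swap in a collector-backed recorder
		zipkintracer.WithSampler(zipkintracer.ModuloSampler(4)), // ~1 in 4 traces
		zipkintracer.TraceID128Bit(true),
		zipkintracer.WithLogger(zipkintracer.LogWrapper(log.New(os.Stderr, "trace ", 0))),
	)
	if err != nil {
		log.Fatal(err)
	}
	opentracing.SetGlobalTracer(tracer)

	span := opentracing.StartSpan("startup")
	span.Finish()
}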
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go
deleted file mode 100644
index 9e9771927cc..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/types/traceid.go
+++ /dev/null
@@ -1,40 +0,0 @@
-package types
-
-import (
- "fmt"
- "strconv"
-)
-
-// TraceID is a 128 bit number internally stored as 2x uint64 (high & low).
-type TraceID struct {
- High uint64
- Low uint64
-}
-
-// TraceIDFromHex returns the TraceID from a Hex string.
-func TraceIDFromHex(h string) (t TraceID, err error) {
- if len(h) > 16 {
- if t.High, err = strconv.ParseUint(h[0:len(h)-16], 16, 64); err != nil {
- return
- }
- t.Low, err = strconv.ParseUint(h[len(h)-16:], 16, 64)
- return
- }
- t.Low, err = strconv.ParseUint(h, 16, 64)
- return
-}
-
-// ToHex outputs the 128-bit traceID as hex string.
-func (t TraceID) ToHex() string {
- if t.High == 0 {
- return strconv.FormatUint(t.Low, 16)
- }
- return fmt.Sprintf(
- "%s%016s", strconv.FormatUint(t.High, 16), strconv.FormatUint(t.Low, 16),
- )
-}
-
-// Empty reports whether the TraceID has the zero value.
-func (t TraceID) Empty() bool {
- return t.Low == 0 && t.High == 0
-}
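Round-tripping ids through the hex helpers; the 128-bit id below is illustrative:

package main

import (
	"fmt"

	"github.com/openzipkin/zipkin-go-opentracing/types"
)

func main() {
	t, err := types.TraceIDFromHex("463ac35c9f6413ad48485a3953bb6124")
	fmt.Println(err, t.High != 0) // <nil> true
	fmt.Println(t.ToHex())        // 463ac35c9f6413ad48485a3953bb6124

	// 64-bit ids leave High at zero and render without padding.
	s, _ := types.TraceIDFromHex("48485a3953bb6124")
	fmt.Println(s.High, s.ToHex()) // 0 48485a3953bb6124
}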
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go
deleted file mode 100644
index 27066150222..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/util.go
+++ /dev/null
@@ -1,25 +0,0 @@
-package zipkintracer
-
-import (
- "math/rand"
- "sync"
- "time"
-)
-
-var (
- seededIDGen = rand.New(rand.NewSource(time.Now().UnixNano()))
- // The golang rand generators are *not* intrinsically thread-safe.
- seededIDLock sync.Mutex
-)
-
-func randomID() uint64 {
- seededIDLock.Lock()
- defer seededIDLock.Unlock()
- return uint64(seededIDGen.Int63())
-}
-
-func randomID2() (uint64, uint64) {
- seededIDLock.Lock()
- defer seededIDLock.Unlock()
- return uint64(seededIDGen.Int63()), uint64(seededIDGen.Int63())
-}
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go
deleted file mode 100644
index 79364998ced..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/carrier.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package wire
-
-import (
- "github.com/openzipkin/zipkin-go-opentracing/flag"
- "github.com/openzipkin/zipkin-go-opentracing/types"
-)
-
-// ProtobufCarrier is a DelegatingCarrier that uses protocol buffers as the
-// underlying data structure. The reason for implementing DelegatingCarrier
-// is to allow end users to serialize the underlying protocol buffers using
-// jsonpb or any other serialization form they want.
-type ProtobufCarrier TracerState
-
-// SetState sets the tracer state.
-func (p *ProtobufCarrier) SetState(traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
- p.TraceId = traceID.Low
- p.TraceIdHigh = traceID.High
- p.SpanId = spanID
- if parentSpanID == nil {
- flags |= flag.IsRoot
- p.ParentSpanId = 0
- } else {
- flags &^= flag.IsRoot
- p.ParentSpanId = *parentSpanID
- }
- flags |= flag.SamplingSet
- if sampled {
- flags |= flag.Sampled
- p.Sampled = sampled
- } else {
- flags &^= flag.Sampled
- }
- p.Flags = uint64(flags)
-}
-
-// State returns the tracer state.
-func (p *ProtobufCarrier) State() (traceID types.TraceID, spanID uint64, parentSpanID *uint64, sampled bool, flags flag.Flags) {
- traceID.Low = p.TraceId
- traceID.High = p.TraceIdHigh
- spanID = p.SpanId
- sampled = p.Sampled
- flags = flag.Flags(p.Flags)
- if flags&flag.IsRoot == 0 {
- parentSpanID = &p.ParentSpanId
- }
- return traceID, spanID, parentSpanID, sampled, flags
-}
-
-// SetBaggageItem sets a baggage item.
-func (p *ProtobufCarrier) SetBaggageItem(key, value string) {
- if p.BaggageItems == nil {
- p.BaggageItems = map[string]string{key: value}
- return
- }
-
- p.BaggageItems[key] = value
-}
-
-// GetBaggage iterates over each baggage item and executes the callback with
-// the key:value pair.
-func (p *ProtobufCarrier) GetBaggage(f func(k, v string)) {
- for k, v := range p.BaggageItems {
- f(k, v)
- }
-}
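ProtobufCarrier is what the Delegator format (tracer.go above) is for: the carrier is the protobuf message itself, so callers can serialize it with proto, jsonpb, or anything else. A sketch, assuming the packages are importable:

package main

import (
	"fmt"

	zipkintracer "github.com/openzipkin/zipkin-go-opentracing"
	"github.com/openzipkin/zipkin-go-opentracing/wire"
)

func main() {
	tracer, _ := zipkintracer.NewTracer(zipkintracer.NewInMemoryRecorder())
	span := tracer.StartSpan("rpc")
	defer span.Finish()

	carrier := &wire.ProtobufCarrier{}
	_ = tracer.Inject(span.Context(), zipkintracer.Delegator, carrier)
	fmt.Println(carrier.TraceId != 0, carrier.SpanId != 0) // true true

	sc, err := tracer.Extract(zipkintracer.Delegator, carrier)
	fmt.Println(err == nil, sc != nil) // true true
}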
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go
deleted file mode 100644
index 86242a9fed5..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/gen.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package wire
-
-//go:generate protoc --gogofaster_out=$GOPATH/src/github.com/openzipkin/zipkin-go-opentracing/wire wire.proto
-
-// Run `go get github.com/gogo/protobuf/protoc-gen-gogofaster` to install the
-// gogofaster generator binary.
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go
deleted file mode 100644
index df2c1198989..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.pb.go
+++ /dev/null
@@ -1,647 +0,0 @@
-// Code generated by protoc-gen-gogo.
-// source: wire.proto
-// DO NOT EDIT!
-
-/*
- Package wire is a generated protocol buffer package.
-
- It is generated from these files:
- wire.proto
-
- It has these top-level messages:
- TracerState
-*/
-package wire
-
-import proto "github.com/gogo/protobuf/proto"
-import fmt "fmt"
-import math "math"
-
-import io "io"
-
-// Reference imports to suppress errors if they are not otherwise used.
-var _ = proto.Marshal
-var _ = fmt.Errorf
-var _ = math.Inf
-
-// This is a compile-time assertion to ensure that this generated file
-// is compatible with the proto package it is being compiled against.
-// A compilation error at this line likely means your copy of the
-// proto package needs to be updated.
-const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
-
-type TracerState struct {
- TraceId uint64 `protobuf:"fixed64,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"`
- SpanId uint64 `protobuf:"fixed64,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"`
- Sampled bool `protobuf:"varint,3,opt,name=sampled,proto3" json:"sampled,omitempty"`
- BaggageItems map[string]string `protobuf:"bytes,4,rep,name=baggage_items,json=baggageItems" json:"baggage_items,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
- TraceIdHigh uint64 `protobuf:"fixed64,20,opt,name=trace_id_high,json=traceIdHigh,proto3" json:"trace_id_high,omitempty"`
- ParentSpanId uint64 `protobuf:"fixed64,21,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"`
- Flags uint64 `protobuf:"fixed64,22,opt,name=flags,proto3" json:"flags,omitempty"`
-}
-
-func (m *TracerState) Reset() { *m = TracerState{} }
-func (m *TracerState) String() string { return proto.CompactTextString(m) }
-func (*TracerState) ProtoMessage() {}
-func (*TracerState) Descriptor() ([]byte, []int) { return fileDescriptorWire, []int{0} }
-
-func (m *TracerState) GetTraceId() uint64 {
- if m != nil {
- return m.TraceId
- }
- return 0
-}
-
-func (m *TracerState) GetSpanId() uint64 {
- if m != nil {
- return m.SpanId
- }
- return 0
-}
-
-func (m *TracerState) GetSampled() bool {
- if m != nil {
- return m.Sampled
- }
- return false
-}
-
-func (m *TracerState) GetBaggageItems() map[string]string {
- if m != nil {
- return m.BaggageItems
- }
- return nil
-}
-
-func (m *TracerState) GetTraceIdHigh() uint64 {
- if m != nil {
- return m.TraceIdHigh
- }
- return 0
-}
-
-func (m *TracerState) GetParentSpanId() uint64 {
- if m != nil {
- return m.ParentSpanId
- }
- return 0
-}
-
-func (m *TracerState) GetFlags() uint64 {
- if m != nil {
- return m.Flags
- }
- return 0
-}
-
-func init() {
- proto.RegisterType((*TracerState)(nil), "zipkintracer_go.wire.TracerState")
-}
-func (m *TracerState) Marshal() (dAtA []byte, err error) {
- size := m.Size()
- dAtA = make([]byte, size)
- n, err := m.MarshalTo(dAtA)
- if err != nil {
- return nil, err
- }
- return dAtA[:n], nil
-}
-
-func (m *TracerState) MarshalTo(dAtA []byte) (int, error) {
- var i int
- _ = i
- var l int
- _ = l
- if m.TraceId != 0 {
- dAtA[i] = 0x9
- i++
- i = encodeFixed64Wire(dAtA, i, uint64(m.TraceId))
- }
- if m.SpanId != 0 {
- dAtA[i] = 0x11
- i++
- i = encodeFixed64Wire(dAtA, i, uint64(m.SpanId))
- }
- if m.Sampled {
- dAtA[i] = 0x18
- i++
- if m.Sampled {
- dAtA[i] = 1
- } else {
- dAtA[i] = 0
- }
- i++
- }
- if len(m.BaggageItems) > 0 {
- for k, _ := range m.BaggageItems {
- dAtA[i] = 0x22
- i++
- v := m.BaggageItems[k]
- mapSize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
- i = encodeVarintWire(dAtA, i, uint64(mapSize))
- dAtA[i] = 0xa
- i++
- i = encodeVarintWire(dAtA, i, uint64(len(k)))
- i += copy(dAtA[i:], k)
- dAtA[i] = 0x12
- i++
- i = encodeVarintWire(dAtA, i, uint64(len(v)))
- i += copy(dAtA[i:], v)
- }
- }
- if m.TraceIdHigh != 0 {
- dAtA[i] = 0xa1
- i++
- dAtA[i] = 0x1
- i++
- i = encodeFixed64Wire(dAtA, i, uint64(m.TraceIdHigh))
- }
- if m.ParentSpanId != 0 {
- dAtA[i] = 0xa9
- i++
- dAtA[i] = 0x1
- i++
- i = encodeFixed64Wire(dAtA, i, uint64(m.ParentSpanId))
- }
- if m.Flags != 0 {
- dAtA[i] = 0xb1
- i++
- dAtA[i] = 0x1
- i++
- i = encodeFixed64Wire(dAtA, i, uint64(m.Flags))
- }
- return i, nil
-}
-
-func encodeFixed64Wire(dAtA []byte, offset int, v uint64) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- dAtA[offset+4] = uint8(v >> 32)
- dAtA[offset+5] = uint8(v >> 40)
- dAtA[offset+6] = uint8(v >> 48)
- dAtA[offset+7] = uint8(v >> 56)
- return offset + 8
-}
-func encodeFixed32Wire(dAtA []byte, offset int, v uint32) int {
- dAtA[offset] = uint8(v)
- dAtA[offset+1] = uint8(v >> 8)
- dAtA[offset+2] = uint8(v >> 16)
- dAtA[offset+3] = uint8(v >> 24)
- return offset + 4
-}
-func encodeVarintWire(dAtA []byte, offset int, v uint64) int {
- for v >= 1<<7 {
- dAtA[offset] = uint8(v&0x7f | 0x80)
- v >>= 7
- offset++
- }
- dAtA[offset] = uint8(v)
- return offset + 1
-}
-func (m *TracerState) Size() (n int) {
- var l int
- _ = l
- if m.TraceId != 0 {
- n += 9
- }
- if m.SpanId != 0 {
- n += 9
- }
- if m.Sampled {
- n += 2
- }
- if len(m.BaggageItems) > 0 {
- for k, v := range m.BaggageItems {
- _ = k
- _ = v
- mapEntrySize := 1 + len(k) + sovWire(uint64(len(k))) + 1 + len(v) + sovWire(uint64(len(v)))
- n += mapEntrySize + 1 + sovWire(uint64(mapEntrySize))
- }
- }
- if m.TraceIdHigh != 0 {
- n += 10
- }
- if m.ParentSpanId != 0 {
- n += 10
- }
- if m.Flags != 0 {
- n += 10
- }
- return n
-}
-
-func sovWire(x uint64) (n int) {
- for {
- n++
- x >>= 7
- if x == 0 {
- break
- }
- }
- return n
-}
-func sozWire(x uint64) (n int) {
- return sovWire(uint64((x << 1) ^ uint64((int64(x) >> 63))))
-}
-func (m *TracerState) Unmarshal(dAtA []byte) error {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- preIndex := iNdEx
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- fieldNum := int32(wire >> 3)
- wireType := int(wire & 0x7)
- if wireType == 4 {
- return fmt.Errorf("proto: TracerState: wiretype end group for non-group")
- }
- if fieldNum <= 0 {
- return fmt.Errorf("proto: TracerState: illegal tag %d (wire type %d)", fieldNum, wire)
- }
- switch fieldNum {
- case 1:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceId", wireType)
- }
- m.TraceId = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += 8
- m.TraceId = uint64(dAtA[iNdEx-8])
- m.TraceId |= uint64(dAtA[iNdEx-7]) << 8
- m.TraceId |= uint64(dAtA[iNdEx-6]) << 16
- m.TraceId |= uint64(dAtA[iNdEx-5]) << 24
- m.TraceId |= uint64(dAtA[iNdEx-4]) << 32
- m.TraceId |= uint64(dAtA[iNdEx-3]) << 40
- m.TraceId |= uint64(dAtA[iNdEx-2]) << 48
- m.TraceId |= uint64(dAtA[iNdEx-1]) << 56
- case 2:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field SpanId", wireType)
- }
- m.SpanId = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += 8
- m.SpanId = uint64(dAtA[iNdEx-8])
- m.SpanId |= uint64(dAtA[iNdEx-7]) << 8
- m.SpanId |= uint64(dAtA[iNdEx-6]) << 16
- m.SpanId |= uint64(dAtA[iNdEx-5]) << 24
- m.SpanId |= uint64(dAtA[iNdEx-4]) << 32
- m.SpanId |= uint64(dAtA[iNdEx-3]) << 40
- m.SpanId |= uint64(dAtA[iNdEx-2]) << 48
- m.SpanId |= uint64(dAtA[iNdEx-1]) << 56
- case 3:
- if wireType != 0 {
- return fmt.Errorf("proto: wrong wireType = %d for field Sampled", wireType)
- }
- var v int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- v |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- m.Sampled = bool(v != 0)
- case 4:
- if wireType != 2 {
- return fmt.Errorf("proto: wrong wireType = %d for field BaggageItems", wireType)
- }
- var msglen int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- msglen |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- if msglen < 0 {
- return ErrInvalidLengthWire
- }
- postIndex := iNdEx + msglen
- if postIndex > l {
- return io.ErrUnexpectedEOF
- }
- var keykey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- keykey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapkey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapkey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapkey := int(stringLenmapkey)
- if intStringLenmapkey < 0 {
- return ErrInvalidLengthWire
- }
- postStringIndexmapkey := iNdEx + intStringLenmapkey
- if postStringIndexmapkey > l {
- return io.ErrUnexpectedEOF
- }
- mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
- iNdEx = postStringIndexmapkey
- if m.BaggageItems == nil {
- m.BaggageItems = make(map[string]string)
- }
- if iNdEx < postIndex {
- var valuekey uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- valuekey |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- var stringLenmapvalue uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return ErrIntOverflowWire
- }
- if iNdEx >= l {
- return io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- stringLenmapvalue |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- intStringLenmapvalue := int(stringLenmapvalue)
- if intStringLenmapvalue < 0 {
- return ErrInvalidLengthWire
- }
- postStringIndexmapvalue := iNdEx + intStringLenmapvalue
- if postStringIndexmapvalue > l {
- return io.ErrUnexpectedEOF
- }
- mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
- iNdEx = postStringIndexmapvalue
- m.BaggageItems[mapkey] = mapvalue
- } else {
- var mapvalue string
- m.BaggageItems[mapkey] = mapvalue
- }
- iNdEx = postIndex
- case 20:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field TraceIdHigh", wireType)
- }
- m.TraceIdHigh = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += 8
- m.TraceIdHigh = uint64(dAtA[iNdEx-8])
- m.TraceIdHigh |= uint64(dAtA[iNdEx-7]) << 8
- m.TraceIdHigh |= uint64(dAtA[iNdEx-6]) << 16
- m.TraceIdHigh |= uint64(dAtA[iNdEx-5]) << 24
- m.TraceIdHigh |= uint64(dAtA[iNdEx-4]) << 32
- m.TraceIdHigh |= uint64(dAtA[iNdEx-3]) << 40
- m.TraceIdHigh |= uint64(dAtA[iNdEx-2]) << 48
- m.TraceIdHigh |= uint64(dAtA[iNdEx-1]) << 56
- case 21:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field ParentSpanId", wireType)
- }
- m.ParentSpanId = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += 8
- m.ParentSpanId = uint64(dAtA[iNdEx-8])
- m.ParentSpanId |= uint64(dAtA[iNdEx-7]) << 8
- m.ParentSpanId |= uint64(dAtA[iNdEx-6]) << 16
- m.ParentSpanId |= uint64(dAtA[iNdEx-5]) << 24
- m.ParentSpanId |= uint64(dAtA[iNdEx-4]) << 32
- m.ParentSpanId |= uint64(dAtA[iNdEx-3]) << 40
- m.ParentSpanId |= uint64(dAtA[iNdEx-2]) << 48
- m.ParentSpanId |= uint64(dAtA[iNdEx-1]) << 56
- case 22:
- if wireType != 1 {
- return fmt.Errorf("proto: wrong wireType = %d for field Flags", wireType)
- }
- m.Flags = 0
- if (iNdEx + 8) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += 8
- m.Flags = uint64(dAtA[iNdEx-8])
- m.Flags |= uint64(dAtA[iNdEx-7]) << 8
- m.Flags |= uint64(dAtA[iNdEx-6]) << 16
- m.Flags |= uint64(dAtA[iNdEx-5]) << 24
- m.Flags |= uint64(dAtA[iNdEx-4]) << 32
- m.Flags |= uint64(dAtA[iNdEx-3]) << 40
- m.Flags |= uint64(dAtA[iNdEx-2]) << 48
- m.Flags |= uint64(dAtA[iNdEx-1]) << 56
- default:
- iNdEx = preIndex
- skippy, err := skipWire(dAtA[iNdEx:])
- if err != nil {
- return err
- }
- if skippy < 0 {
- return ErrInvalidLengthWire
- }
- if (iNdEx + skippy) > l {
- return io.ErrUnexpectedEOF
- }
- iNdEx += skippy
- }
- }
-
- if iNdEx > l {
- return io.ErrUnexpectedEOF
- }
- return nil
-}
-func skipWire(dAtA []byte) (n int, err error) {
- l := len(dAtA)
- iNdEx := 0
- for iNdEx < l {
- var wire uint64
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowWire
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- wire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- wireType := int(wire & 0x7)
- switch wireType {
- case 0:
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowWire
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- iNdEx++
- if dAtA[iNdEx-1] < 0x80 {
- break
- }
- }
- return iNdEx, nil
- case 1:
- iNdEx += 8
- return iNdEx, nil
- case 2:
- var length int
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowWire
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- length |= (int(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- iNdEx += length
- if length < 0 {
- return 0, ErrInvalidLengthWire
- }
- return iNdEx, nil
- case 3:
- for {
- var innerWire uint64
- var start int = iNdEx
- for shift := uint(0); ; shift += 7 {
- if shift >= 64 {
- return 0, ErrIntOverflowWire
- }
- if iNdEx >= l {
- return 0, io.ErrUnexpectedEOF
- }
- b := dAtA[iNdEx]
- iNdEx++
- innerWire |= (uint64(b) & 0x7F) << shift
- if b < 0x80 {
- break
- }
- }
- innerWireType := int(innerWire & 0x7)
- if innerWireType == 4 {
- break
- }
- next, err := skipWire(dAtA[start:])
- if err != nil {
- return 0, err
- }
- iNdEx = start + next
- }
- return iNdEx, nil
- case 4:
- return iNdEx, nil
- case 5:
- iNdEx += 4
- return iNdEx, nil
- default:
- return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
- }
- }
- panic("unreachable")
-}
-
-var (
- ErrInvalidLengthWire = fmt.Errorf("proto: negative length found during unmarshaling")
- ErrIntOverflowWire = fmt.Errorf("proto: integer overflow")
-)
-
-func init() { proto.RegisterFile("wire.proto", fileDescriptorWire) }
-
-var fileDescriptorWire = []byte{
- // 300 bytes of a gzipped FileDescriptorProto
- 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xe2, 0xe2, 0x2a, 0xcf, 0x2c, 0x4a,
- 0xd5, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xa9, 0xca, 0x2c, 0xc8, 0xce, 0xcc, 0x2b, 0x29,
- 0x4a, 0x4c, 0x4e, 0x2d, 0x8a, 0x4f, 0xcf, 0xd7, 0x03, 0xc9, 0x29, 0x5d, 0x63, 0xe2, 0xe2, 0x0e,
- 0x01, 0x0b, 0x05, 0x97, 0x24, 0x96, 0xa4, 0x0a, 0x49, 0x72, 0x71, 0x80, 0x55, 0xc4, 0x67, 0xa6,
- 0x48, 0x30, 0x2a, 0x30, 0x6a, 0xb0, 0x05, 0xb1, 0x83, 0xf9, 0x9e, 0x29, 0x42, 0xe2, 0x5c, 0xec,
- 0xc5, 0x05, 0x89, 0x79, 0x20, 0x19, 0x26, 0xb0, 0x0c, 0x1b, 0x88, 0xeb, 0x99, 0x22, 0x24, 0xc1,
- 0xc5, 0x5e, 0x9c, 0x98, 0x5b, 0x90, 0x93, 0x9a, 0x22, 0xc1, 0xac, 0xc0, 0xa8, 0xc1, 0x11, 0x04,
- 0xe3, 0x0a, 0x45, 0x70, 0xf1, 0x26, 0x25, 0xa6, 0xa7, 0x27, 0xa6, 0xa7, 0xc6, 0x67, 0x96, 0xa4,
- 0xe6, 0x16, 0x4b, 0xb0, 0x28, 0x30, 0x6b, 0x70, 0x1b, 0x19, 0xeb, 0x61, 0x73, 0x8b, 0x1e, 0x92,
- 0x3b, 0xf4, 0x9c, 0x20, 0xda, 0x3c, 0x41, 0xba, 0x5c, 0xf3, 0x4a, 0x8a, 0x2a, 0x83, 0x78, 0x92,
- 0x90, 0x84, 0x84, 0x94, 0xb8, 0x78, 0x61, 0xee, 0x8c, 0xcf, 0xc8, 0x4c, 0xcf, 0x90, 0x10, 0x01,
- 0x3b, 0x89, 0x1b, 0xea, 0x58, 0x8f, 0xcc, 0xf4, 0x0c, 0x21, 0x15, 0x2e, 0xbe, 0x82, 0xc4, 0xa2,
- 0xd4, 0xbc, 0x92, 0x78, 0x98, 0xbb, 0x45, 0xc1, 0x8a, 0x78, 0x20, 0xa2, 0xc1, 0x10, 0xd7, 0x8b,
- 0x70, 0xb1, 0xa6, 0xe5, 0x24, 0xa6, 0x17, 0x4b, 0x88, 0x81, 0x25, 0x21, 0x1c, 0x29, 0x7b, 0x2e,
- 0x41, 0x0c, 0x27, 0x08, 0x09, 0x70, 0x31, 0x67, 0xa7, 0x56, 0x82, 0xc3, 0x85, 0x33, 0x08, 0xc4,
- 0x04, 0x69, 0x2e, 0x4b, 0xcc, 0x29, 0x4d, 0x05, 0x87, 0x08, 0x67, 0x10, 0x84, 0x63, 0xc5, 0x64,
- 0xc1, 0xe8, 0x24, 0x76, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31,
- 0x4e, 0x78, 0x2c, 0xc7, 0x10, 0xc5, 0x02, 0xf2, 0x64, 0x12, 0x1b, 0x38, 0x36, 0x8c, 0x01, 0x01,
- 0x00, 0x00, 0xff, 0xff, 0xb5, 0x5e, 0x0d, 0x33, 0x9b, 0x01, 0x00, 0x00,
-}
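For reviewers tracing what the removed generated code did on the wire: `encodeFixed64Wire` writes fixed64 fields little-endian and `encodeVarintWire`/`sovWire` handle varint lengths. Below is a self-contained sketch of those two encodings using only the standard library; the `putVarint`/`varintLen` helper names are ours, not names from the deleted package.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// putVarint mirrors encodeVarintWire: 7 bits per byte, with the MSB set on
// every byte except the last.
func putVarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// varintLen mirrors sovWire: the number of bytes putVarint will emit.
func varintLen(x uint64) (n int) {
	for {
		n++
		x >>= 7
		if x == 0 {
			return n
		}
	}
}

func main() {
	buf := make([]byte, 16)

	// trace_id is field 1 with wire type 1 (fixed64): tag byte 0x09, then
	// eight little-endian bytes, exactly as encodeFixed64Wire lays them out.
	buf[0] = 0x09
	binary.LittleEndian.PutUint64(buf[1:9], 0xCAFE)
	fmt.Printf("fixed64 field: % x\n", buf[:9]) // 09 fe ca 00 00 00 00 00 00

	// Varint-encoded lengths, as used for the baggage_items map entries.
	end := putVarint(buf, 9, 300)
	fmt.Printf("varint(300): % x (len %d)\n", buf[9:end], varintLen(300)) // ac 02 (len 2)
}
```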
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto b/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto
deleted file mode 100644
index 7fa01d45fd3..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/wire/wire.proto
+++ /dev/null
@@ -1,13 +0,0 @@
-syntax = "proto3";
-package zipkintracer_go.wire;
-option go_package = "wire";
-
-message TracerState {
- fixed64 trace_id = 1;
- fixed64 span_id = 2;
- bool sampled = 3;
- map<string, string> baggage_items = 4;
- fixed64 trace_id_high = 20;
- fixed64 parent_span_id = 21;
- fixed64 flags = 22;
-}
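The `map<string, string>` field above is what drives the map-entry framing in the deleted `MarshalTo`: each baggage entry is a length-delimited field-4 record holding the key as field 1 and the value as field 2. A minimal sketch of that framing follows; the `appendBaggageEntry` helper is hypothetical, and it assumes strings and entries short enough for single-byte lengths (under 128 bytes).

```go
package main

import "fmt"

// appendBaggageEntry frames one baggage_items entry the way the generated
// MarshalTo does: tag 0x22 (field 4, wire type 2), the entry length, then
// the key as field 1 (tag 0x0a) and the value as field 2 (tag 0x12), both
// length-prefixed.
func appendBaggageEntry(dst []byte, k, v string) []byte {
	entry := []byte{0x0a, byte(len(k))}
	entry = append(entry, k...)
	entry = append(entry, 0x12, byte(len(v)))
	entry = append(entry, v...)
	dst = append(dst, 0x22, byte(len(entry)))
	return append(dst, entry...)
}

func main() {
	fmt.Printf("% x\n", appendBaggageEntry(nil, "user", "alice"))
	// 22 0d 0a 04 75 73 65 72 12 05 61 6c 69 63 65
}
```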
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go
deleted file mode 100644
index 30f49fc113b..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-endpoint.go
+++ /dev/null
@@ -1,72 +0,0 @@
-package zipkintracer
-
-import (
- "encoding/binary"
- "net"
- "strconv"
- "strings"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// makeEndpoint takes the hostport and service name that represent this Zipkin
-// service, and returns an endpoint that's embedded into the Zipkin core Span
-// type. It will return a nil endpoint if the input parameters are malformed.
-func makeEndpoint(hostport, serviceName string) (ep *zipkincore.Endpoint) {
- ep = zipkincore.NewEndpoint()
-
- // Set the ServiceName
- ep.ServiceName = serviceName
-
- if strings.IndexByte(hostport, ':') < 0 {
- // "" becomes ":0"
- hostport = hostport + ":0"
- }
-
- // try to parse provided "<host>:<port>"
- host, port, err := net.SplitHostPort(hostport)
- if err != nil {
- // if unparsable, return as "undefined:0"
- return
- }
-
- // try to set port number
- p, _ := strconv.ParseUint(port, 10, 16)
- ep.Port = int16(p)
-
- // if <host> is a domain name, look it up
- addrs, err := net.LookupIP(host)
- if err != nil {
- // return as "undefined:"
- return
- }
-
- var addr4, addr16 net.IP
- for i := range addrs {
- addr := addrs[i].To4()
- if addr == nil {
- // IPv6
- if addr16 == nil {
- addr16 = addrs[i].To16() // IPv6 - 16 bytes
- }
- } else {
- // IPv4
- if addr4 == nil {
- addr4 = addr // IPv4 - 4 bytes
- }
- }
- if addr16 != nil && addr4 != nil {
- // IPv4 & IPv6 have been set, we can stop looking further
- break
- }
- }
- // default to a zero-filled 4-byte array for IPv4 if an IPv6-only host was found
- if addr4 == nil {
- addr4 = make([]byte, 4)
- }
-
- // set IPv4 and IPv6 addresses
- ep.Ipv4 = (int32)(binary.BigEndian.Uint32(addr4))
- ep.Ipv6 = []byte(addr16)
- return
-}
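`makeEndpoint` above is mostly stdlib plumbing: `net.SplitHostPort` with a `:0` fallback, a DNS lookup, and the IPv4 address packed big-endian into the Thrift `Endpoint`'s int32 field. A minimal sketch of the packing step, with a literal IP standing in for the lookup the original performs:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"net"
)

func main() {
	// Split "host:port" as makeEndpoint does (it first appends ":0" when no
	// colon is present).
	host, port, err := net.SplitHostPort("192.168.1.12:8080")
	if err != nil {
		panic(err)
	}
	// Pack the 4-byte IPv4 address big-endian into the int32 that the
	// generated zipkincore.Endpoint stores in its Ipv4 field. The original
	// resolves hostnames via net.LookupIP; a literal IP keeps this
	// self-contained.
	ip := net.ParseIP(host).To4()
	bits := binary.BigEndian.Uint32(ip)
	fmt.Printf("host=%s port=%s ipv4=0x%08x (as int32: %d)\n", host, port, bits, int32(bits))
	// host=192.168.1.12 port=8080 ipv4=0xc0a8010c (as int32: -1062731508)
}
```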
diff --git a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go b/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go
deleted file mode 100644
index a889011a0fa..00000000000
--- a/vendor/github.com/openzipkin/zipkin-go-opentracing/zipkin-recorder.go
+++ /dev/null
@@ -1,287 +0,0 @@
-package zipkintracer
-
-import (
- "encoding/binary"
- "fmt"
- "math"
- "net"
- "strconv"
- "time"
-
- otext "github.com/opentracing/opentracing-go/ext"
- "github.com/opentracing/opentracing-go/log"
-
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
- "github.com/openzipkin/zipkin-go-opentracing/flag"
-)
-
-var (
- // SpanKindResource will be regarded as a SA annotation by Zipkin.
- SpanKindResource = otext.SpanKindEnum("resource")
-)
-
-// Recorder implements the SpanRecorder interface.
-type Recorder struct {
- collector Collector
- debug bool
- endpoint *zipkincore.Endpoint
- materializer func(logFields []log.Field) ([]byte, error)
-}
-
-// RecorderOption allows for functional options.
-type RecorderOption func(r *Recorder)
-
-// WithLogFmtMaterializer will convert OpenTracing Log fields to a LogFmt representation.
-func WithLogFmtMaterializer() RecorderOption {
- return func(r *Recorder) {
- r.materializer = MaterializeWithLogFmt
- }
-}
-
-// WithJSONMaterializer will convert OpenTracing Log fields to a JSON representation.
-func WithJSONMaterializer() RecorderOption {
- return func(r *Recorder) {
- r.materializer = MaterializeWithJSON
- }
-}
-
-// WithStrictMaterializer will only record event Log fields and discard the rest.
-func WithStrictMaterializer() RecorderOption {
- return func(r *Recorder) {
- r.materializer = StrictZipkinMaterializer
- }
-}
-
-// NewRecorder creates a new Zipkin Recorder backed by the provided Collector.
-//
-// hostPort and serviceName allow you to set the default Zipkin endpoint
-// information which will be added to the application's standard core
-// annotations. hostPort will be resolved into an IPv4 and/or IPv6 address and
-// port number; serviceName will be used as the application's service
-// identifier.
-//
-// If the application does not listen for incoming requests, or an endpoint
-// context does not involve a network address and/or port, these cases can be
-// handled like this:
-// # port is not applicable:
-// NewRecorder(c, debug, "192.168.1.12:0", "ServiceA")
-//
-// # network address and port are not applicable:
-// NewRecorder(c, debug, "0.0.0.0:0", "ServiceB")
-func NewRecorder(c Collector, debug bool, hostPort, serviceName string, options ...RecorderOption) SpanRecorder {
- r := &Recorder{
- collector: c,
- debug: debug,
- endpoint: makeEndpoint(hostPort, serviceName),
- materializer: MaterializeWithLogFmt,
- }
- for _, opts := range options {
- opts(r)
- }
- return r
-}
-
-// RecordSpan converts a RawSpan into the Zipkin representation of a span
-// and records it to the underlying collector.
-func (r *Recorder) RecordSpan(sp RawSpan) {
- if !sp.Context.Sampled {
- return
- }
-
- var parentSpanID *int64
- if sp.Context.ParentSpanID != nil {
- id := int64(*sp.Context.ParentSpanID)
- parentSpanID = &id
- }
-
- var traceIDHigh *int64
- if sp.Context.TraceID.High > 0 {
- tidh := int64(sp.Context.TraceID.High)
- traceIDHigh = &tidh
- }
-
- span := &zipkincore.Span{
- Name: sp.Operation,
- ID: int64(sp.Context.SpanID),
- TraceID: int64(sp.Context.TraceID.Low),
- TraceIDHigh: traceIDHigh,
- ParentID: parentSpanID,
- Debug: r.debug || (sp.Context.Flags&flag.Debug == flag.Debug),
- }
- // only send timestamp and duration if this process owns the current span.
- if sp.Context.Owner {
- timestamp := sp.Start.UnixNano() / 1e3
- duration := sp.Duration.Nanoseconds() / 1e3
- // since we always time our spans we will round up to 1 microsecond if the
- // span took less.
- if duration == 0 {
- duration = 1
- }
- span.Timestamp = &timestamp
- span.Duration = &duration
- }
- if kind, ok := sp.Tags[string(otext.SpanKind)]; ok {
- switch kind {
- case otext.SpanKindRPCClient, otext.SpanKindRPCClientEnum:
- annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
- annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
- case otext.SpanKindRPCServer, otext.SpanKindRPCServerEnum:
- annotate(span, sp.Start, zipkincore.SERVER_RECV, r.endpoint)
- annotate(span, sp.Start.Add(sp.Duration), zipkincore.SERVER_SEND, r.endpoint)
- case SpanKindResource:
- serviceName, ok := sp.Tags[string(otext.PeerService)]
- if !ok {
- serviceName = r.endpoint.GetServiceName()
- }
- host, ok := sp.Tags[string(otext.PeerHostname)].(string)
- if !ok {
- if r.endpoint.GetIpv4() > 0 {
- ip := make([]byte, 4)
- binary.BigEndian.PutUint32(ip, uint32(r.endpoint.GetIpv4()))
- host = net.IP(ip).To4().String()
- } else {
- ip := r.endpoint.GetIpv6()
- host = net.IP(ip).String()
- }
- }
- var sPort string
- port, ok := sp.Tags[string(otext.PeerPort)]
- if !ok {
- sPort = strconv.FormatInt(int64(r.endpoint.GetPort()), 10)
- } else {
- sPort = strconv.FormatInt(int64(port.(uint16)), 10)
- }
- re := makeEndpoint(net.JoinHostPort(host, sPort), serviceName.(string))
- if re != nil {
- annotateBinary(span, zipkincore.SERVER_ADDR, serviceName, re)
- } else {
- fmt.Printf("endpoint creation failed: host: %q port: %q", host, sPort)
- }
- annotate(span, sp.Start, zipkincore.CLIENT_SEND, r.endpoint)
- annotate(span, sp.Start.Add(sp.Duration), zipkincore.CLIENT_RECV, r.endpoint)
- default:
- annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
- }
- } else {
- annotateBinary(span, zipkincore.LOCAL_COMPONENT, r.endpoint.GetServiceName(), r.endpoint)
- }
-
- for key, value := range sp.Tags {
- annotateBinary(span, key, value, r.endpoint)
- }
-
- for _, spLog := range sp.Logs {
- if len(spLog.Fields) == 1 && spLog.Fields[0].Key() == "event" {
- // proper Zipkin annotation
- annotate(span, spLog.Timestamp, fmt.Sprintf("%+v", spLog.Fields[0].Value()), r.endpoint)
- continue
- }
- // OpenTracing Log with key-value pair(s). Try to materialize using the
- // materializer chosen for the recorder.
- if logs, err := r.materializer(spLog.Fields); err != nil {
- fmt.Printf("Materialization of OpenTracing LogFields failed: %+v", err)
- } else {
- annotate(span, spLog.Timestamp, string(logs), r.endpoint)
- }
- }
- _ = r.collector.Collect(span)
-}
-
-// annotate annotates the span with the given value.
-func annotate(span *zipkincore.Span, timestamp time.Time, value string, host *zipkincore.Endpoint) {
- if timestamp.IsZero() {
- timestamp = time.Now()
- }
- span.Annotations = append(span.Annotations, &zipkincore.Annotation{
- Timestamp: timestamp.UnixNano() / 1e3,
- Value: value,
- Host: host,
- })
-}
-
-// annotateBinary annotates the span with a key and a value that will be []byte
-// encoded.
-func annotateBinary(span *zipkincore.Span, key string, value interface{}, host *zipkincore.Endpoint) {
- var a zipkincore.AnnotationType
- var b []byte
- // We are not using zipkincore.AnnotationType_I16 for types that could fit
- // as reporting on it seems to be broken on the zipkin web interface
- // (however, we can properly extract the number from zipkin storage
- // directly). int64 has issues with negative numbers but seems ok for
- // positive numbers needing more than 32 bit.
- switch v := value.(type) {
- case bool:
- a = zipkincore.AnnotationType_BOOL
- b = []byte("\x00")
- if v {
- b = []byte("\x01")
- }
- case []byte:
- a = zipkincore.AnnotationType_BYTES
- b = v
- case byte:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(v))
- case int8:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(v))
- case int16:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(v))
- case uint16:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(v))
- case int32:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, uint32(v))
- case uint32:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 4)
- binary.BigEndian.PutUint32(b, v)
- case int64:
- a = zipkincore.AnnotationType_I64
- b = make([]byte, 8)
- binary.BigEndian.PutUint64(b, uint64(v))
- case int:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 8)
- binary.BigEndian.PutUint32(b, uint32(v))
- case uint:
- a = zipkincore.AnnotationType_I32
- b = make([]byte, 8)
- binary.BigEndian.PutUint32(b, uint32(v))
- case uint64:
- a = zipkincore.AnnotationType_I64
- b = make([]byte, 8)
- binary.BigEndian.PutUint64(b, v)
- case float32:
- a = zipkincore.AnnotationType_DOUBLE
- b = make([]byte, 8)
- bits := math.Float64bits(float64(v))
- binary.BigEndian.PutUint64(b, bits)
- case float64:
- a = zipkincore.AnnotationType_DOUBLE
- b = make([]byte, 8)
- bits := math.Float64bits(v)
- binary.BigEndian.PutUint64(b, bits)
- case string:
- a = zipkincore.AnnotationType_STRING
- b = []byte(v)
- default:
- // we have no handler for this value's type, but let's get a string
- // representation of it.
- a = zipkincore.AnnotationType_STRING
- b = []byte(fmt.Sprintf("%+v", value))
- }
- span.BinaryAnnotations = append(span.BinaryAnnotations, &zipkincore.BinaryAnnotation{
- Key: key,
- Value: b,
- AnnotationType: a,
- Host: host,
- })
-}
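The type switch in `annotateBinary` reduces to a fixed-width big-endian encoding per annotation type. A condensed sketch of two representative arms plus the string fallback; the `encodeTag` helper is ours:

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math"
)

// encodeTag condenses annotateBinary's encoding: bools become a single byte,
// float64s become the eight big-endian bytes of their IEEE-754 bit pattern
// (AnnotationType_DOUBLE), and anything unhandled is stringified.
func encodeTag(value interface{}) []byte {
	switch v := value.(type) {
	case bool:
		if v {
			return []byte{1}
		}
		return []byte{0}
	case float64:
		b := make([]byte, 8)
		binary.BigEndian.PutUint64(b, math.Float64bits(v))
		return b
	default:
		return []byte(fmt.Sprintf("%+v", value))
	}
}

func main() {
	fmt.Printf("bool:    % x\n", encodeTag(true)) // 01
	fmt.Printf("float64: % x\n", encodeTag(1.5)) // 3f f8 00 00 00 00 00 00
	fmt.Printf("string:  %s\n", encodeTag("up")) // up
}
```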
diff --git a/vendor/github.com/pierrec/lz4/.gitignore b/vendor/github.com/pierrec/lz4/.gitignore
deleted file mode 100644
index c2bb6e4af12..00000000000
--- a/vendor/github.com/pierrec/lz4/.gitignore
+++ /dev/null
@@ -1,31 +0,0 @@
-# Created by https://www.gitignore.io/api/macos
-
-### macOS ###
-*.DS_Store
-.AppleDouble
-.LSOverride
-
-# Icon must end with two \r
-Icon
-
-
-# Thumbnails
-._*
-
-# Files that might appear in the root of a volume
-.DocumentRevisions-V100
-.fseventsd
-.Spotlight-V100
-.TemporaryItems
-.Trashes
-.VolumeIcon.icns
-.com.apple.timemachine.donotpresent
-
-# Directories potentially created on remote AFP share
-.AppleDB
-.AppleDesktop
-Network Trash Folder
-Temporary Items
-.apdisk
-
-# End of https://www.gitignore.io/api/macos
diff --git a/vendor/github.com/pierrec/lz4/.travis.yml b/vendor/github.com/pierrec/lz4/.travis.yml
deleted file mode 100644
index 5fd6236336f..00000000000
--- a/vendor/github.com/pierrec/lz4/.travis.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-language: go
-
-go:
- - 1.4
- - 1.5
-
-script:
- - go test -cpu=2
- - go test -cpu=2 -race
\ No newline at end of file
diff --git a/vendor/github.com/pierrec/lz4/LICENSE b/vendor/github.com/pierrec/lz4/LICENSE
deleted file mode 100644
index bd899d8353d..00000000000
--- a/vendor/github.com/pierrec/lz4/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2015, Pierre Curto
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of xxHash nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/pierrec/lz4/README.md b/vendor/github.com/pierrec/lz4/README.md
deleted file mode 100644
index dd3c9d47e18..00000000000
--- a/vendor/github.com/pierrec/lz4/README.md
+++ /dev/null
@@ -1,31 +0,0 @@
-[GoDoc](https://godoc.org/github.com/pierrec/lz4)
-[Build Status](https://travis-ci.org/pierrec/lz4)
-
-# lz4
-LZ4 compression and decompression in pure Go
-
-## Usage
-
-```go
-import "github.com/pierrec/lz4"
-```
-
-## Description
-
-Package lz4 implements reading and writing lz4 compressed data (a frame),
-as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
-using an io.Reader (decompression) and io.Writer (compression).
-It is designed to minimize memory usage while maximizing throughput by being able to
-[de]compress data concurrently.
-
-The Reader and the Writer support concurrent processing provided the supplied buffers are
-large enough (in multiples of BlockMaxSize) and there is no block dependency.
-Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
-The runtime.GOMAXPROCS() value is used to apply concurrency or not.
-
-Although the block level compression and decompression functions are exposed and are fully compatible
-with the lz4 block format definition, they are low level and should not be used directly.
-For a complete description of an lz4 compressed block, see:
-http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-
-See https://github.com/Cyan4973/lz4 for the reference C implementation.
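This vendored copy is being dropped, but as a reference for the API the README describes, here is a minimal frame round trip, assuming the package is fetched from upstream at a comparable revision:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"strings"

	"github.com/pierrec/lz4"
)

func main() {
	// Compress a frame; Close flushes the end-of-frame marker and checksum.
	var compressed bytes.Buffer
	zw := lz4.NewWriter(&compressed)
	if _, err := io.Copy(zw, strings.NewReader("hello, lz4")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress it back.
	var out bytes.Buffer
	if _, err := io.Copy(&out, lz4.NewReader(&compressed)); err != nil {
		panic(err)
	}
	fmt.Println(out.String()) // hello, lz4
}
```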
diff --git a/vendor/github.com/pierrec/lz4/block.go b/vendor/github.com/pierrec/lz4/block.go
deleted file mode 100644
index 145eec27035..00000000000
--- a/vendor/github.com/pierrec/lz4/block.go
+++ /dev/null
@@ -1,445 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "errors"
-)
-
-// block represents a frame data block.
-// Used when compressing or decompressing frame blocks concurrently.
-type block struct {
- compressed bool
- zdata []byte // compressed data
- data []byte // decompressed data
- offset int // offset within the data as with block dependency the 64Kb window is prepended to it
- checksum uint32 // compressed data checksum
- err error // error while [de]compressing
-}
-
-var (
- // ErrInvalidSource is returned by UncompressBlock when a compressed block is corrupted.
- ErrInvalidSource = errors.New("lz4: invalid source")
- // ErrShortBuffer is returned by UncompressBlock, CompressBlock or CompressBlockHC when
- // the supplied buffer for [de]compression is too small.
- ErrShortBuffer = errors.New("lz4: short buffer")
-)
-
-// CompressBlockBound returns the maximum size of a given buffer of size n, when not compressible.
-func CompressBlockBound(n int) int {
- return n + n/255 + 16
-}
-
-// UncompressBlock decompresses the source buffer into the destination one,
-// starting at the di index and returning the decompressed size.
-//
-// The destination buffer must be sized appropriately.
-//
-// An error is returned if the source data is invalid or the destination buffer is too small.
-func UncompressBlock(src, dst []byte, di int) (int, error) {
- si, sn, di0 := 0, len(src), di
- if sn == 0 {
- return 0, nil
- }
-
- for {
- // literals and match lengths (token)
- lLen := int(src[si] >> 4)
- mLen := int(src[si] & 0xF)
- if si++; si == sn {
- return di, ErrInvalidSource
- }
-
- // literals
- if lLen > 0 {
- if lLen == 0xF {
- for src[si] == 0xFF {
- lLen += 0xFF
- if si++; si == sn {
- return di - di0, ErrInvalidSource
- }
- }
- lLen += int(src[si])
- if si++; si == sn {
- return di - di0, ErrInvalidSource
- }
- }
- if len(dst)-di < lLen || si+lLen > sn {
- return di - di0, ErrShortBuffer
- }
- di += copy(dst[di:], src[si:si+lLen])
-
- if si += lLen; si >= sn {
- return di - di0, nil
- }
- }
-
- if si += 2; si >= sn {
- return di, ErrInvalidSource
- }
- offset := int(src[si-2]) | int(src[si-1])<<8
- if di-offset < 0 || offset == 0 {
- return di - di0, ErrInvalidSource
- }
-
- // match
- if mLen == 0xF {
- for src[si] == 0xFF {
- mLen += 0xFF
- if si++; si == sn {
- return di - di0, ErrInvalidSource
- }
- }
- mLen += int(src[si])
- if si++; si == sn {
- return di - di0, ErrInvalidSource
- }
- }
- // minimum match length is 4
- mLen += 4
- if len(dst)-di <= mLen {
- return di - di0, ErrShortBuffer
- }
-
- // copy the match (NB. match is at least 4 bytes long)
- // NB. past di, copy() would write old bytes instead of
- // the ones we just copied, so split the work into the largest chunk.
- for ; mLen >= offset; mLen -= offset {
- di += copy(dst[di:], dst[di-offset:di])
- }
- di += copy(dst[di:], dst[di-offset:di-offset+mLen])
- }
-}
-
-// CompressBlock compresses the source buffer starting at soffset into the destination one.
-// This is the fast version of LZ4 compression and also the default one.
-//
-// The size of the compressed data is returned. If it is 0 and no error, then the data is incompressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlock(src, dst []byte, soffset int) (int, error) {
- sn, dn := len(src)-mfLimit, len(dst)
- if sn <= 0 || dn == 0 || soffset >= sn {
- return 0, nil
- }
- var si, di int
-
- // fast scan strategy:
- // we only need a hash table to store the last sequences (4 bytes)
- var hashTable [1 << hashLog]int
- var hashShift = uint((minMatch * 8) - hashLog)
-
- // Initialise the hash table with the first 64Kb of the input buffer
- // (used when compressing dependent blocks)
- for si < soffset {
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
- si++
- hashTable[h] = si
- }
-
- anchor := si
- fma := 1 << skipStrength
- for si < sn-minMatch {
- // hash the next 4 bytes (sequence)...
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
- // -1 to separate existing entries from new ones
- ref := hashTable[h] - 1
- // ...and store the position of the hash in the hash table (+1 to compensate the -1 upon saving)
- hashTable[h] = si + 1
- // no need to check the last 3 bytes in the first literal 4 bytes as
- // this guarantees that the next match, if any, is compressed with
- // a lower size, since to have some compression we must have:
- // ll+ml-overlap > 1 + (ll-15)/255 + (ml-4-15)/255 + 2 (uncompressed size>compressed size)
- // => ll+ml>3+2*overlap => ll+ml>= 4+2*overlap
- // and by definition we do have:
- // ll >= 1, ml >= 4
- // => ll+ml >= 5
- // => so overlap must be 0
-
- // the sequence is new, out of bound (64kb) or not valid: try next sequence
- if ref < 0 || fma&(1<<skipStrength-1) > 0 ||
- (si-ref)>>winSizeLog > 0 ||
- src[ref] != src[si] ||
- src[ref+1] != src[si+1] ||
- src[ref+2] != src[si+2] ||
- src[ref+3] != src[si+3] {
- // variable step: improves performance on non-compressible data
- si += fma >> skipStrength
- fma++
- continue
- }
- // match found
- fma = 1 << skipStrength
- lLen := si - anchor
- offset := si - ref
-
- // encode match length part 1
- si += minMatch
- mLen := si // match length has minMatch already
- for si <= sn && src[si] == src[si-offset] {
- si++
- }
- mLen = si - mLen
- if mLen < 0xF {
- dst[di] = byte(mLen)
- } else {
- dst[di] = 0xF
- }
-
- // encode literals length
- if lLen < 0xF {
- dst[di] |= byte(lLen << 4)
- } else {
- dst[di] |= 0xF0
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- l := lLen - 0xF
- for ; l >= 0xFF; l -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(l)
- }
- if di++; di == dn {
- return di, ErrShortBuffer
- }
-
- // literals
- if di+lLen >= dn {
- return di, ErrShortBuffer
- }
- di += copy(dst[di:], src[anchor:anchor+lLen])
- anchor = si
-
- // encode offset
- if di += 2; di >= dn {
- return di, ErrShortBuffer
- }
- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
- // encode match length part 2
- if mLen >= 0xF {
- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(mLen)
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- }
-
- if anchor == 0 {
- // incompressible
- return 0, nil
- }
-
- // last literals
- lLen := len(src) - anchor
- if lLen < 0xF {
- dst[di] = byte(lLen << 4)
- } else {
- dst[di] = 0xF0
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- lLen -= 0xF
- for ; lLen >= 0xFF; lLen -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(lLen)
- }
- if di++; di == dn {
- return di, ErrShortBuffer
- }
-
- // write literals
- src = src[anchor:]
- switch n := di + len(src); {
- case n > dn:
- return di, ErrShortBuffer
- case n >= sn:
- // incompressible
- return 0, nil
- }
- di += copy(dst[di:], src)
- return di, nil
-}
-
-// CompressBlockHC compresses the source buffer starting at soffset into the destination one.
-// CompressBlockHC compression ratio is better than CompressBlock but it is also slower.
-//
-// The size of the compressed data is returned. If it is 0 and no error, then the data is not compressible.
-//
-// An error is returned if the destination buffer is too small.
-func CompressBlockHC(src, dst []byte, soffset int) (int, error) {
- sn, dn := len(src)-mfLimit, len(dst)
- if sn <= 0 || dn == 0 || soffset >= sn {
- return 0, nil
- }
- var si, di int
-
- // Hash Chain strategy:
- // we need a hash table and a chain table
- // the chain table cannot contain more entries than the window size (64Kb entries)
- var hashTable [1 << hashLog]int
- var chainTable [winSize]int
- var hashShift = uint((minMatch * 8) - hashLog)
-
- // Initialise the hash table with the first 64Kb of the input buffer
- // (used when compressing dependent blocks)
- for si < soffset {
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
- chainTable[si&winMask] = hashTable[h]
- si++
- hashTable[h] = si
- }
-
- anchor := si
- for si < sn-minMatch {
- // hash the next 4 bytes (sequence)...
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
-
- // follow the chain until out of window and give the longest match
- mLen := 0
- offset := 0
- for next := hashTable[h] - 1; next > 0 && next > si-winSize; next = chainTable[next&winMask] - 1 {
- // the first (mLen==0) or next byte (mLen>=minMatch) at current match length must match to improve on the match length
- if src[next+mLen] == src[si+mLen] {
- for ml := 0; ; ml++ {
- if src[next+ml] != src[si+ml] || si+ml > sn {
- // found a longer match, keep its position and length
- if mLen < ml && ml >= minMatch {
- mLen = ml
- offset = si - next
- }
- break
- }
- }
- }
- }
- chainTable[si&winMask] = hashTable[h]
- hashTable[h] = si + 1
-
- // no match found
- if mLen == 0 {
- si++
- continue
- }
-
- // match found
- // update hash/chain tables with overlapping bytes:
- // si already hashed, add everything from si+1 up to the match length
- for si, ml := si+1, si+mLen; si < ml; {
- h := binary.LittleEndian.Uint32(src[si:]) * hasher >> hashShift
- chainTable[si&winMask] = hashTable[h]
- si++
- hashTable[h] = si
- }
-
- lLen := si - anchor
- si += mLen
- mLen -= minMatch // match length does not include minMatch
-
- if mLen < 0xF {
- dst[di] = byte(mLen)
- } else {
- dst[di] = 0xF
- }
-
- // encode literals length
- if lLen < 0xF {
- dst[di] |= byte(lLen << 4)
- } else {
- dst[di] |= 0xF0
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- l := lLen - 0xF
- for ; l >= 0xFF; l -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(l)
- }
- if di++; di == dn {
- return di, ErrShortBuffer
- }
-
- // literals
- if di+lLen >= dn {
- return di, ErrShortBuffer
- }
- di += copy(dst[di:], src[anchor:anchor+lLen])
- anchor = si
-
- // encode offset
- if di += 2; di >= dn {
- return di, ErrShortBuffer
- }
- dst[di-2], dst[di-1] = byte(offset), byte(offset>>8)
-
- // encode match length part 2
- if mLen >= 0xF {
- for mLen -= 0xF; mLen >= 0xFF; mLen -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(mLen)
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- }
-
- if anchor == 0 {
- // incompressible
- return 0, nil
- }
-
- // last literals
- lLen := len(src) - anchor
- if lLen < 0xF {
- dst[di] = byte(lLen << 4)
- } else {
- dst[di] = 0xF0
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- lLen -= 0xF
- for ; lLen >= 0xFF; lLen -= 0xFF {
- dst[di] = 0xFF
- if di++; di == dn {
- return di, ErrShortBuffer
- }
- }
- dst[di] = byte(lLen)
- }
- if di++; di == dn {
- return di, ErrShortBuffer
- }
-
- // write literals
- src = src[anchor:]
- switch n := di + len(src); {
- case n > dn:
- return di, ErrShortBuffer
- case n >= sn:
- // incompressible
- return 0, nil
- }
- di += copy(dst[di:], src)
- return di, nil
-}
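The subtlest step in `UncompressBlock` above is copying a match whose offset is smaller than its length: the source of the copy overlaps the bytes being written, so a single `copy` would re-read stale data. Copying in offset-sized chunks replays the bytes just written, and a one-byte offset degenerates into run-length expansion. A standalone sketch of just that loop:

```go
package main

import "fmt"

// copyMatch is the overlap-aware match copy from UncompressBlock, lifted
// into a helper: it copies mLen bytes from offset bytes back, in chunks no
// larger than the offset so already-written bytes are reused.
func copyMatch(dst []byte, di, offset, mLen int) int {
	for ; mLen >= offset; mLen -= offset {
		di += copy(dst[di:], dst[di-offset:di])
	}
	di += copy(dst[di:], dst[di-offset:di-offset+mLen])
	return di
}

func main() {
	dst := make([]byte, 16)
	copy(dst, "ab")
	// offset 2, match length 6: replicate the last two bytes three times.
	n := copyMatch(dst, 2, 2, 6)
	fmt.Println(string(dst[:n])) // abababab
}
```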
diff --git a/vendor/github.com/pierrec/lz4/lz4.go b/vendor/github.com/pierrec/lz4/lz4.go
deleted file mode 100644
index ddb82f66f8d..00000000000
--- a/vendor/github.com/pierrec/lz4/lz4.go
+++ /dev/null
@@ -1,105 +0,0 @@
-// Package lz4 implements reading and writing lz4 compressed data (a frame),
-// as specified in http://fastcompression.blogspot.fr/2013/04/lz4-streaming-format-final.html,
-// using an io.Reader (decompression) and io.Writer (compression).
-// It is designed to minimize memory usage while maximizing throughput by being able to
-// [de]compress data concurrently.
-//
-// The Reader and the Writer support concurrent processing provided the supplied buffers are
-// large enough (in multiples of BlockMaxSize) and there is no block dependency.
-// Reader.WriteTo and Writer.ReadFrom do leverage the concurrency transparently.
-// The runtime.GOMAXPROCS() value is used to apply concurrency or not.
-//
-// Although the block level compression and decompression functions are exposed and are fully compatible
-// with the lz4 block format definition, they are low level and should not be used directly.
-// For a complete description of an lz4 compressed block, see:
-// http://fastcompression.blogspot.fr/2011/05/lz4-explained.html
-//
-// See https://github.com/Cyan4973/lz4 for the reference C implementation.
-package lz4
-
-import (
- "hash"
- "sync"
-
- "github.com/pierrec/xxHash/xxHash32"
-)
-
-const (
- // Extension is the LZ4 frame file name extension
- Extension = ".lz4"
- // Version is the LZ4 frame format version
- Version = 1
-
- frameMagic = uint32(0x184D2204)
- frameSkipMagic = uint32(0x184D2A50)
-
- // The following constants are used to setup the compression algorithm.
- minMatch = 4 // the minimum size of the match sequence size (4 bytes)
- winSizeLog = 16 // LZ4 64Kb window size limit
- winSize = 1 << winSizeLog
- winMask = winSize - 1 // 64Kb window of previous data for dependent blocks
-
- // hashLog determines the size of the hash table used to quickly find a previous match position.
- // Its value influences the compression speed and memory usage, the lower the faster,
- // but at the expense of the compression ratio.
- // 16 seems to be the best compromise.
- hashLog = 16
- hashTableSize = 1 << hashLog
- hashShift = uint((minMatch * 8) - hashLog)
-
- mfLimit = 8 + minMatch // The last match cannot start within the last 12 bytes.
- skipStrength = 6 // variable step for fast scan
-
- hasher = uint32(2654435761) // prime number used to hash minMatch
-)
-
-// map the block max size id with its value in bytes: 64Kb, 256Kb, 1Mb and 4Mb.
-var bsMapID = map[byte]int{4: 64 << 10, 5: 256 << 10, 6: 1 << 20, 7: 4 << 20}
-var bsMapValue = map[int]byte{}
-
-// Reversed.
-func init() {
- for i, v := range bsMapID {
- bsMapValue[v] = i
- }
-}
-
-// Header describes the various flags that can be set on a Writer or obtained from a Reader.
-// The default values match those of the LZ4 frame format definition (http://fastcompression.blogspot.com/2013/04/lz4-streaming-format-final.html).
-//
-// NB. in a Reader, in case of concatenated frames, the Header values may change between Read() calls.
-// It is the caller responsibility to check them if necessary (typically when using the Reader concurrency).
-type Header struct {
- BlockDependency bool // compressed blocks are dependent (one block depends on the last 64Kb of the previous one)
- BlockChecksum bool // compressed blocks are checksumed
- NoChecksum bool // frame checksum
- BlockMaxSize int // the size of the decompressed data block (one of [64KB, 256KB, 1MB, 4MB]). Default=4MB.
- Size uint64 // the frame total size. It is _not_ computed by the Writer.
- HighCompression bool // use high compression (only for the Writer)
- done bool // whether the descriptor was processed (Read or Write and checked)
- // Removed as not supported
- // Dict bool // a dictionary id is to be used
- // DictID uint32 // the dictionary id read from the frame, if any.
-}
-
-// xxhPool wraps the standard pool for xxHash items.
-// Putting items back in the pool automatically resets them.
-type xxhPool struct {
- sync.Pool
-}
-
-func (p *xxhPool) Get() hash.Hash32 {
- return p.Pool.Get().(hash.Hash32)
-}
-
-func (p *xxhPool) Put(h hash.Hash32) {
- h.Reset()
- p.Pool.Put(h)
-}
-
-// hashPool is used by readers and writers and contains xxHash items.
-var hashPool = xxhPool{
- Pool: sync.Pool{
- New: func() interface{} { return xxHash32.New(0) },
- },
-}
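Both compressors index their tables with the same multiplicative hash built from these constants: the next four input bytes, read little-endian, times `hasher` (Knuth's constant, roughly 2^32/phi), keeping only the top `hashLog` bits. A self-contained sketch, with the constants restated locally:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const (
	minMatch = 4
	hashLog  = 16
	hasher   = uint32(2654435761) // Knuth's multiplicative constant, ~2^32/phi
)

// hash4 is the sequence hash both CompressBlock and CompressBlockHC use:
// multiply the next four bytes by the golden-ratio prime (wrapping at
// 32 bits) and keep the top hashLog bits as the table index.
func hash4(src []byte) uint32 {
	hashShift := uint(minMatch*8 - hashLog)
	return binary.LittleEndian.Uint32(src) * hasher >> hashShift
}

func main() {
	fmt.Println(hash4([]byte("abcd"))) // an index in [0, 1<<hashLog)
}
```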
diff --git a/vendor/github.com/pierrec/lz4/reader.go b/vendor/github.com/pierrec/lz4/reader.go
deleted file mode 100644
index 9f7fd60424f..00000000000
--- a/vendor/github.com/pierrec/lz4/reader.go
+++ /dev/null
@@ -1,364 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "errors"
- "fmt"
- "hash"
- "io"
- "io/ioutil"
- "runtime"
- "sync"
- "sync/atomic"
-)
-
-// ErrInvalid is returned when the data being read is not an LZ4 archive
-// (LZ4 magic number detection failed).
-var ErrInvalid = errors.New("invalid lz4 data")
-
-// errEndOfBlock is returned by readBlock when it has reached the last block of the frame.
-// It is not an error.
-var errEndOfBlock = errors.New("end of block")
-
-// Reader implements the LZ4 frame decoder.
-// The Header is set after the first call to Read().
-// The Header may change between Read() calls in case of concatenated frames.
-type Reader struct {
- Pos int64 // position within the source
- Header
- src io.Reader
- checksum hash.Hash32 // frame hash
- wg sync.WaitGroup // decompressing go routine wait group
- data []byte // buffered decompressed data
- window []byte // 64Kb decompressed data window
-}
-
-// NewReader returns a new LZ4 frame decoder.
-// No access to the underlying io.Reader is performed.
-func NewReader(src io.Reader) *Reader {
- return &Reader{
- src: src,
- checksum: hashPool.Get(),
- }
-}
-
-// readHeader checks the frame magic number and parses the frame descriptor.
-// Skippable frames are supported even as a first frame, although the LZ4
-// specification recommends that skippable frames not be used as first frames.
-func (z *Reader) readHeader(first bool) error {
- defer z.checksum.Reset()
-
- for {
- var magic uint32
- if err := binary.Read(z.src, binary.LittleEndian, &magic); err != nil {
- if !first && err == io.ErrUnexpectedEOF {
- return io.EOF
- }
- return err
- }
- z.Pos += 4
- if magic>>8 == frameSkipMagic>>8 {
- var skipSize uint32
- if err := binary.Read(z.src, binary.LittleEndian, &skipSize); err != nil {
- return err
- }
- z.Pos += 4
- m, err := io.CopyN(ioutil.Discard, z.src, int64(skipSize))
- z.Pos += m
- if err != nil {
- return err
- }
- continue
- }
- if magic != frameMagic {
- return ErrInvalid
- }
- break
- }
-
- // header
- var buf [8]byte
- if _, err := io.ReadFull(z.src, buf[:2]); err != nil {
- return err
- }
- z.Pos += 2
-
- b := buf[0]
- if b>>6 != Version {
- return fmt.Errorf("lz4.Read: invalid version: got %d expected %d", b>>6, Version)
- }
- z.BlockDependency = b>>5&1 == 0
- z.BlockChecksum = b>>4&1 > 0
- frameSize := b>>3&1 > 0
- z.NoChecksum = b>>2&1 == 0
- // z.Dict = b&1 > 0
-
- bmsID := buf[1] >> 4 & 0x7
- bSize, ok := bsMapID[bmsID]
- if !ok {
- return fmt.Errorf("lz4.Read: invalid block max size: %d", bmsID)
- }
- z.BlockMaxSize = bSize
-
- z.checksum.Write(buf[0:2])
-
- if frameSize {
- if err := binary.Read(z.src, binary.LittleEndian, &z.Size); err != nil {
- return err
- }
- z.Pos += 8
- binary.LittleEndian.PutUint64(buf[:], z.Size)
- z.checksum.Write(buf[0:8])
- }
-
- // if z.Dict {
- // if err := binary.Read(z.src, binary.LittleEndian, &z.DictID); err != nil {
- // return err
- // }
- // z.Pos += 4
- // binary.LittleEndian.PutUint32(buf[:], z.DictID)
- // z.checksum.Write(buf[0:4])
- // }
-
- // header checksum
- if _, err := io.ReadFull(z.src, buf[:1]); err != nil {
- return err
- }
- z.Pos++
- if h := byte(z.checksum.Sum32() >> 8 & 0xFF); h != buf[0] {
- return fmt.Errorf("lz4.Read: invalid header checksum: got %v expected %v", buf[0], h)
- }
-
- z.Header.done = true
-
- return nil
-}
-
-// Read decompresses data from the underlying source into the supplied buffer.
-//
-// Since there can be multiple streams concatenated, Header values may
-// change between calls to Read(). If that is the case, no data is actually read from
-// the underlying io.Reader, to allow for potential input buffer resizing.
-//
-// Data is buffered if the input buffer is too small, and exhausted upon successive calls.
-//
-// If the buffer is large enough (typically in multiples of BlockMaxSize) and there is
-// no block dependency, then the data will be decompressed concurrently based on the GOMAXPROCS value.
-func (z *Reader) Read(buf []byte) (n int, err error) {
- if !z.Header.done {
- if err = z.readHeader(true); err != nil {
- return
- }
- }
-
- if len(buf) == 0 {
- return
- }
-
- // exhaust remaining data from previous Read()
- if len(z.data) > 0 {
- n = copy(buf, z.data)
- z.data = z.data[n:]
- if len(z.data) == 0 {
- z.data = nil
- }
- return
- }
-
- // Break up the input buffer into BlockMaxSize blocks with at least one block.
- // Then decompress into each of them concurrently if possible (no dependency).
- // In case of dependency, the first block will be missing the window (except on the
- // very first call), the rest will have it already since it comes from the previous block.
- wbuf := buf
- zn := (len(wbuf) + z.BlockMaxSize - 1) / z.BlockMaxSize
- zblocks := make([]block, zn)
- for zi, abort := 0, uint32(0); zi < zn && atomic.LoadUint32(&abort) == 0; zi++ {
- zb := &zblocks[zi]
- // last block may be too small
- if len(wbuf) < z.BlockMaxSize+len(z.window) {
- wbuf = make([]byte, z.BlockMaxSize+len(z.window))
- }
- copy(wbuf, z.window)
- if zb.err = z.readBlock(wbuf, zb); zb.err != nil {
- break
- }
- wbuf = wbuf[z.BlockMaxSize:]
- if !z.BlockDependency {
- z.wg.Add(1)
- go z.decompressBlock(zb, &abort)
- continue
- }
- // cannot decompress concurrently when dealing with block dependency
- z.decompressBlock(zb, nil)
- // the last block may not contain enough data
- if len(z.window) == 0 {
- z.window = make([]byte, winSize)
- }
- if len(zb.data) >= winSize {
- copy(z.window, zb.data[len(zb.data)-winSize:])
- } else {
- copy(z.window, z.window[len(zb.data):])
- copy(z.window[len(zb.data)+1:], zb.data)
- }
- }
- z.wg.Wait()
-
- // since a block size may be less than BlockMaxSize, trim the decompressed buffers
- for _, zb := range zblocks {
- if zb.err != nil {
- if zb.err == errEndOfBlock {
- return n, z.close()
- }
- return n, zb.err
- }
- bLen := len(zb.data)
- if !z.NoChecksum {
- z.checksum.Write(zb.data)
- }
- m := copy(buf[n:], zb.data)
- // buffer the remaining data (this is necessarily the last block)
- if m < bLen {
- z.data = zb.data[m:]
- }
- n += m
- }
-
- return
-}
-
-// readBlock reads an entire frame block from the frame.
-// The input buffer is the one that will receive the decompressed data.
-// If the end of the frame is detected, it returns the errEndOfBlock error.
-func (z *Reader) readBlock(buf []byte, b *block) error {
- var bLen uint32
- if err := binary.Read(z.src, binary.LittleEndian, &bLen); err != nil {
- return err
- }
- atomic.AddInt64(&z.Pos, 4)
-
- switch {
- case bLen == 0:
- return errEndOfBlock
- case bLen&(1<<31) == 0:
- b.compressed = true
- b.data = buf
- b.zdata = make([]byte, bLen)
- default:
- bLen = bLen & (1<<31 - 1)
- if int(bLen) > len(buf) {
- return fmt.Errorf("lz4.Read: invalid block size: %d", bLen)
- }
- b.data = buf[:bLen]
- b.zdata = buf[:bLen]
- }
- if _, err := io.ReadFull(z.src, b.zdata); err != nil {
- return err
- }
-
- if z.BlockChecksum {
- if err := binary.Read(z.src, binary.LittleEndian, &b.checksum); err != nil {
- return err
- }
- xxh := hashPool.Get()
- defer hashPool.Put(xxh)
- xxh.Write(b.zdata)
- if h := xxh.Sum32(); h != b.checksum {
- return fmt.Errorf("lz4.Read: invalid block checksum: got %x expected %x", h, b.checksum)
- }
- }
-
- return nil
-}
-
-// decompressBlock decompresses a frame block.
-// In case of an error, the block err is set with it and abort is set to 1.
-func (z *Reader) decompressBlock(b *block, abort *uint32) {
- if abort != nil {
- defer z.wg.Done()
- }
- if b.compressed {
- n := len(z.window)
- m, err := UncompressBlock(b.zdata, b.data, n)
- if err != nil {
- if abort != nil {
- atomic.StoreUint32(abort, 1)
- }
- b.err = err
- return
- }
- b.data = b.data[n : n+m]
- }
- atomic.AddInt64(&z.Pos, int64(len(b.data)))
-}
-
-// close validates the frame checksum (if any) and checks the next frame (if any).
-func (z *Reader) close() error {
- if !z.NoChecksum {
- var checksum uint32
- if err := binary.Read(z.src, binary.LittleEndian, &checksum); err != nil {
- return err
- }
- if checksum != z.checksum.Sum32() {
- return fmt.Errorf("lz4.Read: invalid frame checksum: got %x expected %x", z.checksum.Sum32(), checksum)
- }
- }
-
- // get ready for the next concatenated frame, but do not change the position
- pos := z.Pos
- z.Reset(z.src)
- z.Pos = pos
-
- // since multiple frames can be concatenated, check for another one
- return z.readHeader(false)
-}
-
-// Reset discards the Reader's state and makes it equivalent to the
-// result of its original state from NewReader, but reading from r instead.
-// This permits reusing a Reader rather than allocating a new one.
-func (z *Reader) Reset(r io.Reader) {
- z.Header = Header{}
- z.Pos = 0
- z.src = r
- z.checksum.Reset()
- z.data = nil
- z.window = nil
-}
-
-// WriteTo decompresses the data from the underlying io.Reader and writes it to the io.Writer.
-// Returns the number of bytes written.
-func (z *Reader) WriteTo(w io.Writer) (n int64, err error) {
- cpus := runtime.GOMAXPROCS(0)
- var buf []byte
-
- // The initial buffer being nil, the first Read will only read the compressed frame options.
- // The buffer can then be sized appropriately to support maximum concurrency decompression.
- // If multiple frames are concatenated, Read() will return with no data decompressed but with
- // potentially changed options. The buffer will be resized accordingly, always trying to
- // maximize concurrency.
- for {
- nsize := 0
- // the block max size can change if multiple streams are concatenated.
- // Check it after every Read().
- if z.BlockDependency {
- // in case of dependency, we cannot decompress concurrently,
- // so allocate the minimum buffer + window size
- nsize = len(z.window) + z.BlockMaxSize
- } else {
- // if no dependency, allocate a buffer large enough for concurrent decompression
- nsize = cpus * z.BlockMaxSize
- }
- if nsize != len(buf) {
- buf = make([]byte, nsize)
- }
-
- m, er := z.Read(buf)
- if er != nil && er != io.EOF {
- return n, er
- }
- m, err = w.Write(buf[:m])
- n += int64(m)
- if err != nil || er == io.EOF {
- return
- }
- }
-}
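`readHeader` above unpacks the frame's FLG byte with plain shifts and masks: the frame format version sits in the top two bits, followed by single-bit flags with the same polarity as the fields on `Header`. A tiny standalone sketch of the same decoding; the `decodeFLG` helper is ours:

```go
package main

import "fmt"

// decodeFLG mirrors the FLG-byte parsing in readHeader: version in bits 7-6,
// then block independence, block checksum, frame size presence and content
// checksum, one bit each.
func decodeFLG(b byte) {
	fmt.Println("version:         ", b>>6)
	fmt.Println("block dependency:", b>>5&1 == 0)
	fmt.Println("block checksum:  ", b>>4&1 > 0)
	fmt.Println("frame size set:  ", b>>3&1 > 0)
	fmt.Println("no checksum:     ", b>>2&1 == 0)
}

func main() {
	decodeFLG(0x64) // version 1, independent blocks, content checksum enabled
}
```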
diff --git a/vendor/github.com/pierrec/lz4/writer.go b/vendor/github.com/pierrec/lz4/writer.go
deleted file mode 100644
index b16cb2618b7..00000000000
--- a/vendor/github.com/pierrec/lz4/writer.go
+++ /dev/null
@@ -1,376 +0,0 @@
-package lz4
-
-import (
- "encoding/binary"
- "fmt"
- "hash"
- "io"
- "runtime"
-)
-
-// Writer implements the LZ4 frame encoder.
-type Writer struct {
- Header
- dst io.Writer
- checksum hash.Hash32 // frame checksum
- data []byte // data to be compressed, only used when dealing with block dependency as we need 64Kb to work with
- window []byte // last 64KB of decompressed data (block dependency) + blockMaxSize buffer
-
- zbCompressBuf []byte // buffer for compressing lz4 blocks
- writeSizeBuf []byte // four-byte slice for writing checksums and sizes in writeblock
-}
-
-// NewWriter returns a new LZ4 frame encoder.
-// No access to the underlying io.Writer is performed.
-// The supplied Header is checked at the first Write.
-// It is ok to change it before the first Write but then not until a Reset() is performed.
-func NewWriter(dst io.Writer) *Writer {
- return &Writer{
- dst: dst,
- checksum: hashPool.Get(),
- Header: Header{
- BlockMaxSize: 4 << 20,
- },
- writeSizeBuf: make([]byte, 4),
- }
-}
-
-// writeHeader builds and writes the header (magic+header) to the underlying io.Writer.
-func (z *Writer) writeHeader() error {
-	// Default to 4MB if BlockMaxSize is not set
- if z.Header.BlockMaxSize == 0 {
- z.Header.BlockMaxSize = 4 << 20
- }
-	// the only option that needs to be validated
- bSize, ok := bsMapValue[z.Header.BlockMaxSize]
- if !ok {
- return fmt.Errorf("lz4: invalid block max size: %d", z.Header.BlockMaxSize)
- }
-
- // magic number(4) + header(flags(2)+[Size(8)+DictID(4)]+checksum(1)) does not exceed 19 bytes
- // Size and DictID are optional
- var buf [19]byte
-
- // set the fixed size data: magic number, block max size and flags
- binary.LittleEndian.PutUint32(buf[0:], frameMagic)
- flg := byte(Version << 6)
- if !z.Header.BlockDependency {
- flg |= 1 << 5
- }
- if z.Header.BlockChecksum {
- flg |= 1 << 4
- }
- if z.Header.Size > 0 {
- flg |= 1 << 3
- }
- if !z.Header.NoChecksum {
- flg |= 1 << 2
- }
- // if z.Header.Dict {
- // flg |= 1
- // }
- buf[4] = flg
- buf[5] = bSize << 4
-
- // current buffer size: magic(4) + flags(1) + block max size (1)
- n := 6
- // optional items
- if z.Header.Size > 0 {
- binary.LittleEndian.PutUint64(buf[n:], z.Header.Size)
- n += 8
- }
- // if z.Header.Dict {
- // binary.LittleEndian.PutUint32(buf[n:], z.Header.DictID)
- // n += 4
- // }
-
- // header checksum includes the flags, block max size and optional Size and DictID
- z.checksum.Write(buf[4:n])
- buf[n] = byte(z.checksum.Sum32() >> 8 & 0xFF)
- z.checksum.Reset()
-
- // header ready, write it out
- if _, err := z.dst.Write(buf[0 : n+1]); err != nil {
- return err
- }
- z.Header.done = true
-
- // initialize buffers dependent on header info
- z.zbCompressBuf = make([]byte, winSize+z.BlockMaxSize)
-
- return nil
-}
-
-// Write compresses data from the supplied buffer into the underlying io.Writer.
-// Write does not return until the data has been written.
-//
-// If the input buffer is large enough (typically in multiples of BlockMaxSize)
-// the data will be compressed concurrently.
-//
-// Write never buffers any data, except in BlockDependency mode, where it may
-// buffer input until it has 64KB of data, after which it never buffers again.
-func (z *Writer) Write(buf []byte) (n int, err error) {
- if !z.Header.done {
- if err = z.writeHeader(); err != nil {
- return
- }
- }
-
- if len(buf) == 0 {
- return
- }
-
- if !z.NoChecksum {
- z.checksum.Write(buf)
- }
-
-	// with block dependency, require at least 64KB of data to work with;
-	// not having 64KB only matters initially, to set up the first window
- bl := 0
- if z.BlockDependency && len(z.window) == 0 {
- bl = len(z.data)
- z.data = append(z.data, buf...)
- if len(z.data) < winSize {
- return len(buf), nil
- }
- buf = z.data
- z.data = nil
- }
-
- // Break up the input buffer into BlockMaxSize blocks, provisioning the left over block.
- // Then compress into each of them concurrently if possible (no dependency).
- var (
- zb block
- wbuf = buf
- zn = len(wbuf) / z.BlockMaxSize
- zi = 0
- leftover = len(buf) % z.BlockMaxSize
- )
-
-loop:
- for zi < zn {
- if z.BlockDependency {
- if zi == 0 {
- // first block does not have the window
- zb.data = append(z.window, wbuf[:z.BlockMaxSize]...)
- zb.offset = len(z.window)
- wbuf = wbuf[z.BlockMaxSize-winSize:]
- } else {
- // set the uncompressed data including the window from previous block
- zb.data = wbuf[:z.BlockMaxSize+winSize]
- zb.offset = winSize
- wbuf = wbuf[z.BlockMaxSize:]
- }
- } else {
- zb.data = wbuf[:z.BlockMaxSize]
- wbuf = wbuf[z.BlockMaxSize:]
- }
-
- goto write
- }
-
- // left over
- if leftover > 0 {
- zb = block{data: wbuf}
- if z.BlockDependency {
- if zn == 0 {
- zb.data = append(z.window, zb.data...)
- zb.offset = len(z.window)
- } else {
- zb.offset = winSize
- }
- }
-
- leftover = 0
- goto write
- }
-
- if z.BlockDependency {
- if len(z.window) == 0 {
- z.window = make([]byte, winSize)
- }
- // last buffer may be shorter than the window
- if len(buf) >= winSize {
- copy(z.window, buf[len(buf)-winSize:])
- } else {
- copy(z.window, z.window[len(buf):])
-			copy(z.window[winSize-len(buf):], buf)
- }
- }
-
- return
-
-write:
- zb = z.compressBlock(zb)
- _, err = z.writeBlock(zb)
-
- written := len(zb.data)
- if bl > 0 {
- if written >= bl {
- written -= bl
- bl = 0
- } else {
- bl -= written
- written = 0
- }
- }
-
- n += written
- // remove the window in zb.data
- if z.BlockDependency {
- if zi == 0 {
- n -= len(z.window)
- } else {
- n -= winSize
- }
- }
- if err != nil {
- return
- }
- zi++
- goto loop
-}
-
-// compressBlock compresses a block.
-func (z *Writer) compressBlock(zb block) block {
- // compressed block size cannot exceed the input's
- var (
- n int
- err error
- zbuf = z.zbCompressBuf
- )
- if z.HighCompression {
- n, err = CompressBlockHC(zb.data, zbuf, zb.offset)
- } else {
- n, err = CompressBlock(zb.data, zbuf, zb.offset)
- }
-
- // compressible and compressed size smaller than decompressed: ok!
-	if err == nil && n > 0 && n < len(zb.data)-zb.offset {
- zb.compressed = true
- zb.zdata = zbuf[:n]
- } else {
- zb.zdata = zb.data[zb.offset:]
- }
-
- if z.BlockChecksum {
- xxh := hashPool.Get()
- xxh.Write(zb.zdata)
- zb.checksum = xxh.Sum32()
- hashPool.Put(xxh)
- }
-
- return zb
-}
-
-// writeBlock writes a frame block to the underlying io.Writer (size, data).
-func (z *Writer) writeBlock(zb block) (int, error) {
- bLen := uint32(len(zb.zdata))
- if !zb.compressed {
- bLen |= 1 << 31
- }
-
- n := 0
-
- binary.LittleEndian.PutUint32(z.writeSizeBuf, bLen)
- n, err := z.dst.Write(z.writeSizeBuf)
- if err != nil {
- return n, err
- }
-
- m, err := z.dst.Write(zb.zdata)
- n += m
- if err != nil {
- return n, err
- }
-
- if z.BlockChecksum {
- binary.LittleEndian.PutUint32(z.writeSizeBuf, zb.checksum)
- m, err := z.dst.Write(z.writeSizeBuf)
- n += m
-
- if err != nil {
- return n, err
- }
- }
-
- return n, nil
-}
-
-// Flush flushes any pending compressed data to the underlying writer.
-// Flush does not return until the data has been written.
-// If the underlying writer returns an error, Flush returns that error.
-//
-// Flush is only required when in BlockDependency mode and the total amount of
-// data written is less than 64KB.
-func (z *Writer) Flush() error {
- if len(z.data) == 0 {
- return nil
- }
-
- zb := z.compressBlock(block{data: z.data})
- if _, err := z.writeBlock(zb); err != nil {
- return err
- }
- return nil
-}
-
-// Close closes the Writer, flushing any unwritten data to the underlying io.Writer, but does not close the underlying io.Writer.
-func (z *Writer) Close() error {
- if !z.Header.done {
- if err := z.writeHeader(); err != nil {
- return err
- }
- }
-
- // buffered data for the block dependency window
- if z.BlockDependency && len(z.data) > 0 {
- zb := block{data: z.data}
- if _, err := z.writeBlock(z.compressBlock(zb)); err != nil {
- return err
- }
- }
-
- if err := binary.Write(z.dst, binary.LittleEndian, uint32(0)); err != nil {
- return err
- }
- if !z.NoChecksum {
- if err := binary.Write(z.dst, binary.LittleEndian, z.checksum.Sum32()); err != nil {
- return err
- }
- }
- return nil
-}
-
-// Reset clears the state of the Writer z so that it is equivalent to its
-// initial state from NewWriter, but writing to w instead.
-// No access to the underlying io.Writer is performed.
-func (z *Writer) Reset(w io.Writer) {
- z.Header = Header{}
- z.dst = w
- z.checksum.Reset()
- z.data = nil
- z.window = nil
-}
-
-// ReadFrom compresses the data read from the io.Reader and writes it to the underlying io.Writer.
-// Returns the number of bytes read.
-// It does not close the Writer.
-func (z *Writer) ReadFrom(r io.Reader) (n int64, err error) {
- cpus := runtime.GOMAXPROCS(0)
- buf := make([]byte, cpus*z.BlockMaxSize)
- for {
- m, er := io.ReadFull(r, buf)
- n += int64(m)
- if er == nil || er == io.ErrUnexpectedEOF || er == io.EOF {
- if _, err = z.Write(buf[:m]); err != nil {
- return
- }
- if er == nil {
- continue
- }
- return
- }
- return n, er
- }
-}
diff --git a/vendor/github.com/pierrec/xxHash/LICENSE b/vendor/github.com/pierrec/xxHash/LICENSE
deleted file mode 100644
index c1418f3f677..00000000000
--- a/vendor/github.com/pierrec/xxHash/LICENSE
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2014, Pierre Curto
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this
- list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice,
- this list of conditions and the following disclaimer in the documentation
- and/or other materials provided with the distribution.
-
-* Neither the name of xxHash nor the names of its
- contributors may be used to endorse or promote products derived from
- this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
diff --git a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go b/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
deleted file mode 100644
index 411504e4bb8..00000000000
--- a/vendor/github.com/pierrec/xxHash/xxHash32/xxHash32.go
+++ /dev/null
@@ -1,205 +0,0 @@
-// Package xxHash32 implements the very fast xxHash hashing algorithm (32-bit version).
-// (https://github.com/Cyan4973/xxHash/)
-package xxHash32
-
-import "hash"
-
-const (
- prime32_1 = 2654435761
- prime32_2 = 2246822519
- prime32_3 = 3266489917
- prime32_4 = 668265263
- prime32_5 = 374761393
-)
-
-type xxHash struct {
- seed uint32
- v1 uint32
- v2 uint32
- v3 uint32
- v4 uint32
- totalLen uint64
- buf [16]byte
- bufused int
-}
-
-// New returns a new Hash32 instance.
-func New(seed uint32) hash.Hash32 {
- xxh := &xxHash{seed: seed}
- xxh.Reset()
- return xxh
-}
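-
-// A quick usage sketch (illustrative only; data is an assumed []byte):
-//
-//	h := xxHash32.New(0) // 0 is an arbitrary seed
-//	h.Write(data)
-//	sum := h.Sum32()
-//
-// For one-shot hashing, the package-level Checksum function below avoids
-// carrying intermediate state.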
-
-// Sum appends the current hash to b and returns the resulting slice.
-// It does not change the underlying hash state.
-func (xxh xxHash) Sum(b []byte) []byte {
- h32 := xxh.Sum32()
- return append(b, byte(h32), byte(h32>>8), byte(h32>>16), byte(h32>>24))
-}
-
-// Reset resets the Hash to its initial state.
-func (xxh *xxHash) Reset() {
- xxh.v1 = xxh.seed + prime32_1 + prime32_2
- xxh.v2 = xxh.seed + prime32_2
- xxh.v3 = xxh.seed
- xxh.v4 = xxh.seed - prime32_1
- xxh.totalLen = 0
- xxh.bufused = 0
-}
-
-// Size returns the number of bytes returned by Sum().
-func (xxh *xxHash) Size() int {
- return 4
-}
-
-// BlockSize gives the minimum number of bytes accepted by Write().
-func (xxh *xxHash) BlockSize() int {
- return 1
-}
-
-// Write adds input bytes to the Hash.
-// It never returns an error.
-func (xxh *xxHash) Write(input []byte) (int, error) {
- n := len(input)
- m := xxh.bufused
-
- xxh.totalLen += uint64(n)
-
- r := len(xxh.buf) - m
- if n < r {
- copy(xxh.buf[m:], input)
- xxh.bufused += len(input)
- return n, nil
- }
-
- p := 0
- if m > 0 {
- // some data left from previous update
- copy(xxh.buf[xxh.bufused:], input[:r])
- xxh.bufused += len(input) - r
-
- // fast rotl(13)
- p32 := xxh.v1 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
- xxh.v1 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v2 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
- xxh.v2 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v3 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
- xxh.v3 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v4 + (uint32(xxh.buf[p+3])<<24|uint32(xxh.buf[p+2])<<16|uint32(xxh.buf[p+1])<<8|uint32(xxh.buf[p]))*prime32_2
- xxh.v4 = (p32<<13 | p32>>19) * prime32_1
-
- p = r
- xxh.bufused = 0
- }
-
- for n := n - 16; p <= n; {
- p32 := xxh.v1 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
- xxh.v1 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v2 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
- xxh.v2 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v3 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
- xxh.v3 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- p32 = xxh.v4 + (uint32(input[p+3])<<24|uint32(input[p+2])<<16|uint32(input[p+1])<<8|uint32(input[p]))*prime32_2
- xxh.v4 = (p32<<13 | p32>>19) * prime32_1
- p += 4
- }
-
- copy(xxh.buf[xxh.bufused:], input[p:])
- xxh.bufused += len(input) - p
-
- return n, nil
-}
-
-// Sum32 returns the 32-bit hash value.
-func (xxh *xxHash) Sum32() uint32 {
- h32 := uint32(xxh.totalLen)
- if xxh.totalLen >= 16 {
- h32 += ((xxh.v1 << 1) | (xxh.v1 >> 31)) +
- ((xxh.v2 << 7) | (xxh.v2 >> 25)) +
- ((xxh.v3 << 12) | (xxh.v3 >> 20)) +
- ((xxh.v4 << 18) | (xxh.v4 >> 14))
- } else {
- h32 += xxh.seed + prime32_5
- }
-
- p := 0
- n := xxh.bufused
- for n := n - 4; p <= n; p += 4 {
- h32 += (uint32(xxh.buf[p+3])<<24 | uint32(xxh.buf[p+2])<<16 | uint32(xxh.buf[p+1])<<8 | uint32(xxh.buf[p])) * prime32_3
- h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
- }
- for ; p < n; p++ {
- h32 += uint32(xxh.buf[p]) * prime32_5
- h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
- }
-
- h32 ^= h32 >> 15
- h32 *= prime32_2
- h32 ^= h32 >> 13
- h32 *= prime32_3
- h32 ^= h32 >> 16
-
- return h32
-}
-
-// Checksum returns the 32-bit hash value.
-func Checksum(input []byte, seed uint32) uint32 {
- n := len(input)
- h32 := uint32(n)
-
- if n < 16 {
- h32 += seed + prime32_5
- } else {
- v1 := seed + prime32_1 + prime32_2
- v2 := seed + prime32_2
- v3 := seed
- v4 := seed - prime32_1
- p := 0
- for p <= n-16 {
- v1 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
- v1 = (v1<<13 | v1>>19) * prime32_1
- p += 4
- v2 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
- v2 = (v2<<13 | v2>>19) * prime32_1
- p += 4
- v3 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
- v3 = (v3<<13 | v3>>19) * prime32_1
- p += 4
- v4 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_2
- v4 = (v4<<13 | v4>>19) * prime32_1
- p += 4
- }
- input = input[p:]
- n -= p
- h32 += ((v1 << 1) | (v1 >> 31)) +
- ((v2 << 7) | (v2 >> 25)) +
- ((v3 << 12) | (v3 >> 20)) +
- ((v4 << 18) | (v4 >> 14))
- }
-
- p := 0
- for p <= n-4 {
- h32 += (uint32(input[p+3])<<24 | uint32(input[p+2])<<16 | uint32(input[p+1])<<8 | uint32(input[p])) * prime32_3
- h32 = ((h32 << 17) | (h32 >> 15)) * prime32_4
- p += 4
- }
- for p < n {
- h32 += uint32(input[p]) * prime32_5
- h32 = ((h32 << 11) | (h32 >> 21)) * prime32_1
- p++
- }
-
- h32 ^= h32 >> 15
- h32 *= prime32_2
- h32 ^= h32 >> 13
- h32 *= prime32_3
- h32 ^= h32 >> 16
-
- return h32
-}
diff --git a/vendor/github.com/prometheus/common/promlog/log.go b/vendor/github.com/prometheus/common/promlog/log.go
deleted file mode 100644
index cf8307ad285..00000000000
--- a/vendor/github.com/prometheus/common/promlog/log.go
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2017 The Prometheus Authors
-// Licensed under the Apache License, Version 2.0 (the "License");
-// you may not use this file except in compliance with the License.
-// You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// Package promlog defines standardised ways to initialize Go kit loggers
-// across Prometheus components.
-// It should typically only ever be imported by main packages.
-package promlog
-
-import (
- "os"
-
- "github.com/go-kit/kit/log"
- "github.com/go-kit/kit/log/level"
- "github.com/pkg/errors"
-)
-
-// AllowedLevel is a settable identifier for the minimum level a log entry
-// must have.
-type AllowedLevel struct {
- s string
- o level.Option
-}
-
-func (l *AllowedLevel) String() string {
- return l.s
-}
-
-// Set updates the value of the allowed level.
-func (l *AllowedLevel) Set(s string) error {
- switch s {
- case "debug":
- l.o = level.AllowDebug()
- case "info":
- l.o = level.AllowInfo()
- case "warn":
- l.o = level.AllowWarn()
- case "error":
- l.o = level.AllowError()
- default:
- return errors.Errorf("unrecognized log level %q", s)
- }
- l.s = s
- return nil
-}
-
-// New returns a new leveled Go kit logger in the logfmt format. Each logged line will be annotated
-// with a timestamp. The output always goes to stderr.
-func New(al AllowedLevel) log.Logger {
- l := log.NewLogfmtLogger(log.NewSyncWriter(os.Stderr))
- l = level.NewFilter(l, al.o)
- l = log.With(l, "ts", log.DefaultTimestampUTC, "caller", log.DefaultCaller)
- return l
-}
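-
-// A typical wiring sketch (illustrative only; the literal level string is an
-// assumption):
-//
-//	var al promlog.AllowedLevel
-//	if err := al.Set("info"); err != nil {
-//		// handle the error
-//	}
-//	logger := promlog.New(al)
-//	level.Info(logger).Log("msg", "started")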
diff --git a/vendor/github.com/rcrowley/go-metrics/.gitignore b/vendor/github.com/rcrowley/go-metrics/.gitignore
deleted file mode 100644
index 83c8f82374a..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/.gitignore
+++ /dev/null
@@ -1,9 +0,0 @@
-*.[68]
-*.a
-*.out
-*.swp
-_obj
-_testmain.go
-cmd/metrics-bench/metrics-bench
-cmd/metrics-example/metrics-example
-cmd/never-read/never-read
diff --git a/vendor/github.com/rcrowley/go-metrics/.travis.yml b/vendor/github.com/rcrowley/go-metrics/.travis.yml
deleted file mode 100644
index 20aa5d04202..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/.travis.yml
+++ /dev/null
@@ -1,14 +0,0 @@
-language: go
-
-go:
- - 1.2
- - 1.3
- - 1.4
- - 1.5
-
-script:
- - ./validate.sh
-
-# this should give us faster builds according to
-# http://docs.travis-ci.com/user/migrating-from-legacy/
-sudo: false
diff --git a/vendor/github.com/rcrowley/go-metrics/LICENSE b/vendor/github.com/rcrowley/go-metrics/LICENSE
deleted file mode 100644
index 363fa9ee77b..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/LICENSE
+++ /dev/null
@@ -1,29 +0,0 @@
-Copyright 2012 Richard Crowley. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- 1. Redistributions of source code must retain the above copyright
- notice, this list of conditions and the following disclaimer.
-
- 2. Redistributions in binary form must reproduce the above
- copyright notice, this list of conditions and the following
- disclaimer in the documentation and/or other materials provided
- with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY RICHARD CROWLEY ``AS IS'' AND ANY EXPRESS
-OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL RICHARD CROWLEY OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
-CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
-SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
-INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
-CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
-ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
-THE POSSIBILITY OF SUCH DAMAGE.
-
-The views and conclusions contained in the software and documentation
-are those of the authors and should not be interpreted as representing
-official policies, either expressed or implied, of Richard Crowley.
diff --git a/vendor/github.com/rcrowley/go-metrics/README.md b/vendor/github.com/rcrowley/go-metrics/README.md
deleted file mode 100644
index 2d1a6dcfa44..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/README.md
+++ /dev/null
@@ -1,153 +0,0 @@
-go-metrics
-==========
-
-
-Go port of Coda Hale's Metrics library: <https://github.com/dropwizard/metrics>.
-
-Documentation: <http://godoc.org/github.com/rcrowley/go-metrics>.
-
-Usage
------
-
-Create and update metrics:
-
-```go
-c := metrics.NewCounter()
-metrics.Register("foo", c)
-c.Inc(47)
-
-g := metrics.NewGauge()
-metrics.Register("bar", g)
-g.Update(47)
-
-r := metrics.NewRegistry()
-fg := metrics.NewRegisteredFunctionalGauge("cache-evictions", r, func() int64 { return cache.getEvictionsCount() })
-
-s := metrics.NewExpDecaySample(1028, 0.015) // or metrics.NewUniformSample(1028)
-h := metrics.NewHistogram(s)
-metrics.Register("baz", h)
-h.Update(47)
-
-m := metrics.NewMeter()
-metrics.Register("quux", m)
-m.Mark(47)
-
-t := metrics.NewTimer()
-metrics.Register("bang", t)
-t.Time(func() {})
-t.Update(47)
-```
-
-Register() is not thread-safe. For thread-safe metric registration use
-GetOrRegister:
-
-```go
-t := metrics.GetOrRegisterTimer("account.create.latency", nil)
-t.Time(func() {})
-t.Update(47)
-```
-
-Periodically log every metric in human-readable form to standard error:
-
-```go
-go metrics.Log(metrics.DefaultRegistry, 5 * time.Second, log.New(os.Stderr, "metrics: ", log.Lmicroseconds))
-```
-
-Periodically log every metric in slightly-more-parseable form to syslog:
-
-```go
-w, _ := syslog.Dial("unixgram", "/dev/log", syslog.LOG_INFO, "metrics")
-go metrics.Syslog(metrics.DefaultRegistry, 60e9, w)
-```
-
-Periodically emit every metric to Graphite using the [Graphite client](https://github.com/cyberdelia/go-metrics-graphite):
-
-```go
-
-import "github.com/cyberdelia/go-metrics-graphite"
-
-addr, _ := net.ResolveTCPAddr("tcp", "127.0.0.1:2003")
-go graphite.Graphite(metrics.DefaultRegistry, 10e9, "metrics", addr)
-```
-
-Periodically emit every metric into InfluxDB:
-
-**NOTE:** this has been pulled out of the library due to constant fluctuations
-in the InfluxDB API. In fact, all client libraries are on their way out. See
-issues [#121](https://github.com/rcrowley/go-metrics/issues/121) and
-[#124](https://github.com/rcrowley/go-metrics/issues/124) for progress and details.
-
-```go
-import "github.com/vrischmann/go-metrics-influxdb"
-
-go influxdb.Influxdb(metrics.DefaultRegistry, 10e9, &influxdb.Config{
- Host: "127.0.0.1:8086",
- Database: "metrics",
- Username: "test",
- Password: "test",
-})
-```
-
-Periodically upload every metric to Librato using the [Librato client](https://github.com/mihasya/go-metrics-librato):
-
-**Note**: the client included with this repository under the `librato` package
-has been deprecated and moved to the repository linked above.
-
-```go
-import "github.com/mihasya/go-metrics-librato"
-
-go librato.Librato(metrics.DefaultRegistry,
- 10e9, // interval
- "example@example.com", // account owner email address
- "token", // Librato API token
- "hostname", // source
- []float64{0.95}, // percentiles to send
- time.Millisecond, // time unit
-)
-```
-
-Periodically emit every metric to StatHat:
-
-```go
-import "github.com/rcrowley/go-metrics/stathat"
-
-go stathat.Stathat(metrics.DefaultRegistry, 10e9, "example@example.com")
-```
-
-Maintain all metrics along with expvars at `/debug/metrics`:
-
-This uses the same mechanism as [the official expvar](http://golang.org/pkg/expvar/)
-but exposed under `/debug/metrics`, which shows a JSON representation of all your usual expvars
-as well as all your go-metrics.
-
-
-```go
-import "github.com/rcrowley/go-metrics/exp"
-
-exp.Exp(metrics.DefaultRegistry)
-```
-
-Installation
-------------
-
-```sh
-go get github.com/rcrowley/go-metrics
-```
-
-StatHat support additionally requires their Go client:
-
-```sh
-go get github.com/stathat/go
-```
-
-Publishing Metrics
-------------------
-
-Clients are available for the following destinations:
-
-* Librato - [https://github.com/mihasya/go-metrics-librato](https://github.com/mihasya/go-metrics-librato)
-* Graphite - [https://github.com/cyberdelia/go-metrics-graphite](https://github.com/cyberdelia/go-metrics-graphite)
-* InfluxDB - [https://github.com/vrischmann/go-metrics-influxdb](https://github.com/vrischmann/go-metrics-influxdb)
-* Ganglia - [https://github.com/appscode/metlia](https://github.com/appscode/metlia)
-* Prometheus - [https://github.com/deathowl/go-metrics-prometheus](https://github.com/deathowl/go-metrics-prometheus)
diff --git a/vendor/github.com/rcrowley/go-metrics/counter.go b/vendor/github.com/rcrowley/go-metrics/counter.go
deleted file mode 100644
index bb7b039cb57..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/counter.go
+++ /dev/null
@@ -1,112 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Counters hold an int64 value that can be incremented and decremented.
-type Counter interface {
- Clear()
- Count() int64
- Dec(int64)
- Inc(int64)
- Snapshot() Counter
-}
-
-// GetOrRegisterCounter returns an existing Counter or constructs and registers
-// a new StandardCounter.
-func GetOrRegisterCounter(name string, r Registry) Counter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewCounter).(Counter)
-}
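-
-// For example (illustrative only; the metric name is an assumption):
-//
-//	c := metrics.GetOrRegisterCounter("requests", nil) // nil uses DefaultRegistry
-//	c.Inc(1)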
-
-// NewCounter constructs a new StandardCounter.
-func NewCounter() Counter {
- if UseNilMetrics {
- return NilCounter{}
- }
- return &StandardCounter{0}
-}
-
-// NewRegisteredCounter constructs and registers a new StandardCounter.
-func NewRegisteredCounter(name string, r Registry) Counter {
- c := NewCounter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// CounterSnapshot is a read-only copy of another Counter.
-type CounterSnapshot int64
-
-// Clear panics.
-func (CounterSnapshot) Clear() {
- panic("Clear called on a CounterSnapshot")
-}
-
-// Count returns the count at the time the snapshot was taken.
-func (c CounterSnapshot) Count() int64 { return int64(c) }
-
-// Dec panics.
-func (CounterSnapshot) Dec(int64) {
- panic("Dec called on a CounterSnapshot")
-}
-
-// Inc panics.
-func (CounterSnapshot) Inc(int64) {
- panic("Inc called on a CounterSnapshot")
-}
-
-// Snapshot returns the snapshot.
-func (c CounterSnapshot) Snapshot() Counter { return c }
-
-// NilCounter is a no-op Counter.
-type NilCounter struct{}
-
-// Clear is a no-op.
-func (NilCounter) Clear() {}
-
-// Count is a no-op.
-func (NilCounter) Count() int64 { return 0 }
-
-// Dec is a no-op.
-func (NilCounter) Dec(i int64) {}
-
-// Inc is a no-op.
-func (NilCounter) Inc(i int64) {}
-
-// Snapshot is a no-op.
-func (NilCounter) Snapshot() Counter { return NilCounter{} }
-
-// StandardCounter is the standard implementation of a Counter and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardCounter struct {
- count int64
-}
-
-// Clear sets the counter to zero.
-func (c *StandardCounter) Clear() {
- atomic.StoreInt64(&c.count, 0)
-}
-
-// Count returns the current count.
-func (c *StandardCounter) Count() int64 {
- return atomic.LoadInt64(&c.count)
-}
-
-// Dec decrements the counter by the given amount.
-func (c *StandardCounter) Dec(i int64) {
- atomic.AddInt64(&c.count, -i)
-}
-
-// Inc increments the counter by the given amount.
-func (c *StandardCounter) Inc(i int64) {
- atomic.AddInt64(&c.count, i)
-}
-
-// Snapshot returns a read-only copy of the counter.
-func (c *StandardCounter) Snapshot() Counter {
- return CounterSnapshot(c.Count())
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/debug.go b/vendor/github.com/rcrowley/go-metrics/debug.go
deleted file mode 100644
index 043ccefab61..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/debug.go
+++ /dev/null
@@ -1,76 +0,0 @@
-package metrics
-
-import (
- "runtime/debug"
- "time"
-)
-
-var (
- debugMetrics struct {
- GCStats struct {
- LastGC Gauge
- NumGC Gauge
- Pause Histogram
- //PauseQuantiles Histogram
- PauseTotal Gauge
- }
- ReadGCStats Timer
- }
- gcStats debug.GCStats
-)
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called as a goroutine.
-func CaptureDebugGCStats(r Registry, d time.Duration) {
- for _ = range time.Tick(d) {
- CaptureDebugGCStatsOnce(r)
- }
-}
-
-// Capture new values for the Go garbage collector statistics exported in
-// debug.GCStats. This is designed to be called in a background goroutine.
-// Giving a registry which has not been given to RegisterDebugGCStats will
-// panic.
-//
-// Be careful (but much less so) with this because debug.ReadGCStats calls
-// the C function runtime·lock(runtime·mheap) which, while not a stop-the-world
-// operation, isn't something you want to be doing all the time.
-func CaptureDebugGCStatsOnce(r Registry) {
- lastGC := gcStats.LastGC
- t := time.Now()
- debug.ReadGCStats(&gcStats)
- debugMetrics.ReadGCStats.UpdateSince(t)
-
- debugMetrics.GCStats.LastGC.Update(int64(gcStats.LastGC.UnixNano()))
- debugMetrics.GCStats.NumGC.Update(int64(gcStats.NumGC))
- if lastGC != gcStats.LastGC && 0 < len(gcStats.Pause) {
- debugMetrics.GCStats.Pause.Update(int64(gcStats.Pause[0]))
- }
- //debugMetrics.GCStats.PauseQuantiles.Update(gcStats.PauseQuantiles)
- debugMetrics.GCStats.PauseTotal.Update(int64(gcStats.PauseTotal))
-}
-
-// Register metrics for the Go garbage collector statistics exported in
-// debug.GCStats. The metrics are named by their fully-qualified Go symbols,
-// i.e. debug.GCStats.PauseTotal.
-func RegisterDebugGCStats(r Registry) {
- debugMetrics.GCStats.LastGC = NewGauge()
- debugMetrics.GCStats.NumGC = NewGauge()
- debugMetrics.GCStats.Pause = NewHistogram(NewExpDecaySample(1028, 0.015))
- //debugMetrics.GCStats.PauseQuantiles = NewHistogram(NewExpDecaySample(1028, 0.015))
- debugMetrics.GCStats.PauseTotal = NewGauge()
- debugMetrics.ReadGCStats = NewTimer()
-
- r.Register("debug.GCStats.LastGC", debugMetrics.GCStats.LastGC)
- r.Register("debug.GCStats.NumGC", debugMetrics.GCStats.NumGC)
- r.Register("debug.GCStats.Pause", debugMetrics.GCStats.Pause)
- //r.Register("debug.GCStats.PauseQuantiles", debugMetrics.GCStats.PauseQuantiles)
- r.Register("debug.GCStats.PauseTotal", debugMetrics.GCStats.PauseTotal)
- r.Register("debug.ReadGCStats", debugMetrics.ReadGCStats)
-}
-
-// Allocate an initial slice for gcStats.Pause to avoid allocations during
-// normal operation.
-func init() {
- gcStats.Pause = make([]time.Duration, 11)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/ewma.go b/vendor/github.com/rcrowley/go-metrics/ewma.go
deleted file mode 100644
index 694a1d03307..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/ewma.go
+++ /dev/null
@@ -1,118 +0,0 @@
-package metrics
-
-import (
- "math"
- "sync"
- "sync/atomic"
-)
-
-// EWMAs continuously calculate an exponentially-weighted moving average
-// based on an outside source of clock ticks.
-type EWMA interface {
- Rate() float64
- Snapshot() EWMA
- Tick()
- Update(int64)
-}
-
-// NewEWMA constructs a new EWMA with the given alpha.
-func NewEWMA(alpha float64) EWMA {
- if UseNilMetrics {
- return NilEWMA{}
- }
- return &StandardEWMA{alpha: alpha}
-}
-
-// NewEWMA1 constructs a new EWMA for a one-minute moving average.
-func NewEWMA1() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/1))
-}
-
-// NewEWMA5 constructs a new EWMA for a five-minute moving average.
-func NewEWMA5() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/5))
-}
-
-// NewEWMA15 constructs a new EWMA for a fifteen-minute moving average.
-func NewEWMA15() EWMA {
- return NewEWMA(1 - math.Exp(-5.0/60.0/15))
-}
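-
-// The alpha values above follow the standard formula alpha = 1 - exp(-interval/period):
-// the tick interval is five seconds (5.0/60.0 of a minute) and the averaging period
-// is 1, 5 or 15 minutes, matching StandardEWMA.Tick's assumption of five-second ticks.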
-
-// EWMASnapshot is a read-only copy of another EWMA.
-type EWMASnapshot float64
-
-// Rate returns the rate of events per second at the time the snapshot was
-// taken.
-func (a EWMASnapshot) Rate() float64 { return float64(a) }
-
-// Snapshot returns the snapshot.
-func (a EWMASnapshot) Snapshot() EWMA { return a }
-
-// Tick panics.
-func (EWMASnapshot) Tick() {
- panic("Tick called on an EWMASnapshot")
-}
-
-// Update panics.
-func (EWMASnapshot) Update(int64) {
- panic("Update called on an EWMASnapshot")
-}
-
-// NilEWMA is a no-op EWMA.
-type NilEWMA struct{}
-
-// Rate is a no-op.
-func (NilEWMA) Rate() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilEWMA) Snapshot() EWMA { return NilEWMA{} }
-
-// Tick is a no-op.
-func (NilEWMA) Tick() {}
-
-// Update is a no-op.
-func (NilEWMA) Update(n int64) {}
-
-// StandardEWMA is the standard implementation of an EWMA and tracks the number
-// of uncounted events and processes them on each tick. It uses the
-// sync/atomic package to manage uncounted events.
-type StandardEWMA struct {
- uncounted int64 // /!\ this should be the first member to ensure 64-bit alignment
- alpha float64
- rate float64
- init bool
- mutex sync.Mutex
-}
-
-// Rate returns the moving average rate of events per second.
-func (a *StandardEWMA) Rate() float64 {
- a.mutex.Lock()
- defer a.mutex.Unlock()
- return a.rate * float64(1e9)
-}
-
-// Snapshot returns a read-only copy of the EWMA.
-func (a *StandardEWMA) Snapshot() EWMA {
- return EWMASnapshot(a.Rate())
-}
-
-// Tick ticks the clock to update the moving average. It assumes it is called
-// every five seconds.
-func (a *StandardEWMA) Tick() {
- count := atomic.LoadInt64(&a.uncounted)
- atomic.AddInt64(&a.uncounted, -count)
- instantRate := float64(count) / float64(5e9)
- a.mutex.Lock()
- defer a.mutex.Unlock()
- if a.init {
- a.rate += a.alpha * (instantRate - a.rate)
- } else {
- a.init = true
- a.rate = instantRate
- }
-}
-
-// Update adds n uncounted events.
-func (a *StandardEWMA) Update(n int64) {
- atomic.AddInt64(&a.uncounted, n)
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge.go b/vendor/github.com/rcrowley/go-metrics/gauge.go
deleted file mode 100644
index cb57a93889f..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/gauge.go
+++ /dev/null
@@ -1,120 +0,0 @@
-package metrics
-
-import "sync/atomic"
-
-// Gauges hold an int64 value that can be set arbitrarily.
-type Gauge interface {
- Snapshot() Gauge
- Update(int64)
- Value() int64
-}
-
-// GetOrRegisterGauge returns an existing Gauge or constructs and registers a
-// new StandardGauge.
-func GetOrRegisterGauge(name string, r Registry) Gauge {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGauge).(Gauge)
-}
-
-// NewGauge constructs a new StandardGauge.
-func NewGauge() Gauge {
- if UseNilMetrics {
- return NilGauge{}
- }
- return &StandardGauge{0}
-}
-
-// NewRegisteredGauge constructs and registers a new StandardGauge.
-func NewRegisteredGauge(name string, r Registry) Gauge {
- c := NewGauge()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGauge constructs a new FunctionalGauge.
-func NewFunctionalGauge(f func() int64) Gauge {
- if UseNilMetrics {
- return NilGauge{}
- }
- return &FunctionalGauge{value: f}
-}
-
-// NewRegisteredFunctionalGauge constructs and registers a new FunctionalGauge.
-func NewRegisteredFunctionalGauge(name string, r Registry, f func() int64) Gauge {
- c := NewFunctionalGauge(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeSnapshot is a read-only copy of another Gauge.
-type GaugeSnapshot int64
-
-// Snapshot returns the snapshot.
-func (g GaugeSnapshot) Snapshot() Gauge { return g }
-
-// Update panics.
-func (GaugeSnapshot) Update(int64) {
- panic("Update called on a GaugeSnapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeSnapshot) Value() int64 { return int64(g) }
-
-// NilGauge is a no-op Gauge.
-type NilGauge struct{}
-
-// Snapshot is a no-op.
-func (NilGauge) Snapshot() Gauge { return NilGauge{} }
-
-// Update is a no-op.
-func (NilGauge) Update(v int64) {}
-
-// Value is a no-op.
-func (NilGauge) Value() int64 { return 0 }
-
-// StandardGauge is the standard implementation of a Gauge and uses the
-// sync/atomic package to manage a single int64 value.
-type StandardGauge struct {
- value int64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGauge) Snapshot() Gauge {
- return GaugeSnapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGauge) Update(v int64) {
- atomic.StoreInt64(&g.value, v)
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGauge) Value() int64 {
- return atomic.LoadInt64(&g.value)
-}
-
-// FunctionalGauge is a Gauge whose value is computed by a user-supplied function.
-type FunctionalGauge struct {
- value func() int64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGauge) Value() int64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGauge) Snapshot() Gauge { return GaugeSnapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGauge) Update(int64) {
- panic("Update called on a FunctionalGauge")
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go b/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
deleted file mode 100644
index 6f93920b2c0..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/gauge_float64.go
+++ /dev/null
@@ -1,127 +0,0 @@
-package metrics
-
-import "sync"
-
-// GaugeFloat64s hold a float64 value that can be set arbitrarily.
-type GaugeFloat64 interface {
- Snapshot() GaugeFloat64
- Update(float64)
- Value() float64
-}
-
-// GetOrRegisterGaugeFloat64 returns an existing GaugeFloat64 or constructs and registers a
-// new StandardGaugeFloat64.
-func GetOrRegisterGaugeFloat64(name string, r Registry) GaugeFloat64 {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewGaugeFloat64()).(GaugeFloat64)
-}
-
-// NewGaugeFloat64 constructs a new StandardGaugeFloat64.
-func NewGaugeFloat64() GaugeFloat64 {
- if UseNilMetrics {
- return NilGaugeFloat64{}
- }
- return &StandardGaugeFloat64{
- value: 0.0,
- }
-}
-
-// NewRegisteredGaugeFloat64 constructs and registers a new StandardGaugeFloat64.
-func NewRegisteredGaugeFloat64(name string, r Registry) GaugeFloat64 {
- c := NewGaugeFloat64()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewFunctionalGaugeFloat64 constructs a new FunctionalGaugeFloat64.
-func NewFunctionalGaugeFloat64(f func() float64) GaugeFloat64 {
- if UseNilMetrics {
- return NilGaugeFloat64{}
- }
- return &FunctionalGaugeFloat64{value: f}
-}
-
-// NewRegisteredFunctionalGaugeFloat64 constructs and registers a new FunctionalGaugeFloat64.
-func NewRegisteredFunctionalGaugeFloat64(name string, r Registry, f func() float64) GaugeFloat64 {
- c := NewFunctionalGaugeFloat64(f)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// GaugeFloat64Snapshot is a read-only copy of another GaugeFloat64.
-type GaugeFloat64Snapshot float64
-
-// Snapshot returns the snapshot.
-func (g GaugeFloat64Snapshot) Snapshot() GaugeFloat64 { return g }
-
-// Update panics.
-func (GaugeFloat64Snapshot) Update(float64) {
- panic("Update called on a GaugeFloat64Snapshot")
-}
-
-// Value returns the value at the time the snapshot was taken.
-func (g GaugeFloat64Snapshot) Value() float64 { return float64(g) }
-
-// NilGaugeFloat64 is a no-op GaugeFloat64.
-type NilGaugeFloat64 struct{}
-
-// Snapshot is a no-op.
-func (NilGaugeFloat64) Snapshot() GaugeFloat64 { return NilGaugeFloat64{} }
-
-// Update is a no-op.
-func (NilGaugeFloat64) Update(v float64) {}
-
-// Value is a no-op.
-func (NilGaugeFloat64) Value() float64 { return 0.0 }
-
-// StandardGaugeFloat64 is the standard implementation of a GaugeFloat64 and uses
-// sync.Mutex to manage a single float64 value.
-type StandardGaugeFloat64 struct {
- mutex sync.Mutex
- value float64
-}
-
-// Snapshot returns a read-only copy of the gauge.
-func (g *StandardGaugeFloat64) Snapshot() GaugeFloat64 {
- return GaugeFloat64Snapshot(g.Value())
-}
-
-// Update updates the gauge's value.
-func (g *StandardGaugeFloat64) Update(v float64) {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- g.value = v
-}
-
-// Value returns the gauge's current value.
-func (g *StandardGaugeFloat64) Value() float64 {
- g.mutex.Lock()
- defer g.mutex.Unlock()
- return g.value
-}
-
-// FunctionalGaugeFloat64 is a GaugeFloat64 whose value is computed by a user-supplied function.
-type FunctionalGaugeFloat64 struct {
- value func() float64
-}
-
-// Value returns the gauge's current value.
-func (g FunctionalGaugeFloat64) Value() float64 {
- return g.value()
-}
-
-// Snapshot returns the snapshot.
-func (g FunctionalGaugeFloat64) Snapshot() GaugeFloat64 { return GaugeFloat64Snapshot(g.Value()) }
-
-// Update panics.
-func (FunctionalGaugeFloat64) Update(float64) {
- panic("Update called on a FunctionalGaugeFloat64")
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/graphite.go b/vendor/github.com/rcrowley/go-metrics/graphite.go
deleted file mode 100644
index abd0a7d2918..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/graphite.go
+++ /dev/null
@@ -1,113 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "strconv"
- "strings"
- "time"
-)
-
-// GraphiteConfig provides a container with configuration parameters for
-// the Graphite exporter
-type GraphiteConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
- Percentiles []float64 // Percentiles to export from timers and histograms
-}
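-
-// Metrics are flushed in Graphite's plaintext protocol, one line per value,
-// in the form (illustrative):
-//
-//	<Prefix>.<name>.<field> <value> <unix-timestamp>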
-
-// Graphite is a blocking exporter function which reports metrics in r
-// to a graphite server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func Graphite(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- GraphiteWithConfig(GraphiteConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- Percentiles: []float64{0.5, 0.75, 0.95, 0.99, 0.999},
- })
-}
-
-// GraphiteWithConfig is a blocking exporter function just like Graphite,
-// but it takes a GraphiteConfig instead.
-func GraphiteWithConfig(c GraphiteConfig) {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- for _ = range time.Tick(c.FlushInterval) {
- if err := graphite(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-// GraphiteOnce performs a single submission to Graphite, returning a
-// non-nil error on failed connections. This can be used in a loop
-// similar to GraphiteWithConfig for custom error handling.
-func GraphiteOnce(c GraphiteConfig) error {
- log.Printf("WARNING: This go-metrics client has been DEPRECATED! It has been moved to https://github.com/cyberdelia/go-metrics-graphite and will be removed from rcrowley/go-metrics on August 12th 2015")
- return graphite(&c)
-}
-
-func graphite(c *GraphiteConfig) error {
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, metric.Count(), now)
- case Gauge:
- fmt.Fprintf(w, "%s.%s.value %d %d\n", c.Prefix, name, metric.Value(), now)
- case GaugeFloat64:
- fmt.Fprintf(w, "%s.%s.value %f %d\n", c.Prefix, name, metric.Value(), now)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, h.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, h.Min(), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, h.Max(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, h.Mean(), now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, h.StdDev(), now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, m.Count(), now)
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, m.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, m.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, m.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, m.RateMean(), now)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles(c.Percentiles)
- fmt.Fprintf(w, "%s.%s.count %d %d\n", c.Prefix, name, t.Count(), now)
- fmt.Fprintf(w, "%s.%s.min %d %d\n", c.Prefix, name, t.Min()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.max %d %d\n", c.Prefix, name, t.Max()/int64(du), now)
- fmt.Fprintf(w, "%s.%s.mean %.2f %d\n", c.Prefix, name, t.Mean()/du, now)
- fmt.Fprintf(w, "%s.%s.std-dev %.2f %d\n", c.Prefix, name, t.StdDev()/du, now)
- for psIdx, psKey := range c.Percentiles {
- key := strings.Replace(strconv.FormatFloat(psKey*100.0, 'f', -1, 64), ".", "", 1)
- fmt.Fprintf(w, "%s.%s.%s-percentile %.2f %d\n", c.Prefix, name, key, ps[psIdx], now)
- }
- fmt.Fprintf(w, "%s.%s.one-minute %.2f %d\n", c.Prefix, name, t.Rate1(), now)
- fmt.Fprintf(w, "%s.%s.five-minute %.2f %d\n", c.Prefix, name, t.Rate5(), now)
- fmt.Fprintf(w, "%s.%s.fifteen-minute %.2f %d\n", c.Prefix, name, t.Rate15(), now)
- fmt.Fprintf(w, "%s.%s.mean-rate %.2f %d\n", c.Prefix, name, t.RateMean(), now)
- }
- w.Flush()
- })
- return nil
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/healthcheck.go b/vendor/github.com/rcrowley/go-metrics/healthcheck.go
deleted file mode 100644
index 445131caee5..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/healthcheck.go
+++ /dev/null
@@ -1,61 +0,0 @@
-package metrics
-
-// Healthchecks hold an error value describing an arbitrary up/down status.
-type Healthcheck interface {
- Check()
- Error() error
- Healthy()
- Unhealthy(error)
-}
-
-// NewHealthcheck constructs a new Healthcheck which will use the given
-// function to update its status.
-func NewHealthcheck(f func(Healthcheck)) Healthcheck {
- if UseNilMetrics {
- return NilHealthcheck{}
- }
- return &StandardHealthcheck{nil, f}
-}
-
-// NilHealthcheck is a no-op.
-type NilHealthcheck struct{}
-
-// Check is a no-op.
-func (NilHealthcheck) Check() {}
-
-// Error is a no-op.
-func (NilHealthcheck) Error() error { return nil }
-
-// Healthy is a no-op.
-func (NilHealthcheck) Healthy() {}
-
-// Unhealthy is a no-op.
-func (NilHealthcheck) Unhealthy(error) {}
-
-// StandardHealthcheck is the standard implementation of a Healthcheck and
-// stores the status and a function to call to update the status.
-type StandardHealthcheck struct {
- err error
- f func(Healthcheck)
-}
-
-// Check runs the healthcheck function to update the healthcheck's status.
-func (h *StandardHealthcheck) Check() {
- h.f(h)
-}
-
-// Error returns the healthcheck's status, which will be nil if it is healthy.
-func (h *StandardHealthcheck) Error() error {
- return h.err
-}
-
-// Healthy marks the healthcheck as healthy.
-func (h *StandardHealthcheck) Healthy() {
- h.err = nil
-}
-
-// Unhealthy marks the healthcheck as unhealthy. The error is stored and
-// may be retrieved by the Error method.
-func (h *StandardHealthcheck) Unhealthy(err error) {
- h.err = err
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/histogram.go b/vendor/github.com/rcrowley/go-metrics/histogram.go
deleted file mode 100644
index dbc837fe4d9..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/histogram.go
+++ /dev/null
@@ -1,202 +0,0 @@
-package metrics
-
-// Histograms calculate distribution statistics from a series of int64 values.
-type Histogram interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Sample() Sample
- Snapshot() Histogram
- StdDev() float64
- Sum() int64
- Update(int64)
- Variance() float64
-}
-
-// GetOrRegisterHistogram returns an existing Histogram or constructs and
-// registers a new StandardHistogram.
-func GetOrRegisterHistogram(name string, r Registry, s Sample) Histogram {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, func() Histogram { return NewHistogram(s) }).(Histogram)
-}
-
-// NewHistogram constructs a new StandardHistogram from a Sample.
-func NewHistogram(s Sample) Histogram {
- if UseNilMetrics {
- return NilHistogram{}
- }
- return &StandardHistogram{sample: s}
-}
-
-// NewRegisteredHistogram constructs and registers a new StandardHistogram from
-// a Sample.
-func NewRegisteredHistogram(name string, r Registry, s Sample) Histogram {
- c := NewHistogram(s)
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// HistogramSnapshot is a read-only copy of another Histogram.
-type HistogramSnapshot struct {
- sample *SampleSnapshot
-}
-
-// Clear panics.
-func (*HistogramSnapshot) Clear() {
- panic("Clear called on a HistogramSnapshot")
-}
-
-// Count returns the number of samples recorded at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample at the time the snapshot
-// was taken.
-func (h *HistogramSnapshot) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample at the time the snapshot was
-// taken.
-func (h *HistogramSnapshot) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the sample
-// at the time the snapshot was taken.
-func (h *HistogramSnapshot) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *HistogramSnapshot) Sample() Sample { return h.sample }
-
-// Snapshot returns the snapshot.
-func (h *HistogramSnapshot) Snapshot() Histogram { return h }
-
-// StdDev returns the standard deviation of the values in the sample at the
-// time the snapshot was taken.
-func (h *HistogramSnapshot) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample at the time the snapshot was taken.
-func (h *HistogramSnapshot) Sum() int64 { return h.sample.Sum() }
-
-// Update panics.
-func (*HistogramSnapshot) Update(int64) {
- panic("Update called on a HistogramSnapshot")
-}
-
-// Variance returns the variance of inputs at the time the snapshot was taken.
-func (h *HistogramSnapshot) Variance() float64 { return h.sample.Variance() }
-
-// NilHistogram is a no-op Histogram.
-type NilHistogram struct{}
-
-// Clear is a no-op.
-func (NilHistogram) Clear() {}
-
-// Count is a no-op.
-func (NilHistogram) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilHistogram) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilHistogram) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilHistogram) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilHistogram) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilHistogram) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Sample is a no-op.
-func (NilHistogram) Sample() Sample { return NilSample{} }
-
-// Snapshot is a no-op.
-func (NilHistogram) Snapshot() Histogram { return NilHistogram{} }
-
-// StdDev is a no-op.
-func (NilHistogram) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilHistogram) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilHistogram) Update(v int64) {}
-
-// Variance is a no-op.
-func (NilHistogram) Variance() float64 { return 0.0 }
-
-// StandardHistogram is the standard implementation of a Histogram and uses a
-// Sample to bound its memory use.
-type StandardHistogram struct {
- sample Sample
-}
-
-// Clear clears the histogram and its sample.
-func (h *StandardHistogram) Clear() { h.sample.Clear() }
-
-// Count returns the number of samples recorded since the histogram was last
-// cleared.
-func (h *StandardHistogram) Count() int64 { return h.sample.Count() }
-
-// Max returns the maximum value in the sample.
-func (h *StandardHistogram) Max() int64 { return h.sample.Max() }
-
-// Mean returns the mean of the values in the sample.
-func (h *StandardHistogram) Mean() float64 { return h.sample.Mean() }
-
-// Min returns the minimum value in the sample.
-func (h *StandardHistogram) Min() int64 { return h.sample.Min() }
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (h *StandardHistogram) Percentile(p float64) float64 {
- return h.sample.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (h *StandardHistogram) Percentiles(ps []float64) []float64 {
- return h.sample.Percentiles(ps)
-}
-
-// Sample returns the Sample underlying the histogram.
-func (h *StandardHistogram) Sample() Sample { return h.sample }
-
-// Snapshot returns a read-only copy of the histogram.
-func (h *StandardHistogram) Snapshot() Histogram {
- return &HistogramSnapshot{sample: h.sample.Snapshot().(*SampleSnapshot)}
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (h *StandardHistogram) StdDev() float64 { return h.sample.StdDev() }
-
-// Sum returns the sum in the sample.
-func (h *StandardHistogram) Sum() int64 { return h.sample.Sum() }
-
-// Update samples a new value.
-func (h *StandardHistogram) Update(v int64) { h.sample.Update(v) }
-
-// Variance returns the variance of the values in the sample.
-func (h *StandardHistogram) Variance() float64 { return h.sample.Variance() }
diff --git a/vendor/github.com/rcrowley/go-metrics/json.go b/vendor/github.com/rcrowley/go-metrics/json.go
deleted file mode 100644
index 2fdcbcfbf1d..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/json.go
+++ /dev/null
@@ -1,87 +0,0 @@
-package metrics
-
-import (
- "encoding/json"
- "io"
- "time"
-)
-
-// MarshalJSON returns a byte slice containing a JSON representation of all
-// the metrics in the Registry.
-func (r *StandardRegistry) MarshalJSON() ([]byte, error) {
- data := make(map[string]map[string]interface{})
- r.Each(func(name string, i interface{}) {
- values := make(map[string]interface{})
- switch metric := i.(type) {
- case Counter:
- values["count"] = metric.Count()
- case Gauge:
- values["value"] = metric.Value()
- case GaugeFloat64:
- values["value"] = metric.Value()
- case Healthcheck:
- values["error"] = nil
- metric.Check()
- if err := metric.Error(); nil != err {
- values["error"] = metric.Error().Error()
- }
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = h.Count()
- values["min"] = h.Min()
- values["max"] = h.Max()
- values["mean"] = h.Mean()
- values["stddev"] = h.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- case Meter:
- m := metric.Snapshot()
- values["count"] = m.Count()
- values["1m.rate"] = m.Rate1()
- values["5m.rate"] = m.Rate5()
- values["15m.rate"] = m.Rate15()
- values["mean.rate"] = m.RateMean()
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- values["count"] = t.Count()
- values["min"] = t.Min()
- values["max"] = t.Max()
- values["mean"] = t.Mean()
- values["stddev"] = t.StdDev()
- values["median"] = ps[0]
- values["75%"] = ps[1]
- values["95%"] = ps[2]
- values["99%"] = ps[3]
- values["99.9%"] = ps[4]
- values["1m.rate"] = t.Rate1()
- values["5m.rate"] = t.Rate5()
- values["15m.rate"] = t.Rate15()
- values["mean.rate"] = t.RateMean()
- }
- data[name] = values
- })
- return json.Marshal(data)
-}
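-
-// For example, a registry holding a single counter named "foo" with a count of
-// 47 marshals to (illustrative):
-//
-//	{"foo":{"count":47}}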
-
-// WriteJSON writes metrics from the given registry periodically to the
-// specified io.Writer as JSON.
-func WriteJSON(r Registry, d time.Duration, w io.Writer) {
- for _ = range time.Tick(d) {
- WriteJSONOnce(r, w)
- }
-}
-
-// WriteJSONOnce writes metrics from the given registry to the specified
-// io.Writer as JSON.
-func WriteJSONOnce(r Registry, w io.Writer) {
- json.NewEncoder(w).Encode(r)
-}
-
-func (p *PrefixedRegistry) MarshalJSON() ([]byte, error) {
- return json.Marshal(p.underlying)
-}
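The removed json.go was the registry's JSON exporter. A sketch of how it was typically used, assuming the vendored copy matches upstream; `NewCounter` lives in the package's counter.go, which is not part of this hunk, and the metric name is illustrative:

```go
package main

import (
	"os"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	c := metrics.NewCounter()
	r.Register("requests", c)
	c.Inc(42)

	// Encodes the whole registry as one JSON object keyed by metric name,
	// e.g. {"requests":{"count":42}}.
	metrics.WriteJSONOnce(r, os.Stdout)
}
```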
diff --git a/vendor/github.com/rcrowley/go-metrics/log.go b/vendor/github.com/rcrowley/go-metrics/log.go
deleted file mode 100644
index f8074c04576..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/log.go
+++ /dev/null
@@ -1,80 +0,0 @@
-package metrics
-
-import (
- "time"
-)
-
-type Logger interface {
- Printf(format string, v ...interface{})
-}
-
-func Log(r Registry, freq time.Duration, l Logger) {
- LogScaled(r, freq, time.Nanosecond, l)
-}
-
-// LogScaled outputs each metric in the given registry periodically using the
-// given logger. Timings are printed in `scale` units (e.g. time.Millisecond)
-// rather than nanoseconds.
-func LogScaled(r Registry, freq time.Duration, scale time.Duration, l Logger) {
- du := float64(scale)
- duSuffix := scale.String()[1:]
-
- for range time.Tick(freq) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- l.Printf("counter %s\n", name)
- l.Printf(" count: %9d\n", metric.Count())
- case Gauge:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %9d\n", metric.Value())
- case GaugeFloat64:
- l.Printf("gauge %s\n", name)
- l.Printf(" value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- l.Printf("healthcheck %s\n", name)
- l.Printf(" error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("histogram %s\n", name)
- l.Printf(" count: %9d\n", h.Count())
- l.Printf(" min: %9d\n", h.Min())
- l.Printf(" max: %9d\n", h.Max())
- l.Printf(" mean: %12.2f\n", h.Mean())
- l.Printf(" stddev: %12.2f\n", h.StdDev())
- l.Printf(" median: %12.2f\n", ps[0])
- l.Printf(" 75%%: %12.2f\n", ps[1])
- l.Printf(" 95%%: %12.2f\n", ps[2])
- l.Printf(" 99%%: %12.2f\n", ps[3])
- l.Printf(" 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- l.Printf("meter %s\n", name)
- l.Printf(" count: %9d\n", m.Count())
- l.Printf(" 1-min rate: %12.2f\n", m.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", m.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", m.Rate15())
- l.Printf(" mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- l.Printf("timer %s\n", name)
- l.Printf(" count: %9d\n", t.Count())
- l.Printf(" min: %12.2f%s\n", float64(t.Min())/du, duSuffix)
- l.Printf(" max: %12.2f%s\n", float64(t.Max())/du, duSuffix)
- l.Printf(" mean: %12.2f%s\n", t.Mean()/du, duSuffix)
- l.Printf(" stddev: %12.2f%s\n", t.StdDev()/du, duSuffix)
- l.Printf(" median: %12.2f%s\n", ps[0]/du, duSuffix)
- l.Printf(" 75%%: %12.2f%s\n", ps[1]/du, duSuffix)
- l.Printf(" 95%%: %12.2f%s\n", ps[2]/du, duSuffix)
- l.Printf(" 99%%: %12.2f%s\n", ps[3]/du, duSuffix)
- l.Printf(" 99.9%%: %12.2f%s\n", ps[4]/du, duSuffix)
- l.Printf(" 1-min rate: %12.2f\n", t.Rate1())
- l.Printf(" 5-min rate: %12.2f\n", t.Rate5())
- l.Printf(" 15-min rate: %12.2f\n", t.Rate15())
- l.Printf(" mean rate: %12.2f\n", t.RateMean())
- }
- })
- }
-}
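LogScaled blocks on time.Tick, so callers ran it in its own goroutine; anything with a Printf method satisfies Logger, including the standard library's *log.Logger. A sketch, assuming the vendored copy matches upstream and with an illustrative timer name:

```go
package main

import (
	"log"
	"os"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	t := metrics.NewRegisteredTimer("render", nil) // nil falls back to DefaultRegistry
	t.Time(func() { time.Sleep(10 * time.Millisecond) })

	// Report every 5s, printing timer values in milliseconds rather than
	// nanoseconds; the ticker loop never returns, hence the goroutine.
	go metrics.LogScaled(metrics.DefaultRegistry, 5*time.Second,
		time.Millisecond, log.New(os.Stderr, "metrics: ", 0))
	time.Sleep(6 * time.Second)
}
```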
diff --git a/vendor/github.com/rcrowley/go-metrics/memory.md b/vendor/github.com/rcrowley/go-metrics/memory.md
deleted file mode 100644
index 47454f54b64..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/memory.md
+++ /dev/null
@@ -1,285 +0,0 @@
-Memory usage
-============
-
-(Highly unscientific.)
-
-Command used to gather static memory usage:
-
-```sh
-grep ^Vm "/proc/$(ps fax | grep [m]etrics-bench | awk '{print $1}')/status"
-```
-
-Program used to gather baseline memory usage:
-
-```go
-package main
-
-import "time"
-
-func main() {
- time.Sleep(600e9)
-}
-```
-
-Baseline
---------
-
-```
-VmPeak: 42604 kB
-VmSize: 42604 kB
-VmLck: 0 kB
-VmHWM: 1120 kB
-VmRSS: 1120 kB
-VmData: 35460 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 36 kB
-VmSwap: 0 kB
-```
-
-Program used to gather metric memory usage (with other metrics being similar):
-
-```go
-package main
-
-import (
- "fmt"
- "metrics"
- "time"
-)
-
-func main() {
- fmt.Sprintf("foo")
- metrics.NewRegistry()
- time.Sleep(600e9)
-}
-```
-
-1000 counters registered
-------------------------
-
-```
-VmPeak: 44016 kB
-VmSize: 44016 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.412 kB virtual, TODO 0.808 kB resident per counter.**
-
-100000 counters registered
---------------------------
-
-```
-VmPeak: 55024 kB
-VmSize: 55024 kB
-VmLck: 0 kB
-VmHWM: 12440 kB
-VmRSS: 12440 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1024 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**0.1242 kB virtual, 0.1132 kB resident per counter.**
-
-1000 gauges registered
-----------------------
-
-```
-VmPeak: 44012 kB
-VmSize: 44012 kB
-VmLck: 0 kB
-VmHWM: 1928 kB
-VmRSS: 1928 kB
-VmData: 36868 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 40 kB
-VmSwap: 0 kB
-```
-
-**1.408 kB virtual, 0.808 kB resident per gauge.**
-
-100000 gauges registered
-------------------------
-
-```
-VmPeak: 55020 kB
-VmSize: 55020 kB
-VmLck: 0 kB
-VmHWM: 12432 kB
-VmRSS: 12432 kB
-VmData: 47876 kB
-VmStk: 136 kB
-VmExe: 1020 kB
-VmLib: 1848 kB
-VmPTE: 60 kB
-VmSwap: 0 kB
-```
-
-**0.12416 kB virtual, 0.11312 kB resident per gauge.**
-
-1000 histograms with a uniform sample size of 1028
---------------------------------------------------
-
-```
-VmPeak: 72272 kB
-VmSize: 72272 kB
-VmLck: 0 kB
-VmHWM: 16204 kB
-VmRSS: 16204 kB
-VmData: 65100 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 80 kB
-VmSwap: 0 kB
-```
-
-**29.668 kB virtual, TODO 15.084 kB resident per histogram.**
-
-10000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 256912 kB
-VmSize: 256912 kB
-VmLck: 0 kB
-VmHWM: 146204 kB
-VmRSS: 146204 kB
-VmData: 249740 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 448 kB
-VmSwap: 0 kB
-```
-
-**21.4308 kB virtual, 14.5084 kB resident per histogram.**
-
-50000 histograms with a uniform sample size of 1028
----------------------------------------------------
-
-```
-VmPeak: 908112 kB
-VmSize: 908112 kB
-VmLck: 0 kB
-VmHWM: 645832 kB
-VmRSS: 645588 kB
-VmData: 900940 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1716 kB
-VmSwap: 1544 kB
-```
-
-**17.31016 kB virtual, 12.88936 kB resident per histogram.**
-
-1000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
--------------------------------------------------------------------------------------
-
-```
-VmPeak: 62480 kB
-VmSize: 62480 kB
-VmLck: 0 kB
-VmHWM: 11572 kB
-VmRSS: 11572 kB
-VmData: 55308 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 64 kB
-VmSwap: 0 kB
-```
-
-**19.876 kB virtual, 10.452 kB resident per histogram.**
-
-10000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 153296 kB
-VmSize: 153296 kB
-VmLck: 0 kB
-VmHWM: 101176 kB
-VmRSS: 101176 kB
-VmData: 146124 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 240 kB
-VmSwap: 0 kB
-```
-
-**11.0692 kB virtual, 10.0056 kB resident per histogram.**
-
-50000 histograms with an exponentially-decaying sample size of 1028 and alpha of 0.015
---------------------------------------------------------------------------------------
-
-```
-VmPeak: 557264 kB
-VmSize: 557264 kB
-VmLck: 0 kB
-VmHWM: 501056 kB
-VmRSS: 501056 kB
-VmData: 550092 kB
-VmStk: 136 kB
-VmExe: 1048 kB
-VmLib: 1848 kB
-VmPTE: 1032 kB
-VmSwap: 0 kB
-```
-
-**10.2932 kB virtual, 9.99872 kB resident per histogram.**
-
-1000 meters
------------
-
-```
-VmPeak: 74504 kB
-VmSize: 74504 kB
-VmLck: 0 kB
-VmHWM: 24124 kB
-VmRSS: 24124 kB
-VmData: 67340 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 92 kB
-VmSwap: 0 kB
-```
-
-**31.9 kB virtual, 23.004 kB resident per meter.**
-
-10000 meters
-------------
-
-```
-VmPeak: 278920 kB
-VmSize: 278920 kB
-VmLck: 0 kB
-VmHWM: 227300 kB
-VmRSS: 227300 kB
-VmData: 271756 kB
-VmStk: 136 kB
-VmExe: 1040 kB
-VmLib: 1848 kB
-VmPTE: 488 kB
-VmSwap: 0 kB
-```
-
-**23.6316 kB virtual, 22.618 kB resident per meter.**
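The per-metric figures in the removed memory.md are simple deltas against the baseline process divided by the number of registered metrics. Worked through for the 1000-counter run:

```
virtual  per counter: (44016 kB - 42604 kB) / 1000 = 1.412 kB  (VmSize)
resident per counter: ( 1928 kB -  1120 kB) / 1000 = 0.808 kB  (VmHWM/VmRSS)
```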
diff --git a/vendor/github.com/rcrowley/go-metrics/meter.go b/vendor/github.com/rcrowley/go-metrics/meter.go
deleted file mode 100644
index 0389ab0b8f6..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/meter.go
+++ /dev/null
@@ -1,233 +0,0 @@
-package metrics
-
-import (
- "sync"
- "time"
-)
-
-// Meters count events to produce exponentially-weighted moving average rates
-// at one, five, and fifteen minutes, as well as a mean rate.
-type Meter interface {
- Count() int64
- Mark(int64)
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Meter
-}
-
-// GetOrRegisterMeter returns an existing Meter or constructs and registers a
-// new StandardMeter.
-func GetOrRegisterMeter(name string, r Registry) Meter {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewMeter).(Meter)
-}
-
-// NewMeter constructs a new StandardMeter and launches a goroutine.
-func NewMeter() Meter {
- if UseNilMetrics {
- return NilMeter{}
- }
- m := newStandardMeter()
- arbiter.Lock()
- defer arbiter.Unlock()
- arbiter.meters = append(arbiter.meters, m)
- if !arbiter.started {
- arbiter.started = true
- go arbiter.tick()
- }
- return m
-}
-
-// NewRegisteredMeter constructs and registers a new StandardMeter and
-// launches a goroutine.
-func NewRegisteredMeter(name string, r Registry) Meter {
- c := NewMeter()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// MeterSnapshot is a read-only copy of another Meter.
-type MeterSnapshot struct {
- count int64
- rate1, rate5, rate15, rateMean float64
-}
-
-// Count returns the count of events at the time the snapshot was taken.
-func (m *MeterSnapshot) Count() int64 { return m.count }
-
-// Mark panics.
-func (*MeterSnapshot) Mark(n int64) {
- panic("Mark called on a MeterSnapshot")
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (m *MeterSnapshot) Rate1() float64 { return m.rate1 }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (m *MeterSnapshot) Rate5() float64 { return m.rate5 }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (m *MeterSnapshot) Rate15() float64 { return m.rate15 }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (m *MeterSnapshot) RateMean() float64 { return m.rateMean }
-
-// Snapshot returns the snapshot.
-func (m *MeterSnapshot) Snapshot() Meter { return m }
-
-// NilMeter is a no-op Meter.
-type NilMeter struct{}
-
-// Count is a no-op.
-func (NilMeter) Count() int64 { return 0 }
-
-// Mark is a no-op.
-func (NilMeter) Mark(n int64) {}
-
-// Rate1 is a no-op.
-func (NilMeter) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilMeter) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilMeter) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilMeter) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilMeter) Snapshot() Meter { return NilMeter{} }
-
-// StandardMeter is the standard implementation of a Meter.
-type StandardMeter struct {
- lock sync.RWMutex
- snapshot *MeterSnapshot
- a1, a5, a15 EWMA
- startTime time.Time
-}
-
-func newStandardMeter() *StandardMeter {
- return &StandardMeter{
- snapshot: &MeterSnapshot{},
- a1: NewEWMA1(),
- a5: NewEWMA5(),
- a15: NewEWMA15(),
- startTime: time.Now(),
- }
-}
-
-// Count returns the number of events recorded.
-func (m *StandardMeter) Count() int64 {
- m.lock.RLock()
- count := m.snapshot.count
- m.lock.RUnlock()
- return count
-}
-
-// Mark records the occurrence of n events.
-func (m *StandardMeter) Mark(n int64) {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.snapshot.count += n
- m.a1.Update(n)
- m.a5.Update(n)
- m.a15.Update(n)
- m.updateSnapshot()
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (m *StandardMeter) Rate1() float64 {
- m.lock.RLock()
- rate1 := m.snapshot.rate1
- m.lock.RUnlock()
- return rate1
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (m *StandardMeter) Rate5() float64 {
- m.lock.RLock()
- rate5 := m.snapshot.rate5
- m.lock.RUnlock()
- return rate5
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (m *StandardMeter) Rate15() float64 {
- m.lock.RLock()
- rate15 := m.snapshot.rate15
- m.lock.RUnlock()
- return rate15
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (m *StandardMeter) RateMean() float64 {
- m.lock.RLock()
- rateMean := m.snapshot.rateMean
- m.lock.RUnlock()
- return rateMean
-}
-
-// Snapshot returns a read-only copy of the meter.
-func (m *StandardMeter) Snapshot() Meter {
- m.lock.RLock()
- snapshot := *m.snapshot
- m.lock.RUnlock()
- return &snapshot
-}
-
-func (m *StandardMeter) updateSnapshot() {
- // should run with write lock held on m.lock
- snapshot := m.snapshot
- snapshot.rate1 = m.a1.Rate()
- snapshot.rate5 = m.a5.Rate()
- snapshot.rate15 = m.a15.Rate()
- snapshot.rateMean = float64(snapshot.count) / time.Since(m.startTime).Seconds()
-}
-
-func (m *StandardMeter) tick() {
- m.lock.Lock()
- defer m.lock.Unlock()
- m.a1.Tick()
- m.a5.Tick()
- m.a15.Tick()
- m.updateSnapshot()
-}
-
-type meterArbiter struct {
- sync.RWMutex
- started bool
- meters []*StandardMeter
- ticker *time.Ticker
-}
-
-var arbiter = meterArbiter{ticker: time.NewTicker(5e9)}
-
-// Ticks meters on the scheduled interval
-func (ma *meterArbiter) tick() {
- for {
- select {
- case <-ma.ticker.C:
- ma.tickMeters()
- }
- }
-}
-
-func (ma *meterArbiter) tickMeters() {
- ma.RLock()
- defer ma.RUnlock()
- for _, meter := range ma.meters {
- meter.tick()
- }
-}
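Note the design the removed meter.go implements: every StandardMeter is ticked by one shared arbiter goroutine on a five-second ticker (5e9 ns) rather than one goroutine per meter. A usage sketch, assuming the vendored copy matches upstream; the metric name is illustrative:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	m := metrics.GetOrRegisterMeter("ingest.events", nil) // nil → DefaultRegistry
	m.Mark(100)

	time.Sleep(6 * time.Second) // let the shared arbiter tick the EWMAs once
	snap := m.Snapshot()        // read-only; Mark on the snapshot panics
	fmt.Printf("count=%d 1m=%.2f mean=%.2f\n",
		snap.Count(), snap.Rate1(), snap.RateMean())
}
```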
diff --git a/vendor/github.com/rcrowley/go-metrics/metrics.go b/vendor/github.com/rcrowley/go-metrics/metrics.go
deleted file mode 100644
index b97a49ed123..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/metrics.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Go port of Coda Hale's Metrics library
-//
-// <https://github.com/rcrowley/go-metrics>
-//
-// Coda Hale's original work: <https://github.com/codahale/metrics>
-package metrics
-
-// UseNilMetrics is checked by the constructor functions for all of the
-// standard metrics. If it is true, the metric returned is a stub.
-//
-// This global kill-switch helps quantify the observer effect and makes
-// for less cluttered pprof profiles.
-var UseNilMetrics bool = false
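The kill-switch only affects metrics constructed after it is set, since each constructor checks it once. A sketch of using it to quantify instrumentation overhead, assuming the vendored copy matches upstream:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	metrics.UseNilMetrics = true // must be set before any constructors run

	m := metrics.NewMeter() // returns NilMeter{}: Mark and the rates are no-ops
	m.Mark(1000)
	fmt.Println(m.Count()) // 0
}
```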
diff --git a/vendor/github.com/rcrowley/go-metrics/opentsdb.go b/vendor/github.com/rcrowley/go-metrics/opentsdb.go
deleted file mode 100644
index 266b6c93d21..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/opentsdb.go
+++ /dev/null
@@ -1,119 +0,0 @@
-package metrics
-
-import (
- "bufio"
- "fmt"
- "log"
- "net"
- "os"
- "strings"
- "time"
-)
-
-var shortHostName string = ""
-
-// OpenTSDBConfig provides a container with configuration parameters for
-// the OpenTSDB exporter.
-type OpenTSDBConfig struct {
- Addr *net.TCPAddr // Network address to connect to
- Registry Registry // Registry to be exported
- FlushInterval time.Duration // Flush interval
- DurationUnit time.Duration // Time conversion unit for durations
- Prefix string // Prefix to be prepended to metric names
-}
-
-// OpenTSDB is a blocking exporter function which reports metrics in r
-// to a TSDB server located at addr, flushing them every d duration
-// and prepending metric names with prefix.
-func OpenTSDB(r Registry, d time.Duration, prefix string, addr *net.TCPAddr) {
- OpenTSDBWithConfig(OpenTSDBConfig{
- Addr: addr,
- Registry: r,
- FlushInterval: d,
- DurationUnit: time.Nanosecond,
- Prefix: prefix,
- })
-}
-
-// OpenTSDBWithConfig is a blocking exporter function just like OpenTSDB,
-// but it takes an OpenTSDBConfig instead.
-func OpenTSDBWithConfig(c OpenTSDBConfig) {
- for range time.Tick(c.FlushInterval) {
- if err := openTSDB(&c); nil != err {
- log.Println(err)
- }
- }
-}
-
-func getShortHostname() string {
- if shortHostName == "" {
- host, _ := os.Hostname()
- if index := strings.Index(host, "."); index > 0 {
- shortHostName = host[:index]
- } else {
- shortHostName = host
- }
- }
- return shortHostName
-}
-
-func openTSDB(c *OpenTSDBConfig) error {
- shortHostname := getShortHostname()
- now := time.Now().Unix()
- du := float64(c.DurationUnit)
- conn, err := net.DialTCP("tcp", nil, c.Addr)
- if nil != err {
- return err
- }
- defer conn.Close()
- w := bufio.NewWriter(conn)
- c.Registry.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, metric.Count(), shortHostname)
- case Gauge:
- fmt.Fprintf(w, "put %s.%s.value %d %d host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case GaugeFloat64:
- fmt.Fprintf(w, "put %s.%s.value %d %f host=%s\n", c.Prefix, name, now, metric.Value(), shortHostname)
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, h.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, h.Min(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, h.Max(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, h.Mean(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, h.StdDev(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0], shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1], shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2], shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3], shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4], shortHostname)
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, m.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, m.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, m.RateMean(), shortHostname)
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "put %s.%s.count %d %d host=%s\n", c.Prefix, name, now, t.Count(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.min %d %d host=%s\n", c.Prefix, name, now, t.Min()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.max %d %d host=%s\n", c.Prefix, name, now, t.Max()/int64(du), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean %d %.2f host=%s\n", c.Prefix, name, now, t.Mean()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.std-dev %d %.2f host=%s\n", c.Prefix, name, now, t.StdDev()/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.50-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[0]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.75-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[1]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.95-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[2]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.99-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[3]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.999-percentile %d %.2f host=%s\n", c.Prefix, name, now, ps[4]/du, shortHostname)
- fmt.Fprintf(w, "put %s.%s.one-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate1(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.five-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate5(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.fifteen-minute %d %.2f host=%s\n", c.Prefix, name, now, t.Rate15(), shortHostname)
- fmt.Fprintf(w, "put %s.%s.mean-rate %d %.2f host=%s\n", c.Prefix, name, now, t.RateMean(), shortHostname)
- }
- w.Flush()
- })
- return nil
-}
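OpenTSDBWithConfig blocks forever, re-dialing and flushing on every tick, so it ran in a dedicated goroutine. A sketch, assuming the vendored copy matches upstream; the host and prefix are placeholders (4242 is OpenTSDB's conventional port):

```go
package main

import (
	"log"
	"net"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	addr, err := net.ResolveTCPAddr("tcp", "tsdb.example.com:4242") // placeholder host
	if err != nil {
		log.Fatal(err)
	}
	go metrics.OpenTSDBWithConfig(metrics.OpenTSDBConfig{
		Addr:          addr,
		Registry:      metrics.DefaultRegistry,
		FlushInterval: 10 * time.Second,
		DurationUnit:  time.Millisecond, // timer values reported in ms
		Prefix:        "myapp",
	})
	select {} // keep the process alive so the exporter keeps flushing
}
```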
diff --git a/vendor/github.com/rcrowley/go-metrics/registry.go b/vendor/github.com/rcrowley/go-metrics/registry.go
deleted file mode 100644
index 2bb7a1e7d0f..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/registry.go
+++ /dev/null
@@ -1,270 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// DuplicateMetric is the error returned by Registry.Register when a metric
-// already exists. If you mean to Register that metric you must first
-// Unregister the existing metric.
-type DuplicateMetric string
-
-func (err DuplicateMetric) Error() string {
- return fmt.Sprintf("duplicate metric: %s", string(err))
-}
-
-// A Registry holds references to a set of metrics by name and can iterate
-// over them, calling callback functions provided by the user.
-//
-// This is an interface so as to encourage other structs to implement
-// the Registry API as appropriate.
-type Registry interface {
-
- // Call the given function for each registered metric.
- Each(func(string, interface{}))
-
- // Get the metric by the given name or nil if none is registered.
- Get(string) interface{}
-
- // Gets an existing metric or registers the given one.
- // The interface can be the metric to register if not found in registry,
- // or a function returning the metric for lazy instantiation.
- GetOrRegister(string, interface{}) interface{}
-
- // Register the given metric under the given name.
- Register(string, interface{}) error
-
- // Run all registered healthchecks.
- RunHealthchecks()
-
- // Unregister the metric with the given name.
- Unregister(string)
-
- // Unregister all metrics. (Mostly for testing.)
- UnregisterAll()
-}
-
-// The standard implementation of a Registry is a mutex-protected map
-// of names to metrics.
-type StandardRegistry struct {
- metrics map[string]interface{}
- mutex sync.Mutex
-}
-
-// Create a new registry.
-func NewRegistry() Registry {
- return &StandardRegistry{metrics: make(map[string]interface{})}
-}
-
-// Call the given function for each registered metric.
-func (r *StandardRegistry) Each(f func(string, interface{})) {
- for name, i := range r.registered() {
- f(name, i)
- }
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *StandardRegistry) Get(name string) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.metrics[name]
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *StandardRegistry) GetOrRegister(name string, i interface{}) interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- if metric, ok := r.metrics[name]; ok {
- return metric
- }
- if v := reflect.ValueOf(i); v.Kind() == reflect.Func {
- i = v.Call(nil)[0].Interface()
- }
- r.register(name, i)
- return i
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func (r *StandardRegistry) Register(name string, i interface{}) error {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- return r.register(name, i)
-}
-
-// Run all registered healthchecks.
-func (r *StandardRegistry) RunHealthchecks() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for _, i := range r.metrics {
- if h, ok := i.(Healthcheck); ok {
- h.Check()
- }
- }
-}
-
-// Unregister the metric with the given name.
-func (r *StandardRegistry) Unregister(name string) {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- delete(r.metrics, name)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *StandardRegistry) UnregisterAll() {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- for name := range r.metrics {
- delete(r.metrics, name)
- }
-}
-
-func (r *StandardRegistry) register(name string, i interface{}) error {
- if _, ok := r.metrics[name]; ok {
- return DuplicateMetric(name)
- }
- switch i.(type) {
- case Counter, Gauge, GaugeFloat64, Healthcheck, Histogram, Meter, Timer:
- r.metrics[name] = i
- }
- return nil
-}
-
-func (r *StandardRegistry) registered() map[string]interface{} {
- r.mutex.Lock()
- defer r.mutex.Unlock()
- metrics := make(map[string]interface{}, len(r.metrics))
- for name, i := range r.metrics {
- metrics[name] = i
- }
- return metrics
-}
-
-type PrefixedRegistry struct {
- underlying Registry
- prefix string
-}
-
-func NewPrefixedRegistry(prefix string) Registry {
- return &PrefixedRegistry{
- underlying: NewRegistry(),
- prefix: prefix,
- }
-}
-
-func NewPrefixedChildRegistry(parent Registry, prefix string) Registry {
- return &PrefixedRegistry{
- underlying: parent,
- prefix: prefix,
- }
-}
-
-// Call the given function for each registered metric.
-func (r *PrefixedRegistry) Each(fn func(string, interface{})) {
- wrappedFn := func(prefix string) func(string, interface{}) {
- return func(name string, iface interface{}) {
- if strings.HasPrefix(name, prefix) {
- fn(name, iface)
- }
- }
- }
-
- baseRegistry, prefix := findPrefix(r, "")
- baseRegistry.Each(wrappedFn(prefix))
-}
-
-func findPrefix(registry Registry, prefix string) (Registry, string) {
- switch r := registry.(type) {
- case *PrefixedRegistry:
- return findPrefix(r.underlying, r.prefix+prefix)
- case *StandardRegistry:
- return r, prefix
- }
- return nil, ""
-}
-
-// Get the metric by the given name or nil if none is registered.
-func (r *PrefixedRegistry) Get(name string) interface{} {
- realName := r.prefix + name
- return r.underlying.Get(realName)
-}
-
-// Gets an existing metric or registers the given one.
-// The interface can be the metric to register if not found in registry,
-// or a function returning the metric for lazy instantiation.
-func (r *PrefixedRegistry) GetOrRegister(name string, metric interface{}) interface{} {
- realName := r.prefix + name
- return r.underlying.GetOrRegister(realName, metric)
-}
-
-// Register the given metric under the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Register(name string, metric interface{}) error {
- realName := r.prefix + name
- return r.underlying.Register(realName, metric)
-}
-
-// Run all registered healthchecks.
-func (r *PrefixedRegistry) RunHealthchecks() {
- r.underlying.RunHealthchecks()
-}
-
-// Unregister the metric with the given name. The name will be prefixed.
-func (r *PrefixedRegistry) Unregister(name string) {
- realName := r.prefix + name
- r.underlying.Unregister(realName)
-}
-
-// Unregister all metrics. (Mostly for testing.)
-func (r *PrefixedRegistry) UnregisterAll() {
- r.underlying.UnregisterAll()
-}
-
-var DefaultRegistry Registry = NewRegistry()
-
-// Call the given function for each registered metric.
-func Each(f func(string, interface{})) {
- DefaultRegistry.Each(f)
-}
-
-// Get the metric by the given name or nil if none is registered.
-func Get(name string) interface{} {
- return DefaultRegistry.Get(name)
-}
-
-// Gets an existing metric or creates and registers a new one. Threadsafe
-// alternative to calling Get and Register on failure.
-func GetOrRegister(name string, i interface{}) interface{} {
- return DefaultRegistry.GetOrRegister(name, i)
-}
-
-// Register the given metric under the given name. Returns a DuplicateMetric
-// if a metric by the given name is already registered.
-func Register(name string, i interface{}) error {
- return DefaultRegistry.Register(name, i)
-}
-
-// Register the given metric under the given name. Panics if a metric by the
-// given name is already registered.
-func MustRegister(name string, i interface{}) {
- if err := Register(name, i); err != nil {
- panic(err)
- }
-}
-
-// Run all registered healthchecks.
-func RunHealthchecks() {
- DefaultRegistry.RunHealthchecks()
-}
-
-// Unregister the metric with the given name.
-func Unregister(name string) {
- DefaultRegistry.Unregister(name)
-}
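Two details of the removed registry are easy to miss: GetOrRegister accepts either a metric or a constructor function (reflect detects the func kind, so construction is lazy), and a PrefixedRegistry stores metrics in its underlying registry under their full prefixed names. A sketch, assuming the vendored copy matches upstream; the names are illustrative:

```go
package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	base := metrics.NewRegistry()
	web := metrics.NewPrefixedChildRegistry(base, "web.")

	// Passing metrics.NewTimer (not metrics.NewTimer()) means the timer is
	// only constructed if "web.latency" is not already registered.
	t := web.GetOrRegister("latency", metrics.NewTimer).(metrics.Timer)
	t.Update(42 * time.Millisecond)

	// The metric lives in the base registry under its prefixed name.
	fmt.Println(base.Get("web.latency") != nil) // true
}
```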
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime.go b/vendor/github.com/rcrowley/go-metrics/runtime.go
deleted file mode 100644
index 11c6b785a0f..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime.go
+++ /dev/null
@@ -1,212 +0,0 @@
-package metrics
-
-import (
- "runtime"
- "runtime/pprof"
- "time"
-)
-
-var (
- memStats runtime.MemStats
- runtimeMetrics struct {
- MemStats struct {
- Alloc Gauge
- BuckHashSys Gauge
- DebugGC Gauge
- EnableGC Gauge
- Frees Gauge
- HeapAlloc Gauge
- HeapIdle Gauge
- HeapInuse Gauge
- HeapObjects Gauge
- HeapReleased Gauge
- HeapSys Gauge
- LastGC Gauge
- Lookups Gauge
- Mallocs Gauge
- MCacheInuse Gauge
- MCacheSys Gauge
- MSpanInuse Gauge
- MSpanSys Gauge
- NextGC Gauge
- NumGC Gauge
- GCCPUFraction GaugeFloat64
- PauseNs Histogram
- PauseTotalNs Gauge
- StackInuse Gauge
- StackSys Gauge
- Sys Gauge
- TotalAlloc Gauge
- }
- NumCgoCall Gauge
- NumGoroutine Gauge
- NumThread Gauge
- ReadMemStats Timer
- }
- frees uint64
- lookups uint64
- mallocs uint64
- numGC uint32
- numCgoCalls int64
-
- threadCreateProfile = pprof.Lookup("threadcreate")
-)
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called as a goroutine.
-func CaptureRuntimeMemStats(r Registry, d time.Duration) {
- for range time.Tick(d) {
- CaptureRuntimeMemStatsOnce(r)
- }
-}
-
-// Capture new values for the Go runtime statistics exported in
-// runtime.MemStats. This is designed to be called in a background
-// goroutine. Giving a registry which has not been given to
-// RegisterRuntimeMemStats will panic.
-//
-// Be very careful with this because runtime.ReadMemStats calls the C
-// functions runtime·semacquire(&runtime·worldsema) and runtime·stoptheworld()
-// and that last one does what it says on the tin.
-func CaptureRuntimeMemStatsOnce(r Registry) {
- t := time.Now()
- runtime.ReadMemStats(&memStats) // This takes 50-200us.
- runtimeMetrics.ReadMemStats.UpdateSince(t)
-
- runtimeMetrics.MemStats.Alloc.Update(int64(memStats.Alloc))
- runtimeMetrics.MemStats.BuckHashSys.Update(int64(memStats.BuckHashSys))
- if memStats.DebugGC {
- runtimeMetrics.MemStats.DebugGC.Update(1)
- } else {
- runtimeMetrics.MemStats.DebugGC.Update(0)
- }
- if memStats.EnableGC {
- runtimeMetrics.MemStats.EnableGC.Update(1)
- } else {
- runtimeMetrics.MemStats.EnableGC.Update(0)
- }
-
- runtimeMetrics.MemStats.Frees.Update(int64(memStats.Frees - frees))
- runtimeMetrics.MemStats.HeapAlloc.Update(int64(memStats.HeapAlloc))
- runtimeMetrics.MemStats.HeapIdle.Update(int64(memStats.HeapIdle))
- runtimeMetrics.MemStats.HeapInuse.Update(int64(memStats.HeapInuse))
- runtimeMetrics.MemStats.HeapObjects.Update(int64(memStats.HeapObjects))
- runtimeMetrics.MemStats.HeapReleased.Update(int64(memStats.HeapReleased))
- runtimeMetrics.MemStats.HeapSys.Update(int64(memStats.HeapSys))
- runtimeMetrics.MemStats.LastGC.Update(int64(memStats.LastGC))
- runtimeMetrics.MemStats.Lookups.Update(int64(memStats.Lookups - lookups))
- runtimeMetrics.MemStats.Mallocs.Update(int64(memStats.Mallocs - mallocs))
- runtimeMetrics.MemStats.MCacheInuse.Update(int64(memStats.MCacheInuse))
- runtimeMetrics.MemStats.MCacheSys.Update(int64(memStats.MCacheSys))
- runtimeMetrics.MemStats.MSpanInuse.Update(int64(memStats.MSpanInuse))
- runtimeMetrics.MemStats.MSpanSys.Update(int64(memStats.MSpanSys))
- runtimeMetrics.MemStats.NextGC.Update(int64(memStats.NextGC))
- runtimeMetrics.MemStats.NumGC.Update(int64(memStats.NumGC - numGC))
- runtimeMetrics.MemStats.GCCPUFraction.Update(gcCPUFraction(&memStats))
-
- //
- i := numGC % uint32(len(memStats.PauseNs))
- ii := memStats.NumGC % uint32(len(memStats.PauseNs))
- if memStats.NumGC-numGC >= uint32(len(memStats.PauseNs)) {
- for i = 0; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- } else {
- if i > ii {
- for ; i < uint32(len(memStats.PauseNs)); i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- i = 0
- }
- for ; i < ii; i++ {
- runtimeMetrics.MemStats.PauseNs.Update(int64(memStats.PauseNs[i]))
- }
- }
- frees = memStats.Frees
- lookups = memStats.Lookups
- mallocs = memStats.Mallocs
- numGC = memStats.NumGC
-
- runtimeMetrics.MemStats.PauseTotalNs.Update(int64(memStats.PauseTotalNs))
- runtimeMetrics.MemStats.StackInuse.Update(int64(memStats.StackInuse))
- runtimeMetrics.MemStats.StackSys.Update(int64(memStats.StackSys))
- runtimeMetrics.MemStats.Sys.Update(int64(memStats.Sys))
- runtimeMetrics.MemStats.TotalAlloc.Update(int64(memStats.TotalAlloc))
-
- currentNumCgoCalls := numCgoCall()
- runtimeMetrics.NumCgoCall.Update(currentNumCgoCalls - numCgoCalls)
- numCgoCalls = currentNumCgoCalls
-
- runtimeMetrics.NumGoroutine.Update(int64(runtime.NumGoroutine()))
-
- runtimeMetrics.NumThread.Update(int64(threadCreateProfile.Count()))
-}
-
-// Register runtimeMetrics for the Go runtime statistics exported in runtime and
-// specifically runtime.MemStats. The runtimeMetrics are named by their
-// fully-qualified Go symbols, i.e. runtime.MemStats.Alloc.
-func RegisterRuntimeMemStats(r Registry) {
- runtimeMetrics.MemStats.Alloc = NewGauge()
- runtimeMetrics.MemStats.BuckHashSys = NewGauge()
- runtimeMetrics.MemStats.DebugGC = NewGauge()
- runtimeMetrics.MemStats.EnableGC = NewGauge()
- runtimeMetrics.MemStats.Frees = NewGauge()
- runtimeMetrics.MemStats.HeapAlloc = NewGauge()
- runtimeMetrics.MemStats.HeapIdle = NewGauge()
- runtimeMetrics.MemStats.HeapInuse = NewGauge()
- runtimeMetrics.MemStats.HeapObjects = NewGauge()
- runtimeMetrics.MemStats.HeapReleased = NewGauge()
- runtimeMetrics.MemStats.HeapSys = NewGauge()
- runtimeMetrics.MemStats.LastGC = NewGauge()
- runtimeMetrics.MemStats.Lookups = NewGauge()
- runtimeMetrics.MemStats.Mallocs = NewGauge()
- runtimeMetrics.MemStats.MCacheInuse = NewGauge()
- runtimeMetrics.MemStats.MCacheSys = NewGauge()
- runtimeMetrics.MemStats.MSpanInuse = NewGauge()
- runtimeMetrics.MemStats.MSpanSys = NewGauge()
- runtimeMetrics.MemStats.NextGC = NewGauge()
- runtimeMetrics.MemStats.NumGC = NewGauge()
- runtimeMetrics.MemStats.GCCPUFraction = NewGaugeFloat64()
- runtimeMetrics.MemStats.PauseNs = NewHistogram(NewExpDecaySample(1028, 0.015))
- runtimeMetrics.MemStats.PauseTotalNs = NewGauge()
- runtimeMetrics.MemStats.StackInuse = NewGauge()
- runtimeMetrics.MemStats.StackSys = NewGauge()
- runtimeMetrics.MemStats.Sys = NewGauge()
- runtimeMetrics.MemStats.TotalAlloc = NewGauge()
- runtimeMetrics.NumCgoCall = NewGauge()
- runtimeMetrics.NumGoroutine = NewGauge()
- runtimeMetrics.NumThread = NewGauge()
- runtimeMetrics.ReadMemStats = NewTimer()
-
- r.Register("runtime.MemStats.Alloc", runtimeMetrics.MemStats.Alloc)
- r.Register("runtime.MemStats.BuckHashSys", runtimeMetrics.MemStats.BuckHashSys)
- r.Register("runtime.MemStats.DebugGC", runtimeMetrics.MemStats.DebugGC)
- r.Register("runtime.MemStats.EnableGC", runtimeMetrics.MemStats.EnableGC)
- r.Register("runtime.MemStats.Frees", runtimeMetrics.MemStats.Frees)
- r.Register("runtime.MemStats.HeapAlloc", runtimeMetrics.MemStats.HeapAlloc)
- r.Register("runtime.MemStats.HeapIdle", runtimeMetrics.MemStats.HeapIdle)
- r.Register("runtime.MemStats.HeapInuse", runtimeMetrics.MemStats.HeapInuse)
- r.Register("runtime.MemStats.HeapObjects", runtimeMetrics.MemStats.HeapObjects)
- r.Register("runtime.MemStats.HeapReleased", runtimeMetrics.MemStats.HeapReleased)
- r.Register("runtime.MemStats.HeapSys", runtimeMetrics.MemStats.HeapSys)
- r.Register("runtime.MemStats.LastGC", runtimeMetrics.MemStats.LastGC)
- r.Register("runtime.MemStats.Lookups", runtimeMetrics.MemStats.Lookups)
- r.Register("runtime.MemStats.Mallocs", runtimeMetrics.MemStats.Mallocs)
- r.Register("runtime.MemStats.MCacheInuse", runtimeMetrics.MemStats.MCacheInuse)
- r.Register("runtime.MemStats.MCacheSys", runtimeMetrics.MemStats.MCacheSys)
- r.Register("runtime.MemStats.MSpanInuse", runtimeMetrics.MemStats.MSpanInuse)
- r.Register("runtime.MemStats.MSpanSys", runtimeMetrics.MemStats.MSpanSys)
- r.Register("runtime.MemStats.NextGC", runtimeMetrics.MemStats.NextGC)
- r.Register("runtime.MemStats.NumGC", runtimeMetrics.MemStats.NumGC)
- r.Register("runtime.MemStats.GCCPUFraction", runtimeMetrics.MemStats.GCCPUFraction)
- r.Register("runtime.MemStats.PauseNs", runtimeMetrics.MemStats.PauseNs)
- r.Register("runtime.MemStats.PauseTotalNs", runtimeMetrics.MemStats.PauseTotalNs)
- r.Register("runtime.MemStats.StackInuse", runtimeMetrics.MemStats.StackInuse)
- r.Register("runtime.MemStats.StackSys", runtimeMetrics.MemStats.StackSys)
- r.Register("runtime.MemStats.Sys", runtimeMetrics.MemStats.Sys)
- r.Register("runtime.MemStats.TotalAlloc", runtimeMetrics.MemStats.TotalAlloc)
- r.Register("runtime.NumCgoCall", runtimeMetrics.NumCgoCall)
- r.Register("runtime.NumGoroutine", runtimeMetrics.NumGoroutine)
- r.Register("runtime.NumThread", runtimeMetrics.NumThread)
- r.Register("runtime.ReadMemStats", runtimeMetrics.ReadMemStats)
-}
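The register/capture split in the removed runtime.go is order-sensitive: RegisterRuntimeMemStats must run first, because CaptureRuntimeMemStatsOnce updates package-level gauges that are otherwise nil and, as its doc comment warns, panics. A sketch, assuming the vendored copy matches upstream:

```go
package main

import (
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	r := metrics.NewRegistry()
	metrics.RegisterRuntimeMemStats(r) // must precede capture, or capture panics
	go metrics.CaptureRuntimeMemStats(r, 5*time.Second)

	time.Sleep(11 * time.Second) // two capture cycles for the demo
}
```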
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
deleted file mode 100644
index e3391f4e89f..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_cgo.go
+++ /dev/null
@@ -1,10 +0,0 @@
-// +build cgo
-// +build !appengine
-
-package metrics
-
-import "runtime"
-
-func numCgoCall() int64 {
- return runtime.NumCgoCall()
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
deleted file mode 100644
index ca12c05bac7..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return memStats.GCCPUFraction
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
deleted file mode 100644
index 616a3b4751b..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_cgo.go
+++ /dev/null
@@ -1,7 +0,0 @@
-// +build !cgo appengine
-
-package metrics
-
-func numCgoCall() int64 {
- return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go b/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
deleted file mode 100644
index be96aa6f1be..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/runtime_no_gccpufraction.go
+++ /dev/null
@@ -1,9 +0,0 @@
-// +build !go1.5
-
-package metrics
-
-import "runtime"
-
-func gcCPUFraction(memStats *runtime.MemStats) float64 {
- return 0
-}
diff --git a/vendor/github.com/rcrowley/go-metrics/sample.go b/vendor/github.com/rcrowley/go-metrics/sample.go
deleted file mode 100644
index fecee5ef68b..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/sample.go
+++ /dev/null
@@ -1,616 +0,0 @@
-package metrics
-
-import (
- "math"
- "math/rand"
- "sort"
- "sync"
- "time"
-)
-
-const rescaleThreshold = time.Hour
-
-// Samples maintain a statistically-significant selection of values from
-// a stream.
-type Sample interface {
- Clear()
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Size() int
- Snapshot() Sample
- StdDev() float64
- Sum() int64
- Update(int64)
- Values() []int64
- Variance() float64
-}
-
-// ExpDecaySample is an exponentially-decaying sample using a forward-decaying
-// priority reservoir. See Cormode et al's "Forward Decay: A Practical Time
-// Decay Model for Streaming Systems".
-//
-// <http://dimacs.rutgers.edu/~graham/pubs/papers/fwddecay.pdf>
-type ExpDecaySample struct {
- alpha float64
- count int64
- mutex sync.Mutex
- reservoirSize int
- t0, t1 time.Time
- values *expDecaySampleHeap
-}
-
-// NewExpDecaySample constructs a new exponentially-decaying sample with the
-// given reservoir size and alpha.
-func NewExpDecaySample(reservoirSize int, alpha float64) Sample {
- if UseNilMetrics {
- return NilSample{}
- }
- s := &ExpDecaySample{
- alpha: alpha,
- reservoirSize: reservoirSize,
- t0: time.Now(),
- values: newExpDecaySampleHeap(reservoirSize),
- }
- s.t1 = s.t0.Add(rescaleThreshold)
- return s
-}
-
-// Clear clears all samples.
-func (s *ExpDecaySample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.t0 = time.Now()
- s.t1 = s.t0.Add(rescaleThreshold)
- s.values.Clear()
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *ExpDecaySample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Max() int64 {
- return SampleMax(s.Values())
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *ExpDecaySample) Mean() float64 {
- return SampleMean(s.Values())
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *ExpDecaySample) Min() int64 {
- return SampleMin(s.Values())
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *ExpDecaySample) Percentile(p float64) float64 {
- return SamplePercentile(s.Values(), p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *ExpDecaySample) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.Values(), ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *ExpDecaySample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.values.Size()
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *ExpDecaySample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *ExpDecaySample) StdDev() float64 {
- return SampleStdDev(s.Values())
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *ExpDecaySample) Sum() int64 {
- return SampleSum(s.Values())
-}
-
-// Update samples a new value.
-func (s *ExpDecaySample) Update(v int64) {
- s.update(time.Now(), v)
-}
-
-// Values returns a copy of the values in the sample.
-func (s *ExpDecaySample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- vals := s.values.Values()
- values := make([]int64, len(vals))
- for i, v := range vals {
- values[i] = v.v
- }
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *ExpDecaySample) Variance() float64 {
- return SampleVariance(s.Values())
-}
-
-// update samples a new value at a particular timestamp. This is a method all
-// its own to facilitate testing.
-func (s *ExpDecaySample) update(t time.Time, v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if s.values.Size() == s.reservoirSize {
- s.values.Pop()
- }
- s.values.Push(expDecaySample{
- k: math.Exp(t.Sub(s.t0).Seconds()*s.alpha) / rand.Float64(),
- v: v,
- })
- if t.After(s.t1) {
- values := s.values.Values()
- t0 := s.t0
- s.values.Clear()
- s.t0 = t
- s.t1 = s.t0.Add(rescaleThreshold)
- for _, v := range values {
- v.k = v.k * math.Exp(-s.alpha*s.t0.Sub(t0).Seconds())
- s.values.Push(v)
- }
- }
-}
-
-// NilSample is a no-op Sample.
-type NilSample struct{}
-
-// Clear is a no-op.
-func (NilSample) Clear() {}
-
-// Count is a no-op.
-func (NilSample) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilSample) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilSample) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilSample) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilSample) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilSample) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Size is a no-op.
-func (NilSample) Size() int { return 0 }
-
-// Snapshot is a no-op.
-func (NilSample) Snapshot() Sample { return NilSample{} }
-
-// StdDev is a no-op.
-func (NilSample) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilSample) Sum() int64 { return 0 }
-
-// Update is a no-op.
-func (NilSample) Update(v int64) {}
-
-// Values is a no-op.
-func (NilSample) Values() []int64 { return []int64{} }
-
-// Variance is a no-op.
-func (NilSample) Variance() float64 { return 0.0 }
-
-// SampleMax returns the maximum value of the slice of int64.
-func SampleMax(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var max int64 = math.MinInt64
- for _, v := range values {
- if max < v {
- max = v
- }
- }
- return max
-}
-
-// SampleMean returns the mean value of the slice of int64.
-func SampleMean(values []int64) float64 {
- if 0 == len(values) {
- return 0.0
- }
- return float64(SampleSum(values)) / float64(len(values))
-}
-
-// SampleMin returns the minimum value of the slice of int64.
-func SampleMin(values []int64) int64 {
- if 0 == len(values) {
- return 0
- }
- var min int64 = math.MaxInt64
- for _, v := range values {
- if min > v {
- min = v
- }
- }
- return min
-}
-
-// SamplePercentile returns an arbitrary percentile of the slice of int64.
-func SamplePercentile(values int64Slice, p float64) float64 {
- return SamplePercentiles(values, []float64{p})[0]
-}
-
-// SamplePercentiles returns a slice of arbitrary percentiles of the slice of
-// int64.
-func SamplePercentiles(values int64Slice, ps []float64) []float64 {
- scores := make([]float64, len(ps))
- size := len(values)
- if size > 0 {
- sort.Sort(values)
- for i, p := range ps {
- pos := p * float64(size+1)
- if pos < 1.0 {
- scores[i] = float64(values[0])
- } else if pos >= float64(size) {
- scores[i] = float64(values[size-1])
- } else {
- lower := float64(values[int(pos)-1])
- upper := float64(values[int(pos)])
- scores[i] = lower + (pos-math.Floor(pos))*(upper-lower)
- }
- }
- }
- return scores
-}
-
-// SampleSnapshot is a read-only copy of another Sample.
-type SampleSnapshot struct {
- count int64
- values []int64
-}
-
-func NewSampleSnapshot(count int64, values []int64) *SampleSnapshot {
- return &SampleSnapshot{
- count: count,
- values: values,
- }
-}
-
-// Clear panics.
-func (*SampleSnapshot) Clear() {
- panic("Clear called on a SampleSnapshot")
-}
-
-// Count returns the count of inputs at the time the snapshot was taken.
-func (s *SampleSnapshot) Count() int64 { return s.count }
-
-// Max returns the maximal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Max() int64 { return SampleMax(s.values) }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (s *SampleSnapshot) Mean() float64 { return SampleMean(s.values) }
-
-// Min returns the minimal value at the time the snapshot was taken.
-func (s *SampleSnapshot) Min() int64 { return SampleMin(s.values) }
-
-// Percentile returns an arbitrary percentile of values at the time the
-// snapshot was taken.
-func (s *SampleSnapshot) Percentile(p float64) float64 {
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values at the time
-// the snapshot was taken.
-func (s *SampleSnapshot) Percentiles(ps []float64) []float64 {
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample at the time the snapshot was taken.
-func (s *SampleSnapshot) Size() int { return len(s.values) }
-
-// Snapshot returns the snapshot.
-func (s *SampleSnapshot) Snapshot() Sample { return s }
-
-// StdDev returns the standard deviation of values at the time the snapshot was
-// taken.
-func (s *SampleSnapshot) StdDev() float64 { return SampleStdDev(s.values) }
-
-// Sum returns the sum of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Sum() int64 { return SampleSum(s.values) }
-
-// Update panics.
-func (*SampleSnapshot) Update(int64) {
- panic("Update called on a SampleSnapshot")
-}
-
-// Values returns a copy of the values in the sample.
-func (s *SampleSnapshot) Values() []int64 {
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of values at the time the snapshot was taken.
-func (s *SampleSnapshot) Variance() float64 { return SampleVariance(s.values) }
-
-// SampleStdDev returns the standard deviation of the slice of int64.
-func SampleStdDev(values []int64) float64 {
- return math.Sqrt(SampleVariance(values))
-}
-
-// SampleSum returns the sum of the slice of int64.
-func SampleSum(values []int64) int64 {
- var sum int64
- for _, v := range values {
- sum += v
- }
- return sum
-}
-
-// SampleVariance returns the variance of the slice of int64.
-func SampleVariance(values []int64) float64 {
- if 0 == len(values) {
- return 0.0
- }
- m := SampleMean(values)
- var sum float64
- for _, v := range values {
- d := float64(v) - m
- sum += d * d
- }
- return sum / float64(len(values))
-}
-
-// A uniform sample using Vitter's Algorithm R.
-//
-// <http://www.cs.umd.edu/~samir/498/vitter.pdf>
-type UniformSample struct {
- count int64
- mutex sync.Mutex
- reservoirSize int
- values []int64
-}
-
-// NewUniformSample constructs a new uniform sample with the given reservoir
-// size.
-func NewUniformSample(reservoirSize int) Sample {
- if UseNilMetrics {
- return NilSample{}
- }
- return &UniformSample{
- reservoirSize: reservoirSize,
- values: make([]int64, 0, reservoirSize),
- }
-}
-
-// Clear clears all samples.
-func (s *UniformSample) Clear() {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count = 0
- s.values = make([]int64, 0, s.reservoirSize)
-}
-
-// Count returns the number of samples recorded, which may exceed the
-// reservoir size.
-func (s *UniformSample) Count() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return s.count
-}
-
-// Max returns the maximum value in the sample, which may not be the maximum
-// value ever to be part of the sample.
-func (s *UniformSample) Max() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMax(s.values)
-}
-
-// Mean returns the mean of the values in the sample.
-func (s *UniformSample) Mean() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMean(s.values)
-}
-
-// Min returns the minimum value in the sample, which may not be the minimum
-// value ever to be part of the sample.
-func (s *UniformSample) Min() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleMin(s.values)
-}
-
-// Percentile returns an arbitrary percentile of values in the sample.
-func (s *UniformSample) Percentile(p float64) float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentile(s.values, p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of values in the
-// sample.
-func (s *UniformSample) Percentiles(ps []float64) []float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SamplePercentiles(s.values, ps)
-}
-
-// Size returns the size of the sample, which is at most the reservoir size.
-func (s *UniformSample) Size() int {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return len(s.values)
-}
-
-// Snapshot returns a read-only copy of the sample.
-func (s *UniformSample) Snapshot() Sample {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return &SampleSnapshot{
- count: s.count,
- values: values,
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (s *UniformSample) StdDev() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleStdDev(s.values)
-}
-
-// Sum returns the sum of the values in the sample.
-func (s *UniformSample) Sum() int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleSum(s.values)
-}
-
-// Update samples a new value.
-func (s *UniformSample) Update(v int64) {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- s.count++
- if len(s.values) < s.reservoirSize {
- s.values = append(s.values, v)
- } else {
- r := rand.Int63n(s.count)
- if r < int64(len(s.values)) {
- s.values[int(r)] = v
- }
- }
-}
-
-// Values returns a copy of the values in the sample.
-func (s *UniformSample) Values() []int64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- values := make([]int64, len(s.values))
- copy(values, s.values)
- return values
-}
-
-// Variance returns the variance of the values in the sample.
-func (s *UniformSample) Variance() float64 {
- s.mutex.Lock()
- defer s.mutex.Unlock()
- return SampleVariance(s.values)
-}
-
-// expDecaySample represents an individual sample in a heap.
-type expDecaySample struct {
- k float64
- v int64
-}
-
-func newExpDecaySampleHeap(reservoirSize int) *expDecaySampleHeap {
- return &expDecaySampleHeap{make([]expDecaySample, 0, reservoirSize)}
-}
-
-// expDecaySampleHeap is a min-heap of expDecaySamples.
-// The internal implementation is copied from the standard library's
-// container/heap.
-type expDecaySampleHeap struct {
- s []expDecaySample
-}
-
-func (h *expDecaySampleHeap) Clear() {
- h.s = h.s[:0]
-}
-
-func (h *expDecaySampleHeap) Push(s expDecaySample) {
- n := len(h.s)
- h.s = h.s[0 : n+1]
- h.s[n] = s
- h.up(n)
-}
-
-func (h *expDecaySampleHeap) Pop() expDecaySample {
- n := len(h.s) - 1
- h.s[0], h.s[n] = h.s[n], h.s[0]
- h.down(0, n)
-
- n = len(h.s)
- s := h.s[n-1]
- h.s = h.s[0 : n-1]
- return s
-}
-
-func (h *expDecaySampleHeap) Size() int {
- return len(h.s)
-}
-
-func (h *expDecaySampleHeap) Values() []expDecaySample {
- return h.s
-}
-
-func (h *expDecaySampleHeap) up(j int) {
- for {
- i := (j - 1) / 2 // parent
- if i == j || !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- j = i
- }
-}
-
-func (h *expDecaySampleHeap) down(i, n int) {
- for {
- j1 := 2*i + 1
- if j1 >= n || j1 < 0 { // j1 < 0 after int overflow
- break
- }
- j := j1 // left child
- if j2 := j1 + 1; j2 < n && !(h.s[j1].k < h.s[j2].k) {
- j = j2 // = 2*i + 2 // right child
- }
- if !(h.s[j].k < h.s[i].k) {
- break
- }
- h.s[i], h.s[j] = h.s[j], h.s[i]
- i = j
- }
-}
-
-type int64Slice []int64
-
-func (p int64Slice) Len() int { return len(p) }
-func (p int64Slice) Less(i, j int) bool { return p[i] < p[j] }
-func (p int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
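The interpolation in the removed SamplePercentiles computes pos = p·(size+1) and linearly interpolates between neighbouring order statistics. Worked through the code above with four values: for p=0.5, pos = 0.5·5 = 2.5, so lower = values[1] = 20, upper = values[2] = 30, and the score is 20 + 0.5·(30−20) = 25. A runnable check, assuming the vendored copy matches upstream:

```go
package main

import (
	"fmt"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	s := metrics.NewUniformSample(1028)
	for _, v := range []int64{10, 20, 30, 40} {
		s.Update(v)
	}

	// pos = 0.5*(4+1) = 2.5 → interpolate halfway between 20 and 30.
	fmt.Println(s.Percentile(0.5)) // 25
}
```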
diff --git a/vendor/github.com/rcrowley/go-metrics/syslog.go b/vendor/github.com/rcrowley/go-metrics/syslog.go
deleted file mode 100644
index 693f190855c..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/syslog.go
+++ /dev/null
@@ -1,78 +0,0 @@
-// +build !windows
-
-package metrics
-
-import (
- "fmt"
- "log/syslog"
- "time"
-)
-
-// Output each metric in the given registry to syslog periodically using
-// the given syslogger.
-func Syslog(r Registry, d time.Duration, w *syslog.Writer) {
- for range time.Tick(d) {
- r.Each(func(name string, i interface{}) {
- switch metric := i.(type) {
- case Counter:
- w.Info(fmt.Sprintf("counter %s: count: %d", name, metric.Count()))
- case Gauge:
- w.Info(fmt.Sprintf("gauge %s: value: %d", name, metric.Value()))
- case GaugeFloat64:
- w.Info(fmt.Sprintf("gauge %s: value: %f", name, metric.Value()))
- case Healthcheck:
- metric.Check()
- w.Info(fmt.Sprintf("healthcheck %s: error: %v", name, metric.Error()))
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "histogram %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f",
- name,
- h.Count(),
- h.Min(),
- h.Max(),
- h.Mean(),
- h.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- ))
- case Meter:
- m := metric.Snapshot()
- w.Info(fmt.Sprintf(
- "meter %s: count: %d 1-min: %.2f 5-min: %.2f 15-min: %.2f mean: %.2f",
- name,
- m.Count(),
- m.Rate1(),
- m.Rate5(),
- m.Rate15(),
- m.RateMean(),
- ))
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- w.Info(fmt.Sprintf(
- "timer %s: count: %d min: %d max: %d mean: %.2f stddev: %.2f median: %.2f 75%%: %.2f 95%%: %.2f 99%%: %.2f 99.9%%: %.2f 1-min: %.2f 5-min: %.2f 15-min: %.2f mean-rate: %.2f",
- name,
- t.Count(),
- t.Min(),
- t.Max(),
- t.Mean(),
- t.StdDev(),
- ps[0],
- ps[1],
- ps[2],
- ps[3],
- ps[4],
- t.Rate1(),
- t.Rate5(),
- t.Rate15(),
- t.RateMean(),
- ))
- }
- })
- }
-}
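Like the other exporters, Syslog blocks on its ticker. A sketch for non-Windows platforms, assuming the vendored copy matches upstream; the syslog tag is a placeholder:

```go
// +build !windows

package main

import (
	"log/syslog"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	w, err := syslog.New(syslog.LOG_INFO|syslog.LOG_DAEMON, "myapp")
	if err != nil {
		panic(err)
	}
	// Emits one Info line per registered metric every 30s; never returns.
	metrics.Syslog(metrics.DefaultRegistry, 30*time.Second, w)
}
```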
diff --git a/vendor/github.com/rcrowley/go-metrics/timer.go b/vendor/github.com/rcrowley/go-metrics/timer.go
deleted file mode 100644
index 17db8f8d202..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/timer.go
+++ /dev/null
@@ -1,311 +0,0 @@
-package metrics
-
-import (
- "sync"
- "time"
-)
-
-// Timers capture the duration and rate of events.
-type Timer interface {
- Count() int64
- Max() int64
- Mean() float64
- Min() int64
- Percentile(float64) float64
- Percentiles([]float64) []float64
- Rate1() float64
- Rate5() float64
- Rate15() float64
- RateMean() float64
- Snapshot() Timer
- StdDev() float64
- Sum() int64
- Time(func())
- Update(time.Duration)
- UpdateSince(time.Time)
- Variance() float64
-}
-
-// GetOrRegisterTimer returns an existing Timer or constructs and registers a
-// new StandardTimer.
-func GetOrRegisterTimer(name string, r Registry) Timer {
- if nil == r {
- r = DefaultRegistry
- }
- return r.GetOrRegister(name, NewTimer).(Timer)
-}
-
-// NewCustomTimer constructs a new StandardTimer from a Histogram and a Meter.
-func NewCustomTimer(h Histogram, m Meter) Timer {
- if UseNilMetrics {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: h,
- meter: m,
- }
-}
-
-// NewRegisteredTimer constructs and registers a new StandardTimer.
-func NewRegisteredTimer(name string, r Registry) Timer {
- c := NewTimer()
- if nil == r {
- r = DefaultRegistry
- }
- r.Register(name, c)
- return c
-}
-
-// NewTimer constructs a new StandardTimer using an exponentially-decaying
-// sample with the same reservoir size and alpha as UNIX load averages.
-func NewTimer() Timer {
- if UseNilMetrics {
- return NilTimer{}
- }
- return &StandardTimer{
- histogram: NewHistogram(NewExpDecaySample(1028, 0.015)),
- meter: NewMeter(),
- }
-}
-
-// NilTimer is a no-op Timer.
-type NilTimer struct {
- h Histogram
- m Meter
-}
-
-// Count is a no-op.
-func (NilTimer) Count() int64 { return 0 }
-
-// Max is a no-op.
-func (NilTimer) Max() int64 { return 0 }
-
-// Mean is a no-op.
-func (NilTimer) Mean() float64 { return 0.0 }
-
-// Min is a no-op.
-func (NilTimer) Min() int64 { return 0 }
-
-// Percentile is a no-op.
-func (NilTimer) Percentile(p float64) float64 { return 0.0 }
-
-// Percentiles is a no-op.
-func (NilTimer) Percentiles(ps []float64) []float64 {
- return make([]float64, len(ps))
-}
-
-// Rate1 is a no-op.
-func (NilTimer) Rate1() float64 { return 0.0 }
-
-// Rate5 is a no-op.
-func (NilTimer) Rate5() float64 { return 0.0 }
-
-// Rate15 is a no-op.
-func (NilTimer) Rate15() float64 { return 0.0 }
-
-// RateMean is a no-op.
-func (NilTimer) RateMean() float64 { return 0.0 }
-
-// Snapshot is a no-op.
-func (NilTimer) Snapshot() Timer { return NilTimer{} }
-
-// StdDev is a no-op.
-func (NilTimer) StdDev() float64 { return 0.0 }
-
-// Sum is a no-op.
-func (NilTimer) Sum() int64 { return 0 }
-
-// Time is a no-op.
-func (NilTimer) Time(func()) {}
-
-// Update is a no-op.
-func (NilTimer) Update(time.Duration) {}
-
-// UpdateSince is a no-op.
-func (NilTimer) UpdateSince(time.Time) {}
-
-// Variance is a no-op.
-func (NilTimer) Variance() float64 { return 0.0 }
-
-// StandardTimer is the standard implementation of a Timer and uses a Histogram
-// and Meter.
-type StandardTimer struct {
- histogram Histogram
- meter Meter
- mutex sync.Mutex
-}
-
-// Count returns the number of events recorded.
-func (t *StandardTimer) Count() int64 {
- return t.histogram.Count()
-}
-
-// Max returns the maximum value in the sample.
-func (t *StandardTimer) Max() int64 {
- return t.histogram.Max()
-}
-
-// Mean returns the mean of the values in the sample.
-func (t *StandardTimer) Mean() float64 {
- return t.histogram.Mean()
-}
-
-// Min returns the minimum value in the sample.
-func (t *StandardTimer) Min() int64 {
- return t.histogram.Min()
-}
-
-// Percentile returns an arbitrary percentile of the values in the sample.
-func (t *StandardTimer) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of the values in the
-// sample.
-func (t *StandardTimer) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second.
-func (t *StandardTimer) Rate1() float64 {
- return t.meter.Rate1()
-}
-
-// Rate5 returns the five-minute moving average rate of events per second.
-func (t *StandardTimer) Rate5() float64 {
- return t.meter.Rate5()
-}
-
-// Rate15 returns the fifteen-minute moving average rate of events per second.
-func (t *StandardTimer) Rate15() float64 {
- return t.meter.Rate15()
-}
-
-// RateMean returns the meter's mean rate of events per second.
-func (t *StandardTimer) RateMean() float64 {
- return t.meter.RateMean()
-}
-
-// Snapshot returns a read-only copy of the timer.
-func (t *StandardTimer) Snapshot() Timer {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- return &TimerSnapshot{
- histogram: t.histogram.Snapshot().(*HistogramSnapshot),
- meter: t.meter.Snapshot().(*MeterSnapshot),
- }
-}
-
-// StdDev returns the standard deviation of the values in the sample.
-func (t *StandardTimer) StdDev() float64 {
- return t.histogram.StdDev()
-}
-
-// Sum returns the sum in the sample.
-func (t *StandardTimer) Sum() int64 {
- return t.histogram.Sum()
-}
-
-// Record the duration of the execution of the given function.
-func (t *StandardTimer) Time(f func()) {
- ts := time.Now()
- f()
- t.Update(time.Since(ts))
-}
-
-// Record the duration of an event.
-func (t *StandardTimer) Update(d time.Duration) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(d))
- t.meter.Mark(1)
-}
-
-// Record the duration of an event that started at a time and ends now.
-func (t *StandardTimer) UpdateSince(ts time.Time) {
- t.mutex.Lock()
- defer t.mutex.Unlock()
- t.histogram.Update(int64(time.Since(ts)))
- t.meter.Mark(1)
-}
-
-// Variance returns the variance of the values in the sample.
-func (t *StandardTimer) Variance() float64 {
- return t.histogram.Variance()
-}
-
-// TimerSnapshot is a read-only copy of another Timer.
-type TimerSnapshot struct {
- histogram *HistogramSnapshot
- meter *MeterSnapshot
-}
-
-// Count returns the number of events recorded at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Count() int64 { return t.histogram.Count() }
-
-// Max returns the maximum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Max() int64 { return t.histogram.Max() }
-
-// Mean returns the mean value at the time the snapshot was taken.
-func (t *TimerSnapshot) Mean() float64 { return t.histogram.Mean() }
-
-// Min returns the minimum value at the time the snapshot was taken.
-func (t *TimerSnapshot) Min() int64 { return t.histogram.Min() }
-
-// Percentile returns an arbitrary percentile of sampled values at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) Percentile(p float64) float64 {
- return t.histogram.Percentile(p)
-}
-
-// Percentiles returns a slice of arbitrary percentiles of sampled values at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Percentiles(ps []float64) []float64 {
- return t.histogram.Percentiles(ps)
-}
-
-// Rate1 returns the one-minute moving average rate of events per second at the
-// time the snapshot was taken.
-func (t *TimerSnapshot) Rate1() float64 { return t.meter.Rate1() }
-
-// Rate5 returns the five-minute moving average rate of events per second at
-// the time the snapshot was taken.
-func (t *TimerSnapshot) Rate5() float64 { return t.meter.Rate5() }
-
-// Rate15 returns the fifteen-minute moving average rate of events per second
-// at the time the snapshot was taken.
-func (t *TimerSnapshot) Rate15() float64 { return t.meter.Rate15() }
-
-// RateMean returns the meter's mean rate of events per second at the time the
-// snapshot was taken.
-func (t *TimerSnapshot) RateMean() float64 { return t.meter.RateMean() }
-
-// Snapshot returns the snapshot.
-func (t *TimerSnapshot) Snapshot() Timer { return t }
-
-// StdDev returns the standard deviation of the values at the time the snapshot
-// was taken.
-func (t *TimerSnapshot) StdDev() float64 { return t.histogram.StdDev() }
-
-// Sum returns the sum at the time the snapshot was taken.
-func (t *TimerSnapshot) Sum() int64 { return t.histogram.Sum() }
-
-// Time panics.
-func (*TimerSnapshot) Time(func()) {
- panic("Time called on a TimerSnapshot")
-}
-
-// Update panics.
-func (*TimerSnapshot) Update(time.Duration) {
- panic("Update called on a TimerSnapshot")
-}
-
-// UpdateSince panics.
-func (*TimerSnapshot) UpdateSince(time.Time) {
- panic("UpdateSince called on a TimerSnapshot")
-}
-
-// Variance returns the variance of the values at the time the snapshot was
-// taken.
-func (t *TimerSnapshot) Variance() float64 { return t.histogram.Variance() }
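A Timer, as deleted above, is just a Histogram (latency distribution) plus a Meter (event rate) updated together under one mutex, so a single Update feeds both. A minimal usage sketch against the API in this file; the metric name is illustrative:

package main

import (
	"fmt"
	"time"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	// nil registry falls back to DefaultRegistry, per GetOrRegisterTimer above.
	t := metrics.GetOrRegisterTimer("handler.latency", nil)

	// Time a function: the duration goes to the histogram, the event to the meter.
	t.Time(func() { time.Sleep(10 * time.Millisecond) })

	// Or record against an explicit start time.
	start := time.Now()
	time.Sleep(5 * time.Millisecond)
	t.UpdateSince(start)

	snap := t.Snapshot() // read-only, internally consistent view
	fmt.Printf("count=%d p99=%.0fns rate1m=%.2f/s\n",
		snap.Count(), snap.Percentile(0.99), snap.Rate1())
}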
diff --git a/vendor/github.com/rcrowley/go-metrics/validate.sh b/vendor/github.com/rcrowley/go-metrics/validate.sh
deleted file mode 100755
index f6499982e58..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/validate.sh
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/bash
-
-set -e
-
-# check there are no formatting issues
-GOFMT_LINES=`gofmt -l . | wc -l | xargs`
-test $GOFMT_LINES -eq 0 || echo "gofmt needs to be run, ${GOFMT_LINES} files have issues"
-
-# run the tests for the root package
-go test .
diff --git a/vendor/github.com/rcrowley/go-metrics/writer.go b/vendor/github.com/rcrowley/go-metrics/writer.go
deleted file mode 100644
index 091e971d2e6..00000000000
--- a/vendor/github.com/rcrowley/go-metrics/writer.go
+++ /dev/null
@@ -1,100 +0,0 @@
-package metrics
-
-import (
- "fmt"
- "io"
- "sort"
- "time"
-)
-
-// Write sorts and writes each metric in the given registry periodically to the
-// given io.Writer.
-func Write(r Registry, d time.Duration, w io.Writer) {
- for _ = range time.Tick(d) {
- WriteOnce(r, w)
- }
-}
-
-// WriteOnce sorts and writes metrics in the given registry to the given
-// io.Writer.
-func WriteOnce(r Registry, w io.Writer) {
- var namedMetrics namedMetricSlice
- r.Each(func(name string, i interface{}) {
- namedMetrics = append(namedMetrics, namedMetric{name, i})
- })
-
- sort.Sort(namedMetrics)
- for _, namedMetric := range namedMetrics {
- switch metric := namedMetric.m.(type) {
- case Counter:
- fmt.Fprintf(w, "counter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", metric.Count())
- case Gauge:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %9d\n", metric.Value())
- case GaugeFloat64:
- fmt.Fprintf(w, "gauge %s\n", namedMetric.name)
- fmt.Fprintf(w, " value: %f\n", metric.Value())
- case Healthcheck:
- metric.Check()
- fmt.Fprintf(w, "healthcheck %s\n", namedMetric.name)
- fmt.Fprintf(w, " error: %v\n", metric.Error())
- case Histogram:
- h := metric.Snapshot()
- ps := h.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "histogram %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", h.Count())
- fmt.Fprintf(w, " min: %9d\n", h.Min())
- fmt.Fprintf(w, " max: %9d\n", h.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", h.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", h.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- case Meter:
- m := metric.Snapshot()
- fmt.Fprintf(w, "meter %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", m.Count())
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", m.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", m.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", m.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", m.RateMean())
- case Timer:
- t := metric.Snapshot()
- ps := t.Percentiles([]float64{0.5, 0.75, 0.95, 0.99, 0.999})
- fmt.Fprintf(w, "timer %s\n", namedMetric.name)
- fmt.Fprintf(w, " count: %9d\n", t.Count())
- fmt.Fprintf(w, " min: %9d\n", t.Min())
- fmt.Fprintf(w, " max: %9d\n", t.Max())
- fmt.Fprintf(w, " mean: %12.2f\n", t.Mean())
- fmt.Fprintf(w, " stddev: %12.2f\n", t.StdDev())
- fmt.Fprintf(w, " median: %12.2f\n", ps[0])
- fmt.Fprintf(w, " 75%%: %12.2f\n", ps[1])
- fmt.Fprintf(w, " 95%%: %12.2f\n", ps[2])
- fmt.Fprintf(w, " 99%%: %12.2f\n", ps[3])
- fmt.Fprintf(w, " 99.9%%: %12.2f\n", ps[4])
- fmt.Fprintf(w, " 1-min rate: %12.2f\n", t.Rate1())
- fmt.Fprintf(w, " 5-min rate: %12.2f\n", t.Rate5())
- fmt.Fprintf(w, " 15-min rate: %12.2f\n", t.Rate15())
- fmt.Fprintf(w, " mean rate: %12.2f\n", t.RateMean())
- }
- }
-}
-
-type namedMetric struct {
- name string
- m interface{}
-}
-
-// namedMetricSlice is a slice of namedMetrics that implements sort.Interface.
-type namedMetricSlice []namedMetric
-
-func (nms namedMetricSlice) Len() int { return len(nms) }
-
-func (nms namedMetricSlice) Swap(i, j int) { nms[i], nms[j] = nms[j], nms[i] }
-
-func (nms namedMetricSlice) Less(i, j int) bool {
- return nms[i].name < nms[j].name
-}
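WriteOnce sorts metrics by name before printing, so successive dumps are stable and diffable. A minimal sketch, assuming the API above (the gauge name is illustrative):

package main

import (
	"os"

	metrics "github.com/rcrowley/go-metrics"
)

func main() {
	g := metrics.NewGauge()
	metrics.Register("queue.depth", g)
	g.Update(42)

	// One sorted, human-readable dump of every registered metric.
	metrics.WriteOnce(metrics.DefaultRegistry, os.Stdout)
}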
diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go
deleted file mode 100644
index bb36150a499..00000000000
--- a/vendor/github.com/weaveworks-experiments/loki/pkg/client/collector.go
+++ /dev/null
@@ -1,150 +0,0 @@
-package loki
-
-import (
- "fmt"
- "io"
- "log"
- "net/http"
- "sync"
-
- "github.com/apache/thrift/lib/go/thrift"
- "github.com/openzipkin/zipkin-go-opentracing/_thrift/gen-go/zipkincore"
-)
-
-// Want to be able to support a service doing 100 QPS with a 15s scrape interval
-var globalCollector = NewCollector(15 * 100)
-
-type Collector struct {
- mtx sync.Mutex
- traceIDs map[int64]int // map from trace ID to index in traces
- traces []trace
- next int
- length int
-}
-
-type trace struct {
- traceID int64
- spans []*zipkincore.Span
-}
-
-func NewCollector(capacity int) *Collector {
- return &Collector{
- traceIDs: make(map[int64]int, capacity),
- traces: make([]trace, capacity, capacity),
- next: 0,
- length: 0,
- }
-}
-
-func (c *Collector) Collect(span *zipkincore.Span) error {
- if span == nil {
- return fmt.Errorf("cannot collect nil span")
- }
-
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- traceID := span.GetTraceID()
- idx, ok := c.traceIDs[traceID]
- if !ok {
- // Pick a slot in c.traces for this trace
- idx = c.next
- c.next++
- c.next %= cap(c.traces) // wrap
-
- // If the slot is occupied, we'll need to clear the trace ID index;
- // otherwise we'll need to bump the number of traces.
- if c.length == cap(c.traces) {
- delete(c.traceIDs, c.traces[idx].traceID)
- } else {
- c.length++
- }
-
- // Initialise said slot.
- c.traceIDs[traceID] = idx
- c.traces[idx].traceID = traceID
- c.traces[idx].spans = c.traces[idx].spans[:0]
- }
-
- c.traces[idx].spans = append(c.traces[idx].spans, span)
- return nil
-}
-
-func (*Collector) Close() error {
- return nil
-}
-
-func (c *Collector) gather() []*zipkincore.Span {
- c.mtx.Lock()
- defer c.mtx.Unlock()
-
- spans := make([]*zipkincore.Span, 0, c.length)
- i, count := c.next-c.length, 0
- if i < 0 {
- i = cap(c.traces) + i
- }
- for count < c.length {
- i %= cap(c.traces)
- spans = append(spans, c.traces[i].spans...)
- delete(c.traceIDs, c.traces[i].traceID)
- i++
- count++
- }
- c.length = 0
- if len(c.traceIDs) != 0 {
- panic("didn't clear all trace ids")
- }
- return spans
-}
-
-func (c *Collector) ServeHTTP(w http.ResponseWriter, r *http.Request) {
- spans := c.gather()
- if err := WriteSpans(spans, w); err != nil {
- log.Printf("error writing spans: %v", err)
- http.Error(w, err.Error(), http.StatusInternalServerError)
- return
- }
-}
-
-func WriteSpans(spans []*zipkincore.Span, w io.Writer) error {
- transport := thrift.NewStreamTransportW(w)
- protocol := thrift.NewTCompactProtocol(transport)
-
- if err := protocol.WriteListBegin(thrift.STRUCT, len(spans)); err != nil {
- return err
- }
- for _, span := range spans {
- if err := span.Write(protocol); err != nil {
- return err
- }
- }
- if err := protocol.WriteListEnd(); err != nil {
- return err
- }
- return protocol.Flush()
-}
-
-func ReadSpans(r io.Reader) ([]*zipkincore.Span, error) {
- transport := thrift.NewStreamTransportR(r)
- protocol := thrift.NewTCompactProtocol(transport)
- ttype, size, err := protocol.ReadListBegin()
- if err != nil {
- return nil, err
- }
- spans := make([]*zipkincore.Span, 0, size)
- if ttype != thrift.STRUCT {
- return nil, fmt.Errorf("unexpected type: %v", ttype)
- }
- for i := 0; i < size; i++ {
- span := zipkincore.NewSpan()
- if err := span.Read(protocol); err != nil {
- return nil, err
- }
- spans = append(spans, span)
- }
- return spans, protocol.ReadListEnd()
-}
-
-func Handler() http.Handler {
- return globalCollector
-}
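The deleted collector keeps a fixed-capacity ring of traces and drains it on every HTTP scrape, serialising spans with Thrift's compact protocol. A minimal sketch of exposing it yourself, which is what the RegisterInstrumentation change below stops doing for you (the port is illustrative):

package main

import (
	"log"
	"net/http"

	loki "github.com/weaveworks-experiments/loki/pkg/client"
)

func main() {
	// Each GET drains the global span buffer, exactly as the removed
	// router.Handle("/traces", loki.Handler()) line did.
	http.Handle("/traces", loki.Handler())
	log.Fatal(http.ListenAndServe(":8080", nil))
}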
diff --git a/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go b/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go
deleted file mode 100644
index b3efe9e97fd..00000000000
--- a/vendor/github.com/weaveworks-experiments/loki/pkg/client/tracer.go
+++ /dev/null
@@ -1,27 +0,0 @@
-package loki
-
-import (
- "fmt"
- "os"
-
- "github.com/opentracing/opentracing-go"
- "github.com/openzipkin/zipkin-go-opentracing"
-)
-
-func NewTracer() (opentracing.Tracer, error) {
- // create recorder.
- hostname, err := os.Hostname()
- if err != nil {
- return nil, err
- }
- recorder := zipkintracer.NewRecorder(globalCollector, false, hostname, "")
-
- // create tracer.
- tracer, err := zipkintracer.NewTracer(recorder)
- if err != nil {
- fmt.Printf("unable to create Zipkin tracer: %+v", err)
- os.Exit(-1)
- }
-
- return tracer, nil
-}
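NewTracer attaches a Zipkin recorder to the shared in-process collector; until this change, the weaveworks/common server package installed it as the global tracer in init(). An application that still wants Loki tracing would now have to opt in itself, roughly like this (a sketch, not part of this diff):

package main

import (
	"log"

	opentracing "github.com/opentracing/opentracing-go"
	loki "github.com/weaveworks-experiments/loki/pkg/client"
)

func main() {
	// What the removed server init() used to do, now explicit and opt-in.
	tracer, err := loki.NewTracer()
	if err != nil {
		log.Fatalf("failed to create tracer: %v", err)
	}
	opentracing.InitGlobalTracer(tracer)

	span := opentracing.StartSpan("example")
	span.Finish()
}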
diff --git a/vendor/github.com/weaveworks/common/middleware/logging.go b/vendor/github.com/weaveworks/common/middleware/logging.go
index 4e3f739c85b..4dbc049bc9d 100644
--- a/vendor/github.com/weaveworks/common/middleware/logging.go
+++ b/vendor/github.com/weaveworks/common/middleware/logging.go
@@ -36,7 +36,7 @@ func (l Log) Wrap(next http.Handler) http.Handler {
wrapped := newBadResponseLoggingWriter(w, &buf)
next.ServeHTTP(wrapped, r)
statusCode := wrapped.statusCode
- if 100 <= statusCode && statusCode < 500 || statusCode == 502 {
+ if 100 <= statusCode && statusCode < 500 || statusCode == http.StatusBadGateway || statusCode == http.StatusServiceUnavailable {
logWithRequest(r).Debugf("%s %s (%d) %s", r.Method, uri, statusCode, time.Since(begin))
if l.LogRequestHeaders && headers != nil {
logWithRequest(r).Debugf("Is websocket request: %v\n%s", IsWSHandshakeRequest(r), string(headers))
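The logging change above widens the Debug-level bucket: besides all non-5xx responses, 502 and 503 are now logged at Debug as well, since gateways and draining backends produce them routinely and they would otherwise flood the higher-severity log. Read in isolation, the predicate after this change is:

package main

import (
	"fmt"
	"net/http"
)

// debugLevel reports whether a status code is logged at Debug after this
// change: all non-5xx responses, plus 502 (Bad Gateway) and 503 (Service
// Unavailable).
func debugLevel(statusCode int) bool {
	return 100 <= statusCode && statusCode < 500 ||
		statusCode == http.StatusBadGateway ||
		statusCode == http.StatusServiceUnavailable
}

func main() {
	for _, code := range []int{200, 404, 500, 502, 503} {
		fmt.Println(code, debugLevel(code)) // 500 is the only false here
	}
}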
diff --git a/vendor/github.com/weaveworks/common/server/server.go b/vendor/github.com/weaveworks/common/server/server.go
index 8cf02ee4a6d..3d1496f8ff9 100644
--- a/vendor/github.com/weaveworks/common/server/server.go
+++ b/vendor/github.com/weaveworks/common/server/server.go
@@ -18,7 +18,6 @@ import (
"golang.org/x/net/context"
"google.golang.org/grpc"
- "github.com/weaveworks-experiments/loki/pkg/client"
"github.com/weaveworks/common/httpgrpc"
httpgrpc_server "github.com/weaveworks/common/httpgrpc/server"
"github.com/weaveworks/common/instrument"
@@ -26,15 +25,6 @@ import (
"github.com/weaveworks/common/signals"
)
-func init() {
- tracer, err := loki.NewTracer()
- if err != nil {
- panic(fmt.Sprintf("Failed to create tracer: %v", err))
- } else {
- opentracing.InitGlobalTracer(tracer)
- }
-}
-
// Config for a Server
type Config struct {
MetricsNamespace string
@@ -67,8 +57,7 @@ func (cfg *Config) RegisterFlags(f *flag.FlagSet) {
// Server wraps a HTTP and gRPC server, and some common initialization.
//
-// Servers will be automatically instrumented for Prometheus metrics
-// and Loki tracing. HTTP over gRPC
+// Servers will be automatically instrumented for Prometheus metrics.
type Server struct {
cfg Config
handler *signals.Handler
@@ -156,7 +145,6 @@ func New(cfg Config) (*Server, error) {
// RegisterInstrumentation on the given router.
func RegisterInstrumentation(router *mux.Router) {
router.Handle("/metrics", prometheus.Handler())
- router.Handle("/traces", loki.Handler())
router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux)
}
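After this change RegisterInstrumentation mounts only /metrics and the pprof handlers. Note that the pprof routes exist on http.DefaultServeMux only because net/http/pprof is imported somewhere for its side effects; a standalone sketch making that explicit (gorilla/mux as used in this repo, port illustrative):

package main

import (
	"net/http"
	_ "net/http/pprof" // registers /debug/pprof/* on http.DefaultServeMux

	"github.com/gorilla/mux"
	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	router := mux.NewRouter()
	// Same wiring as the trimmed RegisterInstrumentation: metrics and pprof
	// survive, the /traces route is gone.
	router.Handle("/metrics", prometheus.Handler())
	router.PathPrefix("/debug/pprof").Handler(http.DefaultServeMux)
	http.ListenAndServe(":8080", router)
}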