diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json index b6765cc89362..e942edbf10d1 100644 --- a/Godeps/Godeps.json +++ b/Godeps/Godeps.json @@ -10,7 +10,7 @@ { "REBASE TODO": "git describe --long --tags --abbrev=7", "ImportPath": "k8s.io/kubernetes/pkg/api", - "Comment": "v1.16.0-beta.2-63-g2f054b7", + "Comment": "v1.16.2", "Rev": "2f054b7646dc9e98f6dea458d2fb65e1d2c1f731" } ] diff --git a/glide.lock b/glide.lock index 8171a750f591..d29b86ee491b 100644 --- a/glide.lock +++ b/glide.lock @@ -1,15 +1,15 @@ -hash: dd1b0fb63a26d53b145213c1268b9d79ce2058c95b85cb07aef1565f84eef517 -updated: 2019-10-04T08:57:16.574555756-04:00 +hash: 37739b9919a5b4ea5b24eb4adf0734609d309ed65e3d388650393e8ffb8ff8ad +updated: 2019-10-28T18:16:19.642044+01:00 imports: - name: bitbucket.org/ww/goautoneg - version: 2ae31c8b6b30d2f4c8100c20d527b571e9c433bb + version: a7dc8b61c822528f973a5e4e7b272055c6fdb43e repo: https://github.com/munnerz/goautoneg.git - name: cloud.google.com/go version: 8c41231e01b2085512d98153bcffb847ff9b4b9f subpackages: - compute/metadata - name: github.com/apparentlymart/go-cidr - version: b1115bf8e14a60131a196f908223e4506b0ddc35 + version: c0fb5fbe0acb592411e2db59add389a43260ad44 subpackages: - cidr - name: github.com/armon/circbuf @@ -49,6 +49,7 @@ imports: - private/protocol/query - private/protocol/query/queryutil - private/protocol/rest + - private/protocol/restxml - private/protocol/xml/xmlutil - service/autoscaling - service/ec2 @@ -56,6 +57,7 @@ imports: - service/elb - service/elbv2 - service/kms + - service/route53 - service/sts - name: github.com/Azure/azure-sdk-for-go version: 07e904809009c506339e781db6167dd06e95cde6 @@ -316,7 +318,7 @@ imports: - ipamutils - ipvs - name: github.com/docker/libtrust - version: 9cbd2a1374f46905c68a4eb3694a130610adc62a + version: aabc10ec26b754e797f9028f4589c5b7bd90dc20 - name: github.com/docker/spdystream version: 449fdfce4d962303d702fec724ef0ad181c92528 subpackages: @@ -361,7 +363,7 @@ imports: - name: 
github.com/go-openapi/jsonreference version: 2903bfd4bfbaf188694f1edf731f2725a8fa344f - name: github.com/go-openapi/loads - version: a80dea3052f00e5f032e860dd7355cd0cc67e24d + version: 8548893a17237be4a5b2f74773f23002f4179bbe - name: github.com/go-openapi/runtime version: 109737172424d8a656fd1199e28c9f5cc89b0cca - name: github.com/go-openapi/spec @@ -410,13 +412,13 @@ imports: - ptypes/timestamp - ptypes/wrappers - name: github.com/gonum/blas - version: 37e82626499e1df7c54aeaba0959fd6e7e8dc1e4 + version: f22b278b28ac9805aadd613a754a60c35b24ae69 subpackages: - blas64 - native - native/internal/math32 - name: github.com/gonum/floats - version: f74b330d45c56584a6ea7a27f5c64ea2900631e9 + version: c233463c7e827fd71a8cdb62dfda0e98f7c39ad5 - name: github.com/gonum/graph version: 50b27dea7ebbfb052dfaf91681afc6fde28d8796 subpackages: @@ -433,17 +435,17 @@ imports: - path - simple - name: github.com/gonum/internal - version: e57e4534cf9b3b00ef6c0175f59d8d2d34f60914 + version: f884aa71402950fb2796dbea0d5aa9ef9cfad8ca subpackages: - asm/f32 - asm/f64 - name: github.com/gonum/lapack - version: 5ed4b826becd1807e09377508f51756586d1a98c + version: e4cdc5a0bff924bb10be88482e635bd40429f65e subpackages: - lapack64 - native - name: github.com/gonum/matrix - version: dd6034299e4242c9f0ea36735e6d4264dfcb3f9f + version: c518dec07be9a636c38a4650e217be059b5952ec subpackages: - mat64 - name: github.com/google/btree @@ -584,6 +586,7 @@ imports: subpackages: - hcl/ast - hcl/parser + - hcl/printer - hcl/scanner - hcl/strconv - hcl/token @@ -621,16 +624,24 @@ imports: version: 2de2192f9e35ce981c152a873ed943b93b79ced4 - name: github.com/konsorten/go-windows-terminal-sequences version: 5c8c8bd35d3832f5d134ae1e1e375b69a4d25242 +- name: github.com/lestrrat-go/jspointer + version: 82fadba7561c3a8d78133c2b957263c0963bb79d +- name: github.com/lestrrat-go/jsref + version: 1b590508f37d3af76c77c8328e16978d2889b486 + subpackages: + - provider +- name: github.com/lestrrat-go/pdebug + version: 
39f9a71bcabe9432cbdfe4d3d33f41988acd2ce6 +- name: github.com/lestrrat-go/structinfo + version: acd51874663bf3297433cb1f1015075c3cbe6130 - name: github.com/lestrrat/go-jspointer - version: f4881e611bdbe9fb413a7780721ef8400a1f2341 + version: 82fadba7561c3a8d78133c2b957263c0963bb79d repo: https://github.com/lestrrat/go-jspointer.git - name: github.com/lestrrat/go-jsref version: 50df7b2d07d799426a9ac43fa24bdb4785f72a54 repo: https://github.com/lestrrat/go-jsref.git - subpackages: - - provider - name: github.com/lestrrat/go-jsschema - version: a6a42341b50d8d7e2a733db922eefaa756321021 + version: 5c81c58ffcc359c4390d440b45f5462edb0107cb - name: github.com/lestrrat/go-pdebug version: 569c97477ae8837e053e5a50bc739e15172b8ebe repo: https://github.com/lestrrat/go-pdebug.git @@ -1077,7 +1088,7 @@ imports: - name: github.com/spf13/pflag version: 298182f68c66c05229eb03ac171abe6e309ee79a - name: github.com/spf13/viper - version: 7fb2782df3d83e0036cc89f461ed0422628776f4 + version: 9e56dacc08fbbf8c9ee2dbc717553c758ce42bc9 - name: github.com/storageos/go-api version: 343b3eff91fcc84b0165e252eb843f5fd720fa4e subpackages: @@ -1085,7 +1096,7 @@ imports: - serror - types - name: github.com/stretchr/objx - version: 1a9d0bb9f541897e62256577b352fdbc1fb4fd94 + version: ea4fe68685ee0d3cee7032121851b57e7494e8ea - name: github.com/stretchr/testify version: ffdc059bfe9ce6a4e144ba849dbedead332c6053 subpackages: @@ -1392,7 +1403,7 @@ imports: - name: gopkg.in/yaml.v2 version: f221b8435cfb71e54062f6c6e99e9ade30b124d5 - name: k8s.io/api - version: d2ab659560cb09bd6c9a3011b6468f0025c65e63 + version: c1e2ddbd0dac6a9739e0530fc65faaa477bed327 repo: https://github.com/openshift/kubernetes-api.git subpackages: - admission/v1 @@ -1437,7 +1448,7 @@ imports: - storage/v1alpha1 - storage/v1beta1 - name: k8s.io/apiextensions-apiserver - version: 159fa656239e4ec20590375d7d43ec751f1fc5e4 + version: dccaaad1f79ea273396a21c44b5160e06d043d2d repo: https://github.com/openshift/kubernetes-apiextensions-apiserver.git 
subpackages: - pkg/apihelpers @@ -1491,7 +1502,7 @@ imports: - test/integration - test/integration/fixtures - name: k8s.io/apimachinery - version: 27d36303b6556f377b4f34e64705fa9024a12b0c + version: 63ca3423f7c68b8e3e0a7a2a3a98a11840ab9a67 repo: https://github.com/openshift/kubernetes-apimachinery.git subpackages: - pkg/api/apitesting @@ -1557,7 +1568,7 @@ imports: - third_party/forked/golang/netutil - third_party/forked/golang/reflect - name: k8s.io/apiserver - version: d24a1e961b50a111558447152a4c845629f41b6a + version: b59c55136d196a562ddcf5304236b3ea93e5817c repo: https://github.com/openshift/kubernetes-apiserver.git subpackages: - pkg/admission @@ -1692,7 +1703,7 @@ imports: - pkg/printers - pkg/resource - name: k8s.io/client-go - version: 6b125bdbf71e924ebf06a0003ae54c994f30ebe7 + version: 3131d6e04e643e4a271c6b1fc5250baf1d40b602 repo: https://github.com/openshift/kubernetes-client-go.git subpackages: - discovery @@ -1946,7 +1957,7 @@ imports: version: d32e54a00c6a30786b8db3409983d6a96baacb68 repo: https://github.com/openshift/kubernetes-code-generator.git - name: k8s.io/component-base - version: c1db07a83ddd303366e4c13e16d2c1bfce990201 + version: 9ddfa9e68ad865d401fd887b61c5712cb1459cc1 repo: https://github.com/openshift/kubernetes-component-base.git subpackages: - cli/flag @@ -1961,7 +1972,7 @@ imports: - metrics/prometheus/workqueue - version - name: k8s.io/cri-api - version: 24ae4d4e8b036b885ee1f4930ec2b173eabb28e7 + version: 446748cffddacb0275666009e4ec68f06d00da17 subpackages: - pkg/apis - pkg/apis/runtime/v1alpha2 @@ -2125,7 +2136,7 @@ imports: subpackages: - config/v1beta1 - name: k8s.io/kubernetes - version: 3538c7842b1e43174e181b732cd3147a56711427 + version: b3bb404ef49a1c763cbf97ede51c0de37cd7787b repo: https://github.com/openshift/kubernetes.git subpackages: - cmd/cloud-controller-manager/app @@ -2912,6 +2923,7 @@ imports: - plugin/pkg/auth/authorizer/rbac - plugin/pkg/auth/authorizer/rbac/bootstrappolicy - 
staging/src/k8s.io/apimachinery/pkg/api/apitesting + - staging/src/k8s.io/apimachinery/pkg/apis/meta/v1/unstructured - test/e2e - test/e2e/apimachinery - test/e2e/apps @@ -2996,7 +3008,7 @@ imports: - third_party/forked/gonum/graph/simple - third_party/forked/gonum/graph/traverse - name: k8s.io/legacy-cloud-providers - version: 94dfe4b16be51dea7f45abda3c88605b7fa886bb + version: d60d6f2bae8387b85397bdf1df28476e9d12ca5b repo: https://github.com/openshift/kubernetes-legacy-cloud-providers.git subpackages: - aws diff --git a/glide.yaml b/glide.yaml index 1ca749731b7e..e76eb8757e32 100644 --- a/glide.yaml +++ b/glide.yaml @@ -77,7 +77,7 @@ import: version: origin-4.3-kubernetes-1.16.0-rc.2 - package: k8s.io/kubernetes repo: https://github.com/openshift/kubernetes.git - version: origin-4.3-kubernetes-1.16.0-rc.2 + version: origin-4.3-kubernetes-1.16.2 # this matches the 1.16 branch from kube - package: k8s.io/gengo version: 26a664648505d962332bda642b27306bc10d1082 diff --git a/images/hyperkube/Dockerfile.rhel b/images/hyperkube/Dockerfile.rhel index 0ec3b2f2409f..eee2708a7e98 100644 --- a/images/hyperkube/Dockerfile.rhel +++ b/images/hyperkube/Dockerfile.rhel @@ -10,4 +10,4 @@ COPY --from=builder /tmp/build/hyperkube /usr/bin/ LABEL io.k8s.display-name="OpenShift Kubernetes Server Commands" \ io.k8s.description="OpenShift is a platform for developing, building, and deploying containerized applications." 
\ io.openshift.tags="openshift,hyperkube" \ - io.openshift.build.versions="kubernetes=1.16.0" + io.openshift.build.versions="kubernetes=1.16.2" diff --git a/vendor/OWNERS b/vendor/OWNERS deleted file mode 100644 index 6255d33c610a..000000000000 --- a/vendor/OWNERS +++ /dev/null @@ -1,3 +0,0 @@ -approvers: - - childsb # Storage - - knobunc # Network, Multi-cluster, Storage diff --git a/vendor/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/bitbucket.org/ww/goautoneg/autoneg.go index 648b38cb6546..1dd1cad64663 100644 --- a/vendor/bitbucket.org/ww/goautoneg/autoneg.go +++ b/vendor/bitbucket.org/ww/goautoneg/autoneg.go @@ -35,9 +35,8 @@ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - - */ + package goautoneg import ( @@ -53,16 +52,14 @@ type Accept struct { Params map[string]string } -// For internal use, so that we can use the sort interface -type accept_slice []Accept +// acceptSlice is defined to implement sort interface. 
+type acceptSlice []Accept -func (accept accept_slice) Len() int { - slice := []Accept(accept) +func (slice acceptSlice) Len() int { return len(slice) } -func (accept accept_slice) Less(i, j int) bool { - slice := []Accept(accept) +func (slice acceptSlice) Less(i, j int) bool { ai, aj := slice[i], slice[j] if ai.Q > aj.Q { return true @@ -76,63 +73,93 @@ func (accept accept_slice) Less(i, j int) bool { return false } -func (accept accept_slice) Swap(i, j int) { - slice := []Accept(accept) +func (slice acceptSlice) Swap(i, j int) { slice[i], slice[j] = slice[j], slice[i] } +func stringTrimSpaceCutset(r rune) bool { + return r == ' ' +} + +func nextSplitElement(s, sep string) (item string, remaining string) { + if index := strings.Index(s, sep); index != -1 { + return s[:index], s[index+1:] + } + return s, "" +} + // Parse an Accept Header string returning a sorted list // of clauses -func ParseAccept(header string) (accept []Accept) { - parts := strings.Split(header, ",") - accept = make([]Accept, 0, len(parts)) - for _, part := range parts { - part := strings.Trim(part, " ") +func ParseAccept(header string) acceptSlice { + partsCount := 0 + remaining := header + for len(remaining) > 0 { + partsCount++ + _, remaining = nextSplitElement(remaining, ",") + } + accept := make(acceptSlice, 0, partsCount) - a := Accept{} - a.Params = make(map[string]string) - a.Q = 1.0 + remaining = header + var part string + for len(remaining) > 0 { + part, remaining = nextSplitElement(remaining, ",") + part = strings.TrimFunc(part, stringTrimSpaceCutset) + + a := Accept{ + Q: 1.0, + } - mrp := strings.Split(part, ";") + sp, remainingPart := nextSplitElement(part, ";") - media_range := mrp[0] - sp := strings.Split(media_range, "/") - a.Type = strings.Trim(sp[0], " ") + sp0, spRemaining := nextSplitElement(sp, "/") + a.Type = strings.TrimFunc(sp0, stringTrimSpaceCutset) switch { - case len(sp) == 1 && a.Type == "*": - a.SubType = "*" - case len(sp) == 2: - a.SubType = strings.Trim(sp[1], 
" ") + case len(spRemaining) == 0: + if a.Type == "*" { + a.SubType = "*" + } else { + continue + } default: - continue + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "/") + if len(spRemaining) > 0 { + continue + } + a.SubType = strings.TrimFunc(sp1, stringTrimSpaceCutset) } - if len(mrp) == 1 { + if len(remainingPart) == 0 { accept = append(accept, a) continue } - for _, param := range mrp[1:] { - sp := strings.SplitN(param, "=", 2) - if len(sp) != 2 { + a.Params = make(map[string]string) + for len(remainingPart) > 0 { + sp, remainingPart = nextSplitElement(remainingPart, ";") + sp0, spRemaining = nextSplitElement(sp, "=") + if len(spRemaining) == 0 { + continue + } + var sp1 string + sp1, spRemaining = nextSplitElement(spRemaining, "=") + if len(spRemaining) != 0 { continue } - token := strings.Trim(sp[0], " ") + token := strings.TrimFunc(sp0, stringTrimSpaceCutset) if token == "q" { - a.Q, _ = strconv.ParseFloat(sp[1], 32) + a.Q, _ = strconv.ParseFloat(sp1, 32) } else { - a.Params[token] = strings.Trim(sp[1], " ") + a.Params[token] = strings.TrimFunc(sp1, stringTrimSpaceCutset) } } accept = append(accept, a) } - slice := accept_slice(accept) - sort.Sort(slice) - - return + sort.Sort(accept) + return accept } // Negotiate the most appropriate content_type given the accept header diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go index c292db0ce07e..ed749d4c51cf 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr.go @@ -44,7 +44,7 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { } return &net.IPNet{ - IP: insertNumIntoIP(ip, num, newPrefixLen), + IP: insertNumIntoIP(ip, big.NewInt(int64(num)), newPrefixLen), Mask: net.CIDRMask(newPrefixLen, addrLen), }, nil } @@ -56,19 +56,23 @@ func Subnet(base *net.IPNet, newBits int, num int) (*net.IPNet, error) { func Host(base 
*net.IPNet, num int) (net.IP, error) { ip := base.IP mask := base.Mask + bigNum := big.NewInt(int64(num)) parentLen, addrLen := mask.Size() hostLen := addrLen - parentLen - maxHostNum := uint64(1< maxHostNum { + if numUint64.Cmp(maxHostNum) == 1 { return nil, fmt.Errorf("prefix of %d does not accommodate a host numbered %d", parentLen, num) } var bitlength int @@ -77,7 +81,7 @@ func Host(base *net.IPNet, num int) (net.IP, error) { } else { bitlength = 128 } - return insertNumIntoIP(ip, num, bitlength), nil + return insertNumIntoIP(ip, bigNum, bitlength), nil } // AddressRange returns the first and last addresses in the given CIDR range. @@ -129,7 +133,11 @@ func VerifyNoOverlap(subnets []*net.IPNet, CIDRBlock *net.IPNet) error { if !CIDRBlock.Contains(firstLastIP[i][0]) || !CIDRBlock.Contains(firstLastIP[i][1]) { return fmt.Errorf("%s does not fully contain %s", CIDRBlock.String(), s.String()) } - for j := i + 1; j < len(subnets); j++ { + for j := 0; j < len(subnets); j++ { + if i == j { + continue + } + first := firstLastIP[j][0] last := firstLastIP[j][1] if s.Contains(first) || s.Contains(last) { diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go index 64fdce8e1d13..98ede1f9c883 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/cidr_test.go @@ -139,6 +139,16 @@ func TestHost(t *testing.T) { Num: -5, Error: true, // 4 address (0-3) in 2 bits; cannot accomodate 5 }, + Case{ + Range: "fd9d:bc11:4020::/64", + Num: 2, + Output: "fd9d:bc11:4020::2", + }, + Case{ + Range: "fd9d:bc11:4020::/64", + Num: -2, + Output: "fd9d:bc11:4020:0:ffff:ffff:ffff:fffe", + }, } for _, testCase := range cases { @@ -397,6 +407,15 @@ func TestVerifyNetowrk(t *testing.T) { "192.168.12.128/26", }, }, + &testVerifyNetwork{ + CIDRBlock: "10.42.0.0/24", + CIDRList: []string{ + + "10.42.0.16/28", + "10.42.0.32/28", + "10.42.0.0/24", + }, + }, 
} for _, tc := range testCases { diff --git a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go index 861a5f623d73..e5e6a2cf91a1 100644 --- a/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go +++ b/vendor/github.com/apparentlymart/go-cidr/cidr/wrangling.go @@ -29,9 +29,8 @@ func intToIP(ipInt *big.Int, bits int) net.IP { return net.IP(ret) } -func insertNumIntoIP(ip net.IP, num int, prefixLen int) net.IP { +func insertNumIntoIP(ip net.IP, bigNum *big.Int, prefixLen int) net.IP { ipInt, totalBits := ipToInt(ip) - bigNum := big.NewInt(int64(num)) bigNum.Lsh(bigNum, uint(totalBits-prefixLen)) ipInt.Or(ipInt, bigNum) return intToIP(ipInt, totalBits) diff --git a/vendor/github.com/docker/libtrust/README.md b/vendor/github.com/docker/libtrust/README.md index 8e7db38186e6..dcffb31ae4a7 100644 --- a/vendor/github.com/docker/libtrust/README.md +++ b/vendor/github.com/docker/libtrust/README.md @@ -1,5 +1,9 @@ # libtrust +> **WARNING** this library is no longer actively developed, and will be integrated +> in the [docker/distribution][https://www.github.com/docker/distribution] +> repository in future. + Libtrust is library for managing authentication and authorization using public key cryptography. Authentication is handled using the identity attached to the public key. diff --git a/vendor/github.com/docker/libtrust/util.go b/vendor/github.com/docker/libtrust/util.go index d88176cc3d59..a5a101d3f117 100644 --- a/vendor/github.com/docker/libtrust/util.go +++ b/vendor/github.com/docker/libtrust/util.go @@ -152,7 +152,7 @@ func NewIdentityAuthTLSClientConfig(dockerUrl string, trustUnknownHosts bool, ro } // joseBase64UrlEncode encodes the given data using the standard base64 url -// encoding format but with all trailing '=' characters ommitted in accordance +// encoding format but with all trailing '=' characters omitted in accordance // with the jose specification. 
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2 func joseBase64UrlEncode(b []byte) string { diff --git a/vendor/github.com/go-openapi/loads/.drone.sec b/vendor/github.com/go-openapi/loads/.drone.sec deleted file mode 100644 index 6d3e8439931b..000000000000 --- a/vendor/github.com/go-openapi/loads/.drone.sec +++ /dev/null @@ -1 +0,0 @@ -eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.xUjixvmMMeampw0Doyr_XLvcV5ICmDgDFmlcWqgmO84O3Hwn6dqbMkwOjpKMOyEJW_98b5Om5ED59IFt2S0T_OarlrsJL8jOK5fqxSMNXy2w8LfI-e5l1URverW41ofAVK8m9wK05q2BSJM_M6PyyODaQeDBiCVK1HreMZBlXpuUDVtBMPILQoga0eSZOsTR3DYEpZIS0A0Rsa5yIhMYR5d5-JMYqbqOCB7tNJ-BM83OzYgL7Hrz0J15kqaJmhQ-GJoMJDzOemSO9KxLCOfSPp11R_G3Mfd48xYnuiRuPOTakbOCLxuYviH6uoGVIOhnMyY9qKiDKbOn4BQUi1-igA.6qjQzq9nzAxRRKV_.z79R5cMFAEuEaAh6U9ykiL8oIqzMbs_I2C-hSFRh3HYRJ4fTB-9LrcbF0uASIOq7bBn4OQzW-0QFwYOs1uaawmrByGngV5d0afiZf_LBKcmTF2vtxRi_A_nxD-EHoPmh3lKBU5WNDe_8kLjEeS89HeyyFPuv5iQbqhzdqPFohHKVigwVqVYYLjB8GWQ4t7tC4c8l5rHanaXf71W0e3op2m8bebpZL0JPGhnULVA1oU27TYeLsO112JkIYtBwZxzvAs--bBFoKeGJWVMFzrKN68UACGZ9RFw0uGJbBmVC4-jRuIc6XpqeEqw3KG-rjFzkeEor3575qW-8kiXYqpub9SFUc3SSZkxJ8hB3SrnMBOuDUSenrXNpAbltmV3KAALzN3_bMBQuihwSRIn0Hg7-Dpni8BieMe44RMDvRu6p_71aeU_KW4V7Umy_h8gpIvQFuKGdTQH2ahsyCXL0ojqjMbVMdoWpDQTQ2_Fy8Qt_p2kJ8BgDo-1Akd4a6BNU2NGqsdnrJmtVKcTqLBadf9ylCwxHdGVrtNYORALSms2T6Q1s-poQnMjIwN8lnUD8ABUBpt4uVtrYkiWPVwrwywLQeiHhR-pboe_53kWDAx4Hy4rpbKsaxanYhy_bEbAYKb3aIUA.75GD4kRBCQdcGFYP1QYdCg \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/.drone.yml b/vendor/github.com/go-openapi/loads/.drone.yml deleted file mode 100644 index 982291035389..000000000000 --- a/vendor/github.com/go-openapi/loads/.drone.yml +++ /dev/null @@ -1,39 +0,0 @@ -clone: - path: github.com/go-openapi/loads - -matrix: - GO_VERSION: - - "1.6" - -build: - integration: - image: golang:$$GO_VERSION - pull: true - environment: - GOCOVMODE: "count" - commands: - - go get -u github.com/axw/gocov/gocov - - go get -u gopkg.in/matm/v1/gocov-html - - 
go get -u github.com/cee-dub/go-junit-report - - go get -u github.com/stretchr/testify/assert - - go get -u gopkg.in/yaml.v2 - - go get -u github.com/go-openapi/swag - - go get -u github.com/go-openapi/analysis - - go get -u github.com/go-openapi/spec - - ./hack/build-drone.sh - -notify: - slack: - channel: bots - webhook_url: $$SLACK_URL - username: drone - -publish: - coverage: - server: https://coverage.vmware.run - token: $$GITHUB_TOKEN - # threshold: 70 - # must_increase: true - when: - matrix: - GO_VERSION: "1.6" diff --git a/vendor/github.com/go-openapi/loads/.golangci.yml b/vendor/github.com/go-openapi/loads/.golangci.yml new file mode 100644 index 000000000000..1932914e6d1a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/.golangci.yml @@ -0,0 +1,22 @@ +linters-settings: + govet: + check-shadowing: true + golint: + min-confidence: 0 + gocyclo: + min-complexity: 30 + maligned: + suggest-new: true + dupl: + threshold: 100 + goconst: + min-len: 2 + min-occurrences: 4 + +linters: + enable-all: true + disable: + - maligned + - lll + - gochecknoglobals + - gochecknoinits diff --git a/vendor/github.com/go-openapi/loads/.travis.yml b/vendor/github.com/go-openapi/loads/.travis.yml index b0d357e659f9..8a7e05d911c6 100644 --- a/vendor/github.com/go-openapi/loads/.travis.yml +++ b/vendor/github.com/go-openapi/loads/.travis.yml @@ -1,16 +1,15 @@ -language: go -go: -- 1.8 -install: -- go get -u github.com/stretchr/testify -- go get -u github.com/go-openapi/analysis -- go get -u github.com/go-openapi/spec -- go get -u github.com/go-openapi/swag -- go get -u gopkg.in/yaml.v2 -script: -- ./hack/coverage after_success: - bash <(curl -s https://codecov.io/bash) +go: +- 1.11.x +- 1.12.x +install: +- GO111MODULE=off go get -u gotest.tools/gotestsum +env: +- GO111MODULE=on +language: go notifications: slack: secure: 
OxkPwVp35qBTUilgWC8xykSj+sGMcj0h8IIOKD+Rflx2schZVlFfdYdyVBM+s9OqeOfvtuvnR9v1Ye2rPKAvcjWdC4LpRGUsgmItZaI6Um8Aj6+K9udCw5qrtZVfOVmRu8LieH//XznWWKdOultUuniW0MLqw5+II87Gd00RWbCGi0hk0PykHe7uK+PDA2BEbqyZ2WKKYCvfB3j+0nrFOHScXqnh0V05l2E83J4+Sgy1fsPy+1WdX58ZlNBG333ibaC1FS79XvKSmTgKRkx3+YBo97u6ZtUmJa5WZjf2OdLG3KIckGWAv6R5xgxeU31N0Ng8L332w/Edpp2O/M2bZwdnKJ8hJQikXIAQbICbr+lTDzsoNzMdEIYcHpJ5hjPbiUl3Bmd+Jnsjf5McgAZDiWIfpCKZ29tPCEkVwRsOCqkyPRMNMzHHmoja495P5jR+ODS7+J8RFg5xgcnOgpP9D4Wlhztlf5WyZMpkLxTUD+bZq2SRf50HfHFXTkfq22zPl3d1eq0yrLwh/Z/fWKkfb6SyysROL8y6s8u3dpFX1YHSg0BR6i913h4aoZw9B2BG27cafLLTwKYsp2dFo1PWl4O6u9giFJIeqwloZHLKKrwh0cBFhB7RH0I58asxkZpCH6uWjJierahmHe7iS+E6i+9oCHkOZ59hmCYNimIs3hM= +script: +- gotestsum -f short-verbose -- -race -timeout=20m -coverprofile=coverage.txt -covermode=atomic ./... diff --git a/vendor/github.com/go-openapi/loads/README.md b/vendor/github.com/go-openapi/loads/README.md index 6dbb8342e16f..071cf69ab97c 100644 --- a/vendor/github.com/go-openapi/loads/README.md +++ b/vendor/github.com/go-openapi/loads/README.md @@ -1,5 +1,7 @@ # Loads OAI specs [![Build Status](https://travis-ci.org/go-openapi/loads.svg?branch=master)](https://travis-ci.org/go-openapi/loads) [![codecov](https://codecov.io/gh/go-openapi/loads/branch/master/graph/badge.svg)](https://codecov.io/gh/go-openapi/loads) [![Slack Status](https://slackin.goswagger.io/badge.svg)](https://slackin.goswagger.io) [![license](http://img.shields.io/badge/license-Apache%20v2-orange.svg)](https://raw.githubusercontent.com/go-openapi/loads/master/LICENSE) [![GoDoc](https://godoc.org/github.com/go-openapi/loads?status.svg)](http://godoc.org/github.com/go-openapi/loads) +[![GolangCI](https://golangci.com/badges/github.com/go-openapi/loads.svg)](https://golangci.com) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-openapi/loads)](https://goreportcard.com/report/github.com/go-openapi/loads) -Loading of OAI specification documents from local or remote locations. 
+Loading of OAI specification documents from local or remote locations. Supports JSON and YAML documents. diff --git a/vendor/github.com/go-openapi/loads/doc.go b/vendor/github.com/go-openapi/loads/doc.go new file mode 100644 index 000000000000..3046da4cef39 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/doc.go @@ -0,0 +1,21 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package loads provides document loading methods for swagger (OAI) specifications. + +It is used by other go-openapi packages to load and run analysis on local or remote spec documents. 
+ +*/ +package loads diff --git a/vendor/github.com/go-openapi/loads/fixtures/bugs/1816/fixture-1816.yaml b/vendor/github.com/go-openapi/loads/fixtures/bugs/1816/fixture-1816.yaml new file mode 100644 index 000000000000..df87988bb3d2 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/bugs/1816/fixture-1816.yaml @@ -0,0 +1,558 @@ +swagger: '2.0' +info: + version: "1.0.0" + title: Service Manager + +schemes: + - http + +produces: + - application/vnd.cia.v1+json + +tags: + - name: services + +basePath: /v1 + +paths: + /services: + parameters: + - name: "X-Request-Id" + in: "header" + description: Unique identifier associated with request + required: true + type: string + format: uuid + get: + tags: [services] + operationId: searchServices + summary: Search for Services + description: | + To apply filter criteria use the following approach: + + `?` + + Example: `?name=Test` + + parameters: + - name: name + in: query + description: Search records by name + required: false + type: string + minLength: 3 + - name: version + in: query + description: Search records by version + required: false + type: string + minLength: 3 + - name: location + in: query + description: Search records by location + required: false + type: string + minLength: 3 + - name: region + in: query + description: Search records by region + required: false + type: string + minLength: 3 + responses: + 200: + description: List of Services + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + type: array + items: + $ref: "#/definitions/service_view" + 400: + $ref: "#/responses/400" + 401: + $ref: '#/responses/401' + 403: + $ref: '#/responses/403' + default: + $ref: '#/responses/500' + + /services/{serviceId}: + parameters: + - name: serviceId + description: Unique identifier for Service + in: path + required: true + type: string + format: uuid + - name: "X-Request-Id" + in: "header" + description: Unique identifier 
associated with request + required: true + type: string + format: uuid + get: + tags: [services] + operationId: viewService + summary: View Service + description: View Service + responses: + 200: + description: Service + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/service_view" + 400: + $ref: "#/responses/400" + 401: + $ref: '#/responses/401' + 403: + $ref: '#/responses/403' + 404: + $ref: '#/responses/404' + default: + $ref: '#/responses/500' + + /services/{serviceId}/units: + parameters: + - name: serviceId + description: Unique identifier for Service + in: path + required: true + type: string + format: uuid + - name: "X-Request-Id" + in: "header" + description: Unique identifier associated with request + required: true + type: string + format: uuid + get: + tags: [services] + operationId: searchUnits + summary: Search for Service Units + description: | + To apply filter criteria use the following approach: + + `?` + + Example: `?name=Test` + + parameters: + - name: name + in: query + description: Search records by name + required: false + type: string + minLength: 3 + - name: type + in: query + description: Search records by type + required: false + type: string + minLength: 3 + responses: + 200: + description: List of Service Units + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + type: array + items: + $ref: "#/definitions/unit_view" + 400: + $ref: "#/responses/400" + 401: + $ref: '#/responses/401' + 403: + $ref: '#/responses/403' + 404: + $ref: '#/responses/404' + default: + $ref: '#/responses/500' + + + /services/{serviceId}/units/{unitId}: + parameters: + - name: serviceId + description: Unique identifier for Service + in: path + required: true + type: string + format: uuid + - name: unitId + description: Unique identifier for Service Unit + in: path + required: true + type: string + 
format: uuid + - name: "X-Request-Id" + in: "header" + description: Unique identifier associated with request + required: true + type: string + format: uuid + get: + tags: [services] + operationId: viewUnit + summary: View Service Unit + description: View Service Unit + responses: + 200: + description: Service Unit + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/unit_view" + 400: + $ref: "#/responses/400" + 401: + $ref: '#/responses/401' + 403: + $ref: '#/responses/403' + 404: + $ref: '#/responses/404' + default: + $ref: '#/responses/500' + +definitions: + + service_view: + title: Service View + description: An order-able Service defined in Service Catalog. + type: object + properties: + id: + title: Service Identifier + description: unique identifier for the service + type: string + format: uuid + example: 3377f6e7-dd55-4639-bc54-65646fde2adc + name: + title: Service Name + description: name for the service + type: string + example: OpenStack Service + version: + title: Service Version + description: labeled variant of the service + type: string + example: Newton + tenant_id: + title: Service Providers Tenant + description: The Tenant identifier for the Tenant providing the service + type: string + format: uuid + example: 3377f6e7-dd55-4639-bc54-65646fde2adc + location: + title: Service Location + description: location of the service + type: string + example: ALLN + region: + title: Service Region + description: region of the service + type: string + example: US-EAST + status: + title: Service Status + description: status of the service + type: string + enum: [CREATING, ACTIVE, UPDATING, DELETING, INACTIVE, DELETED, FAILED] + example: ACTIVE + tags: + title: Service Tags + description: tags to identify service properties + type: array + items: + type: string + x-omitempty: true + example: [LA, ALPHA] + metadata: + title: Service Metadata + description: metadata of 
the service + type: object + additionalProperties: + type: string + audit: + $ref: "#/definitions/audit" + + unit_view: + title: Service Unit View + description: A Service Unit for the Location defined in Service Catalog. + type: object + properties: + id: + title: Service Unit Identifier + description: unique identifier for the service unit + type: string + format: uuid + example: 3377f6e7-dd55-4639-bc54-65646fde2adc + name: + title: Service Unit Name + description: name for the service unit + type: string + example: Example Service Unit + type: + title: Service Unit Type + description: type of the service unit + type: string + example: VCPU + description: + title: Service Unit Description + description: summary describing the service unit + type: string + example: Long description about the service unit + cost: + title: Service Unit Cost + description: cost for the service unit + type: number + format: double + example: 0.02 + period: + title: Service Unit Cost Period + description: cost period for the service unit + type: string + enum: [FLAT, HOURLY, MONTHLY] + example: HOURLY + cost_algorithm: + title: Service Unit Consumption Cost Algorithm + description: consumption cost algorithm for the service unit + type: string + enum: [SIMPLE_SUM, DAILY_MAX_AVG] + example: SIMPLE_SUM + quotas: + title: Service Unit Quotas + description: quotas defined for the service unit + type: object + additionalProperties: + $ref: '#/definitions/quota' + activated_at: + title: Service Unit Cost Activation Timestamp + description: > + service unit activation date (inclusive) as YYYY-MM-DD with respect to the billing period + always as the start of the billing period + type: string + example: 2018-02-01 + deactivated_at: + title: Service Unit Cost Deactivation Timestamp + description: > + service unit deactivation date (exclusive) as YYYY-MM-DD with respect to the billing period + - omitted if empty or the end of the billing period + type: string + example: 2018-03-01 + audit: + 
$ref: "#/definitions/audit" + + quota: + title: Service Unit Quota + description: quota details with values + type: object + properties: + min: + title: Minimum quota value + type: number + format: double + minimum: 0.0 + example: 1.0 + max: + title: Maximum quota value + type: number + format: double + minimum: 0.0 + example: 1.0 + default: + title: Default quota value + type: number + format: double + minimum: 0.0 + example: 1.0 + + audit: + title: Audit + description: Audit trail details + type: object + properties: + created_at: + title: Created At + description: timestamp when the record was created + type: string + format: date-time + readOnly: true + created_by: + title: Created By + description: entity that created the record + type: string + readOnly: true + updated_at: + title: Updated At + description: timestamp when the record was updated + type: string + format: date-time + readOnly: true + updated_by: + title: Updated By + description: entity that updated the record + type: string + readOnly: true + + error: + title: Error + description: | + An error provides information about why the request failed. + type: object + required: + - code + - message + properties: + code: + title: Code + description: | + Code identifies a specific type of error. + type: string + message: + title: Message + description: | + The message provides details about the failure. + type: string + +responses: + 400: + description: | + Invalid Request Error. Check error message in the response body for details. + examples: + application/json: + code: EXAMPLE-001 + message: Missing required attribute 'name'. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 401: + description: | + Authentication Failure + examples: + application/json: + code: EXAMPLE-002 + message: Authentication failed bad password. 
+ headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 402: + description: | + Insufficient Funds + examples: + application/json: + code: EXAMPLE-003 + message: Insufficient Funds to cover the cost of resource. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 403: + description: | + Authorization Failure + examples: + application/json: + code: EXAMPLE-004 + message: Not authorized to perform action. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 404: + description: | + Resource not found + examples: + application/json: + code: EXAMPLE-005 + message: Resource does not exist. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 409: + description: | + Conflict + examples: + application/json: + code: EXAMPLE-006 + message: Resource already exists. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 422: + description: | + Unprocessable Entity Failure + examples: + application/json: + code: EXAMPLE-007 + message: Resource is not able to be updated. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 500: + description: | + Internal Server Error. Check error entity in the response body for details. + examples: + application/json: + code: EXAMPLE-009 + message: Internal server error. 
+ headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" + 503: + description: | + Service Unavailable + examples: + application/json: + code: EXAMPLE-010 + message: Service Unavailable. + headers: + X-Request-Id: + description: Unique identifier associated with request + type: string + format: uuid + schema: + $ref: "#/definitions/error" diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml new file mode 100644 index 000000000000..dcaf3ff80617 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/1/2/3/4/swagger.yaml @@ -0,0 +1,42 @@ +swagger: '2.0' +info: + title: Something + contact: + name: Somebody + url: https://url.com + email: email@url.com + description: Something + version: v1 +host: security.sonusnet.com +schemes: +- https +- http +basePath: /api +produces: +- application/json +- plain/text + +paths: + /whatnot: + get: + description: Get something + responses: + 200: + description: The something + schema: + $ref: '#/definitions/Something' + 500: + description: Oops + +definitions: + Something: + description: A collection of service events + type: object + properties: + page: + $ref: '../../../../shared/definitions/page.yaml#/definitions/Page' + something: + #type: array + #description: An array of something + #items: + $ref: '../../../../shared/something.yaml#/definitions/Something' diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml new file mode 100644 index 000000000000..29355d42d163 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/definitions/page.yaml @@ -0,0 +1,18 @@ +definitions: + Page: + description: A description of a paged result + type: object 
+ properties: + page: + type: integer + description: the page that was requested + pages: + type: integer + description: the total number of pages available + total_items: + type: integer + description: the total number of items available + format: int64 + page_items: + type: integer + description: the number of items per page requested diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml new file mode 100644 index 000000000000..f5f2e391976c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/shared/something.yaml @@ -0,0 +1,44 @@ +swagger: '2.0' +info: + title: Something definitions + contact: + name: Somebody + url: https://url.com + email: email@url.com + description: Something + version: v1 +host: security.sonusnet.com +schemes: +- https +- http +basePath: /api/something/definitions +produces: +- application/json +- plain/text + +paths: + /shared: + get: + operationId: Get + tags: + - Shared + responses: + 200: + description: OK + schema: + properties: + name: + type: string + +definitions: + Something: + description: Something + type: object + properties: + p1: + type: string + description: A string + p2: + type: integer + description: An integer + diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml new file mode 100644 index 000000000000..7e6b1331ae0c --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/spec.yml @@ -0,0 +1,38 @@ +swagger: "2.0" +info: + version: 0.1.1 + title: test 1 + description: recursively following JSON references + contact: + name: Fred + +schemes: + - http + +consumes: + - application/json +produces: + - application/json + +paths: + /getAll: + get: + operationId: getAll + parameters: + - name: a + in: body + description: max number of results + required: false + schema: + 
$ref: '#/definitions/a' + responses: + '200': + description: Success + schema: + $ref: '#/definitions/b' + +definitions: + a: + type: string + b: + $ref: './test3-ter-model-schema.json#/definitions/b' \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json new file mode 100644 index 000000000000..eb446c76202a --- /dev/null +++ b/vendor/github.com/go-openapi/loads/fixtures/yaml/swagger/test3-ter-model-schema.json @@ -0,0 +1,14 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "./test3-model-schema.json", + "title": "test3-model-schema", + "description": "Test schema responses", + "definitions": { + "b": { + "type": "array", + "items": { + "type": "string" + } + } + } +} \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/fmts/yaml_test.go b/vendor/github.com/go-openapi/loads/fmts/yaml_test.go index e347805d31ff..30b9b4092297 100644 --- a/vendor/github.com/go-openapi/loads/fmts/yaml_test.go +++ b/vendor/github.com/go-openapi/loads/fmts/yaml_test.go @@ -27,10 +27,10 @@ import ( "github.com/stretchr/testify/assert" ) -type failJSONMarhal struct { +type failJSONMarshal struct { } -func (f failJSONMarhal) MarshalJSON() ([]byte, error) { +func (f failJSONMarshal) MarshalJSON() ([]byte, error) { return nil, errors.New("expected") } @@ -48,7 +48,7 @@ func TestLoadHTTPBytes(t *testing.T) { ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusOK) - rw.Write([]byte("the content")) + _, _ = rw.Write([]byte("the content")) })) defer ts2.Close() @@ -65,7 +65,7 @@ name: a string value 'y': some value ` var data yaml.MapSlice - yaml.Unmarshal([]byte(sd), &data) + _ = yaml.Unmarshal([]byte(sd), &data) d, err := YAMLToJSON(data) if assert.NoError(t, err) { @@ -84,7 +84,8 @@ name: a string value d, err = YAMLToJSON(data) 
assert.NoError(t, err) - assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value","tag":{"name":"tag name"}}`, string(d)) + assert.Equal(t, `{"1":"the int key value","name":"a string value","y":"some value","tag":{"name":"tag name"}}`, + string(d)) tag = yaml.MapSlice{{Key: true, Value: "bool tag name"}} data = append(data[:len(data)-1], yaml.MapItem{Key: "tag", Value: tag}) @@ -106,8 +107,9 @@ name: a string value assert.Error(t, err) assert.Nil(t, d) - // _, err := yamlToJSON(failJSONMarhal{}) - // assert.Error(t, err) + // test failure + _, err = YAMLToJSON(failJSONMarshal{}) + assert.Error(t, err) _, err = BytesToYAMLDoc([]byte("- name: hello\n")) assert.Error(t, err) @@ -142,7 +144,7 @@ func TestLoadStrategy(t *testing.T) { ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusNotFound) - rw.Write([]byte("\n")) + _, _ = rw.Write([]byte("\n")) })) defer ts2.Close() _, err = YAMLDoc(ts2.URL) @@ -151,7 +153,7 @@ func TestLoadStrategy(t *testing.T) { var yamlPestoreServer = func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusOK) - rw.Write([]byte(yamlPetStore)) + _, _ = rw.Write([]byte(yamlPetStore)) } func TestWithYKey(t *testing.T) { diff --git a/vendor/github.com/go-openapi/loads/go.mod b/vendor/github.com/go-openapi/loads/go.mod new file mode 100644 index 000000000000..e83c6ec30498 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.mod @@ -0,0 +1,9 @@ +module github.com/go-openapi/loads + +require ( + github.com/go-openapi/analysis v0.19.2 + github.com/go-openapi/spec v0.19.2 + github.com/go-openapi/swag v0.19.2 + github.com/stretchr/testify v1.3.0 + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/go-openapi/loads/go.sum b/vendor/github.com/go-openapi/loads/go.sum new file mode 100644 index 000000000000..b0658b2cd478 --- /dev/null +++ b/vendor/github.com/go-openapi/loads/go.sum @@ -0,0 +1,79 @@ +github.com/PuerkitoBio/purell 
v1.1.0/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= +github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M= +github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= +github.com/asaskevich/govalidator v0.0.0-20180720115003-f9ffefc3facf/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a h1:idn718Q4B6AGu/h5Sxe66HYVdqdGu2l9Iebqhi/AEoA= +github.com/asaskevich/govalidator v0.0.0-20190424111038-f61b66f89f4a/go.mod h1:lB+ZfQJz7igIIfQNfa7Ml4HSf2uFQQRzpGGRXenZAgY= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/globalsign/mgo v0.0.0-20180905125535-1ca0a4f7cbcb/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8 h1:DujepqpGd1hyOd7aW59XpK7Qymp8iy83xq74fLr21is= +github.com/globalsign/mgo v0.0.0-20181015135952-eeefdecb41b8/go.mod h1:xkRDCp4j0OGD1HRkm4kmhM+pmpv3AKq5SU7GMg4oO/Q= +github.com/go-openapi/analysis v0.0.0-20180825180245-b006789cd277/go.mod h1:k70tL6pCuVxPJOHXQ+wIac1FUrvNkHolPie/cLEU6hI= +github.com/go-openapi/analysis v0.19.0 h1:sYEyyO7OKQvJX0z4OyHWoGt0uLuALxB/ZJ4Jb3I6KNU= +github.com/go-openapi/analysis v0.19.0/go.mod h1:IowGgpVeD0vNm45So8nr+IcQ3pxVtpRoBWb8PVZO0ik= +github.com/go-openapi/analysis v0.19.2 h1:ophLETFestFZHk3ji7niPEL4d466QjW+0Tdg5VyDq7E= +github.com/go-openapi/analysis v0.19.2/go.mod h1:3P1osvZa9jKjb8ed2TPng3f0i/UY9snX6gxi44djMjk= +github.com/go-openapi/errors v0.17.0/go.mod 
h1:LcZQpmvG4wyF5j4IhA73wkLFQg+QJXOQHVjmcZxhka0= +github.com/go-openapi/errors v0.19.2 h1:a2kIyV3w+OS3S97zxUndRVD46+FhGOUBDFY7nmu4CsY= +github.com/go-openapi/errors v0.19.2/go.mod h1:qX0BLWsyaKfvhluLejVpVNwNRdXZhEbTA4kxxpKBC94= +github.com/go-openapi/jsonpointer v0.17.0/go.mod h1:cOnomiV+CVVwFLk0A/MExoFMjwdsUdVpsRhURCKh+3M= +github.com/go-openapi/jsonpointer v0.19.2 h1:A9+F4Dc/MCNB5jibxf6rRvOvR/iFgQdyNx9eIhnGqq0= +github.com/go-openapi/jsonpointer v0.19.2/go.mod h1:3akKfEdA7DF1sugOqz1dVQHBcuDBPKZGEoHC/NkiQRg= +github.com/go-openapi/jsonreference v0.17.0/go.mod h1:g4xxGn04lDIRh0GJb5QlpE3HfopLOL6uZrK/VgnsK9I= +github.com/go-openapi/jsonreference v0.19.2 h1:o20suLFB4Ri0tuzpWtyHlh7E7HnkqTNLq6aR6WVNS1w= +github.com/go-openapi/jsonreference v0.19.2/go.mod h1:jMjeRr2HHw6nAVajTXJ4eiUwohSTlpa0o73RUL1owJc= +github.com/go-openapi/loads v0.17.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/loads v0.19.0/go.mod h1:72tmFy5wsWx89uEVddd0RjRWPZm92WRLhf7AC+0+OOU= +github.com/go-openapi/spec v0.17.0/go.mod h1:XkF/MOi14NmjsfZ8VtAKf8pIlbZzyoTvZsdfssdxcBI= +github.com/go-openapi/spec v0.19.2 h1:SStNd1jRcYtfKCN7R0laGNs80WYYvn5CbBjM2sOmCrE= +github.com/go-openapi/spec v0.19.2/go.mod h1:sCxk3jxKgioEJikev4fgkNmwS+3kuYdJtcsZsD5zxMY= +github.com/go-openapi/strfmt v0.17.0/go.mod h1:P82hnJI0CXkErkXi8IKjPbNBM6lV6+5pLP5l494TcyU= +github.com/go-openapi/strfmt v0.19.0 h1:0Dn9qy1G9+UJfRU7TR8bmdGxb4uifB7HNrJjOnV0yPk= +github.com/go-openapi/strfmt v0.19.0/go.mod h1:+uW+93UVvGGq2qGaZxdDeJqSAqBqBdl+ZPMF/cC8nDY= +github.com/go-openapi/swag v0.17.0/go.mod h1:AByQ+nYG6gQg71GINrmuDXCPWdL640yX49/kXLo40Tg= +github.com/go-openapi/swag v0.19.2 h1:jvO6bCMBEilGwMfHhrd61zIID4oIFdwb76V17SM88dE= +github.com/go-openapi/swag v0.19.2/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= +github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/kr/pretty 
v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/pty v1.1.5/go.mod h1:9r2w37qlBe7rQ6e1fg1S/9xpWHSnaqNdHD3WcMdbPDA= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/mailru/easyjson v0.0.0-20180823135443-60711f1a8329/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63 h1:nTT4s92Dgz2HlrB2NaMgvlfqHH39OgMhA7z3PK7PGD4= +github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190611184440-5c40567a22f8/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/net v0.0.0-20181005035420-146acd28ed58/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= 
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190614205625-5aca471b1d59/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/go-openapi/loads/hack/build-drone.sh b/vendor/github.com/go-openapi/loads/hack/build-drone.sh deleted file mode 100755 index 52b1180cc9c5..000000000000 --- a/vendor/github.com/go-openapi/loads/hack/build-drone.sh +++ /dev/null @@ -1,25 +0,0 @@ -#!/bin/bash -set -e -o pipefail - - mkdir -p 
/drone/{testresults,coverage,dist} - go test -race -timeout 20m -v ./... | go-junit-report -dir /drone/testresults - -# Run test coverage on each subdirectories and merge the coverage profile. -echo "mode: ${GOCOVMODE-count}" > profile.cov - -# Standard go tooling behavior is to ignore dirs with leading underscores -# skip generator for race detection and coverage -for dir in $(go list ./...) -do - pth="$GOPATH/src/$dir" - go test -covermode=${GOCOVMODE-count} -coverprofile=${pth}/profile.out $dir - if [ -f $pth/profile.out ] - then - cat $pth/profile.out | tail -n +2 >> profile.cov - # rm $pth/profile.out - fi -done - -go tool cover -func profile.cov -gocov convert profile.cov | gocov report -gocov convert profile.cov | gocov-html > /drone/coverage/coverage-${CI_BUILD_NUM-"0"}.html \ No newline at end of file diff --git a/vendor/github.com/go-openapi/loads/hack/coverage b/vendor/github.com/go-openapi/loads/hack/coverage deleted file mode 100755 index b8e6dbd0c556..000000000000 --- a/vendor/github.com/go-openapi/loads/hack/coverage +++ /dev/null @@ -1,20 +0,0 @@ -#!/bin/bash -set -e -o pipefail - -# Run test coverage on each subdirectories and merge the coverage profile. -echo "mode: ${GOCOVMODE-atomic}" > coverage.txt - -# Standard go tooling behavior is to ignore dirs with leading underscores -# skip generator for race detection and coverage -for dir in $(go list ./...) 
-do - pth="$GOPATH/src/$dir" - go test -race -timeout 20m -covermode=${GOCOVMODE-atomic} -coverprofile=${pth}/profile.out $dir - if [ -f $pth/profile.out ] - then - cat $pth/profile.out | tail -n +2 >> coverage.txt - rm $pth/profile.out - fi -done - -go tool cover -func coverage.txt diff --git a/vendor/github.com/go-openapi/loads/json_test.go b/vendor/github.com/go-openapi/loads/json_test.go index 8b60eb19f4dc..6f8faf492ff7 100644 --- a/vendor/github.com/go-openapi/loads/json_test.go +++ b/vendor/github.com/go-openapi/loads/json_test.go @@ -32,7 +32,7 @@ func TestLoadJSON(t *testing.T) { ts2 := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusNotFound) - rw.Write([]byte("{}")) + _, _ = rw.Write([]byte("{}")) })) defer ts2.Close() _, err = JSONSpec(ts2.URL) @@ -41,7 +41,7 @@ func TestLoadJSON(t *testing.T) { var jsonPestoreServer = func(rw http.ResponseWriter, r *http.Request) { rw.WriteHeader(http.StatusOK) - rw.Write([]byte(petstoreJSON)) + _, _ = rw.Write([]byte(petstoreJSON)) } const petstoreJSON = `{ diff --git a/vendor/github.com/go-openapi/loads/spec.go b/vendor/github.com/go-openapi/loads/spec.go index 6d967389b11a..e4b4a3cf7638 100644 --- a/vendor/github.com/go-openapi/loads/spec.go +++ b/vendor/github.com/go-openapi/loads/spec.go @@ -16,12 +16,11 @@ package loads import ( "bytes" + "encoding/gob" "encoding/json" "fmt" "net/url" - "path/filepath" - "github.com/go-openapi/analysis" "github.com/go-openapi/spec" "github.com/go-openapi/swag" @@ -52,6 +51,10 @@ func init() { loaders = defaultLoader spec.PathLoader = loaders.Fn AddLoader(swag.YAMLMatcher, swag.YAMLDoc) + + gob.Register(map[string]interface{}{}) + gob.Register([]interface{}{}) + //gob.Register(spec.Refable{}) } // AddLoader for a document @@ -78,7 +81,7 @@ func JSONSpec(path string) (*Document, error) { return nil, err } // convert to json - return Analyzed(json.RawMessage(data), "") + return Analyzed(data, "") } // Document represents a 
swagger spec document @@ -92,6 +95,22 @@ type Document struct { raw json.RawMessage } +// Embedded returns a Document based on embedded specs. No analysis is required +func Embedded(orig, flat json.RawMessage) (*Document, error) { + var origSpec, flatSpec spec.Swagger + if err := json.Unmarshal(orig, &origSpec); err != nil { + return nil, err + } + if err := json.Unmarshal(flat, &flatSpec); err != nil { + return nil, err + } + return &Document{ + raw: orig, + origSpec: &origSpec, + spec: &flatSpec, + }, nil +} + // Spec loads a new spec document func Spec(path string) (*Document, error) { specURL, err := url.Parse(path) @@ -106,9 +125,9 @@ func Spec(path string) (*Document, error) { lastErr = err2 continue } - doc, err := Analyzed(b, "") - if err != nil { - return nil, err + doc, err3 := Analyzed(b, "") + if err3 != nil { + return nil, err3 } if doc != nil { doc.specFilePath = path @@ -162,8 +181,8 @@ func Analyzed(data json.RawMessage, version string) (*Document, error) { return nil, err } - origsqspec := new(spec.Swagger) - if err := json.Unmarshal(raw, origsqspec); err != nil { + origsqspec, err := cloneSpec(swspec) + if err != nil { return nil, err } @@ -186,10 +205,10 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { var expandOptions *spec.ExpandOptions if len(options) > 0 { - expandOptions = options[1] + expandOptions = options[0] } else { expandOptions = &spec.ExpandOptions{ - RelativeBase: filepath.Dir(d.specFilePath), + RelativeBase: d.specFilePath, } } @@ -198,11 +217,12 @@ func (d *Document) Expanded(options ...*spec.ExpandOptions) (*Document, error) { } dd := &Document{ - Analyzer: analysis.New(swspec), - spec: swspec, - schema: spec.MustLoadSwagger20Schema(), - raw: d.raw, - origSpec: d.origSpec, + Analyzer: analysis.New(swspec), + spec: swspec, + specFilePath: d.specFilePath, + schema: spec.MustLoadSwagger20Schema(), + raw: d.raw, + origSpec: d.origSpec, } return dd, nil } @@ -237,6 +257,7 @@ func (d *Document) Raw() 
json.RawMessage { return d.raw } +// OrigSpec yields the original spec func (d *Document) OrigSpec() *spec.Swagger { return d.origSpec } @@ -262,3 +283,16 @@ func (d *Document) Pristine() *Document { func (d *Document) SpecFilePath() string { return d.specFilePath } + +func cloneSpec(src *spec.Swagger) (*spec.Swagger, error) { + var b bytes.Buffer + if err := gob.NewEncoder(&b).Encode(src); err != nil { + return nil, err + } + + var dst spec.Swagger + if err := gob.NewDecoder(&b).Decode(&dst); err != nil { + return nil, err + } + return &dst, nil +} diff --git a/vendor/github.com/go-openapi/loads/spec_test.go b/vendor/github.com/go-openapi/loads/spec_test.go index 5c5e7ca4e22c..2f5b3586c021 100644 --- a/vendor/github.com/go-openapi/loads/spec_test.go +++ b/vendor/github.com/go-openapi/loads/spec_test.go @@ -1,7 +1,24 @@ +// Copyright 2015 go-swagger maintainers +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ package loads import ( "encoding/json" + "regexp" + "strconv" + "strings" "testing" "github.com/stretchr/testify/assert" @@ -32,13 +49,362 @@ func TestLoadsYAMLContent(t *testing.T) { } } +// for issue 11 +func TestRegressionExpand(t *testing.T) { + swaggerFile := "fixtures/yaml/swagger/1/2/3/4/swagger.yaml" + document, err := Spec(swaggerFile) + assert.NoError(t, err) + assert.NotNil(t, document) + d, err := document.Expanded() + assert.NoError(t, err) + assert.NotNil(t, d) + b, _ := d.Spec().MarshalJSON() + assert.JSONEq(t, expectedExpanded, string(b)) +} + +func TestCascadingRefExpand(t *testing.T) { + swaggerFile := "fixtures/yaml/swagger/spec.yml" + document, err := Spec(swaggerFile) + assert.NoError(t, err) + assert.NotNil(t, document) + d, err := document.Expanded() + assert.NoError(t, err) + assert.NotNil(t, d) + b, _ := d.Spec().MarshalJSON() + assert.JSONEq(t, cascadeRefExpanded, string(b)) +} + func TestFailsInvalidJSON(t *testing.T) { _, err := Analyzed(json.RawMessage([]byte("{]")), "") assert.Error(t, err) } -var YAMLSpec = `swagger: '2.0' +// issue go-swagger/go-swagger#1816 (regression when cloning original spec) +func TestIssue1846(t *testing.T) { + swaggerFile := "fixtures/bugs/1816/fixture-1816.yaml" + document, err := Spec(swaggerFile) + assert.NoError(t, err) + assert.NotNil(t, document) + + sp, err := cloneSpec(document.Spec()) + assert.NoError(t, err) + jazon, _ := json.MarshalIndent(sp, "", " ") + //t.Logf("%s", string(jazon)) + rex := regexp.MustCompile(`"\$ref":\s*"(.+)"`) + m := rex.FindAllStringSubmatch(string(jazon), -1) + if assert.NotNil(t, m) { + for _, matched := range m { + subMatch := matched[1] + if !assert.True(t, strings.HasPrefix(subMatch, "#/definitions") || strings.HasPrefix(subMatch, "#/responses"), + "expected $ref to point either to definitions or responses section, got: %s", matched[0]) { + t.FailNow() + } + } + } +} + +func BenchmarkAnalyzed(b *testing.B) { + d := []byte(`{ + "swagger": "2.0", + "info": { + "version": 
"1.0.0", + "title": "Swagger Petstore", + "contact": { + "name": "Wordnik API Team", + "url": "http://developer.wordnik.com" + }, + "license": { + "name": "Creative Commons 4.0 International", + "url": "http://creativecommons.org/licenses/by/4.0/" + } + }, + "host": "petstore.swagger.wordnik.com", + "basePath": "/api", + "schemes": [ + "http" + ], + "paths": { + "/pets": { + "get": { + "security": [ + { + "basic": [] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "getAllPets", + "parameters": [ + { + "name": "status", + "in": "query", + "description": "The status to filter by", + "type": "string" + }, + { + "name": "limit", + "in": "query", + "description": "The maximum number of results to return", + "type": "integer", + "format": "int64" + } + ], + "summary": "Finds all pets in the system", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "type": "array", + "items": { + "$ref": "#/definitions/Pet" + } + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "post": { + "security": [ + { + "basic": [] + } + ], + "tags": [ "Pet Operations" ], + "operationId": "createPet", + "summary": "Creates a new pet", + "consumes": ["application/x-yaml"], + "produces": ["application/x-yaml"], + "parameters": [ + { + "name": "pet", + "in": "body", + "description": "The Pet to create", + "required": true, + "schema": { + "$ref": "#/definitions/newPet" + } + } + ], + "responses": { + "200": { + "description": "Created Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + } + }`) + + for i := 0; i < 1000; i++ { + d = append(d, []byte(`, + "/pets/`)...) 
+ d = strconv.AppendInt(d, int64(i), 10) + d = append(d, []byte(`": { + "delete": { + "security": [ + { + "apiKey": [] + } + ], + "description": "Deletes the Pet by id", + "operationId": "deletePet", + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet to delete", + "required": true, + "type": "integer", + "format": "int64" + } + ], + "responses": { + "204": { + "description": "pet deleted" + }, + "default": { + "description": "unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "get": { + "tags": [ "Pet Operations" ], + "operationId": "getPetById", + "summary": "Finds the pet by id", + "responses": { + "200": { + "description": "Pet response", + "schema": { + "$ref": "#/definitions/Pet" + } + }, + "default": { + "description": "Unexpected error", + "schema": { + "$ref": "#/definitions/Error" + } + } + } + }, + "parameters": [ + { + "name": "id", + "in": "path", + "description": "ID of pet", + "required": true, + "type": "integer", + "format": "int64" + } + ] + }`)...) 
+ } + + d = append(d, []byte(` + }, + "definitions": { + "Category": { + "id": "Category", + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Pet": { + "id": "Pet", + "properties": { + "category": { + "$ref": "#/definitions/Category" + }, + "id": { + "description": "unique identifier for the pet", + "format": "int64", + "maximum": 100.0, + "minimum": 0.0, + "type": "integer" + }, + "name": { + "type": "string" + }, + "photoUrls": { + "items": { + "type": "string" + }, + "type": "array" + }, + "status": { + "description": "pet status in the store", + "enum": [ + "available", + "pending", + "sold" + ], + "type": "string" + }, + "tags": { + "items": { + "$ref": "#/definitions/Tag" + }, + "type": "array" + } + }, + "required": [ + "id", + "name" + ] + }, + "newPet": { + "anyOf": [ + { + "$ref": "#/definitions/Pet" + }, + { + "required": [ + "name" + ] + } + ] + }, + "Tag": { + "id": "Tag", + "properties": { + "id": { + "format": "int64", + "type": "integer" + }, + "name": { + "type": "string" + } + } + }, + "Error": { + "required": [ + "code", + "message" + ], + "properties": { + "code": { + "type": "integer", + "format": "int32" + }, + "message": { + "type": "string" + } + } + } + }, + "consumes": [ + "application/json", + "application/xml" + ], + "produces": [ + "application/json", + "application/xml", + "text/plain", + "text/html" + ], + "securityDefinitions": { + "basic": { + "type": "basic" + }, + "apiKey": { + "type": "apiKey", + "in": "header", + "name": "X-API-KEY" + } + } +} +`)...) 
+ rm := json.RawMessage(d) + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := Analyzed(rm, "") + if err != nil { + b.Fatal(err) + } + } +} + +const YAMLSpec = `swagger: '2.0' info: version: "1.0.0" @@ -499,3 +865,194 @@ const PetStore20 = `{ } } ` + +const expectedExpanded = ` +{ + "produces":[ + "application/json", + "plain/text" + ], + "schemes":[ + "https", + "http" + ], + "swagger":"2.0", + "info":{ + "description":"Something", + "title":"Something", + "contact":{ + "name":"Somebody", + "url":"https://url.com", + "email":"email@url.com" + }, + "version":"v1" + }, + "host":"security.sonusnet.com", + "basePath":"/api", + "paths":{ + "/whatnot":{ + "get":{ + "description":"Get something", + "responses":{ + "200":{ + "description":"The something", + "schema":{ + "description":"A collection of service events", + "type":"object", + "properties":{ + "page":{ + "description":"A description of a paged result", + "type":"object", + "properties":{ + "page":{ + "description":"the page that was requested", + "type":"integer" + }, + "page_items":{ + "description":"the number of items per page requested", + "type":"integer" + }, + "pages":{ + "description":"the total number of pages available", + "type":"integer" + }, + "total_items":{ + "description":"the total number of items available", + "type":"integer", + "format":"int64" + } + } + }, + "something":{ + "description":"Something", + "type":"object", + "properties":{ + "p1":{ + "description":"A string", + "type":"string" + }, + "p2":{ + "description":"An integer", + "type":"integer" + } + } + } + } + } + }, + "500":{ + "description":"Oops" + } + } + } + } + }, + "definitions":{ + "Something":{ + "description":"A collection of service events", + "type":"object", + "properties":{ + "page":{ + "description":"A description of a paged result", + "type":"object", + "properties":{ + "page":{ + "description":"the page that was requested", + "type":"integer" + }, + "page_items":{ + "description":"the number of items per page 
requested", + "type":"integer" + }, + "pages":{ + "description":"the total number of pages available", + "type":"integer" + }, + "total_items":{ + "description":"the total number of items available", + "type":"integer", + "format":"int64" + } + } + }, + "something":{ + "description":"Something", + "type":"object", + "properties":{ + "p1":{ + "description":"A string", + "type":"string" + }, + "p2":{ + "description":"An integer", + "type":"integer" + } + } + } + } + } + } +} +` + +const cascadeRefExpanded = ` +{ + "swagger": "2.0", + "consumes":[ + "application/json" + ], + "produces":[ + "application/json" + ], + "schemes":[ + "http" + ], + "info":{ + "description":"recursively following JSON references", + "title":"test 1", + "contact":{ + "name":"Fred" + }, + "version":"0.1.1" + }, + "paths":{ + "/getAll":{ + "get":{ + "operationId":"getAll", + "parameters":[ + { + "description":"max number of results", + "name":"a", + "in":"body", + "schema":{ + "type":"string" + } + } + ], + "responses":{ + "200":{ + "description":"Success", + "schema":{ + "type":"array", + "items":{ + "type":"string" + } + } + } + } + } + } + }, + "definitions":{ + "a":{ + "type":"string" + }, + "b":{ + "type":"array", + "items":{ + "type":"string" + } + } + } +} +` diff --git a/vendor/github.com/gonum/blas/README.md b/vendor/github.com/gonum/blas/README.md index 34ef3c53da3a..86e8971d75cc 100644 --- a/vendor/github.com/gonum/blas/README.md +++ b/vendor/github.com/gonum/blas/README.md @@ -88,9 +88,9 @@ Currently blas/cblas64 and blas/cblas128 require blas/cgo. ## Issues -If you find any bugs, feel free to file an issue on the github issue tracker. -Discussions on API changes, added features, code review, or similar requests -are preferred on the [gonum-dev Google Group](https://groups.google.com/forum/#!forum/gonum-dev). 
+If you find any bugs, feel free to file an issue on the github [issue tracker for gonum/gonum](https://github.com/gonum/gonum/issues) or [gonum/netlib for the CGO implementation](https://github.com/gonum/netlib/issues) if the bug exists in that reposity; no code changes will be made to this repository. Other discussions should be taken to the gonum-dev Google Group. + +https://groups.google.com/forum/#!forum/gonum-dev ## License diff --git a/vendor/github.com/gonum/blas/blas.go b/vendor/github.com/gonum/blas/blas.go index 6c14aac42e87..0ea4960da804 100644 --- a/vendor/github.com/gonum/blas/blas.go +++ b/vendor/github.com/gonum/blas/blas.go @@ -3,6 +3,9 @@ // license that can be found in the LICENSE file. /* +This repository is no longer maintained. +Development has moved to https://github.com/gonum/gonum. + Package blas provides interfaces for the BLAS linear algebra standard. All methods must perform appropriate parameter checking and panic if diff --git a/vendor/github.com/gonum/blas/blas32/blas32.go b/vendor/github.com/gonum/blas/blas32/blas32.go index 82971ea13436..746a2fa8fc02 100644 --- a/vendor/github.com/gonum/blas/blas32/blas32.go +++ b/vendor/github.com/gonum/blas/blas32/blas32.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package blas32 provides a simple interface to the float32 BLAS API. package blas32 diff --git a/vendor/github.com/gonum/blas/blas64/blas64.go b/vendor/github.com/gonum/blas/blas64/blas64.go index 088c30e97d0f..40841f5eca05 100644 --- a/vendor/github.com/gonum/blas/blas64/blas64.go +++ b/vendor/github.com/gonum/blas/blas64/blas64.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. 
+// Development has moved to https://github.com/gonum/gonum. +// // Package blas64 provides a simple interface to the float64 BLAS API. package blas64 diff --git a/vendor/github.com/gonum/blas/cblas128/cblas128.go b/vendor/github.com/gonum/blas/cblas128/cblas128.go index 60be9dd76c21..ca5d4cbbb19a 100644 --- a/vendor/github.com/gonum/blas/cblas128/cblas128.go +++ b/vendor/github.com/gonum/blas/cblas128/cblas128.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package cblas128 provides a simple interface to the complex128 BLAS API. package cblas128 diff --git a/vendor/github.com/gonum/blas/cblas64/cblas64.go b/vendor/github.com/gonum/blas/cblas64/cblas64.go index 08c7738c1670..695a0ddc39cc 100644 --- a/vendor/github.com/gonum/blas/cblas64/cblas64.go +++ b/vendor/github.com/gonum/blas/cblas64/cblas64.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package cblas64 provides a simple interface to the complex64 BLAS API. package cblas64 diff --git a/vendor/github.com/gonum/blas/cgo/doc.go b/vendor/github.com/gonum/blas/cgo/doc.go index 4e3efcf24006..1cfb30d2ef5f 100644 --- a/vendor/github.com/gonum/blas/cgo/doc.go +++ b/vendor/github.com/gonum/blas/cgo/doc.go @@ -7,6 +7,9 @@ // Ensure changes made to blas/cgo are reflected in blas/native where relevant. /* +This repository is no longer maintained. +Development has moved to https://github.com/gonum/gonum. + Package cgo provides bindings to a C BLAS library. This wrapper interface panics when the input arguments are invalid as per the standard, for example if a vector increment is zero. 
Please note that the treatment of NaN values diff --git a/vendor/github.com/gonum/blas/native/doc.go b/vendor/github.com/gonum/blas/native/doc.go index cb63fe776e27..a4f971b5f938 100644 --- a/vendor/github.com/gonum/blas/native/doc.go +++ b/vendor/github.com/gonum/blas/native/doc.go @@ -5,6 +5,9 @@ // Ensure changes made to blas/native are reflected in blas/cgo where relevant. /* +This repository is no longer maintained. +Development has moved to https://github.com/gonum/gonum. + Package native is a Go implementation of the BLAS API. This implementation panics when the input arguments are invalid as per the standard, for example if a vector increment is zero. Please note that the treatment of NaN values diff --git a/vendor/github.com/gonum/blas/native/internal/math32/math.go b/vendor/github.com/gonum/blas/native/internal/math32/math.go index b33401b98268..09c38d72f717 100644 --- a/vendor/github.com/gonum/blas/native/internal/math32/math.go +++ b/vendor/github.com/gonum/blas/native/internal/math32/math.go @@ -6,6 +6,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package math32 provides float32 versions of standard library math package // routines used by gonum/blas/native. package math32 diff --git a/vendor/github.com/gonum/blas/testblas/level1double.go b/vendor/github.com/gonum/blas/testblas/level1double.go index 9e16059108c1..b13d2dfc4c27 100644 --- a/vendor/github.com/gonum/blas/testblas/level1double.go +++ b/vendor/github.com/gonum/blas/testblas/level1double.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package testblas provides tests for blas implementations. 
package testblas diff --git a/vendor/github.com/gonum/floats/README.md b/vendor/github.com/gonum/floats/README.md index c7a8eb28ad29..a01a4b15ff26 100644 --- a/vendor/github.com/gonum/floats/README.md +++ b/vendor/github.com/gonum/floats/README.md @@ -6,7 +6,7 @@ package floats provides a set of helper routines for dealing with slices of floa ## Issues -If you find any bugs, feel free to file an issue on the github issue tracker. Discussions on API changes, added features, code review, or similar requests are preferred on the gonum-dev Google Group. +If you find any bugs, feel free to file an issue on the github [issue tracker for gonum/gonum](https://github.com/gonum/gonum/issues) if the bug exists in that reposity; no code changes will be made to this repository. Other discussions should be taken to the gonum-dev Google Group. https://groups.google.com/forum/#!forum/gonum-dev diff --git a/vendor/github.com/gonum/floats/floats.go b/vendor/github.com/gonum/floats/floats.go index e1807d2a1bd7..601eb42b6eb3 100644 --- a/vendor/github.com/gonum/floats/floats.go +++ b/vendor/github.com/gonum/floats/floats.go @@ -2,6 +2,9 @@ // Use of this code is governed by a BSD-style // license that can be found in the LICENSE file +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package floats provides a set of helper routines for dealing with slices // of float64. The functions avoid allocations to allow for use within tight // loops without garbage collection overhead. diff --git a/vendor/github.com/gonum/internal/README.md b/vendor/github.com/gonum/internal/README.md index 84ec056b26ce..c89e61207731 100644 --- a/vendor/github.com/gonum/internal/README.md +++ b/vendor/github.com/gonum/internal/README.md @@ -6,7 +6,7 @@ This is the set of internal packages for the Gonum project. ## Issues -If you find any bugs, feel free to file an issue on the github issue tracker. 
Discussions on API changes, added features, code review, or similar requests are preferred on the gonum-dev Google Group. +If you find any bugs, feel free to file an issue on the github [issue tracker for gonum/gonum](https://github.com/gonum/gonum/issues) if the bug exists in that reposity; no code changes will be made to this repository. Other dicussions should be taken to the gonum-dev Google Group. https://groups.google.com/forum/#!forum/gonum-dev diff --git a/vendor/github.com/gonum/internal/asm/c128/doc.go b/vendor/github.com/gonum/internal/asm/c128/doc.go index 1a03f2b85e18..4987830a3253 100644 --- a/vendor/github.com/gonum/internal/asm/c128/doc.go +++ b/vendor/github.com/gonum/internal/asm/c128/doc.go @@ -2,5 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package c128 provides complex128 vector primitives. package c128 diff --git a/vendor/github.com/gonum/internal/asm/c64/doc.go b/vendor/github.com/gonum/internal/asm/c64/doc.go index 9e524a89b9f4..47f5c7a6f8b5 100644 --- a/vendor/github.com/gonum/internal/asm/c64/doc.go +++ b/vendor/github.com/gonum/internal/asm/c64/doc.go @@ -2,5 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package c64 provides complex64 vector primitives. package c64 diff --git a/vendor/github.com/gonum/internal/asm/f32/doc.go b/vendor/github.com/gonum/internal/asm/f32/doc.go index e7e1d9f89eb1..142f7dc65c28 100644 --- a/vendor/github.com/gonum/internal/asm/f32/doc.go +++ b/vendor/github.com/gonum/internal/asm/f32/doc.go @@ -2,5 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. 
+// Development has moved to https://github.com/gonum/gonum. +// // Package f32 provides float32 vector primitives. package f32 diff --git a/vendor/github.com/gonum/internal/asm/f64/doc.go b/vendor/github.com/gonum/internal/asm/f64/doc.go index 7f1cc87c10a5..5a006cf5cff1 100644 --- a/vendor/github.com/gonum/internal/asm/f64/doc.go +++ b/vendor/github.com/gonum/internal/asm/f64/doc.go @@ -2,5 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package f64 provides float64 vector primitives. package f64 diff --git a/vendor/github.com/gonum/internal/binding/binding.go b/vendor/github.com/gonum/internal/binding/binding.go index 1b02a4bb3a9c..3b573948c5c8 100644 --- a/vendor/github.com/gonum/internal/binding/binding.go +++ b/vendor/github.com/gonum/internal/binding/binding.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package binding provides helpers for building autogenerated cgo bindings. package binding diff --git a/vendor/github.com/gonum/lapack/README.md b/vendor/github.com/gonum/lapack/README.md index ffc082412add..df63c74f73f4 100644 --- a/vendor/github.com/gonum/lapack/README.md +++ b/vendor/github.com/gonum/lapack/README.md @@ -51,7 +51,7 @@ The recommended (free) option for good performance on both linux and darwin is O ## Issues -If you find any bugs, feel free to file an issue on the github issue tracker. Discussions on API changes, added features, code review, or similar requests are preferred on the gonum-dev Google Group. 
+If you find any bugs, feel free to file an issue on the github [issue tracker for gonum/gonum](https://github.com/gonum/gonum/issues) or [gonum/netlib for the CGO implementation](https://github.com/gonum/netlib/issues) if the bug exists in that reposity; no code changes will be made to this repository. Other discussions should be taken to the gonum-dev Google Group. https://groups.google.com/forum/#!forum/gonum-dev diff --git a/vendor/github.com/gonum/lapack/cgo/lapack.go b/vendor/github.com/gonum/lapack/cgo/lapack.go index e99455bc60f7..f9f437aabacd 100644 --- a/vendor/github.com/gonum/lapack/cgo/lapack.go +++ b/vendor/github.com/gonum/lapack/cgo/lapack.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/netlib. +// // Package cgo provides an interface to bindings for a C LAPACK library. package cgo diff --git a/vendor/github.com/gonum/lapack/cgo/lapacke/generate_lapacke.go b/vendor/github.com/gonum/lapack/cgo/lapacke/generate_lapacke.go index 9d82f86a4d6b..7c99e90e3ab2 100644 --- a/vendor/github.com/gonum/lapack/cgo/lapacke/generate_lapacke.go +++ b/vendor/github.com/gonum/lapack/cgo/lapacke/generate_lapacke.go @@ -519,6 +519,9 @@ const handwritten = `// Code generated by "go generate github.com/gonum/lapack/c // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/netlib. +// // Package lapacke provides bindings to the LAPACKE C Interface to LAPACK. // // Links are provided to the NETLIB fortran implementation/dependencies for each function. 
diff --git a/vendor/github.com/gonum/lapack/cgo/lapacke/lapacke.go b/vendor/github.com/gonum/lapack/cgo/lapacke/lapacke.go index 31d6669d3347..dd40ab01f80f 100644 --- a/vendor/github.com/gonum/lapack/cgo/lapacke/lapacke.go +++ b/vendor/github.com/gonum/lapack/cgo/lapacke/lapacke.go @@ -4,6 +4,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/netlib. +// // Package lapacke provides bindings to the LAPACKE C Interface to LAPACK. // // Links are provided to the NETLIB fortran implementation/dependencies for each function. diff --git a/vendor/github.com/gonum/lapack/lapack.go b/vendor/github.com/gonum/lapack/lapack.go index 46b493df9e6d..7eb392271c03 100644 --- a/vendor/github.com/gonum/lapack/lapack.go +++ b/vendor/github.com/gonum/lapack/lapack.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. package lapack import "github.com/gonum/blas" diff --git a/vendor/github.com/gonum/lapack/lapack64/lapack64.go b/vendor/github.com/gonum/lapack/lapack64/lapack64.go index 619734fb1eb1..323ec94641af 100644 --- a/vendor/github.com/gonum/lapack/lapack64/lapack64.go +++ b/vendor/github.com/gonum/lapack/lapack64/lapack64.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package lapack64 provides a set of convenient wrapper functions for LAPACK // calls, as specified in the netlib standard (www.netlib.org). 
// diff --git a/vendor/github.com/gonum/lapack/native/doc.go b/vendor/github.com/gonum/lapack/native/doc.go index d622dc504a9b..6f28770b0632 100644 --- a/vendor/github.com/gonum/lapack/native/doc.go +++ b/vendor/github.com/gonum/lapack/native/doc.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package native is a pure-go implementation of the LAPACK API. The LAPACK API defines // a set of algorithms for advanced matrix operations. // diff --git a/vendor/github.com/gonum/lapack/testlapack/general.go b/vendor/github.com/gonum/lapack/testlapack/general.go index e9bf80b5a227..bcc0dca996f6 100644 --- a/vendor/github.com/gonum/lapack/testlapack/general.go +++ b/vendor/github.com/gonum/lapack/testlapack/general.go @@ -2,6 +2,8 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. package testlapack import ( diff --git a/vendor/github.com/gonum/matrix/README.md b/vendor/github.com/gonum/matrix/README.md index 980b7498b107..0c4b0c6425dd 100644 --- a/vendor/github.com/gonum/matrix/README.md +++ b/vendor/github.com/gonum/matrix/README.md @@ -6,10 +6,10 @@ This is a matrix package for the Go language. ## Issues -If you find any bugs, feel free to file an issue on the github issue tracker. Discussions on API changes, added features, code review, or similar requests are preferred on the gonum-dev Google Group. +If you find any bugs, feel free to file an issue on the github [issue tracker for gonum/gonum](https://github.com/gonum/gonum/issues) if the bug exists in that reposity; no code changes will be made to this repository. Other discussions should be taken to the gonum-dev Google Group. 
https://groups.google.com/forum/#!forum/gonum-dev ## License -Please see github.com/gonum/license for general license information, contributors, authors, etc on the Gonum suite of packages. +Please see github.com/gonum/gonum for general license information, contributors, authors, etc on the Gonum suite of packages. diff --git a/vendor/github.com/gonum/matrix/cmat128/doc.go b/vendor/github.com/gonum/matrix/cmat128/doc.go index c47b8ffd19c8..d14bfc03556f 100644 --- a/vendor/github.com/gonum/matrix/cmat128/doc.go +++ b/vendor/github.com/gonum/matrix/cmat128/doc.go @@ -6,6 +6,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package cmat128 provides implementations of complex128 matrix structures and // linear algebra operations on them. // diff --git a/vendor/github.com/gonum/matrix/conv/conv.go b/vendor/github.com/gonum/matrix/conv/conv.go index f3c85bf26c50..d7d9eb31b1f0 100644 --- a/vendor/github.com/gonum/matrix/conv/conv.go +++ b/vendor/github.com/gonum/matrix/conv/conv.go @@ -2,6 +2,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package conv provides matrix type interconversion utilities. package conv diff --git a/vendor/github.com/gonum/matrix/doc.go b/vendor/github.com/gonum/matrix/doc.go index c94f8df1e49f..65c296509869 100644 --- a/vendor/github.com/gonum/matrix/doc.go +++ b/vendor/github.com/gonum/matrix/doc.go @@ -6,6 +6,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. 
+// // Package matrix provides common error handling mechanisms for matrix operations // in mat64 and cmat128. // diff --git a/vendor/github.com/gonum/matrix/gendoc.go b/vendor/github.com/gonum/matrix/gendoc.go index f82b6b20c10d..b0cd2dde8ed8 100644 --- a/vendor/github.com/gonum/matrix/gendoc.go +++ b/vendor/github.com/gonum/matrix/gendoc.go @@ -25,6 +25,9 @@ var docs = template.Must(template.New("docs").Funcs(funcs).Parse(`{{define "comm // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package {{.Name}} provides {{.Provides}} // // Overview diff --git a/vendor/github.com/gonum/matrix/mat64/doc.go b/vendor/github.com/gonum/matrix/mat64/doc.go index 61335708a980..323a8542c72c 100644 --- a/vendor/github.com/gonum/matrix/mat64/doc.go +++ b/vendor/github.com/gonum/matrix/mat64/doc.go @@ -6,6 +6,9 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. +// This repository is no longer maintained. +// Development has moved to https://github.com/gonum/gonum. +// // Package mat64 provides implementations of float64 matrix structures and // linear algebra operations on them. 
// diff --git a/vendor/github.com/lestrrat-go/jspointer/.gitignore b/vendor/github.com/lestrrat-go/jspointer/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/lestrrat-go/jspointer/.travis.yml b/vendor/github.com/lestrrat-go/jspointer/.travis.yml new file mode 100644 index 000000000000..21e0a8e8d7c0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +go: + - 1.11 + - tip diff --git a/vendor/github.com/lestrrat-go/jspointer/LICENSE b/vendor/github.com/lestrrat-go/jspointer/LICENSE new file mode 100644 index 000000000000..20054b15434d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/jspointer/README.md b/vendor/github.com/lestrrat-go/jspointer/README.md new file mode 100644 index 000000000000..e1a4fbcd01b0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/README.md @@ -0,0 +1,34 @@ +# go-jspointer + +[![Build Status](https://travis-ci.org/lestrrat-go/jspointer.svg?branch=master)](https://travis-ci.org/lestrrat-go/jspointer) + +[![GoDoc](https://godoc.org/github.com/lestrrat-go/jspointer?status.svg)](https://godoc.org/github.com/lestrrat-go/jspointer) + +JSON pointer for Go + +# Features + +* Compile and match against Maps, Slices, Structs (or pointers to those) +* Set values in each of those + +# Usage + +```go +p, _ := jspointer.New(`/foo/bar/baz`) +result, _ := p.Get(someStruct) +``` + +# Credits + +This is almost a fork of https://github.com/xeipuuv/gojsonpointer. 
+ +# References + +| Name | Notes | +|:--------------------------------------------------------:|:---------------------------------| +| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | +| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | +| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | +| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | + + diff --git a/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go b/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go new file mode 100644 index 000000000000..3ffe29fb4ded --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/bench/bench_test.go @@ -0,0 +1,40 @@ +// +build bench + +package bench_test + +import ( + "encoding/json" + "testing" + + "github.com/lestrrat-go/jspointer" + "github.com/xeipuuv/gojsonpointer" +) + +const jsontxt = `{"a":[{"b": 1, "c": 2}], "d": 3}` + +var m map[string]interface{} + +func init() { + if err := json.Unmarshal([]byte(jsontxt), &m); err != nil { + panic(err) + } +} + +func BenchmarkGojsonpointer(b *testing.B) { + p, _ := gojsonpointer.NewJsonPointer(`/a/0/c`) + for i := 0; i < b.N; i++ { + res, kind, err := p.Get(m) + _ = res + _ = kind + _ = err + } +} + +func BenchmarkJspointer(b *testing.B) { + p, _ := jspointer.New(`/a/0/c`) + for i := 0; i < b.N; i++ { + res, err := p.Get(m) + _ = res + _ = err + } +} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/jspointer/interface.go b/vendor/github.com/lestrrat-go/jspointer/interface.go new file mode 100644 index 000000000000..7fe800233bb2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/interface.go @@ -0,0 +1,27 @@ +package jspointer + +import "errors" + +// Errors used in jspointer package +var ( + ErrInvalidPointer = errors.New("invalid pointer") + ErrCanNotSet = errors.New("field cannot be set to") + ErrSliceIndexOutOfBounds = errors.New("slice index 
out of bounds") +) + +// Consntants used in jspointer package. Mostly for internal usage only +const ( + EncodedTilde = "~0" + EncodedSlash = "~1" + Separator = '/' +) + +type ErrNotFound struct { + Ptr string +} + +// JSPointer represents a JSON pointer +type JSPointer struct { + raw string + tokens tokens +} diff --git a/vendor/github.com/lestrrat-go/jspointer/jspointer.go b/vendor/github.com/lestrrat-go/jspointer/jspointer.go new file mode 100644 index 000000000000..c42b9613f9a3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jspointer/jspointer.go @@ -0,0 +1,262 @@ +package jspointer + +import ( + "bytes" + "encoding/json" + "errors" + "reflect" + "strconv" + + "github.com/lestrrat-go/structinfo" +) + +type tokens struct { + s string + positions [][2]int +} + +func (t *tokens) size() int { + return len(t.positions) +} + +func (t *tokens) get(i int) string { + p := t.positions[i] + return t.s[p[0]:p[1]] +} + +// New creates a new JSON pointer for given path spec. If the path fails +// to be parsed, an error is returned +func New(path string) (*JSPointer, error) { + var p JSPointer + + if err := p.parse(path); err != nil { + return nil, err + } + p.raw = path + return &p, nil +} + +func (p *JSPointer) parse(s string) error { + if s == "" { + return nil + } + + if s[0] != Separator { + return ErrInvalidPointer + } + + if len(s) < 2 { + return ErrInvalidPointer + } + + ntokens := 0 + for i := 0; i < len(s); i++ { + if s[i] == '/' { + ntokens++ + } + } + + positions := make([][2]int, 0, ntokens) + start := 1 + var buf bytes.Buffer + buf.WriteByte(s[0]) + for i := 1; i < len(s); i++ { + switch s[i] { + case Separator: + buf.WriteByte(s[i]) + positions = append(positions, [2]int{start, buf.Len() - 1}) + start = i + 1 + case '~': + if len(s) == 1 { + buf.WriteByte(s[i]) + } else { + switch s[1] { + case '0': + buf.WriteByte('~') + case '1': + buf.WriteByte('/') + default: + buf.WriteByte(s[i]) + } + } + default: + buf.WriteByte(s[i]) + } + } + + if start < buf.Len() 
{ + positions = append(positions, [2]int{start, buf.Len()}) + } + + p.tokens.s = buf.String() + p.tokens.positions = positions + return nil +} + +// String returns the stringified version of this JSON pointer +func (p JSPointer) String() string { + return p.raw +} + +// Get applies the JSON pointer to the given item, and returns +// the result. +func (p JSPointer) Get(item interface{}) (interface{}, error) { + var ctx matchCtx + + ctx.raw = p.raw + ctx.tokens = &p.tokens + ctx.apply(item) + return ctx.result, ctx.err +} + +// Set applies the JSON pointer to the given item, and sets the +// value accordingly. +func (p JSPointer) Set(item interface{}, value interface{}) error { + var ctx matchCtx + + ctx.set = true + ctx.raw = p.raw + ctx.tokens = &p.tokens + ctx.setvalue = value + ctx.apply(item) + return ctx.err +} + +type matchCtx struct { + err error + raw string + result interface{} + set bool + setvalue interface{} + tokens *tokens +} + +func (e ErrNotFound) Error() string { + return "match to JSON pointer not found: " + e.Ptr +} + +type JSONGetter interface { + JSONGet(tok string) (interface{}, error) +} + +var strType = reflect.TypeOf("") +var zeroval reflect.Value + +func (c *matchCtx) apply(item interface{}) { + if c.tokens.size() == 0 { + c.result = item + return + } + + node := item + lastidx := c.tokens.size() - 1 + for i := 0; i < c.tokens.size(); i++ { + token := c.tokens.get(i) + + if getter, ok := node.(JSONGetter); ok { + x, err := getter.JSONGet(token) + if err != nil { + c.err = ErrNotFound{Ptr: c.raw} + return + } + if i == lastidx { + c.result = x + return + } + node = x + continue + } + v := reflect.ValueOf(node) + + // Does this thing implement a JSONGet? 
+ + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + + switch v.Kind() { + case reflect.Struct: + fn := structinfo.StructFieldFromJSONName(v, token) + if fn == "" { + c.err = ErrNotFound{Ptr: c.raw} + return + } + f := v.FieldByName(fn) + if i == lastidx { + if c.set { + if !f.CanSet() { + c.err = ErrCanNotSet + return + } + f.Set(reflect.ValueOf(c.setvalue)) + return + } + c.result = f.Interface() + return + } + node = f.Interface() + case reflect.Map: + var vt reflect.Value + // We shall try to inflate the token to its Go native + // type if it's not a string. In other words, try not to + // outdo yourselves. + if t := v.Type().Key(); t != strType { + vt = reflect.New(t).Elem() + if err := json.Unmarshal([]byte(token), vt.Addr().Interface()); err != nil { + name := t.PkgPath() + "." + t.Name() + if name == "" { + name = "(anonymous type)" + } + c.err = errors.New("unsupported conversion of string to " + name) + return + } + } else { + vt = reflect.ValueOf(token) + } + n := v.MapIndex(vt) + if zeroval == n { + c.err = ErrNotFound{Ptr: c.raw} + return + } + + if i == lastidx { + if c.set { + v.SetMapIndex(vt, reflect.ValueOf(c.setvalue)) + } else { + c.result = n.Interface() + } + return + } + + node = n.Interface() + case reflect.Slice: + m := node.([]interface{}) + wantidx, err := strconv.Atoi(token) + if err != nil { + c.err = err + return + } + + if wantidx < 0 || len(m) <= wantidx { + c.err = ErrSliceIndexOutOfBounds + return + } + + if i == lastidx { + if c.set { + m[wantidx] = c.setvalue + } else { + c.result = m[wantidx] + } + return + } + node = m[wantidx] + default: + c.err = ErrNotFound{Ptr: c.raw} + return + } + } + + // If you fell through here, there was a big problem + c.err = ErrNotFound{Ptr: c.raw} +} diff --git a/vendor/github.com/lestrrat-go/jspointer/jspointer_test.go b/vendor/github.com/lestrrat-go/jspointer/jspointer_test.go new file mode 100644 index 000000000000..50ba8e610d29 --- /dev/null +++ 
package jspointer_test

import (
	"encoding/json"
	"testing"

	"github.com/lestrrat-go/jspointer"
	"github.com/stretchr/testify/assert"
)

// src is the shared fixture document; the keys mirror the examples
// from RFC 6901 (escaped '/', '~', '%', quotes, empty key, etc.).
var src = `{
"foo": ["bar", "baz"],
"obj": { "a":1, "b":2, "c":[3,4], "d":[ {"e":9}, {"f":[50,51]} ] },
"": 0,
"a/b": 1,
"c%d": 2,
"e^f": 3,
"g|h": 4,
"i\\j": 5,
"k\"l": 6,
" ": 7,
"m~n": 8
}`

// target holds the decoded fixture, populated once in init.
var target map[string]interface{}

func init() {
	if err := json.Unmarshal([]byte(src), &target); err != nil {
		panic(err)
	}
}

// TestEscaping checks that pointers containing the RFC 6901 escapes
// "~1" and "~0" parse and round-trip through String() unchanged.
func TestEscaping(t *testing.T) {
	data := []string{
		`/a~1b`,
		`/m~0n`,
		`/a~1b/m~0n`,
	}
	for _, pat := range data {
		p, err := jspointer.New(pat)
		if !assert.NoError(t, err, "jspointer.New should succeed for '%s'", pat) {
			return
		}

		if !assert.Equal(t, pat, p.String(), "input pattern and generated expression should match") {
			return
		}
	}
}

// runmatch is a helper: parse pat and apply it to m via Get.
func runmatch(t *testing.T, pat string, m interface{}) (interface{}, error) {
	p, err := jspointer.New(pat)
	if !assert.NoError(t, err, "jspointer.New should succeed for '%s'", pat) {
		return nil, err
	}

	return p.Get(m)
}

// TestFullDocument: the empty pointer must resolve to the document itself.
func TestFullDocument(t *testing.T) {
	res, err := runmatch(t, ``, target)
	if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
		return
	}
	if !assert.Equal(t, res, target, "res should be equal to target") {
		return
	}
}

// TestGetObject resolves nested object/array paths; expected values are
// float64 because encoding/json decodes JSON numbers as float64.
func TestGetObject(t *testing.T) {
	pats := map[string]interface{}{
		`/obj/a`:       float64(1),
		`/obj/b`:       float64(2),
		`/obj/c/0`:     float64(3),
		`/obj/c/1`:     float64(4),
		`/obj/d/1/f/0`: float64(50),
	}
	for pat, expected := range pats {
		res, err := runmatch(t, pat, target)
		if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
			return
		}

		if !assert.Equal(t, res, expected, "res should be equal to expected") {
			return
		}
	}
}

// TestGetArray resolves numeric tokens against a JSON array.
func TestGetArray(t *testing.T) {
	foo := target["foo"].([]interface{})
	pats := map[string]interface{}{
		`/foo/0`: foo[0],
		`/foo/1`: foo[1],
	}
	for pat, expected := range pats {
		res, err := runmatch(t, pat, target)
		if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
			return
		}

		if !assert.Equal(t, res, expected, "res should be equal to expected") {
			return
		}
	}
}

// TestSet writes a value through a pointer and reads it back.
func TestSet(t *testing.T) {
	var m interface{}
	// NOTE(review): the Unmarshal error is deliberately ignored; the
	// literal below is known-valid JSON.
	json.Unmarshal([]byte(`{
"a": [{"b": 1, "c": 2}], "d": 3
}`), &m)

	p, err := jspointer.New(`/a/0/c`)
	if !assert.NoError(t, err, "jspointer.New should succeed") {
		return
	}

	if !assert.NoError(t, p.Set(m, 999), "jspointer.Set should succeed") {
		return
	}

	res, err := runmatch(t, `/a/0/c`, m)
	if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
		return
	}

	if !assert.Equal(t, res, 999, "res should be equal to expected") {
		return
	}
}

// TestStruct exercises reflection-based traversal of a Go struct,
// including a map with non-string (int) keys.
func TestStruct(t *testing.T) {
	var s struct {
		Foo  string                 `json:"foo"`
		Bar  map[string]interface{} `json:"bar"`
		Baz  map[int]int            `json:"baz"`
		quux int
	}

	s.Foo = "foooooo"
	s.Bar = map[string]interface{}{
		"a": 0,
		"b": 1,
	}
	s.Baz = map[int]int{
		2: 3,
	}

	res, err := runmatch(t, `/bar/b`, s)
	if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
		return
	}

	if !assert.Equal(t, res, 1, "res should be equal to expected value") {
		return
	}

	res, err = runmatch(t, `/baz/2`, s)
	if !assert.NoError(t, err, "jsonpointer.Get should succeed") {
		return
	}

	if !assert.Equal(t, res, 3, "res should be equal to expected value") {
		return
	}
}

# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

+_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/lestrrat-go/jsref/.travis.yml b/vendor/github.com/lestrrat-go/jsref/.travis.yml new file mode 100644 index 000000000000..2fbcb829a8e6 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +go: + - 1.11.x + - tip diff --git a/vendor/github.com/lestrrat-go/jsref/LICENSE b/vendor/github.com/lestrrat-go/jsref/LICENSE new file mode 100644 index 000000000000..20054b15434d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/vendor/github.com/lestrrat-go/jsref/README.md b/vendor/github.com/lestrrat-go/jsref/README.md new file mode 100644 index 000000000000..16a88c1aafa8 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/README.md @@ -0,0 +1,107 @@ +# go-jsref + +[![Build Status](https://travis-ci.org/lestrrat-go/jsref.svg?branch=master)](https://travis-ci.org/lestrrat-go/jsref) + +[![GoDoc](https://godoc.org/github.com/lestrrat-go/jsref?status.svg)](https://godoc.org/github.com/lestrrat-go/jsref) + +JSON Reference Implementation for Go + +# SYNOPSIS + +```go +package jsref_test + +import ( + "encoding/json" + "fmt" + "log" + + jsref "github.com/lestrrat-go/jsref" + "github.com/lestrrat-go/jsref/provider" +) + +func Example() { + var v interface{} + src := []byte(` +{ + "foo": ["bar", {"$ref": "#/sub"}, {"$ref": "obj2#/sub"}], + "sub": "baz" +}`) + if err := json.Unmarshal(src, &v); err != nil { + log.Printf("%s", err) + return + } + + // External reference + mp := provider.NewMap() + mp.Set("obj2", map[string]string{"sub": "quux"}) + + res := jsref.New() + res.AddProvider(mp) // Register the provider + + data := []struct { + Ptr string + Options []jsref.Option + }{ + { + Ptr: "#/foo/0", // "bar" + }, + { + Ptr: "#/foo/1", // "baz" + }, + { + Ptr: "#/foo/2", // "quux" (resolves via `mp`) + }, + { + Ptr: "#/foo", // ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] + }, + { + Ptr: "#/foo", // ["bar","baz","quux"] + // experimental option to resolve all resulting values + Options: []jsref.Option{ jsref.WithRecursiveResolution(true) }, + }, + } + for _, set := range data { + result, err := res.Resolve(v, set.Ptr, set.Options...) 
+ if err != nil { // failed to resolve + fmt.Printf("err: %s\n", err) + continue + } + b, _ := json.Marshal(result) + fmt.Printf("%s -> %s\n", set.Ptr, string(b)) + } + + // OUTPUT: + // #/foo/0 -> "bar" + // #/foo/1 -> "baz" + // #/foo/2 -> "quux" + // #/foo -> ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] + // #/foo -> ["bar","baz","quux"] +} +``` + +# Providers + +The Resolver object by default does not know how to resolve *any* reference: +You must provide it one or more `Provider`s to look for and resolve external references. + +Currently available `Provider`s are: + +| Name | Description | +|:--------------|:------------| +| provider.FS | Resolve from local file system. References must start with a `file:///` prefix | +| provider.Map | Resolve from in memory map. | +| provider.HTTP | Resolve by making HTTP requests. References must start with a `http(s?)://` prefix | + +# References + +| Name | Notes | +|:--------------------------------------------------------:|:---------------------------------| +| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | +| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | +| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | +| [go-jspointer](https://github.com/lestrrat-go/jspointer) | JSON Pointer implementations | + +# Acknowledgements + +* Boris Burtin diff --git a/vendor/github.com/lestrrat-go/jsref/interface.go b/vendor/github.com/lestrrat-go/jsref/interface.go new file mode 100644 index 000000000000..905a8c313e45 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/interface.go @@ -0,0 +1,23 @@ +package jsref + +import ( + "errors" + "net/url" + "reflect" +) + +var zeroval = reflect.Value{} + +var ErrMaxRecursion = errors.New("reached max number of recursions") + +// Resolver is responsible for interpreting the provided JSON +// reference. 
+type Resolver struct { + providers []Provider + MaxRecursions int +} + +// Provider resolves a URL into a ... thing. +type Provider interface { + Get(*url.URL) (interface{}, error) +} diff --git a/vendor/github.com/lestrrat-go/jsref/jsref.go b/vendor/github.com/lestrrat-go/jsref/jsref.go new file mode 100644 index 000000000000..107473062bf2 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/jsref.go @@ -0,0 +1,396 @@ +package jsref + +import ( + "net/url" + "reflect" + + "github.com/lestrrat-go/jspointer" + "github.com/lestrrat-go/pdebug" + "github.com/lestrrat-go/structinfo" + "github.com/pkg/errors" +) + +const ref = "$ref" +var refrv = reflect.ValueOf(ref) + +type Option interface { + Name() string + Value() interface{} +} + +type option struct { + name string + value interface{} +} + +func (o option) Name() string { return o.name } +func (o option) Value() interface{} { return o.value } + +// WithRecursiveResolution allows ou to enable recursive resolution +// on the *result* data structure. This means that after resolving +// the JSON reference in the structure at hand, it does another +// pass at resolving the entire data structure. Depending on your +// structure and size, this may incur significant cost. +// +// Please note that recursive resolution of the result is still +// experimental. If you find problems, please submit a pull request +// with a failing test case. +func WithRecursiveResolution(b bool) Option { + return &option{ + name: "recursiveResolution", + value: b, + } +} + +var DefaultMaxRecursions = 10 + +// New creates a new Resolver +func New() *Resolver { + return &Resolver{MaxRecursions: DefaultMaxRecursions} +} + +// AddProvider adds a new Provider to be searched for in case +// a JSON pointer with more than just the URI fragment is given. 
+func (r *Resolver) AddProvider(p Provider) error { + r.providers = append(r.providers, p) + return nil +} + +type resolveCtx struct { + rlevel int // recurse level + maxrlevel int // max recurse level + object interface{} // the main object that was passed to `Resolve()` +} + +// Resolve takes a target `v`, and a JSON pointer `spec`. +// spec is expected to be in the form of +// +// [scheme://[userinfo@]host/path[?query]]#fragment +// [scheme:opaque[?query]]#fragment +// +// where everything except for `#fragment` is optional. +// If the fragment is empty, an error is returned. +// +// If `spec` is the empty string, `v` is returned +// This method handles recursive JSON references. +// +// If `WithRecursiveResolution` option is given and its value is true, +// an attempt to resolve all references within the resulting object +// is made by traversing the structure recursively. Default is false +func (r *Resolver) Resolve(v interface{}, ptr string, options ...Option) (ret interface{}, err error) { + if pdebug.Enabled { + g := pdebug.Marker("Resolver.Resolve(%s)", ptr).BindError(&err) + defer g.End() + } + var recursiveResolution bool + for _, opt := range options { + switch opt.Name() { + case "recursiveResolution": + recursiveResolution = opt.Value().(bool) + } + } + + ctx := resolveCtx{ + rlevel: 0, + maxrlevel: r.MaxRecursions, + object: v, + } + + // First, expand the target as much as we can + v, err = expandRefRecursive(&ctx, r, v) + if err != nil { + return nil, errors.Wrap(err, "recursive search failed") + } + + result, err := evalptr(&ctx, r, v, ptr) + if err != nil { + return nil, err + } + + if recursiveResolution { + rv, err := traverseExpandRefRecursive(&ctx, r, reflect.ValueOf(result)) + if err != nil { + return nil, errors.Wrap(err, `failed to resolve result`) + } + result = rv.Interface() + } + + return result, nil +} + +func setPtrOrInterface(container, value reflect.Value) bool { + switch container.Kind() { + case reflect.Ptr: + if !value.CanAddr() 
{ + return false + } + container.Set(value.Addr()) + case reflect.Interface: + container.Set(value) + default: + return false + } + return true +} + +func traverseExpandRefRecursive(ctx *resolveCtx, r *Resolver, rv reflect.Value) (reflect.Value, error) { + if pdebug.Enabled { + g := pdebug.Marker("traverseExpandRefRecursive") + defer g.End() + } + + switch rv.Kind() { + case reflect.Ptr, reflect.Interface: + rv = rv.Elem() + } + + switch rv.Kind() { + case reflect.Array, reflect.Slice: + for i := 0; i < rv.Len(); i++ { + elem := rv.Index(i) + var elemcontainer reflect.Value + switch elem.Kind() { + case reflect.Ptr, reflect.Interface: + elemcontainer = elem + elem = elem.Elem() + } + + // Need to check for elem being Valid, otherwise the + // subsequent call to Interface() will fail + if !elem.IsValid() { + continue + } + + if elemcontainer.IsValid() { + if !elemcontainer.CanSet() { + continue + } + } + newv, err := expandRefRecursive(ctx, r, elem.Interface()) + if err != nil { + return zeroval, errors.Wrap(err, `failed to expand array/slice element`) + } + newrv, err := traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) + if err != nil { + return zeroval, errors.Wrap(err, `failed to recurse into array/slice element`) + } + + if elemcontainer.IsValid() { + setPtrOrInterface(elemcontainer, newrv) + } else { + elem.Set(newrv) + } + } + case reflect.Map: + // No refs found in the map keys, but there could be more + // in the values + if _, err := findRef(rv.Interface()); err != nil { + for _, key := range rv.MapKeys() { + value, err := traverseExpandRefRecursive(ctx, r, rv.MapIndex(key)) + if err != nil { + return zeroval, errors.Wrap(err, `failed to traverse map value`) + } + rv.SetMapIndex(key, value) + } + return rv, nil + } + newv, err := expandRefRecursive(ctx, r, rv.Interface()) + if err != nil { + return zeroval, errors.Wrap(err, `failed to expand map element`) + } + return traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) + case reflect.Struct: 
+ // No refs found in the map keys, but there could be more + // in the values + if _, err := findRef(rv.Interface()); err != nil { + for i := 0; i < rv.NumField(); i++ { + field := rv.Field(i) + value, err := traverseExpandRefRecursive(ctx, r, field) + if err != nil { + return zeroval, errors.Wrap(err, `failed to traverse struct field value`) + } + field.Set(value) + } + return rv, nil + } + newv, err := expandRefRecursive(ctx, r, rv.Interface()) + if err != nil { + return zeroval, errors.Wrap(err, `failed to expand struct element`) + } + return traverseExpandRefRecursive(ctx, r, reflect.ValueOf(newv)) + } + return rv, nil +} + +// expands $ref with in v, until all $refs are expanded. +// note: DOES NOT recurse down into structures +func expandRefRecursive(ctx *resolveCtx, r *Resolver, v interface{}) (ret interface{}, err error) { + if pdebug.Enabled { + g := pdebug.Marker("expandRefRecursive") + defer g.End() + } + for { + ref, err := findRef(v) + if err != nil { + if pdebug.Enabled { + pdebug.Printf("No refs found. 
bailing out of loop") + } + break + } + + if pdebug.Enabled { + pdebug.Printf("Found ref '%s'", ref) + } + + newv, err := expandRef(ctx, r, v, ref) + if err != nil { + if pdebug.Enabled { + pdebug.Printf("Failed to expand ref '%s': %s", ref, err) + } + return nil, errors.Wrap(err, "failed to expand ref") + } + + v = newv + } + + return v, nil +} + +func expandRef(ctx *resolveCtx, r *Resolver, v interface{}, ref string) (ret interface{}, err error) { + ctx.rlevel++ + if ctx.rlevel > ctx.maxrlevel { + return nil, ErrMaxRecursion + } + + defer func() { ctx.rlevel-- }() + + u, err := url.Parse(ref) + if err != nil { + return nil, errors.Wrap(err, "failed to parse ref as URL") + } + + ptr := "#" + u.Fragment + if u.Host == "" && u.Path == "" { + if pdebug.Enabled { + pdebug.Printf("ptr doesn't contain any host/path part, apply json pointer directly to object") + } + return evalptr(ctx, r, ctx.object, ptr) + } + + u.Fragment = "" + for _, p := range r.providers { + pv, err := p.Get(u) + if err == nil { + if pdebug.Enabled { + pdebug.Printf("Found object matching %s", u) + } + + return evalptr(ctx, r, pv, ptr) + } + } + + return nil, errors.New("element pointed by $ref '" + ref + "' not found") +} + +func findRef(v interface{}) (ref string, err error) { + if pdebug.Enabled { + g := pdebug.Marker("findRef").BindError(&err) + defer g.End() + } + + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Interface, reflect.Ptr: + rv = rv.Elem() + } + + if pdebug.Enabled { + pdebug.Printf("object is a '%s'", rv.Kind()) + } + + // Find if we have a "$ref" element + var refv reflect.Value + switch rv.Kind() { + case reflect.Map: + refv = rv.MapIndex(refrv) + case reflect.Struct: + if fn := structinfo.StructFieldFromJSONName(rv, ref); fn != "" { + refv = rv.FieldByName(fn) + } + default: + return "", errors.New("element is not a map-like container") + } + + if !refv.IsValid() { + return "", errors.New("$ref element not found") + } + + switch refv.Kind() { + case 
reflect.Interface, reflect.Ptr: + refv = refv.Elem() + } + + switch refv.Kind() { + case reflect.String: + // Empty string isn't a valid pointer + if refv.Len() <= 0 { + return "", errors.New("$ref element not found (empty)") + } + if pdebug.Enabled { + pdebug.Printf("Found ref '%s'", refv) + } + return refv.String(), nil + case reflect.Invalid: + return "", errors.New("$ref element not found") + default: + if pdebug.Enabled { + pdebug.Printf("'$ref' was found, but its kind is %s", refv.Kind()) + } + } + + return "", errors.New("$ref element must be a string") +} + +func evalptr(ctx *resolveCtx, r *Resolver, v interface{}, ptrspec string) (ret interface{}, err error) { + if pdebug.Enabled { + g := pdebug.Marker("evalptr(%s)", ptrspec).BindError(&err) + defer g.End() + } + + // If the reference is empty, return v + if ptrspec == "" || ptrspec == "#" { + if pdebug.Enabled { + pdebug.Printf("Empty pointer, return v itself") + } + return v, nil + } + + // Parse the spec. + u, err := url.Parse(ptrspec) + if err != nil { + return nil, errors.Wrap(err, "failed to parse reference spec") + } + + ptr := u.Fragment + + // We are evaluating the pointer part. 
That means if the + // Fragment portion is not set, there's no point in evaluating + if ptr == "" { + return nil, errors.Wrap(err, "empty json pointer") + } + + p, err := jspointer.New(ptr) + if err != nil { + return nil, errors.Wrap(err, "failed create a new JSON pointer") + } + x, err := p.Get(v) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch value") + } + + if pdebug.Enabled { + pdebug.Printf("Evaulated JSON pointer, now checking if we can expand further") + } + // If this result contains more refs, expand that + return expandRefRecursive(ctx, r, x) +} diff --git a/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go b/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go new file mode 100644 index 000000000000..eeacd46da33d --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/jsref_example_test.go @@ -0,0 +1,69 @@ +package jsref_test + +import ( + "encoding/json" + "fmt" + "log" + + jsref "github.com/lestrrat-go/jsref" + "github.com/lestrrat-go/jsref/provider" +) + +func Example() { + var v interface{} + src := []byte(` +{ + "foo": ["bar", {"$ref": "#/sub"}, {"$ref": "obj2#/sub"}], + "sub": "baz" +}`) + if err := json.Unmarshal(src, &v); err != nil { + log.Printf("%s", err) + return + } + + // External reference + mp := provider.NewMap() + mp.Set("obj2", map[string]string{"sub": "quux"}) + + res := jsref.New() + res.AddProvider(mp) // Register the provider + + data := []struct { + Ptr string + Options []jsref.Option + }{ + { + Ptr: "#/foo/0", // "bar" + }, + { + Ptr: "#/foo/1", // "baz" + }, + { + Ptr: "#/foo/2", // "quux" (resolves via `mp`) + }, + { + Ptr: "#/foo", // ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}] + }, + { + Ptr: "#/foo", // ["bar","baz","quux"] + // experimental option to resolve all resulting values + Options: []jsref.Option{ jsref.WithRecursiveResolution(true) }, + }, + } + for _, set := range data { + result, err := res.Resolve(v, set.Ptr, set.Options...) 
		if err != nil { // failed to resolve
			fmt.Printf("err: %s\n", err)
			continue
		}
		b, _ := json.Marshal(result)
		fmt.Printf("%s -> %s\n", set.Ptr, string(b))
	}

	// OUTPUT:
	// #/foo/0 -> "bar"
	// #/foo/1 -> "baz"
	// #/foo/2 -> "quux"
	// #/foo -> ["bar",{"$ref":"#/sub"},{"$ref":"obj2#/sub"}]
	// #/foo -> ["bar","baz","quux"]
}

package jsref_test

import (
	"encoding/json"
	"io/ioutil"
	"log"
	"net"
	"net/http"
	"os"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"testing"
	"time"

	"github.com/lestrrat-go/jsref"
	"github.com/lestrrat-go/jsref/provider"
	"github.com/stretchr/testify/assert"
)

// TestResolveMemory resolves internal and external ($ref via a Map
// provider) references, then checks recursive resolution of a list.
func TestResolveMemory(t *testing.T) {
	m := map[string]interface{}{
		"foo": []interface{}{
			"bar",
			map[string]interface{}{
				"$ref": "#/sub",
			},
			map[string]interface{}{
				"$ref": "obj2#/sub",
			},
		},
		"sub": "baz",
	}

	data := map[string]string{
		"#/foo/0": "bar",
		"#/foo/1": "baz",
		"#/foo/2": "quux",
	}

	res := jsref.New()
	mp := provider.NewMap()
	mp.Set("obj2", map[string]string{"sub": "quux"})
	res.AddProvider(mp)

	// Sort the pointers so failures are reported deterministically.
	ptrlist := make([]string, 0, len(data))
	for ptr := range data {
		ptrlist = append(ptrlist, ptr)
	}
	sort.Strings(ptrlist)

	for _, ptr := range ptrlist {
		expected := data[ptr]
		v, err := res.Resolve(m, ptr)
		if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) {
			return
		}
		if !assert.Equal(t, v, expected, "Resolve(%s) resolves to '%s'", ptr, expected) {
			return
		}
	}

	// In this test we test if we can optionally recursively
	// resolve references
	v, err := res.Resolve(m, "#/foo", jsref.WithRecursiveResolution(true))
	if !assert.NoError(t, err, "Resolve(%s) should succeed", "#/foo") {
		return
	}

	if !assert.Equal(t, []interface{}{"bar", "baz", "quux"}, v) {
		return
	}
}

// TestResolveFS resolves a file:/// reference through the FS provider,
// rooted at a temporary directory.
func TestResolveFS(t *testing.T) {
	dir, err := ioutil.TempDir("", "jsref-test-")
	if !assert.NoError(t, err, "creating temporary directory should succeed") {
		return
	}
	defer os.RemoveAll(dir)

	path := filepath.Join(dir, "obj2")
	f, err := os.Create(path)
	if !assert.NoError(t, err, "creating %s file should succeed", path) {
		return
	}
	f.Write([]byte(`{"sub":"quux"}`))
	f.Close()

	m := map[string]interface{}{
		"foo": []interface{}{
			"bar",
			map[string]interface{}{
				"$ref": "#/sub",
			},
			map[string]interface{}{
				"$ref": "file:///obj2#/sub",
			},
		},
		"sub": "baz",
	}

	data := map[string]string{
		"#/foo/0": "bar",
		"#/foo/1": "baz",
		"#/foo/2": "quux",
	}

	res := jsref.New()
	res.AddProvider(provider.NewFS(dir))

	ptrlist := make([]string, 0, len(data))
	for ptr := range data {
		ptrlist = append(ptrlist, ptr)
	}
	sort.Strings(ptrlist)

	for _, ptr := range ptrlist {
		expected := data[ptr]
		v, err := res.Resolve(m, ptr)
		if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) {
			return
		}
		if !assert.Equal(t, v, expected, "Resolve(%s) resolves to '%s'", ptr, expected) {
			return
		}
	}
}

// TestResolveHTTP is a live-network test, gated behind the
// JSREF_LIVE_TESTS environment variable and skipped when the remote
// schema is unreachable.
func TestResolveHTTP(t *testing.T) {
	if b, _ := strconv.ParseBool(os.Getenv("JSREF_LIVE_TESTS")); !b {
		t.Skip("JSREF_LIVE_TESTS is not available, skipping test")
	}

	cl := http.Client{
		Transport: &http.Transport{
			Dial: func(n, a string) (net.Conn, error) {
				return net.DialTimeout(n, a, 2*time.Second)
			},
		},
	}

	const schemaURL = `http://json-schema.org/draft-04/schema#`
	if _, err := cl.Get(schemaURL); err != nil {
		t.Skip("JSON schema '" + schemaURL + "' unavailable, skipping test")
	}

	res := jsref.New()
	hp := provider.NewHTTP()
	res.AddProvider(hp)

	m := map[string]interface{}{
		"fetch": map[string]string{
			"$ref": schemaURL,
		},
	}

	ptr := "#/fetch"
	v, err := res.Resolve(m, ptr)
	if !assert.NoError(t, err, "Resolve(%s) should succeed", ptr) {
		return
	}

	switch v.(type) {
	case map[string]interface{}:
		mv := v.(map[string]interface{})
		if !assert.Equal(t, mv["id"], schemaURL, "Resolve("+schemaURL+") resolved to JSON schema") {
			return
		}
	default:
		t.Errorf("Expected map[string]interface{}")
	}
}

// TestResolveRecursive checks that a self-referencing document ("#")
// does not loop forever (bounded by MaxRecursions).
func TestResolveRecursive(t *testing.T) {
	var v interface{}
	src := []byte(`
{
  "foo": {
    "type": "array",
    "items": [{ "$ref": "#" }]
  }
}`)
	if err := json.Unmarshal(src, &v); err != nil {
		log.Printf("%s", err)
		return
	}

	res := jsref.New()
	_, err := res.Resolve(v, "#/foo") // "bar"
	if !assert.NoError(t, err, "res.Resolve should succeed") {
		return
	}
}

func TestGHPR12(t *testing.T) {
	// https://github.com/lestrrat-go/jsref/pull/2 gave me an example
	// using "foo" as the JS pointer (could've been a typo)
	// but it gave me weird results, so this is where I'm testing it
	//
	// NOTE(review): the assertion message below says "should fail" but
	// the code asserts NoError. Today Resolve(v, "foo") returns a nil
	// result AND a nil error (evalptr wraps a nil err for an empty
	// fragment), which contradicts Resolve's documented contract --
	// confirm the intended behavior before relying on this test.
	var v interface{}
	src := []byte(`
{
  "foo": "bar"
}`)
	if err := json.Unmarshal(src, &v); err != nil {
		log.Printf("%s", err)
		return
	}

	res := jsref.New()
	_, err := res.Resolve(v, "foo")
	if !assert.NoError(t, err, "res.Resolve should fail") {
		return
	}
}

// TestHyperSchemaRecursive verifies that WithRecursiveResolution
// removes every "$ref" from hyper-schema style documents.
func TestHyperSchemaRecursive(t *testing.T) {
	src := []byte(`
{
  "definitions": {
    "virtual_machine": {
      "type": "object"
    }
  },
  "links": [
    {
      "schema": {
        "type": "object"
      },
      "targetSchema": {
        "$ref": "#/definitions/virtual_machine"
      }
    },
    {
      "targetSchema": {
        "type": "array",
        "items": {
          "$ref": "#/definitions/virtual_machine"
        }
      }
    }
  ]
}`)
	var v interface{}
	err := json.Unmarshal(src, &v)
	assert.Nil(t, err)
	res := jsref.New()

	ptrs := []string{
		"#/links/0/schema",
		"#/links/0/targetSchema",
		"#/links/1/targetSchema",
	}
	for _, ptr := range ptrs {
		result, err := res.Resolve(v, ptr, jsref.WithRecursiveResolution(true))
		assert.Nil(t, err)
		b, err := json.Marshal(result)
		if !assert.NoError(t, err, "json.Marshal should succeed") {
			return
		}
		if !assert.False(t, strings.Contains(string(b), "$ref"), "%s did not recursively resolve", ptr) {
			t.Logf("resolved to '%s'", b)
			return
		}
	}
}

// TestGHIssue7: regression smoke test; a document containing a JSON
// null inside an enum must not break recursive resolution. Only logs,
// no assertions (the original issue was a panic).
func TestGHIssue7(t *testing.T) {
	src := []byte(`{
  "status": {
    "type": ["string", "null"],
    "enum": [
      "sent",
      "duplicate",
      "error",
      "invalid",
      "rejected",
      "unqueued",
      "unsubscribed",
      null
    ]
  }
}`)

	var v interface{}
	if !assert.NoError(t, json.Unmarshal(src, &v), `Unmarshal should succeed`) {
		return
	}

	res := jsref.New()
	result, err := res.Resolve(v, "", jsref.WithRecursiveResolution(true))
	t.Logf("%s", result)
	t.Logf("%s", err)
}

package provider

import (
	"encoding/json"
	"net/url"
	"os"
	"path/filepath"
	"strings"

	"github.com/lestrrat-go/pdebug"
	"github.com/pkg/errors"
)

// NewFS creates a new Provider that looks for JSON documents
// from the local file system. Documents are only searched
// within `root`
func NewFS(root string) *FS {
	return &FS{
		mp:   NewMap(),
		Root: root,
	}
}

// Get fetches the document specified by the `key` argument.
// Everything other than `.Path` is ignored.
+// Note that once a document is read, it WILL be cached for the +// duration of this object, unless you call `Reset` +func (fp *FS) Get(key *url.URL) (out interface{}, err error) { + if pdebug.Enabled { + g := pdebug.Marker("provider.FS.Get(%s)", key.String()).BindError(&err) + defer g.End() + } + + if strings.ToLower(key.Scheme) != "file" { + return nil, errors.New("unsupported scheme '" + key.Scheme + "'") + } + + // Everything other than "Path" is ignored + path := filepath.Clean(filepath.Join(fp.Root, key.Path)) + + mpkey := &url.URL{Path: path} + if x, err := fp.mp.Get(mpkey); err == nil { + return x, nil + } + + fi, err := os.Stat(path) + if err != nil { + return nil, errors.Wrap(err, "failed to stat local resource") + } + + if fi.IsDir() { + return nil, errors.New("target is not a file") + } + + f, err := os.Open(path) + if err != nil { + return nil, errors.Wrap(err, "failed to open local resource") + } + defer f.Close() + + var x interface{} + dec := json.NewDecoder(f) + if err := dec.Decode(&x); err != nil { + return nil, errors.Wrap(err, "failed to parse JSON local resource") + } + + fp.mp.Set(path, x) + + return x, nil +} + +// Reset resets the in memory cache of JSON documents +func (fp *FS) Reset() error { + return fp.mp.Reset() +} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/http.go b/vendor/github.com/lestrrat-go/jsref/provider/http.go new file mode 100644 index 000000000000..acb407b8850f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/provider/http.go @@ -0,0 +1,65 @@ +package provider + +import ( + "encoding/json" + "net/http" + "net/url" + "strings" + "time" + + "github.com/lestrrat-go/pdebug" + "github.com/pkg/errors" +) + +// NewFS creates a new Provider that looks for JSON documents +// from the internet over HTTP(s) +func NewHTTP() *HTTP { + return &HTTP{ + mp: NewMap(), + Client: &http.Client{ + Timeout: 5 * time.Second, + }, + } +} + +// Get fetches the document specified by the `key` argument, making +// a HTTP request 
if necessary. +// Note that once a document is read, it WILL be cached for the +// duration of this object, unless you call `Reset` +func (hp *HTTP) Get(key *url.URL) (interface{}, error) { + if pdebug.Enabled { + g := pdebug.Marker("HTTP.Get(%s)", key) + defer g.End() + } + + switch strings.ToLower(key.Scheme) { + case "http", "https": + default: + return nil, errors.New("key is not http/https URL") + } + + v, err := hp.mp.Get(key) + if err == nil { // Found! + return v, nil + } + + res, err := hp.Client.Get(key.String()) + if err != nil { + return nil, errors.Wrap(err, "failed to fetch HTTP resource") + } + defer res.Body.Close() + + dec := json.NewDecoder(res.Body) + + var x interface{} + if err := dec.Decode(&x); err != nil { + return nil, errors.Wrap(err, "failed to parse JSON from HTTP resource") + } + + return x, nil +} + +// Reset resets the in memory cache of JSON documents +func (hp *HTTP) Reset() error { + return hp.mp.Reset() +} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/interface.go b/vendor/github.com/lestrrat-go/jsref/provider/interface.go new file mode 100644 index 000000000000..4eaf7190a28f --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/provider/interface.go @@ -0,0 +1,21 @@ +package provider + +import ( + "net/http" + "sync" +) + +type FS struct { + mp *Map + Root string +} + +type HTTP struct { + mp *Map + Client *http.Client +} + +type Map struct { + lock sync.Mutex + mapping map[string]interface{} +} diff --git a/vendor/github.com/lestrrat-go/jsref/provider/map.go b/vendor/github.com/lestrrat-go/jsref/provider/map.go new file mode 100644 index 000000000000..48d65a6a4c48 --- /dev/null +++ b/vendor/github.com/lestrrat-go/jsref/provider/map.go @@ -0,0 +1,47 @@ +package provider + +import ( + "net/url" + + "github.com/lestrrat-go/pdebug" + "github.com/pkg/errors" +) + +func NewMap() *Map { + return &Map{ + mapping: make(map[string]interface{}), + } +} + +func (mp *Map) Set(key string, v interface{}) error { + mp.lock.Lock() 
+ defer mp.lock.Unlock() + + mp.mapping[key] = v + return nil +} + +func (mp *Map) Get(key *url.URL) (res interface{}, err error) { + if pdebug.Enabled { + g := pdebug.Marker("Map.Get(%s)", key).BindError(&err) + defer g.End() + } + + mp.lock.Lock() + defer mp.lock.Unlock() + + v, ok := mp.mapping[key.String()] + if !ok { + return nil, errors.New("not found") + } + + return v, nil +} + +func (mp *Map) Reset() error { + mp.lock.Lock() + defer mp.lock.Unlock() + + mp.mapping = make(map[string]interface{}) + return nil +} diff --git a/vendor/github.com/lestrrat-go/pdebug/.gitignore b/vendor/github.com/lestrrat-go/pdebug/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/lestrrat-go/pdebug/.travis.yml b/vendor/github.com/lestrrat-go/pdebug/.travis.yml new file mode 100644 index 000000000000..baecfce60a99 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/.travis.yml @@ -0,0 +1,14 @@ +language: go +sudo: false +go: + - 1.6 + - 1.7 + - tip +install: + - go get -t -v ./... + - go get -t -tags debug0 -v ./... +script: + - go test -v ./... + - go test -tags debug ./... + - PDEBUG_TRACE=1 go test -tags debug ./... + - go test -tags debug0 ./... 
diff --git a/vendor/github.com/lestrrat-go/pdebug/LICENSE b/vendor/github.com/lestrrat-go/pdebug/LICENSE new file mode 100644 index 000000000000..20054b15434d --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/pdebug/README.md b/vendor/github.com/lestrrat-go/pdebug/README.md new file mode 100644 index 000000000000..4f6d88959f8a --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/README.md @@ -0,0 +1,95 @@ +# go-pdebug + +[![Build Status](https://travis-ci.org/lestrrat-go/pdebug.svg?branch=master)](https://travis-ci.org/lestrrat-go/pdebug) + +[![GoDoc](https://godoc.org/github.com/lestrrat-go/pdebug?status.svg)](https://godoc.org/github.com/lestrrat-go/pdebug) + +Utilities for my print debugging fun. 
YMMV + +# Synopsis + +![optimized](https://pbs.twimg.com/media/CbiqhzLUUAIN_7o.png) + +# Description + +Building with `pdebug` declares a constant, `pdebug.Enabled` which you +can use to easily compile in/out depending on the presence of a build tag. + +```go +func Foo() { + // will only be available if you compile with `-tags debug` + if pdebug.Enabled { + pdebug.Printf("Starting Foo()! + } +} +``` + +Note that using `github.com/lestrrat-go/pdebug` and `-tags debug` only +compiles in the code. In order to actually show the debug trace, you need +to specify an environment variable: + +```shell +# For example, to show debug code during testing: +PDEBUG_TRACE=1 go test -tags debug +``` + +If you want to forcefully show the trace (which is handy when you're +debugging/testing), you can use the `debug0` tag instead: + +```shell +go test -tags debug0 +``` + +# Markers + +When you want to print debug a chain of function calls, you can use the +`Marker` functions: + +```go +func Foo() { + if pdebug.Enabled { + g := pdebug.Marker("Foo") + defer g.End() + } + + pdebug.Printf("Inside Foo()!") +} +``` + +This will cause all of the `Printf` calls to automatically indent +the output so it's visually easier to see where a certain trace log +is being generated. + +By default it will print something like: + +``` +|DEBUG| START Foo +|DEBUG| Inside Foo()! +|DEBUG| END Foo (1.23μs) +``` + +If you want to automatically show the error value you are returning +(but only if there is an error), you can use the `BindError` method: + +```go +func Foo() (err error) { + if pdebug.Enabled { + g := pdebug.Marker("Foo").BindError(&err) + defer g.End() + } + + pdebug.Printf("Inside Foo()!") + + return errors.New("boo") +} +``` + +This will print something like: + + +``` +|DEBUG| START Foo +|DEBUG| Inside Foo()! 
+|DEBUG| END Foo (1.23μs): ERROR boo +``` + diff --git a/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go b/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go new file mode 100644 index 000000000000..3ca774591fd0 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/autoflag_off.go @@ -0,0 +1,15 @@ +// +build debug + +package pdebug + +import ( + "os" + "strconv" +) + +var Trace = false +func init() { + if b, err := strconv.ParseBool(os.Getenv("PDEBUG_TRACE")); err == nil && b { + Trace = true + } +} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go b/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go new file mode 100644 index 000000000000..f5f674db5d05 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/autoflag_on.go @@ -0,0 +1,6 @@ +// +build debug0 + +package pdebug + +var Trace = true + diff --git a/vendor/github.com/lestrrat-go/pdebug/common.go b/vendor/github.com/lestrrat-go/pdebug/common.go new file mode 100644 index 000000000000..95f11a007695 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/common.go @@ -0,0 +1,43 @@ +package pdebug + +import ( + "io" + "os" + "sync" + "time" +) + +type pdctx struct { + mutex sync.Mutex + indentL int + LogTime bool + Prefix string + Writer io.Writer +} + +var emptyMarkerGuard = &markerg{} + +type markerg struct { + indentg guard + ctx *pdctx + f string + args []interface{} + start time.Time + errptr *error +} + +var DefaultCtx = &pdctx{ + LogTime: true, + Prefix: "|DEBUG| ", + Writer: os.Stdout, +} + +type guard struct { + cb func() +} + +func (g *guard) End() { + if cb := g.cb; cb != nil { + cb() + } +} diff --git a/vendor/github.com/lestrrat-go/pdebug/common_test.go b/vendor/github.com/lestrrat-go/pdebug/common_test.go new file mode 100644 index 000000000000..2589727304ce --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/common_test.go @@ -0,0 +1,112 @@ +package pdebug + +import ( + "bytes" + "errors" + "io" + "regexp" + "testing" + + 
"github.com/stretchr/testify/assert" +) + +func setw(ctx *pdctx, w io.Writer) func() { + oldw := ctx.Writer + ctx.Writer = w + return func() { ctx.Writer = oldw } +} + +func TestPrintf(t *testing.T) { + buf := &bytes.Buffer{} + wg := setw(DefaultCtx, buf) + defer wg() + + Printf("Hello, World!") + + if Enabled && Trace { + re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ Hello, World!\n`) + if !assert.True(t, re.MatchString(buf.String()), "Simple Printf works") { + return + } + } else { + if !assert.Equal(t, "", buf.String(), "Simple Printf should be suppressed") { + return + } + } +} + +func TestMarker(t *testing.T) { + buf := &bytes.Buffer{} + wg := setw(DefaultCtx, buf) + defer wg() + + f2 := func() (err error) { + g := Marker("f2").BindError(&err) + defer g.End() + Printf("Hello, World!") + return errors.New("dummy error") + } + + f1 := func() { + g := Marker("f1") + defer g.End() + f2() + } + + f1() + + if Enabled && Trace { + re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ START f1\n\|DEBUG\| \d+\.\d+ START f2\n\|DEBUG\| \d+\.\d+ Hello, World!\n\|DEBUG\| \d+\.\d+ END f2 \(`) + if !assert.True(t, re.MatchString(buf.String()), "Markers should work") { + t.Logf("Expected '%v'", re) + t.Logf("Actual '%v'", buf.String()) + return + } + } else { + if !assert.Equal(t, "", buf.String(), "Markers should work") { + return + } + } +} + +func TestLegacyMarker(t *testing.T) { + buf := &bytes.Buffer{} + wg := setw(DefaultCtx, buf) + defer wg() + + f2 := func() (err error) { + g := IPrintf("START f2") + defer func() { + if err == nil { + g.IRelease("END f2") + } else { + g.IRelease("END f2: %s", err) + } + }() + Printf("Hello, World!") + return errors.New("dummy error") + } + + f1 := func() { + g := IPrintf("START f1") + defer g.IRelease("END f1") + f2() + } + + f1() + + if Enabled && Trace { + re := regexp.MustCompile(`\|DEBUG\| \d+\.\d+ START f1\n\|DEBUG\| \d+\.\d+ START f2\n\|DEBUG\| \d+\.\d+ Hello, World!\n\|DEBUG\| \d+\.\d+ END f2`) + if !assert.True(t, 
re.MatchString(buf.String()), "Markers should work") { + t.Logf("Expected '%v'", re) + t.Logf("Actual '%v'", buf.String()) + return + } + + // TODO: check for error and timestamp + } else { + if !assert.Equal(t, "", buf.String(), "Markers should work") { + return + } + } +} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug0_test.go b/vendor/github.com/lestrrat-go/pdebug/debug0_test.go new file mode 100644 index 000000000000..052fcca96231 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/debug0_test.go @@ -0,0 +1,13 @@ +//+build debug0,!debug + +package pdebug + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDebug0Enabled(t *testing.T) { + assert.True(t, Enabled, "Enable is true") +} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_off.go b/vendor/github.com/lestrrat-go/pdebug/debug_off.go new file mode 100644 index 000000000000..9f794b29747b --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/debug_off.go @@ -0,0 +1,39 @@ +//+build !debug,!debug0 + +package pdebug + +// Enabled is true if `-tags debug` or `-tags debug0` is used +// during compilation. Use this to "ifdef-out" debug blocks. +const Enabled = false + +// Trace is true if `-tags debug` is used AND the environment +// variable `PDEBUG_TRACE` is set to a `true` value (i.e., +// 1, true, etc), or `-tags debug0` is used. This allows you to +// compile-in the trace logs, but only show them when you +// set the environment variable +const Trace = false + +// IRelease is deprecated. Use Marker()/End() instead +func (g guard) IRelease(f string, args ...interface{}) {} + +// IPrintf is deprecated. Use Marker()/End() instead +func IPrintf(f string, args ...interface{}) guard { return guard{} } + +// Printf prints to standard out, just like a normal fmt.Printf, +// but respects the indentation level set by IPrintf/IRelease. +// Printf is no op unless you compile with the `debug` tag. 
+func Printf(f string, args ...interface{}) {} + +// Dump dumps the objects using go-spew. +// Dump is a no op unless you compile with the `debug` tag. +func Dump(v ...interface{}) {} + +// Marker marks the beginning of an indented block. The message +// you specify in the arguments is prefixed witha "START", and +// subsequent calls to Printf will be indented one level more. +// +// To reset this, you must call End() on the guard object that +// gets returned by Marker(). +func Marker(f string, args ...interface{}) *markerg { return emptyMarkerGuard } +func (g *markerg) BindError(_ *error) *markerg { return g } +func (g *markerg) End() {} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_on.go b/vendor/github.com/lestrrat-go/pdebug/debug_on.go new file mode 100644 index 000000000000..064f3420ca8b --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/debug_on.go @@ -0,0 +1,170 @@ +// +build debug OR debug0 + +package pdebug + +import ( + "bytes" + "fmt" + "strings" + "time" + + "github.com/davecgh/go-spew/spew" +) + +const Enabled = true + +type Guard interface { + End() +} + +var emptyGuard = &guard{} + +func (ctx *pdctx) Unindent() { + ctx.mutex.Lock() + defer ctx.mutex.Unlock() + ctx.indentL-- +} + +func (ctx *pdctx) Indent() guard { + ctx.mutex.Lock() + ctx.indentL++ + ctx.mutex.Unlock() + + return guard{cb: ctx.Unindent} +} + +func (ctx *pdctx) preamble(buf *bytes.Buffer) { + if p := ctx.Prefix; len(p) > 0 { + buf.WriteString(p) + } + if ctx.LogTime { + fmt.Fprintf(buf, "%0.5f ", float64(time.Now().UnixNano()) / 1000000.0) + } + + for i := 0; i < ctx.indentL; i++ { + buf.WriteString(" ") + } +} + +func (ctx *pdctx) Printf(f string, args ...interface{}) { + if !strings.HasSuffix(f, "\n") { + f = f + "\n" + } + buf := bytes.Buffer{} + ctx.preamble(&buf) + fmt.Fprintf(&buf, f, args...) + buf.WriteTo(ctx.Writer) +} + +func Marker(f string, args ...interface{}) *markerg { + return DefaultCtx.Marker(f, args...) 
+} + +func (ctx *pdctx) Marker(f string, args ...interface{}) *markerg { + if !Trace { + return emptyMarkerGuard + } + + buf := &bytes.Buffer{} + ctx.preamble(buf) + buf.WriteString("START ") + fmt.Fprintf(buf, f, args...) + if buf.Len() > 0 { + if b := buf.Bytes(); b[buf.Len()-1] != '\n' { + buf.WriteRune('\n') + } + } + + buf.WriteTo(ctx.Writer) + + g := ctx.Indent() + return &markerg{ + indentg: g, + ctx: ctx, + f: f, + args: args, + start: time.Now(), + errptr: nil, + } +} + +func (g *markerg) BindError(errptr *error) *markerg { + if g.ctx == nil { + return g + } + g.ctx.mutex.Lock() + defer g.ctx.mutex.Unlock() + + g.errptr = errptr + return g +} + +func (g *markerg) End() { + if g.ctx == nil { + return + } + + g.indentg.End() // unindent + buf := &bytes.Buffer{} + g.ctx.preamble(buf) + fmt.Fprint(buf, "END ") + fmt.Fprintf(buf, g.f, g.args...) + fmt.Fprintf(buf, " (%s)", time.Since(g.start)) + if errptr := g.errptr; errptr != nil && *errptr != nil { + fmt.Fprintf(buf, ": ERROR: %s", *errptr) + } + + if buf.Len() > 0 { + if b := buf.Bytes(); b[buf.Len()-1] != '\n' { + buf.WriteRune('\n') + } + } + + buf.WriteTo(g.ctx.Writer) +} + +type legacyg struct { + guard + start time.Time +} + +var emptylegacyg = legacyg{} + +func (g legacyg) IRelease(f string, args ...interface{}) { + if !Trace { + return + } + g.End() + dur := time.Since(g.start) + Printf("%s (%s)", fmt.Sprintf(f, args...), dur) +} + +// IPrintf indents and then prints debug messages. Execute the callback +// to undo the indent +func IPrintf(f string, args ...interface{}) legacyg { + if !Trace { + return emptylegacyg + } + + DefaultCtx.Printf(f, args...) + g := legacyg{ + guard: DefaultCtx.Indent(), + start: time.Now(), + } + return g +} + +// Printf prints debug messages. Only available if compiled with "debug" tag +func Printf(f string, args ...interface{}) { + if !Trace { + return + } + DefaultCtx.Printf(f, args...) +} + +func Dump(v ...interface{}) { + if !Trace { + return + } + spew.Dump(v...) 
+} diff --git a/vendor/github.com/lestrrat-go/pdebug/debug_test.go b/vendor/github.com/lestrrat-go/pdebug/debug_test.go new file mode 100644 index 000000000000..7e1077ee7596 --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/debug_test.go @@ -0,0 +1,30 @@ +//+build debug,!debug0 + +package pdebug + +import ( + "os" + "strconv" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDebugEnabled(t *testing.T) { + if !assert.True(t, Enabled, "Enable is true") { + return + } + + b, err := strconv.ParseBool(os.Getenv("PDEBUG_TRACE")) + if err == nil && b { + if !assert.True(t, Trace, "Trace is true") { + return + } + t.Logf("Trace is enabled") + } else { + if !assert.False(t, Trace, "Trace is false") { + return + } + t.Logf("Trace is disabled") + } +} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/pdebug/doc.go b/vendor/github.com/lestrrat-go/pdebug/doc.go new file mode 100644 index 000000000000..d0566de384ab --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/doc.go @@ -0,0 +1,15 @@ +// Package pdebug provides tools to produce debug logs the way the author +// (Daisuke Maki a.k.a. lestrrat) likes. All of the functions are no-ops +// unless you compile with the `-tags debug` option. +// +// When you compile your program with `-tags debug`, no trace is displayed, +// but the code enclosed within `if pdebug.Enabled { ... }` is compiled in. +// To show the debug trace, set the PDEBUG_TRACE environment variable to +// true (or 1, or whatever `strconv.ParseBool` parses to true) +// +// If you want to show the debug trace regardless of an environment variable, +// for example, perhaps while you are debugging or running tests, use the +// `-tags debug0` build tag instead. 
This will enable the debug trace +// forcefully +package pdebug + diff --git a/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go b/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go new file mode 100644 index 000000000000..ae09c76fa85c --- /dev/null +++ b/vendor/github.com/lestrrat-go/pdebug/nodebug_test.go @@ -0,0 +1,14 @@ +//+build !debug,!debug0 + +package pdebug + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestDisabled(t *testing.T) { + assert.False(t, Enabled, "Enable is false") + assert.False(t, Trace, "Trace is false") +} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/structinfo/.gitignore b/vendor/github.com/lestrrat-go/structinfo/.gitignore new file mode 100644 index 000000000000..daf913b1b347 --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/vendor/github.com/lestrrat-go/structinfo/.travis.yml b/vendor/github.com/lestrrat-go/structinfo/.travis.yml new file mode 100644 index 000000000000..5b800297f00e --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/.travis.yml @@ -0,0 +1,5 @@ +language: go +sudo: false +go: + - 1.5 + - tip diff --git a/vendor/github.com/lestrrat-go/structinfo/LICENSE b/vendor/github.com/lestrrat-go/structinfo/LICENSE new file mode 100644 index 000000000000..20054b15434d --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 lestrrat + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the 
rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/lestrrat-go/structinfo/README.md b/vendor/github.com/lestrrat-go/structinfo/README.md new file mode 100644 index 000000000000..fbf6c877d683 --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/README.md @@ -0,0 +1,7 @@ +# structinfo + +[![Build Status](https://travis-ci.org/lestrrat-go/structinfo.svg?branch=master)](https://travis-ci.org/lestrrat-go/structinfo) + +[![GoDoc](https://godoc.org/github.com/lestrrat-go/structinfo?status.svg)](https://godoc.org/github.com/lestrrat-go/structinfo) + +Tools to inspect Go structs diff --git a/vendor/github.com/lestrrat-go/structinfo/structinfo.go b/vendor/github.com/lestrrat-go/structinfo/structinfo.go new file mode 100644 index 000000000000..0a283ca88897 --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/structinfo.go @@ -0,0 +1,118 @@ +// Package structinfo contains tools to inspect structs. 
+ +package structinfo + +import ( + "reflect" + "sync" +) + +type jsonFieldMap struct { + lock sync.Mutex + fields map[string]string +} + +var type2jfm = map[reflect.Type]jsonFieldMap{} +var type2jfmMutex = sync.Mutex{} + +// JSONFieldsFromStruct returns the names of JSON fields associated +// with the given struct. Returns nil if v is not a struct +func JSONFieldsFromStruct(v reflect.Value) []string { + if v.Kind() != reflect.Struct { + return nil + } + + m := getType2jfm(v.Type()) + m.lock.Lock() + defer m.lock.Unlock() + + l := make([]string, 0, len(m.fields)) + for k := range m.fields { + l = append(l, k) + } + return l +} + +// StructFieldFromJSONName returns the struct field name on the +// given struct value. Empty value means the field is either not +// public, or does not exist. +// +// This can be used to map JSON field names to actual struct fields. +func StructFieldFromJSONName(v reflect.Value, name string) string { + if v.Kind() != reflect.Struct { + return "" + } + + m := getType2jfm(v.Type()) + m.lock.Lock() + defer m.lock.Unlock() + + s, ok := m.fields[name] + if !ok { + return "" + } + return s +} + +func getType2jfm(t reflect.Type) jsonFieldMap { + type2jfmMutex.Lock() + defer type2jfmMutex.Unlock() + + return getType2jfm_nolock(t) +} + +func getType2jfm_nolock(t reflect.Type) jsonFieldMap { + fm, ok := type2jfm[t] + if ok { + return fm + } + + fm = constructJfm(t) + type2jfm[t] = fm + return fm +} + +func constructJfm(t reflect.Type) jsonFieldMap { + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + + fm := jsonFieldMap{ + fields: make(map[string]string), + } + for i := 0; i < t.NumField(); i++ { + sf := t.Field(i) + if sf.Anonymous { // embedded! 
got to recurse + fm2 := getType2jfm_nolock(sf.Type) + for k, v := range fm2.fields { + fm.fields[k] = v + } + continue + } + + if sf.PkgPath != "" { // unexported + continue + } + + tag := sf.Tag.Get("json") + if tag == "-" { + continue + } + + if tag == "" || tag[0] == ',' { + fm.fields[sf.Name] = sf.Name + continue + } + + flen := 0 + for j := 0; j < len(tag); j++ { + if tag[j] == ',' { + break + } + flen = j + } + fm.fields[tag[:flen+1]] = sf.Name + } + + return fm +} \ No newline at end of file diff --git a/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go b/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go new file mode 100644 index 000000000000..980c2654b7c3 --- /dev/null +++ b/vendor/github.com/lestrrat-go/structinfo/structinfo_test.go @@ -0,0 +1,63 @@ +package structinfo_test + +import ( + "reflect" + "testing" + + "github.com/lestrrat-go/structinfo" + "github.com/stretchr/testify/assert" +) + +type Quux struct { + Baz string `json:"baz"` +} + +type X struct { + private int + Quux + Foo string `json:"foo"` + Bar string `json:"bar,omitempty"` +} + +func TestStructFields(t *testing.T) { + fields := make(map[string]struct{}) + for _, name := range structinfo.JSONFieldsFromStruct(reflect.ValueOf(X{})) { + fields[name] = struct{}{} + } + + expected := map[string]struct{}{ + "foo": {}, + "bar": {}, + "baz": {}, + } + + if !assert.Equal(t, expected, fields, "expected fields match") { + return + } +} + +func TestLookupSructFieldFromJSONName(t *testing.T) { + rv := reflect.ValueOf(X{}) + + data := map[string]string{ + "foo": "Foo", + "bar": "Bar", + "baz": "Baz", + } + + for jsname, fname := range data { + fn := structinfo.StructFieldFromJSONName(rv, jsname) + if !assert.NotEqual(t, fn, "", "should find '%s'", jsname) { + return + } + + sf, ok := rv.Type().FieldByName(fn) + if !assert.True(t, ok, "should be able resolve '%s' (%s)", jsname, fn) { + return + } + + if !assert.Equal(t, sf.Name, fname, "'%s' should map to '%s'", jsname, fname) { + 
return + } + } +} diff --git a/vendor/github.com/lestrrat/go-jspointer/.travis.yml b/vendor/github.com/lestrrat/go-jspointer/.travis.yml index 5b800297f00e..21e0a8e8d7c0 100644 --- a/vendor/github.com/lestrrat/go-jspointer/.travis.yml +++ b/vendor/github.com/lestrrat/go-jspointer/.travis.yml @@ -1,5 +1,5 @@ language: go sudo: false go: - - 1.5 + - 1.11 - tip diff --git a/vendor/github.com/lestrrat/go-jspointer/README.md b/vendor/github.com/lestrrat/go-jspointer/README.md index 8bce21bcaff3..e1a4fbcd01b0 100644 --- a/vendor/github.com/lestrrat/go-jspointer/README.md +++ b/vendor/github.com/lestrrat/go-jspointer/README.md @@ -1,8 +1,8 @@ # go-jspointer -[![Build Status](https://travis-ci.org/lestrrat/go-jspointer.svg?branch=master)](https://travis-ci.org/lestrrat/go-jspointer) +[![Build Status](https://travis-ci.org/lestrrat-go/jspointer.svg?branch=master)](https://travis-ci.org/lestrrat-go/jspointer) -[![GoDoc](https://godoc.org/github.com/lestrrat/go-jspointer?status.svg)](https://godoc.org/github.com/lestrrat/go-jspointer) +[![GoDoc](https://godoc.org/github.com/lestrrat-go/jspointer?status.svg)](https://godoc.org/github.com/lestrrat-go/jspointer) JSON pointer for Go @@ -26,9 +26,9 @@ This is almost a fork of https://github.com/xeipuuv/gojsonpointer. 
| Name | Notes | |:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat/go-jsval) | Validator generator | -| [go-jsschema](https://github.com/lestrrat/go-jsschema) | JSON Schema implementation | -| [go-jshschema](https://github.com/lestrrat/go-jshschema) | JSON Hyper Schema implementation | -| [go-jsref](https://github.com/lestrrat/go-jsref) | JSON Reference implementation | +| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | +| [go-jsschema](https://github.com/lestrrat-go/jsschema) | JSON Schema implementation | +| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | +| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | diff --git a/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go b/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go index c0350b280b93..3ffe29fb4ded 100644 --- a/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go +++ b/vendor/github.com/lestrrat/go-jspointer/bench/bench_test.go @@ -6,7 +6,7 @@ import ( "encoding/json" "testing" - "github.com/lestrrat/go-jspointer" + "github.com/lestrrat-go/jspointer" "github.com/xeipuuv/gojsonpointer" ) diff --git a/vendor/github.com/lestrrat/go-jspointer/interface.go b/vendor/github.com/lestrrat/go-jspointer/interface.go index 1e83d599556e..7fe800233bb2 100644 --- a/vendor/github.com/lestrrat/go-jspointer/interface.go +++ b/vendor/github.com/lestrrat/go-jspointer/interface.go @@ -23,5 +23,5 @@ type ErrNotFound struct { // JSPointer represents a JSON pointer type JSPointer struct { raw string - tokens []string + tokens tokens } diff --git a/vendor/github.com/lestrrat/go-jspointer/jspointer.go b/vendor/github.com/lestrrat/go-jspointer/jspointer.go index 1c1f88fd2a53..c42b9613f9a3 100644 --- a/vendor/github.com/lestrrat/go-jspointer/jspointer.go +++ b/vendor/github.com/lestrrat/go-jspointer/jspointer.go @@ -1,79 
+1,96 @@ package jspointer import ( + "bytes" "encoding/json" "errors" "reflect" "strconv" - "strings" - "sync" - "github.com/lestrrat/go-structinfo" + "github.com/lestrrat-go/structinfo" ) -var ctxPool = sync.Pool{ - New: moreCtx, +type tokens struct { + s string + positions [][2]int } -func moreCtx() interface{} { - return &matchCtx{} +func (t *tokens) size() int { + return len(t.positions) } -func getCtx() *matchCtx { - return ctxPool.Get().(*matchCtx) -} - -func releaseCtx(ctx *matchCtx) { - ctx.err = nil - ctx.set = false - ctx.tokens = nil - ctx.result = nil - ctxPool.Put(ctx) +func (t *tokens) get(i int) string { + p := t.positions[i] + return t.s[p[0]:p[1]] } // New creates a new JSON pointer for given path spec. If the path fails // to be parsed, an error is returned func New(path string) (*JSPointer, error) { var p JSPointer - dtokens, err := parse(path) - if err != nil { + + if err := p.parse(path); err != nil { return nil, err } p.raw = path - p.tokens = dtokens return &p, nil } -func parse(s string) ([]string, error) { +func (p *JSPointer) parse(s string) error { if s == "" { - return nil, nil + return nil } if s[0] != Separator { - return nil, ErrInvalidPointer + return ErrInvalidPointer } - prev := 0 - tokens := []string{} + if len(s) < 2 { + return ErrInvalidPointer + } + + ntokens := 0 + for i := 0; i < len(s); i++ { + if s[i] == '/' { + ntokens++ + } + } + + positions := make([][2]int, 0, ntokens) + start := 1 + var buf bytes.Buffer + buf.WriteByte(s[0]) for i := 1; i < len(s); i++ { switch s[i] { case Separator: - tokens = append(tokens, s[prev+1:i]) - prev = i + buf.WriteByte(s[i]) + positions = append(positions, [2]int{start, buf.Len() - 1}) + start = i + 1 + case '~': + if len(s) == 1 { + buf.WriteByte(s[i]) + } else { + switch s[1] { + case '0': + buf.WriteByte('~') + case '1': + buf.WriteByte('/') + default: + buf.WriteByte(s[i]) + } + } + default: + buf.WriteByte(s[i]) } } - if prev != len(s) { - tokens = append(tokens, s[prev+1:]) - } - - 
dtokens := make([]string, 0, len(tokens)) - for _, t := range tokens { - t = strings.Replace(strings.Replace(t, EncodedSlash, "/", -1), EncodedTilde, "~", -1) - dtokens = append(dtokens, t) + if start < buf.Len() { + positions = append(positions, [2]int{start, buf.Len()}) } - return dtokens, nil + p.tokens.s = buf.String() + p.tokens.positions = positions + return nil } // String returns the stringified version of this JSON pointer @@ -84,11 +101,10 @@ func (p JSPointer) String() string { // Get applies the JSON pointer to the given item, and returns // the result. func (p JSPointer) Get(item interface{}) (interface{}, error) { - ctx := getCtx() - defer releaseCtx(ctx) + var ctx matchCtx ctx.raw = p.raw - ctx.tokens = p.tokens + ctx.tokens = &p.tokens ctx.apply(item) return ctx.result, ctx.err } @@ -96,12 +112,11 @@ func (p JSPointer) Get(item interface{}) (interface{}, error) { // Set applies the JSON pointer to the given item, and sets the // value accordingly. func (p JSPointer) Set(item interface{}, value interface{}) error { - ctx := getCtx() - defer releaseCtx(ctx) + var ctx matchCtx ctx.set = true ctx.raw = p.raw - ctx.tokens = p.tokens + ctx.tokens = &p.tokens ctx.setvalue = value ctx.apply(item) return ctx.err @@ -113,25 +128,48 @@ type matchCtx struct { result interface{} set bool setvalue interface{} - tokens []string + tokens *tokens } func (e ErrNotFound) Error() string { return "match to JSON pointer not found: " + e.Ptr } +type JSONGetter interface { + JSONGet(tok string) (interface{}, error) +} + var strType = reflect.TypeOf("") +var zeroval reflect.Value func (c *matchCtx) apply(item interface{}) { - if len(c.tokens) == 0 { + if c.tokens.size() == 0 { c.result = item return } - lastidx := len(c.tokens) - 1 node := item - for tidx, token := range c.tokens { + lastidx := c.tokens.size() - 1 + for i := 0; i < c.tokens.size(); i++ { + token := c.tokens.get(i) + + if getter, ok := node.(JSONGetter); ok { + x, err := getter.JSONGet(token) + if err != nil 
{ + c.err = ErrNotFound{Ptr: c.raw} + return + } + if i == lastidx { + c.result = x + return + } + node = x + continue + } v := reflect.ValueOf(node) + + // Does this thing implement a JSONGet? + if v.Kind() == reflect.Ptr { v = v.Elem() } @@ -144,7 +182,7 @@ func (c *matchCtx) apply(item interface{}) { return } f := v.FieldByName(fn) - if tidx == lastidx { + if i == lastidx { if c.set { if !f.CanSet() { c.err = ErrCanNotSet @@ -176,12 +214,12 @@ func (c *matchCtx) apply(item interface{}) { vt = reflect.ValueOf(token) } n := v.MapIndex(vt) - if (reflect.Value{}) == n { + if zeroval == n { c.err = ErrNotFound{Ptr: c.raw} return } - if tidx == lastidx { + if i == lastidx { if c.set { v.SetMapIndex(vt, reflect.ValueOf(c.setvalue)) } else { @@ -204,7 +242,7 @@ func (c *matchCtx) apply(item interface{}) { return } - if tidx == lastidx { + if i == lastidx { if c.set { m[wantidx] = c.setvalue } else { diff --git a/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go b/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go index 388c76261ee6..50ba8e610d29 100644 --- a/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go +++ b/vendor/github.com/lestrrat/go-jspointer/jspointer_test.go @@ -4,7 +4,7 @@ import ( "encoding/json" "testing" - "github.com/lestrrat/go-jspointer" + "github.com/lestrrat-go/jspointer" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/.travis.yml b/vendor/github.com/lestrrat/go-jsschema/.travis.yml index ad9a6993d127..a20c62692f00 100644 --- a/vendor/github.com/lestrrat/go-jsschema/.travis.yml +++ b/vendor/github.com/lestrrat/go-jsschema/.travis.yml @@ -1,7 +1,7 @@ language: go sudo: false go: - - 1.7 + - 1.11.x - tip script: - go test -v ./... 
diff --git a/vendor/github.com/lestrrat/go-jsschema/README.md b/vendor/github.com/lestrrat/go-jsschema/README.md index c3f78a31e776..79031957e1dd 100644 --- a/vendor/github.com/lestrrat/go-jsschema/README.md +++ b/vendor/github.com/lestrrat/go-jsschema/README.md @@ -1,8 +1,8 @@ # go-jsschema -[![Build Status](https://travis-ci.org/lestrrat/go-jsschema.svg?branch=master)](https://travis-ci.org/lestrrat/go-jsschema) +[![Build Status](https://travis-ci.org/lestrrat-go/jsschema.svg?branch=master)](https://travis-ci.org/lestrrat-go/jsschema) -[![GoDoc](https://godoc.org/github.com/lestrrat/go-jsschema?status.svg)](https://godoc.org/github.com/lestrrat/go-jsschema) +[![GoDoc](https://godoc.org/github.com/lestrrat-go/jsschema?status.svg)](https://godoc.org/github.com/lestrrat-go/jsschema) JSON Schema for Go @@ -14,8 +14,8 @@ package schema_test import ( "log" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" ) func Example() { @@ -47,13 +47,13 @@ This packages parses a JSON Schema file, and allows you to inspect, modify the schema, but does nothing more. If you want to validate using the JSON Schema that you read using this package, -look at [go-jsval](https://github.com/lestrrat/go-jsval), which allows you to +look at [go-jsval](https://github.com/lestrrat-go/jsval), which allows you to generate validators, so that you don't have to dynamically read in the JSON schema for each instance of your program. In the same lines, this package does not really care about loading external schemas from various locations (it's just easier to just gather all the schemas -in your local system). It *is* possible to do this via [go-jsref](https://github.com/lestrrat/go-jsref) +in your local system). It *is* possible to do this via [go-jsref](https://github.com/lestrrat-go/jsref) if you really want to do it. 
# BENCHMARKS @@ -86,7 +86,7 @@ PASS | Name | Notes | |:--------------------------------------------------------:|:---------------------------------| -| [go-jsval](https://github.com/lestrrat/go-jsval) | Validator generator | -| [go-jshschema](https://github.com/lestrrat/go-jshschema) | JSON Hyper Schema implementation | -| [go-jsref](https://github.com/lestrrat/go-jsref) | JSON Reference implementation | -| [go-jspointer](https://github.com/lestrrat/go-jspointer) | JSON Pointer implementations | +| [go-jsval](https://github.com/lestrrat-go/jsval) | Validator generator | +| [go-jshschema](https://github.com/lestrrat-go/jshschema) | JSON Hyper Schema implementation | +| [go-jsref](https://github.com/lestrrat-go/jsref) | JSON Reference implementation | +| [go-jspointer](https://github.com/lestrrat-go/jspointer) | JSON Pointer implementations | diff --git a/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go b/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go index a774668f870a..df973b226d86 100644 --- a/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/benchmark_test.go @@ -6,8 +6,8 @@ import ( "strings" "testing" - schema "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + schema "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" "github.com/xeipuuv/gojsonschema" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go b/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go index e31a94aa335c..d1e39f97388f 100644 --- a/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go +++ b/vendor/github.com/lestrrat/go-jsschema/cmd/jsschema/jsschema.go @@ -7,8 +7,8 @@ import ( "log" "os" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" ) func main() { diff --git 
a/vendor/github.com/lestrrat/go-jsschema/interface.go b/vendor/github.com/lestrrat/go-jsschema/interface.go index 416cfa7a7b09..d47412b31531 100644 --- a/vendor/github.com/lestrrat/go-jsschema/interface.go +++ b/vendor/github.com/lestrrat/go-jsschema/interface.go @@ -5,7 +5,7 @@ import ( "regexp" "sync" - "github.com/lestrrat/go-jsref" + "github.com/lestrrat-go/jsref" ) const ( diff --git a/vendor/github.com/lestrrat/go-jsschema/marshal.go b/vendor/github.com/lestrrat/go-jsschema/marshal.go index b08bc1c7d618..7a409cfb58e1 100644 --- a/vendor/github.com/lestrrat/go-jsschema/marshal.go +++ b/vendor/github.com/lestrrat/go-jsschema/marshal.go @@ -5,7 +5,7 @@ import ( "regexp" "strconv" - "github.com/lestrrat/go-pdebug" + "github.com/lestrrat-go/pdebug" "github.com/pkg/errors" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/marshal_test.go b/vendor/github.com/lestrrat/go-jsschema/marshal_test.go index fc811d131711..584928bfed30 100644 --- a/vendor/github.com/lestrrat/go-jsschema/marshal_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/marshal_test.go @@ -5,8 +5,8 @@ import ( "strings" "testing" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/schema.go b/vendor/github.com/lestrrat/go-jsschema/schema.go index 8d2b2f9f7f37..706ed7091292 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema.go @@ -8,9 +8,9 @@ import ( "reflect" "strconv" - "github.com/lestrrat/go-jsref" - "github.com/lestrrat/go-jsref/provider" - "github.com/lestrrat/go-pdebug" + "github.com/lestrrat-go/jsref" + "github.com/lestrrat-go/jsref/provider" + "github.com/lestrrat-go/pdebug" "github.com/pkg/errors" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go 
b/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go index 9f69f228035a..cd1dab975a18 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema_example_test.go @@ -3,8 +3,8 @@ package schema_test import ( "log" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" ) func Example() { diff --git a/vendor/github.com/lestrrat/go-jsschema/schema_test.go b/vendor/github.com/lestrrat/go-jsschema/schema_test.go index 8c55f728f739..499e8d11b26d 100644 --- a/vendor/github.com/lestrrat/go-jsschema/schema_test.go +++ b/vendor/github.com/lestrrat/go-jsschema/schema_test.go @@ -8,8 +8,8 @@ import ( "strings" "testing" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsschema/validator" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsschema/validator" "github.com/stretchr/testify/assert" ) diff --git a/vendor/github.com/lestrrat/go-jsschema/validator/validator.go b/vendor/github.com/lestrrat/go-jsschema/validator/validator.go index b1930617733d..365f01427593 100644 --- a/vendor/github.com/lestrrat/go-jsschema/validator/validator.go +++ b/vendor/github.com/lestrrat/go-jsschema/validator/validator.go @@ -3,9 +3,9 @@ package validator import ( "sync" - "github.com/lestrrat/go-jsschema" - "github.com/lestrrat/go-jsval" - "github.com/lestrrat/go-jsval/builder" + "github.com/lestrrat-go/jsschema" + "github.com/lestrrat-go/jsval" + "github.com/lestrrat-go/jsval/builder" "github.com/pkg/errors" ) diff --git a/vendor/github.com/spf13/viper/.gitignore b/vendor/github.com/spf13/viper/.gitignore index 836562412fe8..01b5c44b9c16 100644 --- a/vendor/github.com/spf13/viper/.gitignore +++ b/vendor/github.com/spf13/viper/.gitignore @@ -21,3 +21,9 @@ _testmain.go *.exe *.test +*.bench + +.vscode + +# exclude dependencies in the `/vendor` folder +vendor diff --git 
a/vendor/github.com/spf13/viper/.travis.yml b/vendor/github.com/spf13/viper/.travis.yml index e793edbab85f..bb83057ba459 100644 --- a/vendor/github.com/spf13/viper/.travis.yml +++ b/vendor/github.com/spf13/viper/.travis.yml @@ -1,10 +1,13 @@ go_import_path: github.com/spf13/viper language: go + +env: + global: + - GO111MODULE="on" + go: - - 1.5.4 - - 1.6.3 - - 1.7 + - 1.11.x - tip os: @@ -18,6 +21,7 @@ matrix: script: - go install ./... + - diff -u <(echo -n) <(gofmt -d .) - go test -v ./... after_success: diff --git a/vendor/github.com/spf13/viper/README.md b/vendor/github.com/spf13/viper/README.md index 4ebd8ddb8155..0208eac84dae 100644 --- a/vendor/github.com/spf13/viper/README.md +++ b/vendor/github.com/spf13/viper/README.md @@ -6,18 +6,19 @@ Many Go projects are built using Viper including: * [Hugo](http://gohugo.io) * [EMC RexRay](http://rexray.readthedocs.org/en/stable/) -* [Imgur's Incus](https://github.com/Imgur/incus) +* [Imgur’s Incus](https://github.com/Imgur/incus) * [Nanobox](https://github.com/nanobox-io/nanobox)/[Nanopack](https://github.com/nanopack) * [Docker Notary](https://github.com/docker/Notary) * [BloomApi](https://www.bloomapi.com/) -* [doctl(https://github.com/digitalocean/doctl) +* [doctl](https://github.com/digitalocean/doctl) +* [Clairctl](https://github.com/jgsqware/clairctl) - [![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Build Status](https://travis-ci.org/spf13/viper.svg)](https://travis-ci.org/spf13/viper) [![Join the chat at https://gitter.im/spf13/viper](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/spf13/viper?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) 
[![GoDoc](https://godoc.org/github.com/spf13/viper?status.svg)](https://godoc.org/github.com/spf13/viper) ## What is Viper? -Viper is a complete configuration solution for go applications including 12 factor apps. It is designed +Viper is a complete configuration solution for Go applications including 12-Factor apps. It is designed to work within an application, and can handle all types of configuration needs and formats. It supports: @@ -68,7 +69,7 @@ Viper configuration keys are case insensitive. ### Establishing Defaults A good configuration system will support default values. A default value is not -required for a key, but it's useful in the event that a key hasn’t been set via +required for a key, but it’s useful in the event that a key hasn’t been set via config file, environment variable, remote configuration or flag. Examples: @@ -110,16 +111,16 @@ Gone are the days of needing to restart a server to have a config take effect, viper powered applications can read an update to a config file while running and not miss a beat. -Simply tell the viper instance to watchConfig. +Simply tell the viper instance to watchConfig. Optionally you can provide a function for Viper to run each time a change occurs. **Make sure you add all of the configPaths prior to calling `WatchConfig()`** ```go - viper.WatchConfig() - viper.OnConfigChange(func(e fsnotify.Event) { - fmt.Println("Config file changed:", e.Name) - }) +viper.WatchConfig() +viper.OnConfigChange(func(e fsnotify.Event) { + fmt.Println("Config file changed:", e.Name) +}) ``` ### Reading Config from io.Reader @@ -178,19 +179,20 @@ viper.GetBool("verbose") // true ### Working with Environment Variables Viper has full support for environment variables. This enables 12 factor -applications out of the box. There are four methods that exist to aid working +applications out of the box. There are five methods that exist to aid working with ENV: * `AutomaticEnv()` * `BindEnv(string...) 
: error` * `SetEnvPrefix(string)` - * `SetEnvReplacer(string...) *strings.Replacer` + * `SetEnvKeyReplacer(string...) *strings.Replacer` + * `AllowEmptyEnvVar(bool)` _When working with ENV variables, it’s important to recognize that Viper treats ENV variables as case sensitive._ Viper provides a mechanism to try to ensure that ENV variables are unique. By -using `SetEnvPrefix`, you can tell Viper to use add a prefix while reading from +using `SetEnvPrefix`, you can tell Viper to use a prefix while reading from the environment variables. Both `BindEnv` and `AutomaticEnv` will use this prefix. @@ -211,11 +213,15 @@ time a `viper.Get` request is made. It will apply the following rules. It will check for a environment variable with a name matching the key uppercased and prefixed with the `EnvPrefix` if set. -`SetEnvReplacer` allows you to use a `strings.Replacer` object to rewrite Env +`SetEnvKeyReplacer` allows you to use a `strings.Replacer` object to rewrite Env keys to an extent. This is useful if you want to use `-` or something in your `Get()` calls, but want your environmental variables to use `_` delimiters. An example of using it can be found in `viper_test.go`. +By default empty environment variables are considered unset and will fall back to +the next configuration source. To treat empty environment variables as set, use +the `AllowEmptyEnv` method. + #### Env example ```go @@ -236,7 +242,7 @@ Like `BindEnv`, the value is not set when the binding method is called, but when it is accessed. This means you can bind as early as you want, even in an `init()` function. -The `BindPFlag()` method provides this functionality. +For individual flags, the `BindPFlag()` method provides this functionality. 
Example: @@ -245,6 +251,19 @@ serverCmd.Flags().Int("port", 1138, "Port to run Application server on") viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) ``` +You can also bind an existing set of pflags (pflag.FlagSet): + +Example: + +```go +pflag.Int("flagname", 1234, "help message for flagname") + +pflag.Parse() +viper.BindPFlags(pflag.CommandLine) + +i := viper.GetInt("flagname") // retrieve values from viper instead of pflag +``` + The use of [pflag](https://github.com/spf13/pflag/) in Viper does not preclude the use of other packages that use the [flag](https://golang.org/pkg/flag/) package from the standard library. The pflag package can handle the flags @@ -263,24 +282,32 @@ import ( ) func main() { + + // using standard library "flag" package + flag.Int("flagname", 1234, "help message for flagname") + pflag.CommandLine.AddGoFlagSet(flag.CommandLine) pflag.Parse() - ... + viper.BindPFlags(pflag.CommandLine) + + i := viper.GetInt("flagname") // retrieve value from viper + + ... } ``` #### Flag interfaces -Viper provides two Go interfaces to bind other flag systems if you don't use `Pflags`. +Viper provides two Go interfaces to bind other flag systems if you don’t use `Pflags`. `FlagValue` represents a single flag. 
This is a very simple example on how to implement this interface: ```go type myFlag struct {} -func (f myFlag) IsChanged() { return false } -func (f myFlag) Name() { return "my-flag-name" } -func (f myFlag) ValueString() { return "my-flag-value" } -func (f myFlag) ValueType() { return "string" } +func (f myFlag) HasChanged() bool { return false } +func (f myFlag) Name() string { return "my-flag-name" } +func (f myFlag) ValueString() string { return "my-flag-value" } +func (f myFlag) ValueType() string { return "string" } ``` Once your flag implements this interface, you can simply tell Viper to bind it: @@ -298,7 +325,7 @@ type myFlagSet struct { func (f myFlagSet) VisitAll(fn func(FlagValue)) { for _, flag := range flags { - fn(flag) + fn(flag) } } ``` @@ -351,12 +378,33 @@ how to use Consul. ### Remote Key/Value Store Example - Unencrypted +#### etcd ```go viper.AddRemoteProvider("etcd", "http://127.0.0.1:4001","/config/hugo.json") viper.SetConfigType("json") // because there is no file extension in a stream of bytes, supported extensions are "json", "toml", "yaml", "yml", "properties", "props", "prop" err := viper.ReadRemoteConfig() ``` +#### Consul +You need to set a key to Consul key/value storage with JSON value containing your desired config. +For example, create a Consul key/value store key `MY_CONSUL_KEY` with value: + +```json +{ + "port": 8080, + "hostname": "myhostname.com" +} +``` + +```go +viper.AddRemoteProvider("consul", "localhost:8500", "MY_CONSUL_KEY") +viper.SetConfigType("json") // Need to explicitly set this to json +err := viper.ReadRemoteConfig() + +fmt.Println(viper.Get("port")) // 8080 +fmt.Println(viper.Get("hostname")) // myhostname.com +``` + ### Remote Key/Value Store Example - Encrypted ```go @@ -401,7 +449,7 @@ go func(){ ## Getting Values From Viper -In Viper, there are a few ways to get a value depending on the value's type. +In Viper, there are a few ways to get a value depending on the value’s type. 
The following functions and methods exist: * `Get(key string) : interface{}` @@ -415,6 +463,7 @@ The following functions and methods exist: * `GetTime(key string) : time.Time` * `GetDuration(key string) : time.Duration` * `IsSet(key string) : bool` + * `AllSettings() : map[string]interface{}` One important thing to recognize is that each Get function will return a zero value if it’s not found. To check if a given key exists, the `IsSet()` method @@ -458,16 +507,17 @@ Viper can access a nested field by passing a `.` delimited path of keys: GetString("datastore.metric.host") // (returns "127.0.0.1") ``` -This obeys the precedence rules established above; the search for the root key -(in this example, `datastore`) will cascade through the remaining configuration -registries until found. The search for the sub-keys (`metric` and `host`), -however, will not. +This obeys the precedence rules established above; the search for the path +will cascade through the remaining configuration registries until found. -For example, if the `metric` key was not defined in the configuration loaded -from file, but was defined in the defaults, Viper would return the zero value. +For example, given this configuration file, both `datastore.metric.host` and +`datastore.metric.port` are already defined (and may be overridden). If in addition +`datastore.metric.protocol` was defined in the defaults, Viper would also find it. -On the other hand, if the primary key was not defined, Viper would go through -the remaining registries looking for it. +However, if `datastore.metric` was overridden (by a flag, an environment variable, +the `Set()` method, …) with an immediate value, then all sub-keys of +`datastore.metric` become undefined, they are “shadowed” by the higher-priority +configuration level. Lastly, if there exists a key that matches the delimited key path, its value will be returned instead. E.g. @@ -491,7 +541,7 @@ will be returned instead. E.g. 
} } -GetString("datastore.metric.host") //returns "0.0.0.0" +GetString("datastore.metric.host") // returns "0.0.0.0" ``` ### Extract sub-tree @@ -530,7 +580,7 @@ func NewCache(cfg *Viper) *Cache {...} ``` which creates a cache based on config information formatted as `subv`. -Now it's easy to create these 2 caches separately as: +Now it’s easy to create these 2 caches separately as: ```go cfg1 := viper.Sub("app.cache1") @@ -567,6 +617,27 @@ if err != nil { } ``` +### Marshalling to string + +You may need to marhsal all the settings held in viper into a string rather than write them to a file. +You can use your favorite format's marshaller with the config returned by `AllSettings()`. + +```go +import ( + yaml "gopkg.in/yaml.v2" + // ... +) + +func yamlStringSettings() string { + c := viper.AllSettings() + bs, err := yaml.Marshal(c) + if err != nil { + t.Fatalf("unable to marshal config to YAML: %v", err) + } + return string(bs) +} +``` + ## Viper or Vipers? Viper comes ready to use out of the box. There is no configuration or @@ -574,13 +645,13 @@ initialization needed to begin using Viper. Since most applications will want to use a single central repository for their configuration, the viper package provides this. It is similar to a singleton. -In all of the examples above, they demonstrate using viper in it's singleton +In all of the examples above, they demonstrate using viper in its singleton style approach. ### Working with multiple vipers You can also create many different vipers for use in your application. Each will -have it’s own unique set of configurations and values. Each can read from a +have its own unique set of configurations and values. Each can read from a different config file, key value store, etc. All of the functions that viper package supports are mirrored as methods on a viper. 
diff --git a/vendor/github.com/spf13/viper/flags_test.go b/vendor/github.com/spf13/viper/flags_test.go index 5489278dbdac..0b976b60523c 100644 --- a/vendor/github.com/spf13/viper/flags_test.go +++ b/vendor/github.com/spf13/viper/flags_test.go @@ -22,7 +22,7 @@ func TestBindFlagValueSet(t *testing.T) { "endpoint": "/public", } - for name, _ := range testValues { + for name := range testValues { testValues[name] = flagSet.String(name, "", "test") } @@ -62,5 +62,4 @@ func TestBindFlagValue(t *testing.T) { flag.Changed = true //hack for pflag usage assert.Equal(t, "testing_mutate", Get("testvalue")) - } diff --git a/vendor/github.com/spf13/viper/go.mod b/vendor/github.com/spf13/viper/go.mod new file mode 100644 index 000000000000..86e801c150b9 --- /dev/null +++ b/vendor/github.com/spf13/viper/go.mod @@ -0,0 +1,24 @@ +module github.com/spf13/viper + +require ( + github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6 // indirect + github.com/coreos/etcd v3.3.10+incompatible // indirect + github.com/coreos/go-etcd v2.0.0+incompatible // indirect + github.com/coreos/go-semver v0.2.0 // indirect + github.com/fsnotify/fsnotify v1.4.7 + github.com/hashicorp/hcl v1.0.0 + github.com/magiconair/properties v1.8.0 + github.com/mitchellh/mapstructure v1.1.2 + github.com/pelletier/go-toml v1.2.0 + github.com/spf13/afero v1.1.2 + github.com/spf13/cast v1.3.0 + github.com/spf13/jwalterweatherman v1.0.0 + github.com/spf13/pflag v1.0.3 + github.com/stretchr/testify v1.2.2 + github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8 // indirect + github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77 + golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9 // indirect + golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a // indirect + golang.org/x/text v0.3.0 // indirect + gopkg.in/yaml.v2 v2.2.2 +) diff --git a/vendor/github.com/spf13/viper/go.sum b/vendor/github.com/spf13/viper/go.sum new file mode 100644 index 000000000000..5c9fb7d54471 --- /dev/null +++ 
b/vendor/github.com/spf13/viper/go.sum @@ -0,0 +1,35 @@ +github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= +github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= +github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= +github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/hashicorp/hcl v1.0.0 h1:0Anlzjpi4vEasTeNFn2mLJgTSwt0+6sfsiTG8qcWGx4= +github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ= +github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY= +github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= +github.com/mitchellh/mapstructure v1.1.2 h1:fmNYVwqnSfB9mZU6OS2O6GsXM+wcskZDuKQzvN1EDeE= +github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= +github.com/pelletier/go-toml v1.2.0 h1:T5zMGML61Wp+FlcbWjRDT7yAxhJNAiPPLOFECq181zc= +github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/spf13/afero v1.1.2 h1:m8/z1t7/fwjysjQRYbP0RD+bUIF/8tJwPdEZsI83ACI= +github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B0CQ= +github.com/spf13/cast v1.3.0 h1:oget//CVOEoFewqQxwr0Ej5yjygnqGkvggSE/gB35Q8= +github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/jwalterweatherman v1.0.0 h1:XHEdyB+EcvlqZamSM4ZOMGlc93t6AcsBEu9Gc1vn7yk= 
+github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= +github.com/spf13/pflag v1.0.3 h1:zPAT6CGy6wXeQ7NtTnaTerfKOsV6V6F8agHXFiazDkg= +github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= +github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= +golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a h1:1n5lsVfiQW3yfsRGu98756EH1YthsFqr/5mxHduZW2A= +golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/vendor/github.com/spf13/viper/nohup.out b/vendor/github.com/spf13/viper/nohup.out deleted file mode 100644 index 8973bf27b59d..000000000000 --- a/vendor/github.com/spf13/viper/nohup.out +++ /dev/null @@ -1 +0,0 @@ -QProcess::start: Process is already running diff --git a/vendor/github.com/spf13/viper/overrides_test.go b/vendor/github.com/spf13/viper/overrides_test.go new file mode 100644 index 000000000000..dd2aa9b0dbdb --- /dev/null +++ b/vendor/github.com/spf13/viper/overrides_test.go @@ -0,0 +1,173 @@ +package viper + +import ( + "fmt" + "strings" + "testing" + + "github.com/spf13/cast" + "github.com/stretchr/testify/assert" +) + +type 
layer int + +const ( + defaultLayer layer = iota + 1 + overrideLayer +) + +func TestNestedOverrides(t *testing.T) { + assert := assert.New(t) + var v *Viper + + // Case 0: value overridden by a value + overrideDefault(assert, "tom", 10, "tom", 20) // "tom" is first given 10 as default value, then overridden by 20 + override(assert, "tom", 10, "tom", 20) // "tom" is first given value 10, then overridden by 20 + overrideDefault(assert, "tom.age", 10, "tom.age", 20) + override(assert, "tom.age", 10, "tom.age", 20) + overrideDefault(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) + override(assert, "sawyer.tom.age", 10, "sawyer.tom.age", 20) + + // Case 1: key:value overridden by a value + v = overrideDefault(assert, "tom.age", 10, "tom", "boy") // "tom.age" is first given 10 as default value, then "tom" is overridden by "boy" + assert.Nil(v.Get("tom.age")) // "tom.age" should not exist anymore + v = override(assert, "tom.age", 10, "tom", "boy") + assert.Nil(v.Get("tom.age")) + + // Case 2: value overridden by a key:value + overrideDefault(assert, "tom", "boy", "tom.age", 10) // "tom" is first given "boy" as default value, then "tom" is overridden by map{"age":10} + override(assert, "tom.age", 10, "tom", "boy") + + // Case 3: key:value overridden by a key:value + v = overrideDefault(assert, "tom.size", 4, "tom.age", 10) + assert.Equal(4, v.Get("tom.size")) // value should still be reachable + v = override(assert, "tom.size", 4, "tom.age", 10) + assert.Equal(4, v.Get("tom.size")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "size"}, 4) + + // Case 4: key:value overridden by a map + v = overrideDefault(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) // "tom.size" is first given "4" as default value, then "tom" is overridden by map{"age":10} + assert.Equal(4, v.Get("tom.size")) // "tom.size" should still be reachable + assert.Equal(10, v.Get("tom.age")) // new value should be there + deepCheckValue(assert, v, overrideLayer, 
[]string{"tom", "age"}, 10) // new value should be there + v = override(assert, "tom.size", 4, "tom", map[string]interface{}{"age": 10}) + assert.Nil(v.Get("tom.size")) + assert.Equal(10, v.Get("tom.age")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, 10) + + // Case 5: array overridden by a value + overrideDefault(assert, "tom", []int{10, 20}, "tom", 30) + override(assert, "tom", []int{10, 20}, "tom", 30) + overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", 30) + override(assert, "tom.age", []int{10, 20}, "tom.age", 30) + + // Case 6: array overridden by an array + overrideDefault(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) + override(assert, "tom", []int{10, 20}, "tom", []int{30, 40}) + overrideDefault(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) + v = override(assert, "tom.age", []int{10, 20}, "tom.age", []int{30, 40}) + // explicit array merge: + s, ok := v.Get("tom.age").([]int) + if assert.True(ok, "tom[\"age\"] is not a slice") { + v.Set("tom.age", append(s, []int{50, 60}...)) + assert.Equal([]int{30, 40, 50, 60}, v.Get("tom.age")) + deepCheckValue(assert, v, overrideLayer, []string{"tom", "age"}, []int{30, 40, 50, 60}) + } +} + +func overrideDefault(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + return overrideFromLayer(defaultLayer, assert, firstPath, firstValue, secondPath, secondValue) +} +func override(assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + return overrideFromLayer(overrideLayer, assert, firstPath, firstValue, secondPath, secondValue) +} + +// overrideFromLayer performs the sequential override and low-level checks. +// +// First assignment is made on layer l for path firstPath with value firstValue, +// the second one on the override layer (i.e., with the Set() function) +// for path secondPath with value secondValue. 
+// +// firstPath and secondPath can include an arbitrary number of dots to indicate +// a nested element. +// +// After each assignment, the value is checked, retrieved both by its full path +// and by its key sequence (successive maps). +func overrideFromLayer(l layer, assert *assert.Assertions, firstPath string, firstValue interface{}, secondPath string, secondValue interface{}) *Viper { + v := New() + firstKeys := strings.Split(firstPath, v.keyDelim) + if assert == nil || + len(firstKeys) == 0 || len(firstKeys[0]) == 0 { + return v + } + + // Set and check first value + switch l { + case defaultLayer: + v.SetDefault(firstPath, firstValue) + case overrideLayer: + v.Set(firstPath, firstValue) + default: + return v + } + assert.Equal(firstValue, v.Get(firstPath)) + deepCheckValue(assert, v, l, firstKeys, firstValue) + + // Override and check new value + secondKeys := strings.Split(secondPath, v.keyDelim) + if len(secondKeys) == 0 || len(secondKeys[0]) == 0 { + return v + } + v.Set(secondPath, secondValue) + assert.Equal(secondValue, v.Get(secondPath)) + deepCheckValue(assert, v, overrideLayer, secondKeys, secondValue) + + return v +} + +// deepCheckValue checks that all given keys correspond to a valid path in the +// configuration map of the given layer, and that the final value equals the one given +func deepCheckValue(assert *assert.Assertions, v *Viper, l layer, keys []string, value interface{}) { + if assert == nil || v == nil || + len(keys) == 0 || len(keys[0]) == 0 { + return + } + + // init + var val interface{} + var ms string + switch l { + case defaultLayer: + val = v.defaults + ms = "v.defaults" + case overrideLayer: + val = v.override + ms = "v.override" + } + + // loop through map + var m map[string]interface{} + err := false + for _, k := range keys { + if val == nil { + assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) + return + } + + // deep scan of the map to get the final value + switch val.(type) { + case 
map[interface{}]interface{}: + m = cast.ToStringMap(val) + case map[string]interface{}: + m = val.(map[string]interface{}) + default: + assert.Fail(fmt.Sprintf("%s is not a map[string]interface{}", ms)) + return + } + ms = ms + "[\"" + k + "\"]" + val = m[k] + } + if !err { + assert.Equal(value, val) + } +} diff --git a/vendor/github.com/spf13/viper/remote/remote.go b/vendor/github.com/spf13/viper/remote/remote.go index faaf3b36610c..810d0702e71e 100644 --- a/vendor/github.com/spf13/viper/remote/remote.go +++ b/vendor/github.com/spf13/viper/remote/remote.go @@ -8,10 +8,11 @@ package remote import ( "bytes" - "github.com/spf13/viper" - crypt "github.com/xordataexchange/crypt/config" "io" "os" + + "github.com/spf13/viper" + crypt "github.com/xordataexchange/crypt/config" ) type remoteConfigProvider struct{} @@ -33,17 +34,45 @@ func (rc remoteConfigProvider) Watch(rp viper.RemoteProvider) (io.Reader, error) if err != nil { return nil, err } - resp := <-cm.Watch(rp.Path(), nil) - err = resp.Error + resp, err := cm.Get(rp.Path()) if err != nil { return nil, err } - return bytes.NewReader(resp.Value), nil + return bytes.NewReader(resp), nil } -func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) { +func (rc remoteConfigProvider) WatchChannel(rp viper.RemoteProvider) (<-chan *viper.RemoteResponse, chan bool) { + cm, err := getConfigManager(rp) + if err != nil { + return nil, nil + } + quit := make(chan bool) + quitwc := make(chan bool) + viperResponsCh := make(chan *viper.RemoteResponse) + cryptoResponseCh := cm.Watch(rp.Path(), quit) + // need this function to convert the Channel response form crypt.Response to viper.Response + go func(cr <-chan *crypt.Response, vr chan<- *viper.RemoteResponse, quitwc <-chan bool, quit chan<- bool) { + for { + select { + case <-quitwc: + quit <- true + return + case resp := <-cr: + vr <- &viper.RemoteResponse{ + Error: resp.Error, + Value: resp.Value, + } + + } + + } + }(cryptoResponseCh, viperResponsCh, quitwc, 
quit) + return viperResponsCh, quitwc +} + +func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) { var cm crypt.ConfigManager var err error @@ -69,7 +98,6 @@ func getConfigManager(rp viper.RemoteProvider) (crypt.ConfigManager, error) { return nil, err } return cm, nil - } func init() { diff --git a/vendor/github.com/spf13/viper/util.go b/vendor/github.com/spf13/viper/util.go index fe6cb4594631..952cad44c631 100644 --- a/vendor/github.com/spf13/viper/util.go +++ b/vendor/github.com/spf13/viper/util.go @@ -11,41 +11,80 @@ package viper import ( - "bytes" - "encoding/json" "fmt" - "io" "os" "path/filepath" "runtime" "strings" "unicode" - "github.com/hashicorp/hcl" - "github.com/magiconair/properties" - toml "github.com/pelletier/go-toml" + "github.com/spf13/afero" "github.com/spf13/cast" jww "github.com/spf13/jwalterweatherman" - "gopkg.in/yaml.v2" ) -// Denotes failing to parse configuration file. +// ConfigParseError denotes failing to parse configuration file. type ConfigParseError struct { err error } -// Returns the formatted configuration error. +// Error returns the formatted configuration error. func (pe ConfigParseError) Error() string { return fmt.Sprintf("While parsing config: %s", pe.err.Error()) } +// toCaseInsensitiveValue checks if the value is a map; +// if so, create a copy and lower-case the keys recursively. +func toCaseInsensitiveValue(value interface{}) interface{} { + switch v := value.(type) { + case map[interface{}]interface{}: + value = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + value = copyAndInsensitiviseMap(v) + } + + return value +} + +// copyAndInsensitiviseMap behaves like insensitiviseMap, but creates a copy of +// any map it makes case insensitive. 
+func copyAndInsensitiviseMap(m map[string]interface{}) map[string]interface{} { + nm := make(map[string]interface{}) + + for key, val := range m { + lkey := strings.ToLower(key) + switch v := val.(type) { + case map[interface{}]interface{}: + nm[lkey] = copyAndInsensitiviseMap(cast.ToStringMap(v)) + case map[string]interface{}: + nm[lkey] = copyAndInsensitiviseMap(v) + default: + nm[lkey] = v + } + } + + return nm +} + func insensitiviseMap(m map[string]interface{}) { for key, val := range m { + switch val.(type) { + case map[interface{}]interface{}: + // nested map: cast and recursively insensitivise + val = cast.ToStringMap(val) + insensitiviseMap(val.(map[string]interface{})) + case map[string]interface{}: + // nested map: recursively insensitivise + insensitiviseMap(val.(map[string]interface{})) + } + lower := strings.ToLower(key) if key != lower { + // remove old key (not lower-cased) delete(m, key) - m[lower] = val } + // update map + m[lower] = val } } @@ -68,16 +107,16 @@ func absPathify(inPath string) string { p, err := filepath.Abs(inPath) if err == nil { return filepath.Clean(p) - } else { - jww.ERROR.Println("Couldn't discover absolute path") - jww.ERROR.Println(err) } + + jww.ERROR.Println("Couldn't discover absolute path") + jww.ERROR.Println(err) return "" } // Check if File / Directory Exists -func exists(path string) (bool, error) { - _, err := v.fs.Stat(path) +func exists(fs afero.Fs, path string) (bool, error) { + _, err := fs.Stat(path) if err == nil { return true, nil } @@ -107,79 +146,6 @@ func userHomeDir() string { return os.Getenv("HOME") } -func findCWD() (string, error) { - serverFile, err := filepath.Abs(os.Args[0]) - - if err != nil { - return "", fmt.Errorf("Can't get absolute path for executable: %v", err) - } - - path := filepath.Dir(serverFile) - realFile, err := filepath.EvalSymlinks(serverFile) - - if err != nil { - if _, err = os.Stat(serverFile + ".exe"); err == nil { - realFile = filepath.Clean(serverFile + ".exe") - } - } - - 
if err == nil && realFile != serverFile { - path = filepath.Dir(realFile) - } - - return path, nil -} - -func unmarshallConfigReader(in io.Reader, c map[string]interface{}, configType string) error { - buf := new(bytes.Buffer) - buf.ReadFrom(in) - - switch strings.ToLower(configType) { - case "yaml", "yml": - if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "json": - if err := json.Unmarshal(buf.Bytes(), &c); err != nil { - return ConfigParseError{err} - } - - case "hcl": - obj, err := hcl.Parse(string(buf.Bytes())) - if err != nil { - return ConfigParseError{err} - } - if err = hcl.DecodeObject(&c, obj); err != nil { - return ConfigParseError{err} - } - - case "toml": - tree, err := toml.LoadReader(buf) - if err != nil { - return ConfigParseError{err} - } - tmap := tree.ToMap() - for k, v := range tmap { - c[k] = v - } - - case "properties", "props", "prop": - var p *properties.Properties - var err error - if p, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { - return ConfigParseError{err} - } - for _, key := range p.Keys() { - value, _ := p.Get(key) - c[key] = value - } - } - - insensitiviseMap(c) - return nil -} - func safeMul(a, b uint) uint { c := a * b if a > 1 && b > 1 && c/b != a { @@ -222,3 +188,34 @@ func parseSizeInBytes(sizeStr string) uint { return safeMul(uint(size), multiplier) } + +// deepSearch scans deep maps, following the key indexes listed in the +// sequence "path". +// The last value is expected to be another map, and is returned. +// +// In case intermediate keys do not exist, or map to a non-map value, +// a new map is created and inserted, and the search continues from there: +// the initial map "m" may be modified! 
+func deepSearch(m map[string]interface{}, path []string) map[string]interface{} { + for _, k := range path { + m2, ok := m[k] + if !ok { + // intermediate key does not exist + // => create it and continue from there + m3 := make(map[string]interface{}) + m[k] = m3 + m = m3 + continue + } + m3, ok := m2.(map[string]interface{}) + if !ok { + // intermediate key is a value + // => replace with a new map + m3 = make(map[string]interface{}) + m[k] = m3 + } + // continue search from here + m = m3 + } + return m +} diff --git a/vendor/github.com/spf13/viper/util_test.go b/vendor/github.com/spf13/viper/util_test.go new file mode 100644 index 000000000000..0af80bb635b7 --- /dev/null +++ b/vendor/github.com/spf13/viper/util_test.go @@ -0,0 +1,54 @@ +// Copyright © 2016 Steve Francia . +// +// Use of this source code is governed by an MIT-style +// license that can be found in the LICENSE file. + +// Viper is a application configuration system. +// It believes that applications can be configured a variety of ways +// via flags, ENVIRONMENT variables, configuration files retrieved +// from the file system, or a remote key/value store. 
+ +package viper + +import ( + "reflect" + "testing" +) + +func TestCopyAndInsensitiviseMap(t *testing.T) { + var ( + given = map[string]interface{}{ + "Foo": 32, + "Bar": map[interface{}]interface { + }{ + "ABc": "A", + "cDE": "B"}, + } + expected = map[string]interface{}{ + "foo": 32, + "bar": map[string]interface { + }{ + "abc": "A", + "cde": "B"}, + } + ) + + got := copyAndInsensitiviseMap(given) + + if !reflect.DeepEqual(got, expected) { + t.Fatalf("Got %q\nexpected\n%q", got, expected) + } + + if _, ok := given["foo"]; ok { + t.Fatal("Input map changed") + } + + if _, ok := given["bar"]; ok { + t.Fatal("Input map changed") + } + + m := given["Bar"].(map[interface{}]interface{}) + if _, ok := m["ABc"]; !ok { + t.Fatal("Input map changed") + } +} diff --git a/vendor/github.com/spf13/viper/viper.go b/vendor/github.com/spf13/viper/viper.go index f17790e7607f..7173c6e9a20c 100644 --- a/vendor/github.com/spf13/viper/viper.go +++ b/vendor/github.com/spf13/viper/viper.go @@ -21,6 +21,8 @@ package viper import ( "bytes" + "encoding/csv" + "encoding/json" "fmt" "io" "log" @@ -28,18 +30,40 @@ import ( "path/filepath" "reflect" "strings" + "sync" "time" + yaml "gopkg.in/yaml.v2" + "github.com/fsnotify/fsnotify" + "github.com/hashicorp/hcl" + "github.com/hashicorp/hcl/hcl/printer" + "github.com/magiconair/properties" "github.com/mitchellh/mapstructure" + toml "github.com/pelletier/go-toml" "github.com/spf13/afero" "github.com/spf13/cast" jww "github.com/spf13/jwalterweatherman" "github.com/spf13/pflag" ) +// ConfigMarshalError happens when failing to marshal the configuration. +type ConfigMarshalError struct { + err error +} + +// Error returns the formatted configuration error. 
+func (e ConfigMarshalError) Error() string { + return fmt.Sprintf("While marshaling config: %s", e.err.Error()) +} + var v *Viper +type RemoteResponse struct { + Value []byte + Error error +} + func init() { v = New() } @@ -47,49 +71,66 @@ func init() { type remoteConfigFactory interface { Get(rp RemoteProvider) (io.Reader, error) Watch(rp RemoteProvider) (io.Reader, error) + WatchChannel(rp RemoteProvider) (<-chan *RemoteResponse, chan bool) } // RemoteConfig is optional, see the remote package var RemoteConfig remoteConfigFactory -// Denotes encountering an unsupported +// UnsupportedConfigError denotes encountering an unsupported // configuration filetype. type UnsupportedConfigError string -// Returns the formatted configuration error. +// Error returns the formatted configuration error. func (str UnsupportedConfigError) Error() string { return fmt.Sprintf("Unsupported Config Type %q", string(str)) } -// Denotes encountering an unsupported remote -// provider. Currently only etcd and Consul are -// supported. +// UnsupportedRemoteProviderError denotes encountering an unsupported remote +// provider. Currently only etcd and Consul are supported. type UnsupportedRemoteProviderError string -// Returns the formatted remote provider error. +// Error returns the formatted remote provider error. func (str UnsupportedRemoteProviderError) Error() string { return fmt.Sprintf("Unsupported Remote Provider Type %q", string(str)) } -// Denotes encountering an error while trying to +// RemoteConfigError denotes encountering an error while trying to // pull the configuration from the remote provider. type RemoteConfigError string -// Returns the formatted remote provider error +// Error returns the formatted remote provider error func (rce RemoteConfigError) Error() string { return fmt.Sprintf("Remote Configurations Error: %s", string(rce)) } -// Denotes failing to find configuration file. +// ConfigFileNotFoundError denotes failing to find configuration file. 
type ConfigFileNotFoundError struct { name, locations string } -// Returns the formatted configuration error. +// Error returns the formatted configuration error. func (fnfe ConfigFileNotFoundError) Error() string { return fmt.Sprintf("Config File %q Not Found in %q", fnfe.name, fnfe.locations) } +// A DecoderConfigOption can be passed to viper.Unmarshal to configure +// mapstructure.DecoderConfig options +type DecoderConfigOption func(*mapstructure.DecoderConfig) + +// DecodeHook returns a DecoderConfigOption which overrides the default +// DecoderConfig.DecodeHook value, the default is: +// +// mapstructure.ComposeDecodeHookFunc( +// mapstructure.StringToTimeDurationHookFunc(), +// mapstructure.StringToSliceHookFunc(","), +// ) +func DecodeHook(hook mapstructure.DecodeHookFunc) DecoderConfigOption { + return func(c *mapstructure.DecoderConfig) { + c.DecodeHook = hook + } +} + // Viper is a prioritized configuration registry. It // maintains a set of configuration sources, fetches // values to populate those, and provides them according @@ -107,11 +148,11 @@ func (fnfe ConfigFileNotFoundError) Error() string { // Defaults : { // "secret": "", // "user": "default", -// "endpoint": "https://localhost" +// "endpoint": "https://localhost" // } // Config : { // "user": "root" -// "secret": "defaultsecret" +// "secret": "defaultsecret" // } // Env : { // "secret": "somesecretkey" @@ -146,6 +187,7 @@ type Viper struct { automaticEnvApplied bool envKeyReplacer *strings.Replacer + allowEmptyEnv bool config map[string]interface{} override map[string]interface{} @@ -156,10 +198,14 @@ type Viper struct { aliases map[string]string typeByDefValue bool + // Store read properties on the object so that we can write back in order with comments. + // This will only be used if the configuration read is a properties file. + properties *properties.Properties + onConfigChange func(fsnotify.Event) } -// Returns an initialized Viper instance. +// New returns an initialized Viper instance. 
func New() *Viper { v := new(Viper) v.keyDelim = "." @@ -182,7 +228,7 @@ func New() *Viper { // can use it in their testing as well. func Reset() { v = New() - SupportedExts = []string{"json", "toml", "yaml", "yml", "hcl"} + SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} SupportedRemoteProviders = []string{"etcd", "consul"} } @@ -220,11 +266,11 @@ type RemoteProvider interface { SecretKeyring() string } -// Universally supported extensions. -var SupportedExts []string = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} +// SupportedExts are universally supported extensions. +var SupportedExts = []string{"json", "toml", "yaml", "yml", "properties", "props", "prop", "hcl"} -// Universally supported remote providers. -var SupportedRemoteProviders []string = []string{"etcd", "consul"} +// SupportedRemoteProviders are universally supported remote providers. +var SupportedRemoteProviders = []string{"etcd", "consul"} func OnConfigChange(run func(in fsnotify.Event)) { v.OnConfigChange(run) } func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { @@ -232,46 +278,77 @@ func (v *Viper) OnConfigChange(run func(in fsnotify.Event)) { } func WatchConfig() { v.WatchConfig() } + func (v *Viper) WatchConfig() { + initWG := sync.WaitGroup{} + initWG.Add(1) go func() { watcher, err := fsnotify.NewWatcher() if err != nil { log.Fatal(err) } defer watcher.Close() - // we have to watch the entire directory to pick up renames/atomic saves in a cross-platform way - configFile := filepath.Clean(v.getConfigFile()) + filename, err := v.getConfigFile() + if err != nil { + log.Printf("error: %v\n", err) + return + } + + configFile := filepath.Clean(filename) configDir, _ := filepath.Split(configFile) + realConfigFile, _ := filepath.EvalSymlinks(filename) - done := make(chan bool) + eventsWG := sync.WaitGroup{} + eventsWG.Add(1) go func() { for { select { - case event := <-watcher.Events: - // we only care about the 
config file - if filepath.Clean(event.Name) == configFile { - if event.Op&fsnotify.Write == fsnotify.Write || event.Op&fsnotify.Create == fsnotify.Create { - err := v.ReadInConfig() - if err != nil { - log.Println("error:", err) - } + case event, ok := <-watcher.Events: + if !ok { // 'Events' channel is closed + eventsWG.Done() + return + } + currentConfigFile, _ := filepath.EvalSymlinks(filename) + // we only care about the config file with the following cases: + // 1 - if the config file was modified or created + // 2 - if the real path to the config file changed (eg: k8s ConfigMap replacement) + const writeOrCreateMask = fsnotify.Write | fsnotify.Create + if (filepath.Clean(event.Name) == configFile && + event.Op&writeOrCreateMask != 0) || + (currentConfigFile != "" && currentConfigFile != realConfigFile) { + realConfigFile = currentConfigFile + err := v.ReadInConfig() + if err != nil { + log.Printf("error reading config file: %v\n", err) + } + if v.onConfigChange != nil { v.onConfigChange(event) } + } else if filepath.Clean(event.Name) == configFile && + event.Op&fsnotify.Remove&fsnotify.Remove != 0 { + eventsWG.Done() + return + } + + case err, ok := <-watcher.Errors: + if ok { // 'Errors' channel is not closed + log.Printf("watcher error: %v\n", err) } - case err := <-watcher.Errors: - log.Println("error:", err) + eventsWG.Done() + return } } }() - watcher.Add(configDir) - <-done + initWG.Done() // done initalizing the watch in this go routine, so the parent routine can move on... + eventsWG.Wait() // now, wait for event loop to end in this go-routine... }() + initWG.Wait() // make sure that the go routine above fully ended before returning } -// Explicitly define the path, name and extension of the config file -// Viper will use this and not check any of the config paths +// SetConfigFile explicitly defines the path, name and extension of the config file. +// Viper will use this and not check any of the config paths. 
func SetConfigFile(in string) { v.SetConfigFile(in) } func (v *Viper) SetConfigFile(in string) { if in != "" { @@ -279,9 +356,9 @@ func (v *Viper) SetConfigFile(in string) { } } -// Define a prefix that ENVIRONMENT variables will use. -// E.g. if your prefix is "spf", the env registry -// will look for env. variables that start with "SPF_" +// SetEnvPrefix defines a prefix that ENVIRONMENT variables will use. +// E.g. if your prefix is "spf", the env registry will look for env +// variables that start with "SPF_". func SetEnvPrefix(in string) { v.SetEnvPrefix(in) } func (v *Viper) SetEnvPrefix(in string) { if in != "" { @@ -297,25 +374,36 @@ func (v *Viper) mergeWithEnvPrefix(in string) string { return strings.ToUpper(in) } +// AllowEmptyEnv tells Viper to consider set, +// but empty environment variables as valid values instead of falling back. +// For backward compatibility reasons this is false by default. +func AllowEmptyEnv(allowEmptyEnv bool) { v.AllowEmptyEnv(allowEmptyEnv) } +func (v *Viper) AllowEmptyEnv(allowEmptyEnv bool) { + v.allowEmptyEnv = allowEmptyEnv +} + // TODO: should getEnv logic be moved into find(). Can generalize the use of // rewriting keys many things, Ex: Get('someKey') -> some_key -// (cammel case to snake case for JSON keys perhaps) +// (camel case to snake case for JSON keys perhaps) -// getEnv s a wrapper around os.Getenv which replaces characters in the original -// key. This allows env vars which have different keys then the config object -// keys -func (v *Viper) getEnv(key string) string { +// getEnv is a wrapper around os.Getenv which replaces characters in the original +// key. This allows env vars which have different keys than the config object +// keys. 
+func (v *Viper) getEnv(key string) (string, bool) { if v.envKeyReplacer != nil { key = v.envKeyReplacer.Replace(key) } - return os.Getenv(key) + + val, ok := os.LookupEnv(key) + + return val, ok && (v.allowEmptyEnv || val != "") } -// Return the file used to populate the config registry +// ConfigFileUsed returns the file used to populate the config registry. func ConfigFileUsed() string { return v.ConfigFileUsed() } func (v *Viper) ConfigFileUsed() string { return v.configFile } -// Add a path for Viper to search for the config file in. +// AddConfigPath adds a path for Viper to search for the config file in. // Can be called multiple times to define multiple search paths. func AddConfigPath(in string) { v.AddConfigPath(in) } func (v *Viper) AddConfigPath(in string) { @@ -399,23 +487,22 @@ func (v *Viper) providerPathExists(p *defaultRemoteProvider) bool { return false } +// searchMap recursively searches for a value for path in source map. +// Returns nil if not found. +// Note: This assumes that the path entries and map keys are lower cased. 
func (v *Viper) searchMap(source map[string]interface{}, path []string) interface{} { - if len(path) == 0 { return source } - var ok bool - var next interface{} - for k, v := range source { - if strings.ToLower(k) == strings.ToLower(path[0]) { - ok = true - next = v - break + next, ok := source[path[0]] + if ok { + // Fast path + if len(path) == 1 { + return next } - } - if ok { + // Nested case switch next.(type) { case map[interface{}]interface{}: return v.searchMap(cast.ToStringMap(next), path[1:]) @@ -424,11 +511,125 @@ func (v *Viper) searchMap(source map[string]interface{}, path []string) interfac // if the type of `next` is the same as the type being asserted return v.searchMap(next.(map[string]interface{}), path[1:]) default: - return next + // got a value but nested key expected, return "nil" for not found + return nil + } + } + return nil +} + +// searchMapWithPathPrefixes recursively searches for a value for path in source map. +// +// While searchMap() considers each path element as a single map key, this +// function searches for, and prioritizes, merged path elements. +// e.g., if in the source, "foo" is defined with a sub-key "bar", and "foo.bar" +// is also defined, this latter value is returned for path ["foo", "bar"]. +// +// This should be useful only at config level (other maps may not contain dots +// in their keys). +// +// Note: This assumes that the path entries and map keys are lower cased. 
+func (v *Viper) searchMapWithPathPrefixes(source map[string]interface{}, path []string) interface{} { + if len(path) == 0 { + return source + } + + // search for path prefixes, starting from the longest one + for i := len(path); i > 0; i-- { + prefixKey := strings.ToLower(strings.Join(path[0:i], v.keyDelim)) + + next, ok := source[prefixKey] + if ok { + // Fast path + if i == len(path) { + return next + } + + // Nested case + var val interface{} + switch next.(type) { + case map[interface{}]interface{}: + val = v.searchMapWithPathPrefixes(cast.ToStringMap(next), path[i:]) + case map[string]interface{}: + // Type assertion is safe here since it is only reached + // if the type of `next` is the same as the type being asserted + val = v.searchMapWithPathPrefixes(next.(map[string]interface{}), path[i:]) + default: + // got a value but nested key expected, do nothing and look for next prefix + } + if val != nil { + return val + } + } + } + + // not found + return nil +} + +// isPathShadowedInDeepMap makes sure the given path is not shadowed somewhere +// on its path in the map. +// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInDeepMap(path []string, m map[string]interface{}) string { + var parentVal interface{} + for i := 1; i < len(path); i++ { + parentVal = v.searchMap(m, path[0:i]) + if parentVal == nil { + // not found, no need to add more path elements + return "" + } + switch parentVal.(type) { + case map[interface{}]interface{}: + continue + case map[string]interface{}: + continue + default: + // parentVal is a regular value which shadows "path" + return strings.Join(path[0:i], v.keyDelim) + } + } + return "" +} + +// isPathShadowedInFlatMap makes sure the given path is not shadowed somewhere +// in a sub-path of the map. 
+// e.g., if "foo.bar" has a value in the given map, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInFlatMap(path []string, mi interface{}) string { + // unify input map + var m map[string]interface{} + switch mi.(type) { + case map[string]string, map[string]FlagValue: + m = cast.ToStringMap(mi) + default: + return "" + } + + // scan paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := m[parentKey]; ok { + return parentKey } - } else { - return nil } + return "" +} + +// isPathShadowedInAutoEnv makes sure the given path is not shadowed somewhere +// in the environment, when automatic env is on. +// e.g., if "foo.bar" has a value in the environment, it “shadows” +// "foo.bar.baz" in a lower-priority map +func (v *Viper) isPathShadowedInAutoEnv(path []string) string { + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if _, ok := v.getEnv(v.mergeWithEnvPrefix(parentKey)); ok { + return parentKey + } + } + return "" } // SetTypeByDefaultValue enables or disables the inference of a key value's @@ -455,8 +656,8 @@ func GetViper() *Viper { return v } -// Viper is essentially repository for configurations -// Get can retrieve any value given the key to use +// Get can retrieve any value given the key to use. +// Get is case-insensitive for a key. // Get has the behavior of returning the value associated with the first // place from where it is set. Viper will check in the following order: // override, flag, env, config file, key/value store, default @@ -464,151 +665,134 @@ func GetViper() *Viper { // Get returns an interface. For a specific value use one of the Get____ methods. 
func Get(key string) interface{} { return v.Get(key) } func (v *Viper) Get(key string) interface{} { - path := strings.Split(key, v.keyDelim) - lcaseKey := strings.ToLower(key) val := v.find(lcaseKey) - - if val == nil { - source := v.find(strings.ToLower(path[0])) - if source != nil { - if reflect.TypeOf(source).Kind() == reflect.Map { - val = v.searchMap(cast.ToStringMap(source), path[1:]) - } - } - } - - // if no other value is returned and a flag does exist for the value, - // get the flag's value even if the flag's value has not changed - if val == nil { - if flag, exists := v.pflags[lcaseKey]; exists { - jww.TRACE.Println(key, "get pflag default", val) - switch flag.ValueType() { - case "int", "int8", "int16", "int32", "int64": - val = cast.ToInt(flag.ValueString()) - case "bool": - val = cast.ToBool(flag.ValueString()) - default: - val = flag.ValueString() - } - } - } - if val == nil { return nil } - var valType interface{} - if !v.typeByDefValue { - valType = val - } else { - defVal, defExists := v.defaults[lcaseKey] - if defExists { + if v.typeByDefValue { + // TODO(bep) this branch isn't covered by a single test. 
+ valType := val + path := strings.Split(lcaseKey, v.keyDelim) + defVal := v.searchMap(v.defaults, path) + if defVal != nil { valType = defVal - } else { - valType = val } - } - switch valType.(type) { - case bool: - return cast.ToBool(val) - case string: - return cast.ToString(val) - case int64, int32, int16, int8, int: - return cast.ToInt(val) - case float64, float32: - return cast.ToFloat64(val) - case time.Time: - return cast.ToTime(val) - case time.Duration: - return cast.ToDuration(val) - case []string: - return cast.ToStringSlice(val) + switch valType.(type) { + case bool: + return cast.ToBool(val) + case string: + return cast.ToString(val) + case int32, int16, int8, int: + return cast.ToInt(val) + case int64: + return cast.ToInt64(val) + case float64, float32: + return cast.ToFloat64(val) + case time.Time: + return cast.ToTime(val) + case time.Duration: + return cast.ToDuration(val) + case []string: + return cast.ToStringSlice(val) + } } + return val } -// Returns new Viper instance representing a sub tree of this instance +// Sub returns new Viper instance representing a sub tree of this instance. +// Sub is case-insensitive for a key. func Sub(key string) *Viper { return v.Sub(key) } func (v *Viper) Sub(key string) *Viper { subv := New() data := v.Get(key) + if data == nil { + return nil + } + if reflect.TypeOf(data).Kind() == reflect.Map { subv.config = cast.ToStringMap(data) return subv - } else { - return nil } + return nil } -// Returns the value associated with the key as a string +// GetString returns the value associated with the key as a string. func GetString(key string) string { return v.GetString(key) } func (v *Viper) GetString(key string) string { return cast.ToString(v.Get(key)) } -// Returns the value associated with the key as a boolean +// GetBool returns the value associated with the key as a boolean. 
func GetBool(key string) bool { return v.GetBool(key) } func (v *Viper) GetBool(key string) bool { return cast.ToBool(v.Get(key)) } -// Returns the value associated with the key as an integer +// GetInt returns the value associated with the key as an integer. func GetInt(key string) int { return v.GetInt(key) } func (v *Viper) GetInt(key string) int { return cast.ToInt(v.Get(key)) } -// Returns the value associated with the key as an integer +// GetInt32 returns the value associated with the key as an integer. +func GetInt32(key string) int32 { return v.GetInt32(key) } +func (v *Viper) GetInt32(key string) int32 { + return cast.ToInt32(v.Get(key)) +} + +// GetInt64 returns the value associated with the key as an integer. func GetInt64(key string) int64 { return v.GetInt64(key) } func (v *Viper) GetInt64(key string) int64 { return cast.ToInt64(v.Get(key)) } -// Returns the value associated with the key as a float64 +// GetFloat64 returns the value associated with the key as a float64. func GetFloat64(key string) float64 { return v.GetFloat64(key) } func (v *Viper) GetFloat64(key string) float64 { return cast.ToFloat64(v.Get(key)) } -// Returns the value associated with the key as time +// GetTime returns the value associated with the key as time. func GetTime(key string) time.Time { return v.GetTime(key) } func (v *Viper) GetTime(key string) time.Time { return cast.ToTime(v.Get(key)) } -// Returns the value associated with the key as a duration +// GetDuration returns the value associated with the key as a duration. func GetDuration(key string) time.Duration { return v.GetDuration(key) } func (v *Viper) GetDuration(key string) time.Duration { return cast.ToDuration(v.Get(key)) } -// Returns the value associated with the key as a slice of strings +// GetStringSlice returns the value associated with the key as a slice of strings. 
func GetStringSlice(key string) []string { return v.GetStringSlice(key) } func (v *Viper) GetStringSlice(key string) []string { return cast.ToStringSlice(v.Get(key)) } -// Returns the value associated with the key as a map of interfaces +// GetStringMap returns the value associated with the key as a map of interfaces. func GetStringMap(key string) map[string]interface{} { return v.GetStringMap(key) } func (v *Viper) GetStringMap(key string) map[string]interface{} { return cast.ToStringMap(v.Get(key)) } -// Returns the value associated with the key as a map of strings +// GetStringMapString returns the value associated with the key as a map of strings. func GetStringMapString(key string) map[string]string { return v.GetStringMapString(key) } func (v *Viper) GetStringMapString(key string) map[string]string { return cast.ToStringMapString(v.Get(key)) } -// Returns the value associated with the key as a map to a slice of strings. +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. func GetStringMapStringSlice(key string) map[string][]string { return v.GetStringMapStringSlice(key) } func (v *Viper) GetStringMapStringSlice(key string) map[string][]string { return cast.ToStringMapStringSlice(v.Get(key)) } -// Returns the size of the value associated with the given key +// GetSizeInBytes returns the size of the value associated with the given key // in bytes. func GetSizeInBytes(key string) uint { return v.GetSizeInBytes(key) } func (v *Viper) GetSizeInBytes(key string) uint { @@ -616,37 +800,55 @@ func (v *Viper) GetSizeInBytes(key string) uint { return parseSizeInBytes(sizeStr) } -// Takes a single key and unmarshals it into a Struct -func UnmarshalKey(key string, rawVal interface{}) error { return v.UnmarshalKey(key, rawVal) } -func (v *Viper) UnmarshalKey(key string, rawVal interface{}) error { - return mapstructure.Decode(v.Get(key), rawVal) +// UnmarshalKey takes a single key and unmarshals it into a Struct. 
+func UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { + return v.UnmarshalKey(key, rawVal, opts...) +} +func (v *Viper) UnmarshalKey(key string, rawVal interface{}, opts ...DecoderConfigOption) error { + err := decode(v.Get(key), defaultDecoderConfig(rawVal, opts...)) + + if err != nil { + return err + } + + return nil } -// Unmarshals the config into a Struct. Make sure that the tags +// Unmarshal unmarshals the config into a Struct. Make sure that the tags // on the fields of the structure are properly set. -func Unmarshal(rawVal interface{}) error { return v.Unmarshal(rawVal) } -func (v *Viper) Unmarshal(rawVal interface{}) error { - err := mapstructure.WeakDecode(v.AllSettings(), rawVal) +func Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { + return v.Unmarshal(rawVal, opts...) +} +func (v *Viper) Unmarshal(rawVal interface{}, opts ...DecoderConfigOption) error { + err := decode(v.AllSettings(), defaultDecoderConfig(rawVal, opts...)) if err != nil { return err } - v.insensitiviseMaps() - return nil } -// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality -// while erroring on non existing vals in the destination struct -func weakDecodeExact(input, output interface{}) error { - config := &mapstructure.DecoderConfig{ - ErrorUnused: true, +// defaultDecoderConfig returns default mapsstructure.DecoderConfig with suppot +// of time.Duration values & string slices +func defaultDecoderConfig(output interface{}, opts ...DecoderConfigOption) *mapstructure.DecoderConfig { + c := &mapstructure.DecoderConfig{ Metadata: nil, Result: output, WeaklyTypedInput: true, + DecodeHook: mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + ), + } + for _, opt := range opts { + opt(c) } + return c +} +// A wrapper around mapstructure.Decode that mimics the WeakDecode functionality +func decode(input interface{}, config 
*mapstructure.DecoderConfig) error { decoder, err := mapstructure.NewDecoder(config) if err != nil { return err @@ -654,41 +856,42 @@ func weakDecodeExact(input, output interface{}) error { return decoder.Decode(input) } -// Unmarshals the config into a Struct, erroring if a field is non-existant -// in the destination struct +// UnmarshalExact unmarshals the config into a Struct, erroring if a field is nonexistent +// in the destination struct. func (v *Viper) UnmarshalExact(rawVal interface{}) error { - err := weakDecodeExact(v.AllSettings(), rawVal) + config := defaultDecoderConfig(rawVal) + config.ErrorUnused = true + + err := decode(v.AllSettings(), config) if err != nil { return err } - v.insensitiviseMaps() - return nil } -// Bind a full flag set to the configuration, using each flag's long +// BindPFlags binds a full flag set to the configuration, using each flag's long // name as the config key. -func BindPFlags(flags *pflag.FlagSet) (err error) { return v.BindPFlags(flags) } -func (v *Viper) BindPFlags(flags *pflag.FlagSet) (err error) { +func BindPFlags(flags *pflag.FlagSet) error { return v.BindPFlags(flags) } +func (v *Viper) BindPFlags(flags *pflag.FlagSet) error { return v.BindFlagValues(pflagValueSet{flags}) } -// Bind a specific key to a pflag (as used by cobra). +// BindPFlag binds a specific key to a pflag (as used by cobra). 
// Example (where serverCmd is a Cobra instance): // // serverCmd.Flags().Int("port", 1138, "Port to run Application server on") // Viper.BindPFlag("port", serverCmd.Flags().Lookup("port")) // -func BindPFlag(key string, flag *pflag.Flag) (err error) { return v.BindPFlag(key, flag) } -func (v *Viper) BindPFlag(key string, flag *pflag.Flag) (err error) { +func BindPFlag(key string, flag *pflag.Flag) error { return v.BindPFlag(key, flag) } +func (v *Viper) BindPFlag(key string, flag *pflag.Flag) error { return v.BindFlagValue(key, pflagValue{flag}) } -// Bind a full FlagValue set to the configuration, using each flag's long +// BindFlagValues binds a full FlagValue set to the configuration, using each flag's long // name as the config key. -func BindFlagValues(flags FlagValueSet) (err error) { return v.BindFlagValues(flags) } +func BindFlagValues(flags FlagValueSet) error { return v.BindFlagValues(flags) } func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { flags.VisitAll(func(flag FlagValue) { if err = v.BindFlagValue(flag.Name(), flag); err != nil { @@ -698,14 +901,14 @@ func (v *Viper) BindFlagValues(flags FlagValueSet) (err error) { return nil } -// Bind a specific key to a FlagValue. -// Example(where serverCmd is a Cobra instance): +// BindFlagValue binds a specific key to a FlagValue. 
+// Example (where serverCmd is a Cobra instance): // // serverCmd.Flags().Int("port", 1138, "Port to run Application server on") // Viper.BindFlagValue("port", serverCmd.Flags().Lookup("port")) // -func BindFlagValue(key string, flag FlagValue) (err error) { return v.BindFlagValue(key, flag) } -func (v *Viper) BindFlagValue(key string, flag FlagValue) (err error) { +func BindFlagValue(key string, flag FlagValue) error { return v.BindFlagValue(key, flag) } +func (v *Viper) BindFlagValue(key string, flag FlagValue) error { if flag == nil { return fmt.Errorf("flag for %q is nil", key) } @@ -713,12 +916,12 @@ func (v *Viper) BindFlagValue(key string, flag FlagValue) (err error) { return nil } -// Binds a Viper key to a ENV variable -// ENV variables are case sensitive +// BindEnv binds a Viper key to a ENV variable. +// ENV variables are case sensitive. // If only a key is provided, it will use the env key matching the key, uppercased. // EnvPrefix will be used when set when env name is not provided. -func BindEnv(input ...string) (err error) { return v.BindEnv(input...) } -func (v *Viper) BindEnv(input ...string) (err error) { +func BindEnv(input ...string) error { return v.BindEnv(input...) } +func (v *Viper) BindEnv(input ...string) error { var key, envkey string if len(input) == 0 { return fmt.Errorf("BindEnv missing key to bind to") @@ -737,113 +940,149 @@ func (v *Viper) BindEnv(input ...string) (err error) { return nil } -// Given a key, find the value +// Given a key, find the value. // Viper will check in the following order: -// flag, env, config file, key/value store, default -// Viper will check to see if an alias exists first -func (v *Viper) find(key string) interface{} { - var val interface{} - var exists bool +// flag, env, config file, key/value store, default. +// Viper will check to see if an alias exists first. +// Note: this assumes a lower-cased key given. 
+func (v *Viper) find(lcaseKey string) interface{} { + + var ( + val interface{} + exists bool + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + ) + + // compute the path through the nested maps to the nested value + if nested && v.isPathShadowedInDeepMap(path, castMapStringToMapInterface(v.aliases)) != "" { + return nil + } // if the requested key is an alias, then return the proper key - key = v.realKey(key) + lcaseKey = v.realKey(lcaseKey) + path = strings.Split(lcaseKey, v.keyDelim) + nested = len(path) > 1 + + // Set() override first + val = v.searchMap(v.override, path) + if val != nil { + return val + } + if nested && v.isPathShadowedInDeepMap(path, v.override) != "" { + return nil + } - // PFlag Override first - flag, exists := v.pflags[key] + // PFlag override next + flag, exists := v.pflags[lcaseKey] if exists && flag.HasChanged() { - jww.TRACE.Println(key, "found in override (via pflag):", flag.ValueString()) switch flag.ValueType() { case "int", "int8", "int16", "int32", "int64": return cast.ToInt(flag.ValueString()) case "bool": return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res default: return flag.ValueString() } } - - val, exists = v.override[key] - if exists { - jww.TRACE.Println(key, "found in override:", val) - return val + if nested && v.isPathShadowedInFlatMap(path, v.pflags) != "" { + return nil } + // Env override next if v.automaticEnvApplied { // even if it hasn't been registered, if automaticEnv is used, // check any Get request - if val = v.getEnv(v.mergeWithEnvPrefix(key)); val != "" { - jww.TRACE.Println(key, "found in environment with val:", val) + if val, ok := v.getEnv(v.mergeWithEnvPrefix(lcaseKey)); ok { return val } + if nested && v.isPathShadowedInAutoEnv(path) != "" { + return nil + } } - - envkey, exists := v.env[key] + envkey, exists := v.env[lcaseKey] if exists { - 
jww.TRACE.Println(key, "registered as env var", envkey) - if val = v.getEnv(envkey); val != "" { - jww.TRACE.Println(envkey, "found in environment with val:", val) + if val, ok := v.getEnv(envkey); ok { return val - } else { - jww.TRACE.Println(envkey, "env value unset:") } } + if nested && v.isPathShadowedInFlatMap(path, v.env) != "" { + return nil + } - val, exists = v.config[key] - if exists { - jww.TRACE.Println(key, "found in config:", val) + // Config file next + val = v.searchMapWithPathPrefixes(v.config, path) + if val != nil { return val } - - // Test for nested config parameter - if strings.Contains(key, v.keyDelim) { - path := strings.Split(key, v.keyDelim) - - source := v.find(path[0]) - if source != nil { - if reflect.TypeOf(source).Kind() == reflect.Map { - val := v.searchMap(cast.ToStringMap(source), path[1:]) - jww.TRACE.Println(key, "found in nested config:", val) - return val - } - } + if nested && v.isPathShadowedInDeepMap(path, v.config) != "" { + return nil } - val, exists = v.kvstore[key] - if exists { - jww.TRACE.Println(key, "found in key/value store:", val) + // K/V store next + val = v.searchMap(v.kvstore, path) + if val != nil { return val } + if nested && v.isPathShadowedInDeepMap(path, v.kvstore) != "" { + return nil + } - val, exists = v.defaults[key] - if exists { - jww.TRACE.Println(key, "found in defaults:", val) + // Default next + val = v.searchMap(v.defaults, path) + if val != nil { return val } + if nested && v.isPathShadowedInDeepMap(path, v.defaults) != "" { + return nil + } + + // last chance: if no other value is returned and a flag does exist for the value, + // get the flag's value even if the flag's value has not changed + if flag, exists := v.pflags[lcaseKey]; exists { + switch flag.ValueType() { + case "int", "int8", "int16", "int32", "int64": + return cast.ToInt(flag.ValueString()) + case "bool": + return cast.ToBool(flag.ValueString()) + case "stringSlice": + s := strings.TrimPrefix(flag.ValueString(), "[") + s = 
strings.TrimSuffix(s, "]") + res, _ := readAsCSV(s) + return res + default: + return flag.ValueString() + } + } + // last item, no need to check shadowing return nil } -// Check to see if the key has been set in any of the data locations +func readAsCSV(val string) ([]string, error) { + if val == "" { + return []string{}, nil + } + stringReader := strings.NewReader(val) + csvReader := csv.NewReader(stringReader) + return csvReader.Read() +} + +// IsSet checks to see if the key has been set in any of the data locations. +// IsSet is case-insensitive for a key. func IsSet(key string) bool { return v.IsSet(key) } func (v *Viper) IsSet(key string) bool { - path := strings.Split(key, v.keyDelim) - lcaseKey := strings.ToLower(key) val := v.find(lcaseKey) - - if val == nil { - source := v.find(strings.ToLower(path[0])) - if source != nil { - if reflect.TypeOf(source).Kind() == reflect.Map { - val = v.searchMap(cast.ToStringMap(source), path[1:]) - } - } - } - return val != nil } -// Have Viper check ENV variables for all +// AutomaticEnv has Viper check ENV variables for all. // keys set in config, default & flags func AutomaticEnv() { v.AutomaticEnv() } func (v *Viper) AutomaticEnv() { @@ -902,12 +1141,11 @@ func (v *Viper) realKey(key string) string { if exists { jww.DEBUG.Println("Alias", key, "to", newkey) return v.realKey(newkey) - } else { - return key } + return key } -// Check to see if the given key (or an alias) is in the config file +// InConfig checks to see if the given key (or an alias) is in the config file. func InConfig(key string) bool { return v.InConfig(key) } func (v *Viper) InConfig(key string) bool { // if the requested key is an alias, then return the proper key @@ -917,53 +1155,86 @@ func (v *Viper) InConfig(key string) bool { return exists } -// Set the default value for this key. +// SetDefault sets the default value for this key. +// SetDefault is case-insensitive for a key. 
// Default only used when no value is provided by the user via flag, config or ENV. func SetDefault(key string, value interface{}) { v.SetDefault(key, value) } func (v *Viper) SetDefault(key string, value interface{}) { // If alias passed in, then set the proper default key = v.realKey(strings.ToLower(key)) - v.defaults[key] = value + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.defaults, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value } -// Sets the value for the key in the override regiser. +// Set sets the value for the key in the override register. +// Set is case-insensitive for a key. // Will be used instead of values obtained via -// flags, config file, ENV, default, or key/value store +// flags, config file, ENV, default, or key/value store. func Set(key string, value interface{}) { v.Set(key, value) } func (v *Viper) Set(key string, value interface{}) { // If alias passed in, then set the proper override key = v.realKey(strings.ToLower(key)) - v.override[key] = value + value = toCaseInsensitiveValue(value) + + path := strings.Split(key, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(v.override, path[0:len(path)-1]) + + // set innermost value + deepestMap[lastKey] = value } -// Viper will discover and load the configuration file from disk +// ReadInConfig will discover and load the configuration file from disk // and key/value stores, searching in one of the defined paths. 
func ReadInConfig() error { return v.ReadInConfig() } func (v *Viper) ReadInConfig() error { jww.INFO.Println("Attempting to read in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + if !stringInSlice(v.getConfigType(), SupportedExts) { return UnsupportedConfigError(v.getConfigType()) } - file, err := afero.ReadFile(v.fs, v.getConfigFile()) + jww.DEBUG.Println("Reading file: ", filename) + file, err := afero.ReadFile(v.fs, filename) if err != nil { return err } - v.config = make(map[string]interface{}) + config := make(map[string]interface{}) - return v.unmarshalReader(bytes.NewReader(file), v.config) + err = v.unmarshalReader(bytes.NewReader(file), config) + if err != nil { + return err + } + + v.config = config + return nil } // MergeInConfig merges a new configuration with an existing config. func MergeInConfig() error { return v.MergeInConfig() } func (v *Viper) MergeInConfig() error { jww.INFO.Println("Attempting to merge in config file") + filename, err := v.getConfigFile() + if err != nil { + return err + } + if !stringInSlice(v.getConfigType(), SupportedExts) { return UnsupportedConfigError(v.getConfigType()) } - file, err := afero.ReadFile(v.fs, v.getConfigFile()) + file, err := afero.ReadFile(v.fs, filename) if err != nil { return err } @@ -971,7 +1242,7 @@ func (v *Viper) MergeInConfig() error { return v.MergeConfig(bytes.NewReader(file)) } -// Viper will read a configuration file, setting existing keys to nil if the +// ReadConfig will read a configuration file, setting existing keys to nil if the // key does not exist in the file. func ReadConfig(in io.Reader) error { return v.ReadConfig(in) } func (v *Viper) ReadConfig(in io.Reader) error { @@ -982,17 +1253,214 @@ func (v *Viper) ReadConfig(in io.Reader) error { // MergeConfig merges a new configuration with an existing config. 
func MergeConfig(in io.Reader) error { return v.MergeConfig(in) } func (v *Viper) MergeConfig(in io.Reader) error { - if v.config == nil { - v.config = make(map[string]interface{}) - } cfg := make(map[string]interface{}) if err := v.unmarshalReader(in, cfg); err != nil { return err } + return v.MergeConfigMap(cfg) +} + +// MergeConfigMap merges the configuration from the map given with an existing config. +// Note that the map given may be modified. +func MergeConfigMap(cfg map[string]interface{}) error { return v.MergeConfigMap(cfg) } +func (v *Viper) MergeConfigMap(cfg map[string]interface{}) error { + if v.config == nil { + v.config = make(map[string]interface{}) + } + insensitiviseMap(cfg) mergeMaps(cfg, v.config, nil) return nil } +// WriteConfig writes the current configuration to a file. +func WriteConfig() error { return v.WriteConfig() } +func (v *Viper) WriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, true) +} + +// SafeWriteConfig writes current configuration to file only if the file does not exist. +func SafeWriteConfig() error { return v.SafeWriteConfig() } +func (v *Viper) SafeWriteConfig() error { + filename, err := v.getConfigFile() + if err != nil { + return err + } + return v.writeConfig(filename, false) +} + +// WriteConfigAs writes current configuration to a given filename. +func WriteConfigAs(filename string) error { return v.WriteConfigAs(filename) } +func (v *Viper) WriteConfigAs(filename string) error { + return v.writeConfig(filename, true) +} + +// SafeWriteConfigAs writes current configuration to a given filename if it does not exist. 
+func SafeWriteConfigAs(filename string) error { return v.SafeWriteConfigAs(filename) } +func (v *Viper) SafeWriteConfigAs(filename string) error { + return v.writeConfig(filename, false) +} + +func writeConfig(filename string, force bool) error { return v.writeConfig(filename, force) } +func (v *Viper) writeConfig(filename string, force bool) error { + jww.INFO.Println("Attempting to write configuration to file.") + ext := filepath.Ext(filename) + if len(ext) <= 1 { + return fmt.Errorf("Filename: %s requires valid extension.", filename) + } + configType := ext[1:] + if !stringInSlice(configType, SupportedExts) { + return UnsupportedConfigError(configType) + } + if v.config == nil { + v.config = make(map[string]interface{}) + } + var flags int + if force == true { + flags = os.O_CREATE | os.O_TRUNC | os.O_WRONLY + } else { + if _, err := os.Stat(filename); os.IsNotExist(err) { + flags = os.O_WRONLY + } else { + return fmt.Errorf("File: %s exists. Use WriteConfig to overwrite.", filename) + } + } + f, err := v.fs.OpenFile(filename, flags, os.FileMode(0644)) + if err != nil { + return err + } + return v.marshalWriter(f, configType) +} + +// Unmarshal a Reader into a map. +// Should probably be an unexported function. 
+func unmarshalReader(in io.Reader, c map[string]interface{}) error { + return v.unmarshalReader(in, c) +} +func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { + buf := new(bytes.Buffer) + buf.ReadFrom(in) + + switch strings.ToLower(v.getConfigType()) { + case "yaml", "yml": + if err := yaml.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "json": + if err := json.Unmarshal(buf.Bytes(), &c); err != nil { + return ConfigParseError{err} + } + + case "hcl": + obj, err := hcl.Parse(string(buf.Bytes())) + if err != nil { + return ConfigParseError{err} + } + if err = hcl.DecodeObject(&c, obj); err != nil { + return ConfigParseError{err} + } + + case "toml": + tree, err := toml.LoadReader(buf) + if err != nil { + return ConfigParseError{err} + } + tmap := tree.ToMap() + for k, v := range tmap { + c[k] = v + } + + case "properties", "props", "prop": + v.properties = properties.NewProperties() + var err error + if v.properties, err = properties.Load(buf.Bytes(), properties.UTF8); err != nil { + return ConfigParseError{err} + } + for _, key := range v.properties.Keys() { + value, _ := v.properties.Get(key) + // recursively build nested maps + path := strings.Split(key, ".") + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(c, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value + } + } + + insensitiviseMap(c) + return nil +} + +// Marshal a map into Writer. 
+func marshalWriter(f afero.File, configType string) error { + return v.marshalWriter(f, configType) +} +func (v *Viper) marshalWriter(f afero.File, configType string) error { + c := v.AllSettings() + switch configType { + case "json": + b, err := json.MarshalIndent(c, "", " ") + if err != nil { + return ConfigMarshalError{err} + } + _, err = f.WriteString(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + + case "hcl": + b, err := json.Marshal(c) + ast, err := hcl.Parse(string(b)) + if err != nil { + return ConfigMarshalError{err} + } + err = printer.Fprint(f, ast.Node) + if err != nil { + return ConfigMarshalError{err} + } + + case "prop", "props", "properties": + if v.properties == nil { + v.properties = properties.NewProperties() + } + p := v.properties + for _, key := range v.AllKeys() { + _, _, err := p.Set(key, v.GetString(key)) + if err != nil { + return ConfigMarshalError{err} + } + } + _, err := p.WriteComment(f, "#", properties.UTF8) + if err != nil { + return ConfigMarshalError{err} + } + + case "toml": + t, err := toml.TreeFromMap(c) + if err != nil { + return ConfigMarshalError{err} + } + s := t.String() + if _, err := f.WriteString(s); err != nil { + return ConfigMarshalError{err} + } + + case "yaml", "yml": + b, err := yaml.Marshal(c) + if err != nil { + return ConfigMarshalError{err} + } + if _, err = f.WriteString(string(b)); err != nil { + return ConfigMarshalError{err} + } + } + return nil +} + func keyExists(k string, m map[string]interface{}) string { lk := strings.ToLower(k) for mk := range m { @@ -1013,6 +1481,22 @@ func castToMapStringInterface( return tgt } +func castMapStringToMapInterface(src map[string]string) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + +func castMapFlagToMapInterface(src map[string]FlagValue) map[string]interface{} { + tgt := map[string]interface{}{} + for k, v := range src { + tgt[k] = v + } + return tgt +} + // mergeMaps 
merges two maps. The `itgt` parameter is for handling go-yaml's // insistence on parsing nested structures as `map[interface{}]interface{}` // instead of using a `string` as the key for nest structures beyond one level @@ -1073,50 +1557,23 @@ func mergeMaps( } } -// func ReadBufConfig(buf *bytes.Buffer) error { return v.ReadBufConfig(buf) } -// func (v *Viper) ReadBufConfig(buf *bytes.Buffer) error { -// v.config = make(map[string]interface{}) -// return v.unmarshalReader(buf, v.config) -// } - -// Attempts to get configuration from a remote source +// ReadRemoteConfig attempts to get configuration from a remote source // and read it in the remote configuration registry. func ReadRemoteConfig() error { return v.ReadRemoteConfig() } func (v *Viper) ReadRemoteConfig() error { - err := v.getKeyValueConfig() - if err != nil { - return err - } - return nil + return v.getKeyValueConfig() } func WatchRemoteConfig() error { return v.WatchRemoteConfig() } func (v *Viper) WatchRemoteConfig() error { - err := v.watchKeyValueConfig() - if err != nil { - return err - } - return nil + return v.watchKeyValueConfig() } -// Unmarshall a Reader into a map -// Should probably be an unexported function -func unmarshalReader(in io.Reader, c map[string]interface{}) error { - return v.unmarshalReader(in, c) +func (v *Viper) WatchRemoteConfigOnChannel() error { + return v.watchKeyValueConfigOnChannel() } -func (v *Viper) unmarshalReader(in io.Reader, c map[string]interface{}) error { - return unmarshallConfigReader(in, c, v.getConfigType()) -} - -func (v *Viper) insensitiviseMaps() { - insensitiviseMap(v.config) - insensitiviseMap(v.defaults) - insensitiviseMap(v.override) - insensitiviseMap(v.kvstore) -} - -// retrieve the first found remote configuration +// Retrieve the first found remote configuration. 
func (v *Viper) getKeyValueConfig() error { if RemoteConfig == nil { return RemoteConfigError("Enable the remote features by doing a blank import of the viper/remote package: '_ github.com/spf13/viper/remote'") @@ -1133,8 +1590,7 @@ func (v *Viper) getKeyValueConfig() error { return RemoteConfigError("No Files Found") } -func (v *Viper) getRemoteConfig(provider *defaultRemoteProvider) (map[string]interface{}, error) { - +func (v *Viper) getRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { reader, err := RemoteConfig.Get(provider) if err != nil { return nil, err @@ -1143,7 +1599,24 @@ func (v *Viper) getRemoteConfig(provider *defaultRemoteProvider) (map[string]int return v.kvstore, err } -// retrieve the first found remote configuration +// Retrieve the first found remote configuration. +func (v *Viper) watchKeyValueConfigOnChannel() error { + for _, rp := range v.remoteProviders { + respc, _ := RemoteConfig.WatchChannel(rp) + //Todo: Add quit channel + go func(rc <-chan *RemoteResponse) { + for { + b := <-rc + reader := bytes.NewReader(b.Value) + v.unmarshalReader(reader, v.kvstore) + } + }(respc) + return nil + } + return RemoteConfigError("No Files Found") +} + +// Retrieve the first found remote configuration. 
func (v *Viper) watchKeyValueConfig() error { for _, rp := range v.remoteProviders { val, err := v.watchRemoteConfig(rp) @@ -1156,7 +1629,7 @@ func (v *Viper) watchKeyValueConfig() error { return RemoteConfigError("No Files Found") } -func (v *Viper) watchRemoteConfig(provider *defaultRemoteProvider) (map[string]interface{}, error) { +func (v *Viper) watchRemoteConfig(provider RemoteProvider) (map[string]interface{}, error) { reader, err := RemoteConfig.Watch(provider) if err != nil { return nil, err @@ -1165,65 +1638,115 @@ func (v *Viper) watchRemoteConfig(provider *defaultRemoteProvider) (map[string]i return v.kvstore, err } -// Return all keys regardless where they are set +// AllKeys returns all keys holding a value, regardless of where they are set. +// Nested keys are returned with a v.keyDelim (= ".") separator func AllKeys() []string { return v.AllKeys() } func (v *Viper) AllKeys() []string { - m := map[string]struct{}{} - - for key, _ := range v.defaults { - m[strings.ToLower(key)] = struct{}{} - } - - for key, _ := range v.pflags { - m[strings.ToLower(key)] = struct{}{} - } - - for key, _ := range v.env { - m[strings.ToLower(key)] = struct{}{} - } - - for key, _ := range v.config { - m[strings.ToLower(key)] = struct{}{} + m := map[string]bool{} + // add all paths, by order of descending priority to ensure correct shadowing + m = v.flattenAndMergeMap(m, castMapStringToMapInterface(v.aliases), "") + m = v.flattenAndMergeMap(m, v.override, "") + m = v.mergeFlatMap(m, castMapFlagToMapInterface(v.pflags)) + m = v.mergeFlatMap(m, castMapStringToMapInterface(v.env)) + m = v.flattenAndMergeMap(m, v.config, "") + m = v.flattenAndMergeMap(m, v.kvstore, "") + m = v.flattenAndMergeMap(m, v.defaults, "") + + // convert set of paths to list + a := []string{} + for x := range m { + a = append(a, x) } + return a +} - for key, _ := range v.kvstore { - m[strings.ToLower(key)] = struct{}{} +// flattenAndMergeMap recursively flattens the given map into a map[string]bool +// 
of key paths (used as a set, easier to manipulate than a []string): +// - each path is merged into a single key string, delimited with v.keyDelim (= ".") +// - if a path is shadowed by an earlier value in the initial shadow map, +// it is skipped. +// The resulting set of paths is merged to the given shadow set at the same time. +func (v *Viper) flattenAndMergeMap(shadow map[string]bool, m map[string]interface{}, prefix string) map[string]bool { + if shadow != nil && prefix != "" && shadow[prefix] { + // prefix is shadowed => nothing more to flatten + return shadow } - - for key, _ := range v.override { - m[strings.ToLower(key)] = struct{}{} + if shadow == nil { + shadow = make(map[string]bool) } - for key, _ := range v.aliases { - m[strings.ToLower(key)] = struct{}{} + var m2 map[string]interface{} + if prefix != "" { + prefix += v.keyDelim } - - a := []string{} - for x, _ := range m { - a = append(a, x) + for k, val := range m { + fullKey := prefix + k + switch val.(type) { + case map[string]interface{}: + m2 = val.(map[string]interface{}) + case map[interface{}]interface{}: + m2 = cast.ToStringMap(val) + default: + // immediate value + shadow[strings.ToLower(fullKey)] = true + continue + } + // recursively merge to shadow map + shadow = v.flattenAndMergeMap(shadow, m2, fullKey) + } + return shadow +} + +// mergeFlatMap merges the given maps, excluding values of the second map +// shadowed by values from the first map. 
+func (v *Viper) mergeFlatMap(shadow map[string]bool, m map[string]interface{}) map[string]bool { + // scan keys +outer: + for k, _ := range m { + path := strings.Split(k, v.keyDelim) + // scan intermediate paths + var parentKey string + for i := 1; i < len(path); i++ { + parentKey = strings.Join(path[0:i], v.keyDelim) + if shadow[parentKey] { + // path is shadowed, continue + continue outer + } + } + // add key + shadow[strings.ToLower(k)] = true } - - return a + return shadow } -// Return all settings as a map[string]interface{} +// AllSettings merges all settings and returns them as a map[string]interface{}. func AllSettings() map[string]interface{} { return v.AllSettings() } func (v *Viper) AllSettings() map[string]interface{} { m := map[string]interface{}{} - for _, x := range v.AllKeys() { - m[x] = v.Get(x) + // start from the list of keys, and construct the map one value at a time + for _, k := range v.AllKeys() { + value := v.Get(k) + if value == nil { + // should not happen, since AllKeys() returns only keys holding a value, + // check just in case anything changes + continue + } + path := strings.Split(k, v.keyDelim) + lastKey := strings.ToLower(path[len(path)-1]) + deepestMap := deepSearch(m, path[0:len(path)-1]) + // set innermost value + deepestMap[lastKey] = value } - return m } -// Se the filesystem to use to read configuration. +// SetFs sets the filesystem to use to read configuration. func SetFs(fs afero.Fs) { v.SetFs(fs) } func (v *Viper) SetFs(fs afero.Fs) { v.fs = fs } -// Name for the config file. +// SetConfigName sets name for the config file. // Does not include extension. func SetConfigName(in string) { v.SetConfigName(in) } func (v *Viper) SetConfigName(in string) { @@ -1233,7 +1756,7 @@ func (v *Viper) SetConfigName(in string) { } } -// Sets the type of the configuration returned by the +// SetConfigType sets the type of the configuration returned by the // remote source, e.g. "json". 
func SetConfigType(in string) { v.SetConfigType(in) } func (v *Viper) SetConfigType(in string) { @@ -1247,36 +1770,36 @@ func (v *Viper) getConfigType() string { return v.configType } - cf := v.getConfigFile() + cf, err := v.getConfigFile() + if err != nil { + return "" + } + ext := filepath.Ext(cf) if len(ext) > 1 { return ext[1:] - } else { - return "" } -} -func (v *Viper) getConfigFile() string { - // if explicitly set, then use it - if v.configFile != "" { - return v.configFile - } + return "" +} - cf, err := v.findConfigFile() - if err != nil { - return "" +func (v *Viper) getConfigFile() (string, error) { + if v.configFile == "" { + cf, err := v.findConfigFile() + if err != nil { + return "", err + } + v.configFile = cf } - - v.configFile = cf - return v.getConfigFile() + return v.configFile, nil } func (v *Viper) searchInPath(in string) (filename string) { jww.DEBUG.Println("Searching for config in ", in) for _, ext := range SupportedExts { jww.DEBUG.Println("Checking for", filepath.Join(in, v.configName+"."+ext)) - if b, _ := exists(filepath.Join(in, v.configName+"."+ext)); b { + if b, _ := exists(v.fs, filepath.Join(in, v.configName+"."+ext)); b { jww.DEBUG.Println("Found: ", filepath.Join(in, v.configName+"."+ext)) return filepath.Join(in, v.configName+"."+ext) } @@ -1285,10 +1808,9 @@ func (v *Viper) searchInPath(in string) (filename string) { return "" } -// search all configPaths for any config file. -// Returns the first path that exists (and is a config file) +// Search all configPaths for any config file. +// Returns the first path that exists (and is a config file). 
func (v *Viper) findConfigFile() (string, error) { - jww.INFO.Println("Searching for config in ", v.configPaths) for _, cp := range v.configPaths { @@ -1300,11 +1822,10 @@ func (v *Viper) findConfigFile() (string, error) { return "", ConfigFileNotFoundError{v.configName, fmt.Sprintf("%s", v.configPaths)} } -// Prints all configuration registries for debugging +// Debug prints all configuration registries for debugging // purposes. func Debug() { v.Debug() } func (v *Viper) Debug() { - fmt.Println("Aliases:") fmt.Printf("Aliases:\n%#v\n", v.aliases) fmt.Printf("Override:\n%#v\n", v.override) fmt.Printf("PFlags:\n%#v\n", v.pflags) diff --git a/vendor/github.com/spf13/viper/viper_test.go b/vendor/github.com/spf13/viper/viper_test.go index 0c0c7e59bd41..f4263d386ba0 100644 --- a/vendor/github.com/spf13/viper/viper_test.go +++ b/vendor/github.com/spf13/viper/viper_test.go @@ -7,18 +7,29 @@ package viper import ( "bytes" + "encoding/json" "fmt" + "io" "io/ioutil" "os" + "os/exec" "path" "reflect" + "runtime" "sort" "strings" + "sync" "testing" "time" + "github.com/fsnotify/fsnotify" + "github.com/mitchellh/mapstructure" + "github.com/spf13/afero" + "github.com/spf13/cast" + "github.com/spf13/pflag" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) var yamlExample = []byte(`Hacker: true @@ -104,8 +115,9 @@ var remoteExample = []byte(`{ func initConfigs() { Reset() + var r io.Reader SetConfigType("yaml") - r := bytes.NewReader(yamlExample) + r = bytes.NewReader(yamlExample) unmarshalReader(r, v.config) SetConfigType("json") @@ -129,12 +141,18 @@ func initConfigs() { unmarshalReader(remote, v.kvstore) } -func initYAML() { +func initConfig(typ, config string) { Reset() - SetConfigType("yaml") - r := bytes.NewReader(yamlExample) + SetConfigType(typ) + r := strings.NewReader(config) - unmarshalReader(r, v.config) + if err := unmarshalReader(r, v.config); err != nil { + panic(err) + } +} + +func initYAML() { + initConfig("yaml", string(yamlExample)) 
} func initJSON() { @@ -233,7 +251,9 @@ func (s *stringValue) String() string { func TestBasics(t *testing.T) { SetConfigFile("/tmp/config.yaml") - assert.Equal(t, "/tmp/config.yaml", v.getConfigFile()) + filename, err := v.getConfigFile() + assert.Equal(t, "/tmp/config.yaml", filename) + assert.NoError(t, err) } func TestDefault(t *testing.T) { @@ -250,7 +270,7 @@ func TestDefault(t *testing.T) { assert.Equal(t, "leather", Get("clothing.jacket")) } -func TestUnmarshalling(t *testing.T) { +func TestUnmarshaling(t *testing.T) { SetConfigType("yaml") r := bytes.NewReader(yamlExample) @@ -259,7 +279,7 @@ func TestUnmarshalling(t *testing.T) { assert.False(t, InConfig("state")) assert.Equal(t, "steve", Get("name")) assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, Get("hobbies")) - assert.Equal(t, map[interface{}]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[interface{}]interface{}{"size": "large"}}, Get("clothing")) + assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, Get("clothing")) assert.Equal(t, 35, Get("age")) } @@ -368,6 +388,36 @@ func TestEnv(t *testing.T) { } +func TestEmptyEnv(t *testing.T) { + initJSON() + + BindEnv("type") // Empty environment variable + BindEnv("name") // Bound, but not set environment variable + + os.Clearenv() + + os.Setenv("TYPE", "") + + assert.Equal(t, "donut", Get("type")) + assert.Equal(t, "Cake", Get("name")) +} + +func TestEmptyEnv_Allowed(t *testing.T) { + initJSON() + + AllowEmptyEnv(true) + + BindEnv("type") // Empty environment variable + BindEnv("name") // Bound, but not set environment variable + + os.Clearenv() + + os.Setenv("TYPE", "") + + assert.Equal(t, "", Get("type")) + assert.Equal(t, "Cake", Get("name")) +} + func TestEnvPrefix(t *testing.T) { initJSON() @@ -405,7 +455,7 @@ func TestAutoEnvWithPrefix(t *testing.T) { assert.Equal(t, "13", Get("bar")) } -func TestSetEnvReplacer(t *testing.T) { 
+func TestSetEnvKeyReplacer(t *testing.T) { Reset() AutomaticEnv() @@ -420,9 +470,9 @@ func TestSetEnvReplacer(t *testing.T) { func TestAllKeys(t *testing.T) { initConfigs() - ks := sort.StringSlice{"title", "newkey", "owner", "name", "beard", "ppu", "batters", "hobbies", "clothing", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"} + ks := sort.StringSlice{"title", "newkey", "owner.organization", "owner.dob", "owner.bio", "name", "beard", "ppu", "batters.batter", "hobbies", "clothing.jacket", "clothing.trousers", "clothing.pants.size", "age", "hacker", "id", "type", "eyes", "p_id", "p_ppu", "p_batters.batter.type", "p_type", "p_name", "foos"} dob, _ := time.Parse(time.RFC3339, "1979-05-27T07:32:00Z") - all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "Bio": "MongoDB Chief Developer Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[interface{}]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[interface{}]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters.batter.type": "Regular", "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}} + all := map[string]interface{}{"owner": map[string]interface{}{"organization": "MongoDB", "bio": "MongoDB Chief Developer 
Advocate & Hacker at Large", "dob": dob}, "title": "TOML Example", "ppu": 0.55, "eyes": "brown", "clothing": map[string]interface{}{"trousers": "denim", "jacket": "leather", "pants": map[string]interface{}{"size": "large"}}, "id": "0001", "batters": map[string]interface{}{"batter": []interface{}{map[string]interface{}{"type": "Regular"}, map[string]interface{}{"type": "Chocolate"}, map[string]interface{}{"type": "Blueberry"}, map[string]interface{}{"type": "Devil's Food"}}}, "hacker": true, "beard": true, "hobbies": []interface{}{"skateboarding", "snowboarding", "go"}, "age": 35, "type": "donut", "newkey": "remote", "name": "Cake", "p_id": "0001", "p_ppu": "0.55", "p_name": "Cake", "p_batters": map[string]interface{}{"batter": map[string]interface{}{"type": "Regular"}}, "p_type": "donut", "foos": []map[string]interface{}{map[string]interface{}{"foo": []map[string]interface{}{map[string]interface{}{"key": 1}, map[string]interface{}{"key": 2}, map[string]interface{}{"key": 3}, map[string]interface{}{"key": 4}}}}} var allkeys sort.StringSlice allkeys = AllKeys() @@ -433,13 +483,25 @@ func TestAllKeys(t *testing.T) { assert.Equal(t, all, AllSettings()) } -func TestCaseInSensitive(t *testing.T) { - assert.Equal(t, true, Get("hacker")) - Set("Title", "Checking Case") - assert.Equal(t, "Checking Case", Get("tItle")) +func TestAllKeysWithEnv(t *testing.T) { + v := New() + + // bind and define environment variables (including a nested one) + v.BindEnv("id") + v.BindEnv("foo.bar") + v.SetEnvKeyReplacer(strings.NewReplacer(".", "_")) + os.Setenv("ID", "13") + os.Setenv("FOO_BAR", "baz") + + expectedKeys := sort.StringSlice{"id", "foo.bar"} + expectedKeys.Sort() + keys := sort.StringSlice(v.AllKeys()) + keys.Sort() + assert.Equal(t, expectedKeys, keys) } func TestAliasesOfAliases(t *testing.T) { + Set("Title", "Checking Case") RegisterAlias("Foo", "Bar") RegisterAlias("Bar", "Title") assert.Equal(t, "Checking Case", Get("FOO")) @@ -453,10 +515,12 @@ func TestRecursiveAliases(t 
*testing.T) { func TestUnmarshal(t *testing.T) { SetDefault("port", 1313) Set("name", "Steve") + Set("duration", "1s1ms") type config struct { - Port int - Name string + Port int + Name string + Duration time.Duration } var C config @@ -466,17 +530,54 @@ func TestUnmarshal(t *testing.T) { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &C, &config{Name: "Steve", Port: 1313}) + assert.Equal(t, &config{Name: "Steve", Port: 1313, Duration: time.Second + time.Millisecond}, &C) Set("port", 1234) err = Unmarshal(&C) if err != nil { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &C, &config{Name: "Steve", Port: 1234}) + assert.Equal(t, &config{Name: "Steve", Port: 1234, Duration: time.Second + time.Millisecond}, &C) +} + +func TestUnmarshalWithDecoderOptions(t *testing.T) { + Set("credentials", "{\"foo\":\"bar\"}") + + opt := DecodeHook(mapstructure.ComposeDecodeHookFunc( + mapstructure.StringToTimeDurationHookFunc(), + mapstructure.StringToSliceHookFunc(","), + // Custom Decode Hook Function + func(rf reflect.Kind, rt reflect.Kind, data interface{}) (interface{}, error) { + if rf != reflect.String || rt != reflect.Map { + return data, nil + } + m := map[string]string{} + raw := data.(string) + if raw == "" { + return m, nil + } + return m, json.Unmarshal([]byte(raw), &m) + }, + )) + + type config struct { + Credentials map[string]string + } + + var C config + + err := Unmarshal(&C, opt) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + + assert.Equal(t, &config{ + Credentials: map[string]string{"foo": "bar"}, + }, &C) } func TestBindPFlags(t *testing.T) { + v := New() // create independent Viper object flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) var testValues = map[string]*string{ @@ -491,11 +592,11 @@ func TestBindPFlags(t *testing.T) { "endpoint": "/public", } - for name, _ := range testValues { + for name := range testValues { testValues[name] = flagSet.String(name, "", "test") } - 
err := BindPFlags(flagSet) + err := v.BindPFlags(flagSet) if err != nil { t.Fatalf("error binding flag set, %v", err) } @@ -506,11 +607,57 @@ func TestBindPFlags(t *testing.T) { }) for name, expected := range mutatedTestValues { - assert.Equal(t, Get(name), expected) + assert.Equal(t, expected, v.Get(name)) } } +func TestBindPFlagsStringSlice(t *testing.T) { + tests := []struct { + Expected []string + Value string + }{ + {nil, ""}, + {[]string{"jeden"}, "jeden"}, + {[]string{"dwa", "trzy"}, "dwa,trzy"}, + {[]string{"cztery", "piec , szesc"}, "cztery,\"piec , szesc\""}, + } + + v := New() // create independent Viper object + defaultVal := []string{"default"} + v.SetDefault("stringslice", defaultVal) + + for _, testValue := range tests { + flagSet := pflag.NewFlagSet("test", pflag.ContinueOnError) + flagSet.StringSlice("stringslice", testValue.Expected, "test") + + for _, changed := range []bool{true, false} { + flagSet.VisitAll(func(f *pflag.Flag) { + f.Value.Set(testValue.Value) + f.Changed = changed + }) + + err := v.BindPFlags(flagSet) + if err != nil { + t.Fatalf("error binding flag set, %v", err) + } + + type TestStr struct { + StringSlice []string + } + val := &TestStr{} + if err := v.Unmarshal(val); err != nil { + t.Fatalf("%+#v cannot unmarshal: %s", testValue.Value, err) + } + if changed { + assert.Equal(t, testValue.Expected, val.StringSlice) + } else { + assert.Equal(t, defaultVal, val.StringSlice) + } + } + } +} + func TestBindPFlag(t *testing.T) { var testString = "testing" var testValue = newStringValue(testString, &testString) @@ -533,7 +680,6 @@ func TestBindPFlag(t *testing.T) { } func TestBoundCaseSensitivity(t *testing.T) { - assert.Equal(t, "brown", Get("eyes")) BindEnv("eYEs", "TURTLE_EYES") @@ -630,19 +776,19 @@ func TestFindsNestedKeys(t *testing.T) { "age": 35, "owner": map[string]interface{}{ "organization": "MongoDB", - "Bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "bio": "MongoDB Chief Developer Advocate & Hacker at 
Large", "dob": dob, }, - "owner.Bio": "MongoDB Chief Developer Advocate & Hacker at Large", + "owner.bio": "MongoDB Chief Developer Advocate & Hacker at Large", "type": "donut", "id": "0001", "name": "Cake", "hacker": true, "ppu": 0.55, - "clothing": map[interface{}]interface{}{ + "clothing": map[string]interface{}{ "jacket": "leather", "trousers": "denim", - "pants": map[interface{}]interface{}{ + "pants": map[string]interface{}{ "size": "large", }, }, @@ -688,7 +834,7 @@ func TestReadBufConfig(t *testing.T) { assert.False(t, v.InConfig("state")) assert.Equal(t, "steve", v.Get("name")) assert.Equal(t, []interface{}{"skateboarding", "snowboarding", "go"}, v.Get("hobbies")) - assert.Equal(t, map[interface{}]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[interface{}]interface{}{"size": "large"}}, v.Get("clothing")) + assert.Equal(t, map[string]interface{}{"jacket": "leather", "trousers": "denim", "pants": map[string]interface{}{"size": "large"}}, v.Get("clothing")) assert.Equal(t, 35, v.Get("age")) } @@ -738,7 +884,27 @@ func TestWrongDirsSearchNotFound(t *testing.T) { v.AddConfigPath(`thispathaintthere`) err := v.ReadInConfig() - assert.Equal(t, reflect.TypeOf(UnsupportedConfigError("")), reflect.TypeOf(err)) + assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) + + // Even though config did not load and the error might have + // been ignored by the client, the default still loads + assert.Equal(t, `default`, v.GetString(`key`)) +} + +func TestWrongDirsSearchNotFoundForMerge(t *testing.T) { + + _, config, cleanup := initDirs(t) + defer cleanup() + + v := New() + v.SetConfigName(config) + v.SetDefault(`key`, `default`) + + v.AddConfigPath(`whattayoutalkingbout`) + v.AddConfigPath(`thispathaintthere`) + + err := v.MergeInConfig() + assert.Equal(t, reflect.TypeOf(ConfigFileNotFoundError{"", ""}), reflect.TypeOf(err)) // Even though config did not load and the error might have // been ignored by the client, the 
default still loads @@ -757,7 +923,194 @@ func TestSub(t *testing.T) { assert.Equal(t, v.Get("clothing.pants.size"), subv.Get("size")) subv = v.Sub("clothing.pants.size") - assert.Equal(t, subv, (*Viper)(nil)) + assert.Equal(t, (*Viper)(nil), subv) + + subv = v.Sub("missing.key") + assert.Equal(t, (*Viper)(nil), subv) +} + +var hclWriteExpected = []byte(`"foos" = { + "foo" = { + "key" = 1 + } + + "foo" = { + "key" = 2 + } + + "foo" = { + "key" = 3 + } + + "foo" = { + "key" = 4 + } +} + +"id" = "0001" + +"name" = "Cake" + +"ppu" = 0.55 + +"type" = "donut"`) + +func TestWriteConfigHCL(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("hcl") + err := v.ReadConfig(bytes.NewBuffer(hclExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.hcl"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.hcl") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, hclWriteExpected, read) +} + +var jsonWriteExpected = []byte(`{ + "batters": { + "batter": [ + { + "type": "Regular" + }, + { + "type": "Chocolate" + }, + { + "type": "Blueberry" + }, + { + "type": "Devil's Food" + } + ] + }, + "id": "0001", + "name": "Cake", + "ppu": 0.55, + "type": "donut" +}`) + +func TestWriteConfigJson(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("json") + err := v.ReadConfig(bytes.NewBuffer(jsonExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.json"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.json") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, jsonWriteExpected, read) +} + +var propertiesWriteExpected = []byte(`p_id = 0001 +p_type = donut +p_name = Cake +p_ppu = 0.55 +p_batters.batter.type = Regular +`) + +func TestWriteConfigProperties(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("properties") + err 
:= v.ReadConfig(bytes.NewBuffer(propertiesExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.properties"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.properties") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, propertiesWriteExpected, read) +} + +func TestWriteConfigTOML(t *testing.T) { + fs := afero.NewMemMapFs() + v := New() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("toml") + err := v.ReadConfig(bytes.NewBuffer(tomlExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.toml"); err != nil { + t.Fatal(err) + } + + // The TOML String method does not order the contents. + // Therefore, we must read the generated file and compare the data. + v2 := New() + v2.SetFs(fs) + v2.SetConfigName("c") + v2.SetConfigType("toml") + v2.SetConfigFile("c.toml") + err = v2.ReadInConfig() + if err != nil { + t.Fatal(err) + } + + assert.Equal(t, v.GetString("title"), v2.GetString("title")) + assert.Equal(t, v.GetString("owner.bio"), v2.GetString("owner.bio")) + assert.Equal(t, v.GetString("owner.dob"), v2.GetString("owner.dob")) + assert.Equal(t, v.GetString("owner.organization"), v2.GetString("owner.organization")) +} + +var yamlWriteExpected = []byte(`age: 35 +beard: true +clothing: + jacket: leather + pants: + size: large + trousers: denim +eyes: brown +hacker: true +hobbies: +- skateboarding +- snowboarding +- go +name: steve +`) + +func TestWriteConfigYAML(t *testing.T) { + v := New() + fs := afero.NewMemMapFs() + v.SetFs(fs) + v.SetConfigName("c") + v.SetConfigType("yaml") + err := v.ReadConfig(bytes.NewBuffer(yamlExample)) + if err != nil { + t.Fatal(err) + } + if err := v.WriteConfigAs("c.yaml"); err != nil { + t.Fatal(err) + } + read, err := afero.ReadFile(fs, "c.yaml") + if err != nil { + t.Fatal(err) + } + assert.Equal(t, yamlWriteExpected, read) } var yamlMergeExampleTgt = []byte(` @@ -792,8 +1145,8 @@ func TestMergeConfig(t *testing.T) { t.Fatalf("pop != 37890, = %d", pop) } - 
if pop := v.GetInt("hello.lagrenum"); pop != 765432101234567 { - t.Fatalf("lagrenum != 765432101234567, = %d", pop) + if pop := v.GetInt32("hello.pop"); pop != int32(37890) { + t.Fatalf("pop != 37890, = %d", pop) } if pop := v.GetInt64("hello.lagrenum"); pop != int64(765432101234567) { @@ -816,8 +1169,8 @@ func TestMergeConfig(t *testing.T) { t.Fatalf("pop != 45000, = %d", pop) } - if pop := v.GetInt("hello.lagrenum"); pop != 7654321001234567 { - t.Fatalf("lagrenum != 7654321001234567, = %d", pop) + if pop := v.GetInt32("hello.pop"); pop != int32(45000) { + t.Fatalf("pop != 45000, = %d", pop) } if pop := v.GetInt64("hello.lagrenum"); pop != int64(7654321001234567) { @@ -877,33 +1230,405 @@ func TestMergeConfigNoMerge(t *testing.T) { } } +func TestMergeConfigMap(t *testing.T) { + v := New() + v.SetConfigType("yml") + if err := v.ReadConfig(bytes.NewBuffer(yamlMergeExampleTgt)); err != nil { + t.Fatal(err) + } + + assert := func(i int) { + large := v.GetInt("hello.lagrenum") + pop := v.GetInt("hello.pop") + if large != 765432101234567 { + t.Fatal("Got large num:", large) + } + + if pop != i { + t.Fatal("Got pop:", pop) + } + } + + assert(37890) + + update := map[string]interface{}{ + "Hello": map[string]interface{}{ + "Pop": 1234, + }, + "World": map[interface{}]interface{}{ + "Rock": 345, + }, + } + + if err := v.MergeConfigMap(update); err != nil { + t.Fatal(err) + } + + if rock := v.GetInt("world.rock"); rock != 345 { + t.Fatal("Got rock:", rock) + } + + assert(1234) + +} + func TestUnmarshalingWithAliases(t *testing.T) { - SetDefault("Id", 1) - Set("name", "Steve") - Set("lastname", "Owen") + v := New() + v.SetDefault("ID", 1) + v.Set("name", "Steve") + v.Set("lastname", "Owen") - RegisterAlias("UserID", "Id") - RegisterAlias("Firstname", "name") - RegisterAlias("Surname", "lastname") + v.RegisterAlias("UserID", "ID") + v.RegisterAlias("Firstname", "name") + v.RegisterAlias("Surname", "lastname") type config struct { - Id int + ID int FirstName string Surname 
string } var C config - - err := Unmarshal(&C) + err := v.Unmarshal(&C) if err != nil { t.Fatalf("unable to decode into struct, %v", err) } - assert.Equal(t, &C, &config{Id: 1, FirstName: "Steve", Surname: "Owen"}) + assert.Equal(t, &config{ID: 1, FirstName: "Steve", Surname: "Owen"}, &C) } func TestSetConfigNameClearsFileCache(t *testing.T) { SetConfigFile("/tmp/config.yaml") SetConfigName("default") - assert.Empty(t, v.getConfigFile()) + f, err := v.getConfigFile() + if err == nil { + t.Fatalf("config file cache should have been cleared") + } + assert.Empty(t, f) +} + +func TestShadowedNestedValue(t *testing.T) { + + config := `name: steve +clothing: + jacket: leather + trousers: denim + pants: + size: large +` + initConfig("yaml", config) + + assert.Equal(t, "steve", GetString("name")) + + polyester := "polyester" + SetDefault("clothing.shirt", polyester) + SetDefault("clothing.jacket.price", 100) + + assert.Equal(t, "leather", GetString("clothing.jacket")) + assert.Nil(t, Get("clothing.jacket.price")) + assert.Equal(t, polyester, GetString("clothing.shirt")) + + clothingSettings := AllSettings()["clothing"].(map[string]interface{}) + assert.Equal(t, "leather", clothingSettings["jacket"]) + assert.Equal(t, polyester, clothingSettings["shirt"]) +} + +func TestDotParameter(t *testing.T) { + initJSON() + // shoud take precedence over batters defined in jsonExample + r := bytes.NewReader([]byte(`{ "batters.batter": [ { "type": "Small" } ] }`)) + unmarshalReader(r, v.config) + + actual := Get("batters.batter") + expected := []interface{}{map[string]interface{}{"type": "Small"}} + assert.Equal(t, expected, actual) +} + +func TestCaseInsensitive(t *testing.T) { + for _, config := range []struct { + typ string + content string + }{ + {"yaml", ` +aBcD: 1 +eF: + gH: 2 + iJk: 3 + Lm: + nO: 4 + P: + Q: 5 + R: 6 +`}, + {"json", `{ + "aBcD": 1, + "eF": { + "iJk": 3, + "Lm": { + "P": { + "Q": 5, + "R": 6 + }, + "nO": 4 + }, + "gH": 2 + } +}`}, + {"toml", `aBcD = 1 +[eF] +gH = 
2 +iJk = 3 +[eF.Lm] +nO = 4 +[eF.Lm.P] +Q = 5 +R = 6 +`}, + } { + doTestCaseInsensitive(t, config.typ, config.content) + } +} + +func TestCaseInsensitiveSet(t *testing.T) { + Reset() + m1 := map[string]interface{}{ + "Foo": 32, + "Bar": map[interface{}]interface { + }{ + "ABc": "A", + "cDE": "B"}, + } + + m2 := map[string]interface{}{ + "Foo": 52, + "Bar": map[interface{}]interface { + }{ + "bCd": "A", + "eFG": "B"}, + } + + Set("Given1", m1) + Set("Number1", 42) + + SetDefault("Given2", m2) + SetDefault("Number2", 52) + + // Verify SetDefault + if v := Get("number2"); v != 52 { + t.Fatalf("Expected 52 got %q", v) + } + + if v := Get("given2.foo"); v != 52 { + t.Fatalf("Expected 52 got %q", v) + } + + if v := Get("given2.bar.bcd"); v != "A" { + t.Fatalf("Expected A got %q", v) + } + + if _, ok := m2["Foo"]; !ok { + t.Fatal("Input map changed") + } + + // Verify Set + if v := Get("number1"); v != 42 { + t.Fatalf("Expected 42 got %q", v) + } + + if v := Get("given1.foo"); v != 32 { + t.Fatalf("Expected 32 got %q", v) + } + + if v := Get("given1.bar.abc"); v != "A" { + t.Fatalf("Expected A got %q", v) + } + + if _, ok := m1["Foo"]; !ok { + t.Fatal("Input map changed") + } +} + +func TestParseNested(t *testing.T) { + type duration struct { + Delay time.Duration + } + + type item struct { + Name string + Delay time.Duration + Nested duration + } + + config := `[[parent]] + delay="100ms" + [parent.nested] + delay="200ms" +` + initConfig("toml", config) + + var items []item + err := v.UnmarshalKey("parent", &items) + if err != nil { + t.Fatalf("unable to decode into struct, %v", err) + } + + assert.Equal(t, 1, len(items)) + assert.Equal(t, 100*time.Millisecond, items[0].Delay) + assert.Equal(t, 200*time.Millisecond, items[0].Nested.Delay) +} + +func doTestCaseInsensitive(t *testing.T, typ, config string) { + initConfig(typ, config) + Set("RfD", true) + assert.Equal(t, true, Get("rfd")) + assert.Equal(t, true, Get("rFD")) + assert.Equal(t, 1, cast.ToInt(Get("abcd"))) + 
assert.Equal(t, 1, cast.ToInt(Get("Abcd"))) + assert.Equal(t, 2, cast.ToInt(Get("ef.gh"))) + assert.Equal(t, 3, cast.ToInt(Get("ef.ijk"))) + assert.Equal(t, 4, cast.ToInt(Get("ef.lm.no"))) + assert.Equal(t, 5, cast.ToInt(Get("ef.lm.p.q"))) + +} + +func newViperWithConfigFile(t *testing.T) (*Viper, string, func()) { + watchDir, err := ioutil.TempDir("", "") + require.Nil(t, err) + configFile := path.Join(watchDir, "config.yaml") + err = ioutil.WriteFile(configFile, []byte("foo: bar\n"), 0640) + require.Nil(t, err) + cleanup := func() { + os.RemoveAll(watchDir) + } + v := New() + v.SetConfigFile(configFile) + err = v.ReadInConfig() + require.Nil(t, err) + require.Equal(t, "bar", v.Get("foo")) + return v, configFile, cleanup +} + +func newViperWithSymlinkedConfigFile(t *testing.T) (*Viper, string, string, func()) { + watchDir, err := ioutil.TempDir("", "") + require.Nil(t, err) + dataDir1 := path.Join(watchDir, "data1") + err = os.Mkdir(dataDir1, 0777) + require.Nil(t, err) + realConfigFile := path.Join(dataDir1, "config.yaml") + t.Logf("Real config file location: %s\n", realConfigFile) + err = ioutil.WriteFile(realConfigFile, []byte("foo: bar\n"), 0640) + require.Nil(t, err) + cleanup := func() { + os.RemoveAll(watchDir) + } + // now, symlink the tm `data1` dir to `data` in the baseDir + os.Symlink(dataDir1, path.Join(watchDir, "data")) + // and link the `/datadir1/config.yaml` to `/config.yaml` + configFile := path.Join(watchDir, "config.yaml") + os.Symlink(path.Join(watchDir, "data", "config.yaml"), configFile) + t.Logf("Config file location: %s\n", path.Join(watchDir, "config.yaml")) + // init Viper + v := New() + v.SetConfigFile(configFile) + err = v.ReadInConfig() + require.Nil(t, err) + require.Equal(t, "bar", v.Get("foo")) + return v, watchDir, configFile, cleanup +} + +func TestWatchFile(t *testing.T) { + if runtime.GOOS == "linux" { + // TODO(bep) FIX ME + t.Skip("Skip test on Linux ...") + } + + t.Run("file content changed", func(t *testing.T) { + // given 
a `config.yaml` file being watched + v, configFile, cleanup := newViperWithConfigFile(t) + defer cleanup() + _, err := os.Stat(configFile) + require.NoError(t, err) + t.Logf("test config file: %s\n", configFile) + wg := sync.WaitGroup{} + wg.Add(1) + v.OnConfigChange(func(in fsnotify.Event) { + t.Logf("config file changed") + wg.Done() + }) + v.WatchConfig() + // when overwriting the file and waiting for the custom change notification handler to be triggered + err = ioutil.WriteFile(configFile, []byte("foo: baz\n"), 0640) + wg.Wait() + // then the config value should have changed + require.Nil(t, err) + assert.Equal(t, "baz", v.Get("foo")) + }) + + t.Run("link to real file changed (à la Kubernetes)", func(t *testing.T) { + // skip if not executed on Linux + if runtime.GOOS != "linux" { + t.Skipf("Skipping test as symlink replacements don't work on non-linux environment...") + } + v, watchDir, _, _ := newViperWithSymlinkedConfigFile(t) + // defer cleanup() + wg := sync.WaitGroup{} + v.WatchConfig() + v.OnConfigChange(func(in fsnotify.Event) { + t.Logf("config file changed") + wg.Done() + }) + wg.Add(1) + // when link to another `config.yaml` file + dataDir2 := path.Join(watchDir, "data2") + err := os.Mkdir(dataDir2, 0777) + require.Nil(t, err) + configFile2 := path.Join(dataDir2, "config.yaml") + err = ioutil.WriteFile(configFile2, []byte("foo: baz\n"), 0640) + require.Nil(t, err) + // change the symlink using the `ln -sfn` command + err = exec.Command("ln", "-sfn", dataDir2, path.Join(watchDir, "data")).Run() + require.Nil(t, err) + wg.Wait() + // then + require.Nil(t, err) + assert.Equal(t, "baz", v.Get("foo")) + }) + +} + +func BenchmarkGetBool(b *testing.B) { + key := "BenchmarkGetBool" + v = New() + v.Set(key, true) + + for i := 0; i < b.N; i++ { + if !v.GetBool(key) { + b.Fatal("GetBool returned false") + } + } +} + +func BenchmarkGet(b *testing.B) { + key := "BenchmarkGet" + v = New() + v.Set(key, true) + + for i := 0; i < b.N; i++ { + if !v.Get(key).(bool) { 
+ b.Fatal("Get returned false") + } + } +} + +// This is the "perfect result" for the above. +func BenchmarkGetBoolFromMap(b *testing.B) { + m := make(map[string]bool) + key := "BenchmarkGetBool" + m[key] = true + + for i := 0; i < b.N; i++ { + if !m[key] { + b.Fatal("Map value was false") + } + } } diff --git a/vendor/github.com/stretchr/objx/.codeclimate.yml b/vendor/github.com/stretchr/objx/.codeclimate.yml new file mode 100644 index 000000000000..559fa399c130 --- /dev/null +++ b/vendor/github.com/stretchr/objx/.codeclimate.yml @@ -0,0 +1,21 @@ +engines: + gofmt: + enabled: true + golint: + enabled: true + govet: + enabled: true + +exclude_patterns: +- ".github/" +- "vendor/" +- "codegen/" +- "*.yml" +- ".*.yml" +- "*.md" +- "Gopkg.*" +- "doc.go" +- "type_specific_codegen_test.go" +- "type_specific_codegen.go" +- ".gitignore" +- "LICENSE" diff --git a/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md b/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md new file mode 100644 index 000000000000..5099d59c97ea --- /dev/null +++ b/vendor/github.com/stretchr/objx/.github/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at hanzei@mailbox.org. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/vendor/github.com/stretchr/objx/.github/PULL_REQUEST_TEMPLATE.md b/vendor/github.com/stretchr/objx/.github/PULL_REQUEST_TEMPLATE.md new file mode 100644 index 000000000000..d02e63b972a6 --- /dev/null +++ b/vendor/github.com/stretchr/objx/.github/PULL_REQUEST_TEMPLATE.md @@ -0,0 +1,9 @@ + + +#### Summary + + +#### Checklist +[Place an '[x]' (no spaces) in all applicable fields. Please remove unrelated fields.] 
+- [ ] Tests are passing: `task test` +- [ ] Code style is correct: `task lint` diff --git a/vendor/github.com/stretchr/objx/.gitignore b/vendor/github.com/stretchr/objx/.gitignore index 00268614f045..ea58090bd21e 100644 --- a/vendor/github.com/stretchr/objx/.gitignore +++ b/vendor/github.com/stretchr/objx/.gitignore @@ -1,22 +1,11 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a +# Binaries for programs and plugins +*.exe +*.dll *.so +*.dylib -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* +# Test binary, build with `go test -c` +*.test -_testmain.go - -*.exe +# Output of the go coverage tool, specifically when used with LiteIDE +*.out diff --git a/vendor/github.com/stretchr/objx/.travis.yml b/vendor/github.com/stretchr/objx/.travis.yml new file mode 100644 index 000000000000..cde6eb2affdf --- /dev/null +++ b/vendor/github.com/stretchr/objx/.travis.yml @@ -0,0 +1,30 @@ +language: go +go: + - "1.10.x" + - "1.11.x" + - "1.12.x" + - master + +matrix: + allow_failures: + - go: master +fast_finish: true + +env: + global: + - CC_TEST_REPORTER_ID=68feaa3410049ce73e145287acbcdacc525087a30627f96f04e579e75bd71c00 + +before_script: + - curl -L https://codeclimate.com/downloads/test-reporter/test-reporter-latest-linux-amd64 > ./cc-test-reporter + - chmod +x ./cc-test-reporter + - ./cc-test-reporter before-build + +install: + - curl -sL https://taskfile.dev/install.sh | sh + +script: + - diff -u <(echo -n) <(./bin/task lint) + - ./bin/task test-coverage + +after_script: + - ./cc-test-reporter after-build --exit-code $TRAVIS_TEST_RESULT diff --git a/vendor/github.com/stretchr/objx/LICENSE.md b/vendor/github.com/stretchr/objx/LICENSE similarity index 94% rename from vendor/github.com/stretchr/objx/LICENSE.md rename to vendor/github.com/stretchr/objx/LICENSE index 2199945813c8..44d4d9d5a7c3 100644 --- 
a/vendor/github.com/stretchr/objx/LICENSE.md +++ b/vendor/github.com/stretchr/objx/LICENSE @@ -1,8 +1,7 @@ -objx - by Mat Ryer and Tyler Bunnell - -The MIT License (MIT) +The MIT License Copyright (c) 2014 Stretchr, Inc. +Copyright (c) 2017-2018 objx contributors Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/vendor/github.com/stretchr/objx/README.md b/vendor/github.com/stretchr/objx/README.md index 4aa180687a7a..246660b21a9f 100644 --- a/vendor/github.com/stretchr/objx/README.md +++ b/vendor/github.com/stretchr/objx/README.md @@ -1,3 +1,80 @@ -# objx +# Objx +[![Build Status](https://travis-ci.org/stretchr/objx.svg?branch=master)](https://travis-ci.org/stretchr/objx) +[![Go Report Card](https://goreportcard.com/badge/github.com/stretchr/objx)](https://goreportcard.com/report/github.com/stretchr/objx) +[![Maintainability](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/maintainability)](https://codeclimate.com/github/stretchr/objx/maintainability) +[![Test Coverage](https://api.codeclimate.com/v1/badges/1d64bc6c8474c2074f2b/test_coverage)](https://codeclimate.com/github/stretchr/objx/test_coverage) +[![Sourcegraph](https://sourcegraph.com/github.com/stretchr/objx/-/badge.svg)](https://sourcegraph.com/github.com/stretchr/objx) +[![GoDoc](https://godoc.org/github.com/stretchr/objx?status.svg)](https://godoc.org/github.com/stretchr/objx) - * Jump into the [API Documentation](http://godoc.org/github.com/stretchr/objx) +Objx - Go package for dealing with maps, slices, JSON and other data. 
+ +Get started: + +- Install Objx with [one line of code](#installation), or [update it with another](#staying-up-to-date) +- Check out the API Documentation http://godoc.org/github.com/stretchr/objx + +## Overview +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes a powerful `Get` method (among others) that allows you to easily and quickly get access to data within the map, without having to worry too much about type assertions, missing data, default values etc. + +### Pattern +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, the rest will be optimistic and try to figure things out without panicking. + +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, manipulating and selecting that data. You can find out more by exploring the index below. 
+ +### Reading data +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +### Ranging +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... + } + +## Installation +To install Objx, use go get: + + go get github.com/stretchr/objx + +### Staying up to date +To update Objx to the latest version, run: + + go get -u github.com/stretchr/objx + +### Supported go versions +We support the lastest three major Go versions, which are 1.10, 1.11 and 1.12 at the moment. + +## Contributing +Please feel free to submit issues, fork the repository and send pull requests! diff --git a/vendor/github.com/stretchr/objx/Taskfile.yml b/vendor/github.com/stretchr/objx/Taskfile.yml new file mode 100644 index 000000000000..a749ac5492e5 --- /dev/null +++ b/vendor/github.com/stretchr/objx/Taskfile.yml @@ -0,0 +1,30 @@ +version: '2' + +env: + GOFLAGS: -mod=vendor + +tasks: + default: + deps: [test] + + lint: + desc: Checks code style + cmds: + - gofmt -d -s *.go + - go vet ./... + silent: true + + lint-fix: + desc: Fixes code style + cmds: + - gofmt -w -s *.go + + test: + desc: Runs go tests + cmds: + - go test -race ./... + + test-coverage: + desc: Runs go tests and calucates test coverage + cmds: + - go test -race -coverprofile=c.out ./... 
diff --git a/vendor/github.com/stretchr/objx/accessors.go b/vendor/github.com/stretchr/objx/accessors.go index 721bcac79939..676316281154 100644 --- a/vendor/github.com/stretchr/objx/accessors.go +++ b/vendor/github.com/stretchr/objx/accessors.go @@ -1,15 +1,22 @@ package objx import ( - "fmt" "regexp" "strconv" "strings" ) -// arrayAccesRegexString is the regex used to extract the array number -// from the access path -const arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` +const ( + // PathSeparator is the character used to separate the elements + // of the keypath. + // + // For example, `location.address.city` + PathSeparator string = "." + + // arrayAccesRegexString is the regex used to extract the array number + // from the access path + arrayAccesRegexString = `^(.+)\[([0-9]+)\]$` +) // arrayAccesRegex is the compiled arrayAccesRegexString var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) @@ -28,7 +35,7 @@ var arrayAccesRegex = regexp.MustCompile(arrayAccesRegexString) // // o.Get("books[1].chapters[2].title") func (m Map) Get(selector string) *Value { - rawObj := access(m, selector, nil, false, false) + rawObj := access(m, selector, nil, false) return &Value{data: rawObj} } @@ -43,137 +50,70 @@ func (m Map) Get(selector string) *Value { // // o.Set("books[1].chapters[2].title","Time to Go") func (m Map) Set(selector string, value interface{}) Map { - access(m, selector, value, true, false) + access(m, selector, value, true) return m } +// getIndex returns the index, which is hold in s by two braches. +// It also returns s withour the index part, e.g. name[1] will return (1, name). 
+// If no index is found, -1 is returned +func getIndex(s string) (int, string) { + arrayMatches := arrayAccesRegex.FindStringSubmatch(s) + if len(arrayMatches) > 0 { + // Get the key into the map + selector := arrayMatches[1] + // Get the index into the array at the key + // We know this cannt fail because arrayMatches[2] is an int for sure + index, _ := strconv.Atoi(arrayMatches[2]) + return index, selector + } + return -1, s +} + // access accesses the object using the selector and performs the // appropriate action. -func access(current, selector, value interface{}, isSet, panics bool) interface{} { - - switch selector.(type) { - case int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: +func access(current interface{}, selector string, value interface{}, isSet bool) interface{} { + selSegs := strings.SplitN(selector, PathSeparator, 2) + thisSel := selSegs[0] + index := -1 - if array, ok := current.([]interface{}); ok { - index := intFromInterface(selector) - - if index >= len(array) { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. Slice only contains %d items.", index, len(array))) - } - return nil - } - - return array[index] - } - - return nil - - case string: - - selStr := selector.(string) - selSegs := strings.SplitN(selStr, PathSeparator, 2) - thisSel := selSegs[0] - index := -1 - var err error - - // https://github.com/stretchr/objx/issues/12 - if strings.Contains(thisSel, "[") { - - arrayMatches := arrayAccesRegex.FindStringSubmatch(thisSel) - - if len(arrayMatches) > 0 { - - // Get the key into the map - thisSel = arrayMatches[1] - - // Get the index into the array at the key - index, err = strconv.Atoi(arrayMatches[2]) - - if err != nil { - // This should never happen. If it does, something has gone - // seriously wrong. Panic. - panic("objx: Array index is not an integer. 
Must use array[int].") - } + if strings.Contains(thisSel, "[") { + index, thisSel = getIndex(thisSel) + } - } + if curMap, ok := current.(Map); ok { + current = map[string]interface{}(curMap) + } + // get the object in question + switch current.(type) { + case map[string]interface{}: + curMSI := current.(map[string]interface{}) + if len(selSegs) <= 1 && isSet { + curMSI[thisSel] = value + return nil } - if curMap, ok := current.(Map); ok { - current = map[string]interface{}(curMap) + _, ok := curMSI[thisSel].(map[string]interface{}) + if (curMSI[thisSel] == nil || !ok) && index == -1 && isSet { + curMSI[thisSel] = map[string]interface{}{} } - // get the object in question - switch current.(type) { - case map[string]interface{}: - curMSI := current.(map[string]interface{}) - if len(selSegs) <= 1 && isSet { - curMSI[thisSel] = value - return nil + current = curMSI[thisSel] + default: + current = nil + } + // do we need to access the item of an array? + if index > -1 { + if array, ok := current.([]interface{}); ok { + if index < len(array) { + current = array[index] } else { - current = curMSI[thisSel] + current = nil } - default: - current = nil } - - if current == nil && panics { - panic(fmt.Sprintf("objx: '%v' invalid on object.", selector)) - } - - // do we need to access the item of an array? - if index > -1 { - if array, ok := current.([]interface{}); ok { - if index < len(array) { - current = array[index] - } else { - if panics { - panic(fmt.Sprintf("objx: Index %d is out of range. 
Slice only contains %d items.", index, len(array))) - } - current = nil - } - } - } - - if len(selSegs) > 1 { - current = access(current, selSegs[1], value, isSet, panics) - } - } - - return current - -} - -// intFromInterface converts an interface object to the largest -// representation of an unsigned integer using a type switch and -// assertions -func intFromInterface(selector interface{}) int { - var value int - switch selector.(type) { - case int: - value = selector.(int) - case int8: - value = int(selector.(int8)) - case int16: - value = int(selector.(int16)) - case int32: - value = int(selector.(int32)) - case int64: - value = int(selector.(int64)) - case uint: - value = int(selector.(uint)) - case uint8: - value = int(selector.(uint8)) - case uint16: - value = int(selector.(uint16)) - case uint32: - value = int(selector.(uint32)) - case uint64: - value = int(selector.(uint64)) - default: - panic("objx: array access argument is not an integer type (this should never happen)") + if len(selSegs) > 1 { + current = access(current, selSegs[1], value, isSet) } - - return value + return current } diff --git a/vendor/github.com/stretchr/objx/accessors_test.go b/vendor/github.com/stretchr/objx/accessors_test.go index ce5d8e4aa1a3..c8f9ec08dd5d 100644 --- a/vendor/github.com/stretchr/objx/accessors_test.go +++ b/vendor/github.com/stretchr/objx/accessors_test.go @@ -1,145 +1,191 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) func TestAccessorsAccessGetSingleField(t *testing.T) { + m := objx.Map{"name": "Tyler"} - current := map[string]interface{}{"name": "Tyler"} - assert.Equal(t, "Tyler", access(current, "name", nil, false, true)) - + assert.Equal(t, "Tyler", m.Get("name").Data()) } -func TestAccessorsAccessGetDeep(t *testing.T) { - current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} - 
assert.Equal(t, "Tyler", access(current, "name.first", nil, false, true)) - assert.Equal(t, "Bunnell", access(current, "name.last", nil, false, true)) +func TestAccessorsAccessGetSingleFieldInt(t *testing.T) { + m := objx.Map{"name": 10} + assert.Equal(t, 10, m.Get("name").Data()) } -func TestAccessorsAccessGetDeepDeep(t *testing.T) { - - current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} - assert.Equal(t, 4, access(current, "one.two.three.four", nil, false, true)) +func TestAccessorsAccessGetDeep(t *testing.T) { + m := objx.Map{ + "name": objx.Map{ + "first": "Tyler", + "last": "Bunnell", + }, + } + + assert.Equal(t, "Tyler", m.Get("name.first").Data()) + assert.Equal(t, "Bunnell", m.Get("name.last").Data()) } -func TestAccessorsAccessGetInsideArray(t *testing.T) { - - current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} - assert.Equal(t, "Tyler", access(current, "names[0].first", nil, false, true)) - assert.Equal(t, "Bunnell", access(current, "names[0].last", nil, false, true)) - assert.Equal(t, "Capitol", access(current, "names[1].first", nil, false, true)) - assert.Equal(t, "Bollocks", access(current, "names[1].last", nil, false, true)) - - assert.Panics(t, func() { - access(current, "names[2]", nil, false, true) - }) - assert.Nil(t, access(current, "names[2]", nil, false, false)) +func TestAccessorsAccessGetDeepDeep(t *testing.T) { + m := objx.Map{ + "one": objx.Map{ + "two": objx.Map{ + "three": objx.Map{ + "four": 4, + }, + }, + }, + } + + assert.Equal(t, 4, m.Get("one.two.three.four").Data()) } -func TestAccessorsAccessGetFromArrayWithInt(t *testing.T) { - - current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} - one := access(current, 0, nil, 
false, false) - two := access(current, 1, nil, false, false) - three := access(current, 2, nil, false, false) - - assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) - assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) - assert.Nil(t, three) - +func TestAccessorsAccessGetInsideArray(t *testing.T) { + m := objx.Map{ + "names": []interface{}{ + objx.Map{ + "first": "Tyler", + "last": "Bunnell", + }, + objx.Map{ + "first": "Capitol", + "last": "Bollocks", + }, + }, + } + + assert.Equal(t, "Tyler", m.Get("names[0].first").Data()) + assert.Equal(t, "Bunnell", m.Get("names[0].last").Data()) + assert.Equal(t, "Capitol", m.Get("names[1].first").Data()) + assert.Equal(t, "Bollocks", m.Get("names[1].last").Data()) + + assert.Nil(t, m.Get("names[2]").Data()) + assert.Nil(t, m.Get("names[]").Data()) + assert.Nil(t, m.Get("names1]]").Data()) + assert.Nil(t, m.Get("names[1]]").Data()) + assert.Nil(t, m.Get("names[[1]]").Data()) + assert.Nil(t, m.Get("names[[1]").Data()) + assert.Nil(t, m.Get("names[[1").Data()) } func TestAccessorsGet(t *testing.T) { + m := objx.Map{"name": "Tyler"} - current := New(map[string]interface{}{"name": "Tyler"}) - assert.Equal(t, "Tyler", current.Get("name").data) - + assert.Equal(t, "Tyler", m.Get("name").Data()) } func TestAccessorsAccessSetSingleField(t *testing.T) { + m := objx.Map{"name": "Tyler"} - current := map[string]interface{}{"name": "Tyler"} - access(current, "name", "Mat", true, false) - assert.Equal(t, current["name"], "Mat") - - access(current, "age", 29, true, true) - assert.Equal(t, current["age"], 29) + m.Set("name", "Mat") + m.Set("age", 29) + assert.Equal(t, m.Get("name").Data(), "Mat") + assert.Equal(t, m.Get("age").Data(), 29) } func TestAccessorsAccessSetSingleFieldNotExisting(t *testing.T) { + m := objx.Map{ + "first": "Tyler", + "last": "Bunnell", + } - current := map[string]interface{}{} - access(current, "name", "Mat", true, false) - assert.Equal(t, current["name"], "Mat") + m.Set("name", "Mat") + 
assert.Equal(t, m.Get("name").Data(), "Mat") } func TestAccessorsAccessSetDeep(t *testing.T) { - - current := map[string]interface{}{"name": map[string]interface{}{"first": "Tyler", "last": "Bunnell"}} - - access(current, "name.first", "Mat", true, true) - access(current, "name.last", "Ryer", true, true) - - assert.Equal(t, "Mat", access(current, "name.first", nil, false, true)) - assert.Equal(t, "Ryer", access(current, "name.last", nil, false, true)) - + m := objx.Map{ + "name": objx.Map{ + "first": "Tyler", + "last": "Bunnell", + }, + } + + m.Set("name.first", "Mat") + m.Set("name.last", "Ryer") + + assert.Equal(t, "Mat", m.Get("name.first").Data()) + assert.Equal(t, "Ryer", m.Get("name.last").Data()) } -func TestAccessorsAccessSetDeepDeep(t *testing.T) { - - current := map[string]interface{}{"one": map[string]interface{}{"two": map[string]interface{}{"three": map[string]interface{}{"four": 4}}}} - - access(current, "one.two.three.four", 5, true, true) - - assert.Equal(t, 5, access(current, "one.two.three.four", nil, false, true)) +func TestAccessorsAccessSetDeepDeep(t *testing.T) { + m := objx.Map{ + "one": objx.Map{ + "two": objx.Map{ + "three": objx.Map{ + "four": 4, + }, + }, + }, + } + + m.Set("one.two.three.four", 5) + + assert.Equal(t, 5, m.Get("one.two.three.four").Data()) } -func TestAccessorsAccessSetArray(t *testing.T) { - current := map[string]interface{}{"names": []interface{}{"Tyler"}} +func TestAccessorsAccessSetDeepDeepWithoutExisting(t *testing.T) { + m := objx.Map{} - access(current, "names[0]", "Mat", true, true) + m.Set("one.two.three.four", 5) + m.Set("one.two.three.five", 6) - assert.Equal(t, "Mat", access(current, "names[0]", nil, false, true)) + assert.Equal(t, 5, m.Get("one.two.three.four").Data()) + assert.Equal(t, 6, m.Get("one.two.three.five").Data()) -} -func TestAccessorsAccessSetInsideArray(t *testing.T) { - - current := map[string]interface{}{"names": []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, 
map[string]interface{}{"first": "Capitol", "last": "Bollocks"}}} - - access(current, "names[0].first", "Mat", true, true) - access(current, "names[0].last", "Ryer", true, true) - access(current, "names[1].first", "Captain", true, true) - access(current, "names[1].last", "Underpants", true, true) - - assert.Equal(t, "Mat", access(current, "names[0].first", nil, false, true)) - assert.Equal(t, "Ryer", access(current, "names[0].last", nil, false, true)) - assert.Equal(t, "Captain", access(current, "names[1].first", nil, false, true)) - assert.Equal(t, "Underpants", access(current, "names[1].last", nil, false, true)) + m.Set("one.two", 7) + assert.Equal(t, 7, m.Get("one.two").Data()) + assert.Equal(t, nil, m.Get("one.two.three.four").Data()) + m.Set("one.two.three", 8) + assert.Equal(t, 8, m.Get("one.two.three").Data()) } -func TestAccessorsAccessSetFromArrayWithInt(t *testing.T) { - - current := []interface{}{map[string]interface{}{"first": "Tyler", "last": "Bunnell"}, map[string]interface{}{"first": "Capitol", "last": "Bollocks"}} - one := access(current, 0, nil, false, false) - two := access(current, 1, nil, false, false) - three := access(current, 2, nil, false, false) +func TestAccessorsAccessSetArray(t *testing.T) { + m := objx.Map{ + "names": []interface{}{"Tyler"}, + } + m.Set("names[0]", "Mat") - assert.Equal(t, "Tyler", one.(map[string]interface{})["first"]) - assert.Equal(t, "Capitol", two.(map[string]interface{})["first"]) - assert.Nil(t, three) + assert.Equal(t, "Mat", m.Get("names[0]").Data()) +} +func TestAccessorsAccessSetInsideArray(t *testing.T) { + m := objx.Map{ + "names": []interface{}{ + objx.Map{ + "first": "Tyler", + "last": "Bunnell", + }, + objx.Map{ + "first": "Capitol", + "last": "Bollocks", + }, + }, + } + + m.Set("names[0].first", "Mat") + m.Set("names[0].last", "Ryer") + m.Set("names[1].first", "Captain") + m.Set("names[1].last", "Underpants") + + assert.Equal(t, "Mat", m.Get("names[0].first").Data()) + assert.Equal(t, "Ryer", 
m.Get("names[0].last").Data()) + assert.Equal(t, "Captain", m.Get("names[1].first").Data()) + assert.Equal(t, "Underpants", m.Get("names[1].last").Data()) } func TestAccessorsSet(t *testing.T) { + m := objx.Map{"name": "Tyler"} - current := New(map[string]interface{}{"name": "Tyler"}) - current.Set("name", "Mat") - assert.Equal(t, "Mat", current.Get("name").data) + m.Set("name", "Mat") + assert.Equal(t, "Mat", m.Get("name").Data()) } diff --git a/vendor/github.com/stretchr/objx/codegen/template.txt b/vendor/github.com/stretchr/objx/codegen/template.txt index b396900b8af2..af47531496a8 100644 --- a/vendor/github.com/stretchr/objx/codegen/template.txt +++ b/vendor/github.com/stretchr/objx/codegen/template.txt @@ -1,56 +1,55 @@ /* - {4} ({1} and []{1}) - -------------------------------------------------- + {4} ({1} and []{1}) */ // {4} gets the value as a {1}, returns the optionalDefault // value or a system default object if the value is the wrong type. func (v *Value) {4}(optionalDefault ...{1}) {1} { - if s, ok := v.data.({1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return {3} + if s, ok := v.data.({1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return {3} } // Must{4} gets the value as a {1}. // // Panics if the object is not a {1}. func (v *Value) Must{4}() {1} { - return v.data.({1}) + return v.data.({1}) } // {4}Slice gets the value as a []{1}, returns the optionalDefault // value or nil if the value is not a []{1}. func (v *Value) {4}Slice(optionalDefault ...[]{1}) []{1} { - if s, ok := v.data.([]{1}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil + if s, ok := v.data.([]{1}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil } // Must{4}Slice gets the value as a []{1}. // // Panics if the object is not a []{1}. 
func (v *Value) Must{4}Slice() []{1} { - return v.data.([]{1}) + return v.data.([]{1}) } // Is{4} gets whether the object contained is a {1} or not. func (v *Value) Is{4}() bool { - _, ok := v.data.({1}) - return ok + _, ok := v.data.({1}) + return ok } // Is{4}Slice gets whether the object contained is a []{1} or not. func (v *Value) Is{4}Slice() bool { - _, ok := v.data.([]{1}) - return ok + _, ok := v.data.([]{1}) + return ok } // Each{4} calls the specified callback for each object @@ -58,229 +57,68 @@ func (v *Value) Is{4}Slice() bool { // // Panics if the object is the wrong type. func (v *Value) Each{4}(callback func(int, {1}) bool) *Value { - - for index, val := range v.Must{4}Slice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - + for index, val := range v.Must{4}Slice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v } // Where{4} uses the specified decider function to select items // from the []{1}. The object contained in the result will contain // only the selected items. func (v *Value) Where{4}(decider func(int, {1}) bool) *Value { - - var selected []{1} - - v.Each{4}(func(index int, val {1}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data:selected} - + var selected []{1} + v.Each{4}(func(index int, val {1}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data:selected} } // Group{4} uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]{1}. 
func (v *Value) Group{4}(grouper func(int, {1}) string) *Value { - - groups := make(map[string][]{1}) - - v.Each{4}(func(index int, val {1}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]{1}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data:groups} - + groups := make(map[string][]{1}) + v.Each{4}(func(index int, val {1}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]{1}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data:groups} } // Replace{4} uses the specified function to replace each {1}s // by iterating each item. The data in the returned result will be a // []{1} containing the replaced items. func (v *Value) Replace{4}(replacer func(int, {1}) {1}) *Value { - - arr := v.Must{4}Slice() - replaced := make([]{1}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data:replaced} - + arr := v.Must{4}Slice() + replaced := make([]{1}, len(arr)) + v.Each{4}(func(index int, val {1}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data:replaced} } // Collect{4} uses the specified collector function to collect a value // for each of the {1}s in the slice. The data returned will be a // []interface{}. 
func (v *Value) Collect{4}(collector func(int, {1}) interface{}) *Value { - - arr := v.Must{4}Slice() - collected := make([]interface{}, len(arr)) - - v.Each{4}(func(index int, val {1}) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data:collected} -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func Test{4}(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}()) - assert.Equal(t, val, New(m).Get("value").Must{4}()) - assert.Equal(t, {1}({3}), New(m).Get("nothing").{4}()) - assert.Equal(t, val, New(m).Get("nothing").{4}({2})) - - assert.Panics(t, func() { - New(m).Get("age").Must{4}() - }) - -} - -func Test{4}Slice(t *testing.T) { - - val := {1}( {2} ) - m := map[string]interface{}{"value": []{1}{ val }, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").{4}Slice()[0]) - assert.Equal(t, val, New(m).Get("value").Must{4}Slice()[0]) - assert.Equal(t, []{1}(nil), New(m).Get("nothing").{4}Slice()) - assert.Equal(t, val, New(m).Get("nothing").{4}Slice( []{1}{ {1}({2}) } )[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").Must{4}Slice() - }) - -} - -func TestIs{4}(t *testing.T) { - - var v *Value - - v = &Value{data: {1}({2})} - assert.True(t, v.Is{4}()) - - v = &Value{data: []{1}{ {1}({2}) }} - assert.True(t, v.Is{4}Slice()) - -} - -func TestEach{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - count := 0 - replacedVals := make([]{1}, 0) - assert.Equal(t, v, v.Each{4}(func(i int, val {1}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.Must{4}Slice()[0]) - assert.Equal(t, replacedVals[1], v.Must{4}Slice()[1]) - 
assert.Equal(t, replacedVals[2], v.Must{4}Slice()[2]) - -} - -func TestWhere{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - selected := v.Where{4}(func(i int, val {1}) bool { - return i%2==0 - }).Must{4}Slice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroup{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - grouped := v.Group{4}(func(i int, val {1}) string { - return fmt.Sprintf("%v", i%2==0) - }).data.(map[string][]{1}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplace{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - rawArr := v.Must{4}Slice() - - replaced := v.Replace{4}(func(index int, val {1}) {1} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.Must{4}Slice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollect{4}(t *testing.T) { - - v := &Value{data: []{1}{ {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}) }} - - collected := v.Collect{4}(func(index int, val {1}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - + arr := v.Must{4}Slice() + collected := make([]interface{}, len(arr)) + 
v.Each{4}(func(index int, val {1}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data:collected} } diff --git a/vendor/github.com/stretchr/objx/codegen/template_test.txt b/vendor/github.com/stretchr/objx/codegen/template_test.txt new file mode 100644 index 000000000000..e42d7fb1e881 --- /dev/null +++ b/vendor/github.com/stretchr/objx/codegen/template_test.txt @@ -0,0 +1,120 @@ +/* + Tests for {4} ({1} and []{1}) +*/ +func Test{4}(t *testing.T) { + val := {1}({2}) + m := objx.Map{"value": val, "nothing": nil} + + assert.Equal(t, val, m.Get("value").{4}()) + assert.Equal(t, val, m.Get("value").Must{4}()) + assert.Equal(t, {1}({3}), m.Get("nothing").{4}()) + assert.Equal(t, val, m.Get("nothing").{4}({2})) + assert.Panics(t, func() { + m.Get("age").Must{4}() + }) +} + +func Test{4}Slice(t *testing.T) { + val := {1}({2}) + m := objx.Map{"value": []{1}{ val }, "nothing": nil} + + assert.Equal(t, val, m.Get("value").{4}Slice()[0]) + assert.Equal(t, val, m.Get("value").Must{4}Slice()[0]) + assert.Equal(t, []{1}(nil), m.Get("nothing").{4}Slice()) + assert.Equal(t, val, m.Get("nothing").{4}Slice([]{1}{{1}({2})})[0]) + assert.Panics(t, func() { + m.Get("nothing").Must{4}Slice() + }) +} + +func TestIs{4}(t *testing.T) { + m := objx.Map{"data": {1}({2})} + + assert.True(t, m.Get("data").Is{4}()) +} + +func TestIs{4}Slice(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2})}} + + assert.True(t, m.Get("data").Is{4}Slice()) +} + +func TestEach{4}(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2})}} + count := 0 + replacedVals := make([]{1}, 0) + assert.Equal(t, m.Get("data"), m.Get("data").Each{4}(func(i int, val {1}) bool { + count++ + replacedVals = append(replacedVals, val) + + // abort early + return i != 2 + })) + + assert.Equal(t, count, 3) + assert.Equal(t, replacedVals[0], m.Get("data").Must{4}Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").Must{4}Slice()[1]) + assert.Equal(t, 
replacedVals[2], m.Get("data").Must{4}Slice()[2]) +} + +func TestWhere{4}(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2})}} + + selected := m.Get("data").Where{4}(func(i int, val {1}) bool { + return i%2 == 0 + }).Must{4}Slice() + + assert.Equal(t, 3, len(selected)) +} + +func TestGroup{4}(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2})}} + + grouped := m.Get("data").Group{4}(func(i int, val {1}) string { + return fmt.Sprintf("%v", i%2==0) + }).Data().(map[string][]{1}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) +} + +func TestReplace{4}(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2})}} + rawArr := m.Get("data").Must{4}Slice() + + replaced := m.Get("data").Replace{4}(func(index int, val {1}) {1} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + replacedArr := replaced.Must{4}Slice() + + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } +} + +func TestCollect{4}(t *testing.T) { + m := objx.Map{"data": []{1}{{1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2}), {1}({2})}} + + collected := m.Get("data").Collect{4}(func(index int, val {1}) interface{} { + return index + }) + collectedArr := collected.MustInterSlice() + + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } +} diff --git 
a/vendor/github.com/stretchr/objx/codegen/types_list.txt b/vendor/github.com/stretchr/objx/codegen/types_list.txt index 069d43d8ecf8..5d1e492a04dd 100644 --- a/vendor/github.com/stretchr/objx/codegen/types_list.txt +++ b/vendor/github.com/stretchr/objx/codegen/types_list.txt @@ -1,6 +1,5 @@ Interface,interface{},"something",nil,Inter -Map,map[string]interface{},map[string]interface{}{"name":"Tyler"},nil,MSI -ObjxMap,(Map),New(1),New(nil),ObjxMap +ObjxMap,(objx.Map),objx.New(1),objx.New(nil),ObjxMap Bool,bool,true,false,Bool String,string,"hello","",Str Int,int,1,0,Int diff --git a/vendor/github.com/stretchr/objx/constants.go b/vendor/github.com/stretchr/objx/constants.go deleted file mode 100644 index f9eb42a25e0b..000000000000 --- a/vendor/github.com/stretchr/objx/constants.go +++ /dev/null @@ -1,13 +0,0 @@ -package objx - -const ( - // PathSeparator is the character used to separate the elements - // of the keypath. - // - // For example, `location.address.city` - PathSeparator string = "." - - // SignatureSeparator is the character that is used to - // separate the Base64 string from the security signature. - SignatureSeparator = "_" -) diff --git a/vendor/github.com/stretchr/objx/conversions.go b/vendor/github.com/stretchr/objx/conversions.go index 9cdfa9f9f617..080aa46e4723 100644 --- a/vendor/github.com/stretchr/objx/conversions.go +++ b/vendor/github.com/stretchr/objx/conversions.go @@ -7,20 +7,113 @@ import ( "errors" "fmt" "net/url" + "strconv" ) +// SignatureSeparator is the character that is used to +// separate the Base64 string from the security signature. +const SignatureSeparator = "_" + +// URLValuesSliceKeySuffix is the character that is used to +// specify a suffic for slices parsed by URLValues. 
+// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +var urlValuesSliceKeySuffix = "[]" + +const ( + URLValuesSliceKeySuffixEmpty = "" + URLValuesSliceKeySuffixArray = "[]" + URLValuesSliceKeySuffixIndex = "[i]" +) + +// SetURLValuesSliceKeySuffix sets the character that is used to +// specify a suffic for slices parsed by URLValues. +// If the suffix is set to "[i]", then the index of the slice +// is used in place of i +// Ex: Suffix "[]" would have the form a[]=b&a[]=c +// OR Suffix "[i]" would have the form a[0]=b&a[1]=c +// OR Suffix "" would have the form a=b&a=c +func SetURLValuesSliceKeySuffix(s string) error { + if s == URLValuesSliceKeySuffixEmpty || s == URLValuesSliceKeySuffixArray || s == URLValuesSliceKeySuffixIndex { + urlValuesSliceKeySuffix = s + return nil + } + + return errors.New("objx: Invalid URLValuesSliceKeySuffix provided.") +} + // JSON converts the contained object to a JSON string // representation func (m Map) JSON() (string, error) { + for k, v := range m { + m[k] = cleanUp(v) + } result, err := json.Marshal(m) - if err != nil { err = errors.New("objx: JSON encode failed with: " + err.Error()) } - return string(result), err +} + +func cleanUpInterfaceArray(in []interface{}) []interface{} { + result := make([]interface{}, len(in)) + for i, v := range in { + result[i] = cleanUp(v) + } + return result +} + +func cleanUpInterfaceMap(in map[interface{}]interface{}) Map { + result := Map{} + for k, v := range in { + result[fmt.Sprintf("%v", k)] = cleanUp(v) + } + return result +} + +func cleanUpStringMap(in map[string]interface{}) Map { + result := Map{} + for k, v := range in { + result[k] = cleanUp(v) + } + return result +} + +func cleanUpMSIArray(in []map[string]interface{}) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = 
cleanUpStringMap(v) + } + return result +} +func cleanUpMapArray(in []Map) []Map { + result := make([]Map, len(in)) + for i, v := range in { + result[i] = cleanUpStringMap(v) + } + return result +} + +func cleanUp(v interface{}) interface{} { + switch v := v.(type) { + case []interface{}: + return cleanUpInterfaceArray(v) + case []map[string]interface{}: + return cleanUpMSIArray(v) + case map[interface{}]interface{}: + return cleanUpInterfaceMap(v) + case Map: + return cleanUpStringMap(v) + case []Map: + return cleanUpMapArray(v) + default: + return v + } } // MustJSON converts the contained object to a JSON string @@ -36,7 +129,6 @@ func (m Map) MustJSON() string { // Base64 converts the contained object to a Base64 string // representation of the JSON string representation func (m Map) Base64() (string, error) { - var buf bytes.Buffer jsonData, err := m.JSON() @@ -45,11 +137,10 @@ func (m Map) Base64() (string, error) { } encoder := base64.NewEncoder(base64.StdEncoding, &buf) - encoder.Write([]byte(jsonData)) - encoder.Close() + _, _ = encoder.Write([]byte(jsonData)) + _ = encoder.Close() return buf.String(), nil - } // MustBase64 converts the contained object to a Base64 string @@ -67,16 +158,13 @@ func (m Map) MustBase64() string { // representation of the JSON string representation and signs it // using the provided key. func (m Map) SignedBase64(key string) (string, error) { - base64, err := m.Base64() if err != nil { return "", err } sig := HashWithKey(base64, key) - return base64 + SignatureSeparator + sig, nil - } // MustSignedBase64 converts the contained object to a Base64 string @@ -98,17 +186,92 @@ func (m Map) MustSignedBase64(key string) string { // URLValues creates a url.Values object from an Obj. This // function requires that the wrapped object be a map[string]interface{} func (m Map) URLValues() url.Values { - vals := make(url.Values) - for k, v := range m { - //TODO: can this be done without sprintf? 
- vals.Set(k, fmt.Sprintf("%v", v)) - } + m.parseURLValues(m, vals, "") return vals } +func (m Map) parseURLValues(queryMap Map, vals url.Values, key string) { + useSliceIndex := false + if urlValuesSliceKeySuffix == "[i]" { + useSliceIndex = true + } + + for k, v := range queryMap { + val := &Value{data: v} + switch { + case val.IsObjxMap(): + if key == "" { + m.parseURLValues(val.ObjxMap(), vals, k) + } else { + m.parseURLValues(val.ObjxMap(), vals, key+"["+k+"]") + } + case val.IsObjxMapSlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustObjxMapSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(sv, vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustObjxMapSlice() { + m.parseURLValues(sv, vals, sliceKey) + } + } + case val.IsMSISlice(): + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.MustMSISlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + m.parseURLValues(New(sv), vals, sk) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + for _, sv := range val.MustMSISlice() { + m.parseURLValues(New(sv), vals, sliceKey) + } + } + case val.IsStrSlice(), val.IsBoolSlice(), + val.IsFloat32Slice(), val.IsFloat64Slice(), + val.IsIntSlice(), val.IsInt8Slice(), val.IsInt16Slice(), val.IsInt32Slice(), val.IsInt64Slice(), + val.IsUintSlice(), val.IsUint8Slice(), val.IsUint16Slice(), val.IsUint32Slice(), val.IsUint64Slice(): + + sliceKey := k + if key != "" { + sliceKey = key + "[" + k + "]" + } + + if useSliceIndex { + for i, sv := range val.StringSlice() { + sk := sliceKey + "[" + strconv.FormatInt(int64(i), 10) + "]" + vals.Set(sk, sv) + } + } else { + sliceKey = sliceKey + urlValuesSliceKeySuffix + vals[sliceKey] = val.StringSlice() + } + + default: + if key == "" { + vals.Set(k, val.String()) + } else { + 
vals.Set(key+"["+k+"]", val.String()) + } + } + } +} + // URLQuery gets an encoded URL query representing the given // Obj. This function requires that the wrapped object be a // map[string]interface{} diff --git a/vendor/github.com/stretchr/objx/conversions_test.go b/vendor/github.com/stretchr/objx/conversions_test.go index e9ccd2987b6e..7c4464ce65d7 100644 --- a/vendor/github.com/stretchr/objx/conversions_test.go +++ b/vendor/github.com/stretchr/objx/conversions_test.go @@ -1,28 +1,41 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" + "net/url" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestConversionJSON(t *testing.T) { - jsonString := `{"name":"Mat"}` - o := MustFromJSON(jsonString) + o := objx.MustFromJSON(jsonString) result, err := o.JSON() - if assert.NoError(t, err) { - assert.Equal(t, jsonString, result) - } - + require.NoError(t, err) + assert.Equal(t, jsonString, result) assert.Equal(t, jsonString, o.MustJSON()) + i := objx.Map{ + "a": map[interface{}]interface{}{"b": objx.Map{"c": map[interface{}]interface{}{"d": "e"}}, + "f": []objx.Map{{"g": map[interface{}]interface{}{"h": "i"}}}, + "j": []map[string]interface{}{{"k": map[interface{}]interface{}{"l": "m"}}}, + "n": []interface{}{objx.Map{"o": "p"}}, + }, + } + + jsonString = `{"a":{"b":{"c":{"d":"e"}},"f":[{"g":{"h":"i"}}],"j":[{"k":{"l":"m"}}],"n":[{"o":"p"}]}}` + result, err = i.JSON() + require.NoError(t, err) + assert.Equal(t, jsonString, result) + assert.Equal(t, jsonString, i.MustJSON()) } func TestConversionJSONWithError(t *testing.T) { - - o := MSI() + o := objx.MSI() o["test"] = func() {} assert.Panics(t, func() { @@ -32,26 +45,20 @@ func TestConversionJSONWithError(t *testing.T) { _, err := o.JSON() assert.Error(t, err) - } func TestConversionBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) + o := objx.Map{"name": "Mat"} result, err := o.Base64() - 
if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) - } - + require.NoError(t, err) + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", result) assert.Equal(t, "eyJuYW1lIjoiTWF0In0=", o.MustBase64()) - } func TestConversionBase64WithError(t *testing.T) { - - o := MSI() + o := objx.MSI() o["test"] = func() {} assert.Panics(t, func() { @@ -61,26 +68,20 @@ func TestConversionBase64WithError(t *testing.T) { _, err := o.Base64() assert.Error(t, err) - } func TestConversionSignedBase64(t *testing.T) { - - o := New(map[string]interface{}{"name": "Mat"}) + o := objx.Map{"name": "Mat"} result, err := o.SignedBase64("key") - if assert.NoError(t, err) { - assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) - } - + require.NoError(t, err) + assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", result) assert.Equal(t, "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6", o.MustSignedBase64("key")) - } func TestConversionSignedBase64WithError(t *testing.T) { - - o := MSI() + o := objx.MSI() o["test"] = func() {} assert.Panics(t, func() { @@ -90,5 +91,95 @@ func TestConversionSignedBase64WithError(t *testing.T) { _, err := o.SignedBase64("key") assert.Error(t, err) +} +func TestConversionURLValues(t *testing.T) { + m := getURLQueryMap() + u := m.URLValues() + + assert.Equal(t, url.Values{ + "abc": []string{"123"}, + "name": []string{"Mat"}, + "data[age]": []string{"30"}, + "data[height]": []string{"162"}, + "data[arr][]": []string{"1", "2"}, + "stats[]": []string{"1", "2"}, + "bools[]": []string{"true", "false"}, + "mapSlice[][age]": []string{"40"}, + "mapSlice[][height]": []string{"152"}, + "msiData[age]": []string{"30"}, + "msiData[height]": []string{"162"}, + "msiData[arr][]": []string{"1", "2"}, + "msiSlice[][age]": []string{"40"}, + "msiSlice[][height]": []string{"152"}, + }, u) +} + +func TestConversionURLQuery(t *testing.T) { + m := getURLQueryMap() + u, err := m.URLQuery() + + 
assert.Nil(t, err) + require.NotNil(t, u) + + ue, err := url.QueryUnescape(u) + assert.Nil(t, err) + require.NotNil(t, ue) + + assert.Equal(t, "abc=123&bools[]=true&bools[]=false&data[age]=30&data[arr][]=1&data[arr][]=2&data[height]=162&mapSlice[][age]=40&mapSlice[][height]=152&msiData[age]=30&msiData[arr][]=1&msiData[arr][]=2&msiData[height]=162&msiSlice[][age]=40&msiSlice[][height]=152&name=Mat&stats[]=1&stats[]=2", ue) +} + +func TestConversionURLQueryNoSliceKeySuffix(t *testing.T) { + m := getURLQueryMap() + objx.SetURLValuesSliceKeySuffix(objx.URLValuesSliceKeySuffixEmpty) + u, err := m.URLQuery() + + assert.Nil(t, err) + require.NotNil(t, u) + + ue, err := url.QueryUnescape(u) + assert.Nil(t, err) + require.NotNil(t, ue) + + assert.Equal(t, "abc=123&bools=true&bools=false&data[age]=30&data[arr]=1&data[arr]=2&data[height]=162&mapSlice[age]=40&mapSlice[height]=152&msiData[age]=30&msiData[arr]=1&msiData[arr]=2&msiData[height]=162&msiSlice[age]=40&msiSlice[height]=152&name=Mat&stats=1&stats=2", ue) +} + +func TestConversionURLQueryIndexSliceKeySuffix(t *testing.T) { + m := getURLQueryMap() + m.Set("mapSlice", []objx.Map{{"age": 40, "sex": "male"}, {"height": 152}}) + objx.SetURLValuesSliceKeySuffix(objx.URLValuesSliceKeySuffixIndex) + u, err := m.URLQuery() + + assert.Nil(t, err) + require.NotNil(t, u) + + ue, err := url.QueryUnescape(u) + assert.Nil(t, err) + require.NotNil(t, ue) + + assert.Equal(t, "abc=123&bools[0]=true&bools[1]=false&data[age]=30&data[arr][0]=1&data[arr][1]=2&data[height]=162&mapSlice[0][age]=40&mapSlice[0][sex]=male&mapSlice[1][height]=152&msiData[age]=30&msiData[arr][0]=1&msiData[arr][1]=2&msiData[height]=162&msiSlice[0][age]=40&msiSlice[1][height]=152&name=Mat&stats[0]=1&stats[1]=2", ue) +} + +func TestValidityURLQuerySliceKeySuffix(t *testing.T) { + err := objx.SetURLValuesSliceKeySuffix("") + assert.Nil(t, err) + err = objx.SetURLValuesSliceKeySuffix("[]") + assert.Nil(t, err) + err = objx.SetURLValuesSliceKeySuffix("[i]") + 
assert.Nil(t, err) + err = objx.SetURLValuesSliceKeySuffix("{}") + assert.Error(t, err) +} + +func getURLQueryMap() objx.Map { + return objx.Map{ + "abc": 123, + "name": "Mat", + "data": objx.Map{"age": 30, "height": 162, "arr": []int{1, 2}}, + "mapSlice": []objx.Map{{"age": 40}, {"height": 152}}, + "msiData": map[string]interface{}{"age": 30, "height": 162, "arr": []int{1, 2}}, + "msiSlice": []map[string]interface{}{{"age": 40}, {"height": 152}}, + "stats": []string{"1", "2"}, + "bools": []bool{true, false}, + } } diff --git a/vendor/github.com/stretchr/objx/doc.go b/vendor/github.com/stretchr/objx/doc.go index 47bf85e46349..6d6af1a83abf 100644 --- a/vendor/github.com/stretchr/objx/doc.go +++ b/vendor/github.com/stretchr/objx/doc.go @@ -1,72 +1,66 @@ -// objx - Go package for dealing with maps, slices, JSON and other data. -// -// Overview -// -// Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes -// a powerful `Get` method (among others) that allows you to easily and quickly get -// access to data within the map, without having to worry too much about type assertions, -// missing data, default values etc. -// -// Pattern -// -// Objx uses a preditable pattern to make access data from within `map[string]interface{}'s -// easy. -// -// Call one of the `objx.` functions to create your `objx.Map` to get going: -// -// m, err := objx.FromJSON(json) -// -// NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, -// the rest will be optimistic and try to figure things out without panicking. -// -// Use `Get` to access the value you're interested in. You can use dot and array -// notation too: -// -// m.Get("places[0].latlng") -// -// Once you have saught the `Value` you're interested in, you can use the `Is*` methods -// to determine its type. -// -// if m.Get("code").IsStr() { /* ... 
*/ } -// -// Or you can just assume the type, and use one of the strong type methods to -// extract the real value: -// -// m.Get("code").Int() -// -// If there's no value there (or if it's the wrong type) then a default value -// will be returned, or you can be explicit about the default value. -// -// Get("code").Int(-1) -// -// If you're dealing with a slice of data as a value, Objx provides many useful -// methods for iterating, manipulating and selecting that data. You can find out more -// by exploring the index below. -// -// Reading data -// -// A simple example of how to use Objx: -// -// // use MustFromJSON to make an objx.Map from some JSON -// m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) -// -// // get the details -// name := m.Get("name").Str() -// age := m.Get("age").Int() -// -// // get their nickname (or use their name if they -// // don't have one) -// nickname := m.Get("nickname").Str(name) -// -// Ranging -// -// Since `objx.Map` is a `map[string]interface{}` you can treat it as such. For -// example, to `range` the data, do what you would expect: -// -// m := objx.MustFromJSON(json) -// for key, value := range m { -// -// /* ... do your magic ... */ -// -// } +/* +Objx - Go package for dealing with maps, slices, JSON and other data. + +Overview + +Objx provides the `objx.Map` type, which is a `map[string]interface{}` that exposes +a powerful `Get` method (among others) that allows you to easily and quickly get +access to data within the map, without having to worry too much about type assertions, +missing data, default values etc. + +Pattern + +Objx uses a preditable pattern to make access data from within `map[string]interface{}` easy. +Call one of the `objx.` functions to create your `objx.Map` to get going: + + m, err := objx.FromJSON(json) + +NOTE: Any methods or functions with the `Must` prefix will panic if something goes wrong, +the rest will be optimistic and try to figure things out without panicking. 
+ +Use `Get` to access the value you're interested in. You can use dot and array +notation too: + + m.Get("places[0].latlng") + +Once you have sought the `Value` you're interested in, you can use the `Is*` methods to determine its type. + + if m.Get("code").IsStr() { // Your code... } + +Or you can just assume the type, and use one of the strong type methods to extract the real value: + + m.Get("code").Int() + +If there's no value there (or if it's the wrong type) then a default value will be returned, +or you can be explicit about the default value. + + Get("code").Int(-1) + +If you're dealing with a slice of data as a value, Objx provides many useful methods for iterating, +manipulating and selecting that data. You can find out more by exploring the index below. + +Reading data + +A simple example of how to use Objx: + + // Use MustFromJSON to make an objx.Map from some JSON + m := objx.MustFromJSON(`{"name": "Mat", "age": 30}`) + + // Get the details + name := m.Get("name").Str() + age := m.Get("age").Int() + + // Get their nickname (or use their name if they don't have one) + nickname := m.Get("nickname").Str(name) + +Ranging + +Since `objx.Map` is a `map[string]interface{}` you can treat it as such. +For example, to `range` the data, do what you would expect: + + m := objx.MustFromJSON(json) + for key, value := range m { + // Your code... 
+ } +*/ package objx diff --git a/vendor/github.com/stretchr/objx/fixture_test.go b/vendor/github.com/stretchr/objx/fixture_test.go index 27f7d9049a0d..cefe8cdc6971 100644 --- a/vendor/github.com/stretchr/objx/fixture_test.go +++ b/vendor/github.com/stretchr/objx/fixture_test.go @@ -1,8 +1,10 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) var fixtures = []struct { @@ -79,20 +81,16 @@ var fixtures = []struct { } func TestFixtures(t *testing.T) { - for _, fixture := range fixtures { - - m := MustFromJSON(fixture.data) + m := objx.MustFromJSON(fixture.data) // get the value t.Logf("Running get fixture: \"%s\" (%v)", fixture.name, fixture) value := m.Get(fixture.get.(string)) // make sure it matches - assert.Equal(t, fixture.output, value.data, + assert.Equal(t, fixture.output, value.Data(), "Get fixture \"%s\" failed: %v", fixture.name, fixture, ) - } - } diff --git a/vendor/github.com/stretchr/objx/go.mod b/vendor/github.com/stretchr/objx/go.mod new file mode 100644 index 000000000000..31ec5a7d9484 --- /dev/null +++ b/vendor/github.com/stretchr/objx/go.mod @@ -0,0 +1,8 @@ +module github.com/stretchr/objx + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 // indirect + github.com/stretchr/testify v1.3.0 +) diff --git a/vendor/github.com/stretchr/objx/go.sum b/vendor/github.com/stretchr/objx/go.sum new file mode 100644 index 000000000000..4f89841505b8 --- /dev/null +++ b/vendor/github.com/stretchr/objx/go.sum @@ -0,0 +1,8 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod 
h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/vendor/github.com/stretchr/objx/map.go b/vendor/github.com/stretchr/objx/map.go index eb6ed8e285cc..95149c06a6d3 100644 --- a/vendor/github.com/stretchr/objx/map.go +++ b/vendor/github.com/stretchr/objx/map.go @@ -27,7 +27,7 @@ func (m Map) Value() *Value { } // Nil represents a nil Map. -var Nil Map = New(nil) +var Nil = New(nil) // New creates a new Map containing the map[string]interface{} in the data argument. // If the data argument is not a map[string]interface, New attempts to call the @@ -47,9 +47,8 @@ func New(data interface{}) Map { // // The arguments follow a key, value pattern. // -// Panics // -// Panics if any key arugment is non-string or if there are an odd number of arguments. +// Returns nil if any key argument is non-string or if there are an odd number of arguments. 
// // Example // @@ -58,32 +57,25 @@ func New(data interface{}) Map { // m := objx.MSI("name", "Mat", "age", 29, "subobj", objx.MSI("active", true)) // // // creates an Map equivalent to -// m := objx.New(map[string]interface{}{"name": "Mat", "age": 29, "subobj": map[string]interface{}{"active": true}}) +// m := objx.Map{"name": "Mat", "age": 29, "subobj": objx.Map{"active": true}} func MSI(keyAndValuePairs ...interface{}) Map { - - newMap := make(map[string]interface{}) + newMap := Map{} keyAndValuePairsLen := len(keyAndValuePairs) - if keyAndValuePairsLen%2 != 0 { - panic("objx: MSI must have an even number of arguments following the 'key, value' pattern.") + return nil } - for i := 0; i < keyAndValuePairsLen; i = i + 2 { - key := keyAndValuePairs[i] value := keyAndValuePairs[i+1] // make sure the key is a string keyString, keyStringOK := key.(string) if !keyStringOK { - panic("objx: MSI must follow 'string, interface{}' pattern. " + keyString + " is not a valid key.") + return nil } - newMap[keyString] = value - } - - return New(newMap) + return newMap } // ****** Conversion Constructors @@ -94,11 +86,9 @@ func MSI(keyAndValuePairs ...interface{}) Map { // Panics if the JSON is invalid. func MustFromJSON(jsonString string) Map { o, err := FromJSON(jsonString) - if err != nil { panic("objx: MustFromJSON failed with error: " + err.Error()) } - return o } @@ -107,16 +97,50 @@ func MustFromJSON(jsonString string) Map { // // Returns an error if the JSON is invalid. 
func FromJSON(jsonString string) (Map, error) { - - var data interface{} - err := json.Unmarshal([]byte(jsonString), &data) - + var m Map + err := json.Unmarshal([]byte(jsonString), &m) if err != nil { return Nil, err } + m.tryConvertFloat64() + return m, nil +} - return New(data), nil +func (m Map) tryConvertFloat64() { + for k, v := range m { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + m[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + m[k] = t + case []interface{}: + m[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } +} +func tryConvertFloat64InSlice(s []interface{}) []interface{} { + for k, v := range s { + switch v.(type) { + case float64: + f := v.(float64) + if float64(int(f)) == f { + s[k] = int(f) + } + case map[string]interface{}: + t := New(v) + t.tryConvertFloat64() + s[k] = t + case []interface{}: + s[k] = tryConvertFloat64InSlice(v.([]interface{})) + } + } + return s } // FromBase64 creates a new Obj containing the data specified @@ -124,14 +148,11 @@ func FromJSON(jsonString string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func FromBase64(base64String string) (Map, error) { - decoder := base64.NewDecoder(base64.StdEncoding, strings.NewReader(base64String)) - decoded, err := ioutil.ReadAll(decoder) if err != nil { return nil, err } - return FromJSON(string(decoded)) } @@ -140,13 +161,10 @@ func FromBase64(base64String string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromBase64(base64String string) Map { - result, err := FromBase64(base64String) - if err != nil { panic("objx: MustFromBase64 failed with error: " + err.Error()) } - return result } @@ -157,14 +175,13 @@ func MustFromBase64(base64String string) Map { func FromSignedBase64(base64String, key string) (Map, error) { parts := strings.Split(base64String, SignatureSeparator) if len(parts) != 2 { - return nil, errors.New("objx: 
Signed base64 string is malformed.") + return nil, errors.New("objx: Signed base64 string is malformed") } sig := HashWithKey(parts[0], key) if parts[1] != sig { - return nil, errors.New("objx: Signature for base64 data does not match.") + return nil, errors.New("objx: Signature for base64 data does not match") } - return FromBase64(parts[0]) } @@ -173,13 +190,10 @@ func FromSignedBase64(base64String, key string) (Map, error) { // // The string is an encoded JSON string returned by Base64 func MustFromSignedBase64(base64String, key string) Map { - result, err := FromSignedBase64(base64String, key) - if err != nil { panic("objx: MustFromSignedBase64 failed with error: " + err.Error()) } - return result } @@ -188,19 +202,15 @@ func MustFromSignedBase64(base64String, key string) Map { // // For queries with multiple values, the first value is selected. func FromURLQuery(query string) (Map, error) { - vals, err := url.ParseQuery(query) - if err != nil { return nil, err } - - m := make(map[string]interface{}) + m := Map{} for k, vals := range vals { m[k] = vals[0] } - - return New(m), nil + return m, nil } // MustFromURLQuery generates a new Obj by parsing the specified @@ -210,13 +220,9 @@ func FromURLQuery(query string) (Map, error) { // // Panics if it encounters an error func MustFromURLQuery(query string) Map { - o, err := FromURLQuery(query) - if err != nil { panic("objx: MustFromURLQuery failed with error: " + err.Error()) } - return o - } diff --git a/vendor/github.com/stretchr/objx/map_for_test.go b/vendor/github.com/stretchr/objx/map_for_test.go deleted file mode 100644 index 6beb50675697..000000000000 --- a/vendor/github.com/stretchr/objx/map_for_test.go +++ /dev/null @@ -1,10 +0,0 @@ -package objx - -var TestMap map[string]interface{} = map[string]interface{}{ - "name": "Tyler", - "address": map[string]interface{}{ - "city": "Salt Lake City", - "state": "UT", - }, - "numbers": []interface{}{"one", "two", "three", "four", "five"}, -} diff --git 
a/vendor/github.com/stretchr/objx/map_test.go b/vendor/github.com/stretchr/objx/map_test.go index 1f8b45c61701..333d519b1668 100644 --- a/vendor/github.com/stretchr/objx/map_test.go +++ b/vendor/github.com/stretchr/objx/map_test.go @@ -1,147 +1,227 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) -type Convertable struct { - name string +var TestMap = objx.Map{ + "name": "Tyler", + "address": objx.Map{ + "city": "Salt Lake City", + "state": "UT", + }, + "numbers": []interface{}{"one", "two", "three", "four", "five"}, } -func (c *Convertable) MSI() map[string]interface{} { - return map[string]interface{}{"name": c.name} +type Convertable struct { + name string } type Unconvertable struct { name string } -func TestMapCreation(t *testing.T) { +func (c *Convertable) MSI() map[string]interface{} { + return objx.Map{"name": c.name} +} - o := New(nil) +func TestMapCreation(t *testing.T) { + o := objx.New(nil) assert.Nil(t, o) - o = New("Tyler") + o = objx.New("Tyler") assert.Nil(t, o) unconvertable := &Unconvertable{name: "Tyler"} - o = New(unconvertable) + o = objx.New(unconvertable) assert.Nil(t, o) convertable := &Convertable{name: "Tyler"} - o = New(convertable) - if assert.NotNil(t, convertable) { - assert.Equal(t, "Tyler", o["name"], "Tyler") - } + o = objx.New(convertable) + require.NotNil(t, convertable) + assert.Equal(t, "Tyler", o["name"]) - o = MSI() - if assert.NotNil(t, o) { - assert.NotNil(t, o) - } + o = objx.MSI() + assert.NotNil(t, o) + + o = objx.MSI("name", "Tyler") + require.NotNil(t, o) + assert.Equal(t, o["name"], "Tyler") + + o = objx.MSI(1, "a") + assert.Nil(t, o) + + o = objx.MSI("a") + assert.Nil(t, o) + + o = objx.MSI("a", "b", "c") + assert.Nil(t, o) +} - o = MSI("name", "Tyler") - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, o["name"], "Tyler") - } +func 
TestMapValure(t *testing.T) { + m := objx.Map{ + "a": 1, } + v := m.Value() + assert.Equal(t, m, v.ObjxMap()) } func TestMapMustFromJSONWithError(t *testing.T) { - - _, err := FromJSON(`"name":"Mat"}`) + _, err := objx.FromJSON(`"name":"Mat"}`) assert.Error(t, err) - } func TestMapFromJSON(t *testing.T) { + o := objx.MustFromJSON(`{"name":"Mat"}`) - o := MustFromJSON(`{"name":"Mat"}`) - - if assert.NotNil(t, o) { - if assert.NotNil(t, o) { - assert.Equal(t, "Mat", o["name"]) - } - } - + require.NotNil(t, o) + assert.Equal(t, "Mat", o["name"]) } func TestMapFromJSONWithError(t *testing.T) { - - var m Map + var m objx.Map assert.Panics(t, func() { - m = MustFromJSON(`"name":"Mat"}`) + m = objx.MustFromJSON(`"name":"Mat"}`) }) - assert.Nil(t, m) +} +func TestConversionJSONInt(t *testing.T) { + jsonString := + `{ + "a": 1, + "b": { + "data": 1 + }, + "c": [1], + "d": [[1]] + }` + m, err := objx.FromJSON(jsonString) + + assert.Nil(t, err) + require.NotNil(t, m) + assert.Equal(t, 1, m.Get("a").Int()) + assert.Equal(t, 1, m.Get("b.data").Int()) + + assert.True(t, m.Get("c").IsInterSlice()) + assert.Equal(t, 1, m.Get("c").InterSlice()[0]) + + assert.True(t, m.Get("d").IsInterSlice()) + assert.Equal(t, []interface{}{1}, m.Get("d").InterSlice()[0]) } -func TestMapFromBase64String(t *testing.T) { +func TestJSONSliceInt(t *testing.T) { + jsonString := + `{ + "a": [ + {"b": 1}, + {"c": 2} + ] + }` + m, err := objx.FromJSON(jsonString) + + assert.Nil(t, err) + require.NotNil(t, m) + assert.Equal(t, []objx.Map{{"b": 1}, {"c": 2}}, m.Get("a").ObjxMapSlice()) +} - base64String := "eyJuYW1lIjoiTWF0In0=" +func TestJSONSliceMixed(t *testing.T) { + jsonString := + `{ + "a": [ + {"b": 1}, + "a" + ] + }` + m, err := objx.FromJSON(jsonString) - o, err := FromBase64(base64String) + assert.Nil(t, err) + require.NotNil(t, m) - if assert.NoError(t, err) { - assert.Equal(t, o.Get("name").Str(), "Mat") - } + assert.Nil(t, m.Get("a").ObjxMapSlice()) +} - assert.Equal(t, 
MustFromBase64(base64String).Get("name").Str(), "Mat") +func TestMapFromBase64String(t *testing.T) { + base64String := "eyJuYW1lIjoiTWF0In0=" + o, err := objx.FromBase64(base64String) + require.NoError(t, err) + assert.Equal(t, o.Get("name").Str(), "Mat") + assert.Equal(t, objx.MustFromBase64(base64String).Get("name").Str(), "Mat") } func TestMapFromBase64StringWithError(t *testing.T) { - base64String := "eyJuYW1lIjoiTWFasd0In0=" - - _, err := FromBase64(base64String) + _, err := objx.FromBase64(base64String) assert.Error(t, err) - assert.Panics(t, func() { - MustFromBase64(base64String) + objx.MustFromBase64(base64String) }) - } func TestMapFromSignedBase64String(t *testing.T) { - base64String := "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - o, err := FromSignedBase64(base64String, "key") - - if assert.NoError(t, err) { - assert.Equal(t, o.Get("name").Str(), "Mat") - } - - assert.Equal(t, MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") + o, err := objx.FromSignedBase64(base64String, "key") + require.NoError(t, err) + assert.Equal(t, o.Get("name").Str(), "Mat") + assert.Equal(t, objx.MustFromSignedBase64(base64String, "key").Get("name").Str(), "Mat") } func TestMapFromSignedBase64StringWithError(t *testing.T) { - base64String := "eyJuYW1lasdIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6" - - _, err := FromSignedBase64(base64String, "key") - + _, err := objx.FromSignedBase64(base64String, "key") assert.Error(t, err) + assert.Panics(t, func() { + objx.MustFromSignedBase64(base64String, "key") + }) + base64String = "eyJuYW1lasdIjoiTWF0In0=67ee82916f90b2c0d68c903266e8998c9ef0c3d6" + _, err = objx.FromSignedBase64(base64String, "key") + assert.Error(t, err) assert.Panics(t, func() { - MustFromSignedBase64(base64String, "key") + objx.MustFromSignedBase64(base64String, "key") }) + base64String = "eyJuYW1lIjoiTWF0In0=_67ee82916f90b2c0d68c903266e8998c9ef0c3d6_junk" + _, err = objx.FromSignedBase64(base64String, "key") + 
assert.Error(t, err) + assert.Panics(t, func() { + objx.MustFromSignedBase64(base64String, "key") + }) } func TestMapFromURLQuery(t *testing.T) { + m, err := objx.FromURLQuery("name=tyler&state=UT") - m, err := FromURLQuery("name=tyler&state=UT") - if assert.NoError(t, err) && assert.NotNil(t, m) { - assert.Equal(t, "tyler", m.Get("name").Str()) - assert.Equal(t, "UT", m.Get("state").Str()) - } + assert.NoError(t, err) + require.NotNil(t, m) + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) +} +func TestMapMustFromURLQuery(t *testing.T) { + m := objx.MustFromURLQuery("name=tyler&state=UT") + + require.NotNil(t, m) + assert.Equal(t, "tyler", m.Get("name").Str()) + assert.Equal(t, "UT", m.Get("state").Str()) +} + +func TestMapFromURLQueryWithError(t *testing.T) { + m, err := objx.FromURLQuery("%") + + assert.Error(t, err) + assert.Nil(t, m) + assert.Panics(t, func() { + objx.MustFromURLQuery("%") + }) } diff --git a/vendor/github.com/stretchr/objx/mutations.go b/vendor/github.com/stretchr/objx/mutations.go index b35c86392bf8..c3400a3f709a 100644 --- a/vendor/github.com/stretchr/objx/mutations.go +++ b/vendor/github.com/stretchr/objx/mutations.go @@ -2,32 +2,23 @@ package objx // Exclude returns a new Map with the keys in the specified []string // excluded. -func (d Map) Exclude(exclude []string) Map { - +func (m Map) Exclude(exclude []string) Map { excluded := make(Map) - for k, v := range d { - var shouldInclude bool = true - for _, toExclude := range exclude { - if k == toExclude { - shouldInclude = false - break - } - } - if shouldInclude { + for k, v := range m { + if !contains(exclude, k) { excluded[k] = v } } - return excluded } // Copy creates a shallow copy of the Obj. func (m Map) Copy() Map { - copied := make(map[string]interface{}) + copied := Map{} for k, v := range m { copied[k] = v } - return New(copied) + return copied } // Merge blends the specified map with a copy of this map and returns the result. 
@@ -38,31 +29,28 @@ func (m Map) Merge(merge Map) Map { return m.Copy().MergeHere(merge) } -// Merge blends the specified map with this map and returns the current map. +// MergeHere blends the specified map with this map and returns the current map. // -// Keys that appear in both will be selected from the specified map. The original map +// Keys that appear in both will be selected from the specified map. The original map // will be modified. This method requires that // the wrapped object be a map[string]interface{} func (m Map) MergeHere(merge Map) Map { - for k, v := range merge { m[k] = v } - return m - } // Transform builds a new Obj giving the transformer a chance // to change the keys and values as it goes. This method requires that // the wrapped object be a map[string]interface{} func (m Map) Transform(transformer func(key string, value interface{}) (string, interface{})) Map { - newMap := make(map[string]interface{}) + newMap := Map{} for k, v := range m { modifiedKey, modifiedVal := transformer(k, v) newMap[modifiedKey] = modifiedVal } - return New(newMap) + return newMap } // TransformKeys builds a new map using the specified key mapping. 
@@ -71,11 +59,19 @@ func (m Map) Transform(transformer func(key string, value interface{}) (string, // This method requires that the wrapped object be a map[string]interface{} func (m Map) TransformKeys(mapping map[string]string) Map { return m.Transform(func(key string, value interface{}) (string, interface{}) { - if newKey, ok := mapping[key]; ok { return newKey, value } - return key, value }) } + +// Checks if a string slice contains a string +func contains(s []string, e string) bool { + for _, a := range s { + if a == e { + return true + } + } + return false +} diff --git a/vendor/github.com/stretchr/objx/mutations_test.go b/vendor/github.com/stretchr/objx/mutations_test.go index e20ee23bc4bb..40901ceba3a7 100644 --- a/vendor/github.com/stretchr/objx/mutations_test.go +++ b/vendor/github.com/stretchr/objx/mutations_test.go @@ -1,77 +1,106 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" + "strings" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestExclude(t *testing.T) { + m := objx.Map{ + "name": "Mat", + "age": 29, + "secret": "ABC", + } - d := make(Map) - d["name"] = "Mat" - d["age"] = 29 - d["secret"] = "ABC" + excluded := m.Exclude([]string{"secret"}) - excluded := d.Exclude([]string{"secret"}) - - assert.Equal(t, d["name"], excluded["name"]) - assert.Equal(t, d["age"], excluded["age"]) + assert.Equal(t, m["name"], excluded["name"]) + assert.Equal(t, m["age"], excluded["age"]) assert.False(t, excluded.Has("secret"), "secret should be excluded") - } func TestCopy(t *testing.T) { + m1 := objx.Map{ + "name": "Tyler", + "location": "UT", + } - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" + m2 := m1.Copy() + require.NotNil(t, m2) + m2["name"] = "Mat" - d1Obj := New(d1) - d2Obj := d1Obj.Copy() - - d2Obj["name"] = "Mat" - - assert.Equal(t, d1Obj.Get("name").Str(), "Tyler") - assert.Equal(t, 
d2Obj.Get("name").Str(), "Mat") + assert.Equal(t, m1.Get("name").Str(), "Tyler") + assert.Equal(t, m2.Get("name").Str(), "Mat") } func TestMerge(t *testing.T) { - - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) - - merged := dObj.Merge(d1Obj) - - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Empty(t, dObj.Get("location").Str()) - + m1 := objx.Map{ + "name": "Mat", + } + m2 := objx.Map{ + "name": "Tyler", + "location": "UT", + } + + merged := m1.Merge(m2) + + assert.Equal(t, merged.Get("name").Str(), m2.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), m2.Get("location").Str()) + assert.Empty(t, m1.Get("location").Str()) } func TestMergeHere(t *testing.T) { + m1 := objx.Map{ + "name": "Mat", + } + m2 := objx.Map{ + "name": "Tyler", + "location": "UT", + } + + merged := m1.MergeHere(m2) + + assert.Equal(t, m1, merged, "With MergeHere, it should return the first modified map") + assert.Equal(t, merged.Get("name").Str(), m2.Get("name").Str()) + assert.Equal(t, merged.Get("location").Str(), m2.Get("location").Str()) + assert.Equal(t, merged.Get("location").Str(), m1.Get("location").Str()) +} - d := make(map[string]interface{}) - d["name"] = "Mat" - - d1 := make(map[string]interface{}) - d1["name"] = "Tyler" - d1["location"] = "UT" - - dObj := New(d) - d1Obj := New(d1) +func TestTransform(t *testing.T) { + m := objx.Map{ + "name": "Mat", + "location": "UK", + } + r := m.Transform(keyToUpper) + assert.Equal(t, objx.Map{ + "NAME": "Mat", + "LOCATION": "UK", + }, r) +} - merged := dObj.MergeHere(d1Obj) +func TestTransformKeys(t *testing.T) { + m := objx.Map{ + "a": "1", + "b": "2", + "c": "3", + } + mapping := map[string]string{ + "a": "d", + "b": "e", + } + r := m.TransformKeys(mapping) + assert.Equal(t, objx.Map{ + "c": 
"3", + "d": "1", + "e": "2", + }, r) +} - assert.Equal(t, dObj, merged, "With MergeHere, it should return the first modified map") - assert.Equal(t, merged.Get("name").Str(), d1Obj.Get("name").Str()) - assert.Equal(t, merged.Get("location").Str(), d1Obj.Get("location").Str()) - assert.Equal(t, merged.Get("location").Str(), dObj.Get("location").Str()) +func keyToUpper(s string, v interface{}) (string, interface{}) { + return strings.ToUpper(s), v } diff --git a/vendor/github.com/stretchr/objx/security.go b/vendor/github.com/stretchr/objx/security.go index fdd6be9cfb35..692be8e2a9fe 100644 --- a/vendor/github.com/stretchr/objx/security.go +++ b/vendor/github.com/stretchr/objx/security.go @@ -5,10 +5,8 @@ import ( "encoding/hex" ) -// HashWithKey hashes the specified string using the security -// key. +// HashWithKey hashes the specified string using the security key func HashWithKey(data, key string) string { - hash := sha1.New() - hash.Write([]byte(data + ":" + key)) - return hex.EncodeToString(hash.Sum(nil)) + d := sha1.Sum([]byte(data + ":" + key)) + return hex.EncodeToString(d[:]) } diff --git a/vendor/github.com/stretchr/objx/security_test.go b/vendor/github.com/stretchr/objx/security_test.go index 8f0898f62ca3..8c623db91da4 100644 --- a/vendor/github.com/stretchr/objx/security_test.go +++ b/vendor/github.com/stretchr/objx/security_test.go @@ -1,12 +1,12 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) func TestHashWithKey(t *testing.T) { - - assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", HashWithKey("abc", "def")) - + assert.Equal(t, "0ce84d8d01f2c7b6e0882b784429c54d280ea2d9", objx.HashWithKey("abc", "def")) } diff --git a/vendor/github.com/stretchr/objx/simple_example_test.go b/vendor/github.com/stretchr/objx/simple_example_test.go index 5408c7fd3d3d..403753d65253 100644 --- a/vendor/github.com/stretchr/objx/simple_example_test.go 
+++ b/vendor/github.com/stretchr/objx/simple_example_test.go @@ -1,21 +1,23 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" ) func TestSimpleExample(t *testing.T) { - // build a map from a JSON object - o := MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) + o := objx.MustFromJSON(`{"name":"Mat","foods":["indian","chinese"], "location":{"county":"hobbiton","city":"the shire"}}`) // Map can be used as a straight map[string]interface{} assert.Equal(t, o["name"], "Mat") // Get an Value object v := o.Get("name") - assert.Equal(t, v, &Value{data: "Mat"}) + require.NotNil(t, v) // Test the contained value assert.False(t, v.IsInt()) @@ -37,5 +39,4 @@ func TestSimpleExample(t *testing.T) { // Get a value by using dot notation assert.Equal(t, "hobbiton", o.Get("location.county").Str()) - } diff --git a/vendor/github.com/stretchr/objx/tests_test.go b/vendor/github.com/stretchr/objx/tests_test.go index bcc1eb03d08e..94a8adaf69a9 100644 --- a/vendor/github.com/stretchr/objx/tests_test.go +++ b/vendor/github.com/stretchr/objx/tests_test.go @@ -1,13 +1,14 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) func TestHas(t *testing.T) { - - m := New(TestMap) + m := objx.Map(TestMap) assert.True(t, m.Has("name")) assert.True(t, m.Has("address.state")) @@ -19,6 +20,6 @@ func TestHas(t *testing.T) { assert.False(t, m.Has("numbers[5]")) m = nil - assert.False(t, m.Has("nothing")) + assert.False(t, m.Has("nothing")) } diff --git a/vendor/github.com/stretchr/objx/type_specific.go b/vendor/github.com/stretchr/objx/type_specific.go new file mode 100644 index 000000000000..80f88d9fa29f --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific.go @@ -0,0 
+1,346 @@ +package objx + +/* + MSI (map[string]interface{} and []map[string]interface{}) +*/ + +// MSI gets the value as a map[string]interface{}, returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil +} + +// MustMSI gets the value as a map[string]interface{}. +// +// Panics if the object is not a map[string]interface{}. +func (v *Value) MustMSI() map[string]interface{} { + if s, ok := v.data.(Map); ok { + return map[string]interface{}(s) + } + return v.data.(map[string]interface{}) +} + +// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault +// value or nil if the value is not a []map[string]interface{}. +func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { + if s, ok := v.data.([]map[string]interface{}); ok { + return s + } + + s := v.ObjxMapSlice() + if s == nil { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]map[string]interface{}, len(s)) + for i := range s { + result[i] = s[i].Value().MSI() + } + return result +} + +// MustMSISlice gets the value as a []map[string]interface{}. +// +// Panics if the object is not a []map[string]interface{}. +func (v *Value) MustMSISlice() []map[string]interface{} { + if s := v.MSISlice(); s != nil { + return s + } + + return v.data.([]map[string]interface{}) +} + +// IsMSI gets whether the object contained is a map[string]interface{} or not. 
+func (v *Value) IsMSI() bool { + _, ok := v.data.(map[string]interface{}) + if !ok { + _, ok = v.data.(Map) + } + return ok +} + +// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. +func (v *Value) IsMSISlice() bool { + _, ok := v.data.([]map[string]interface{}) + if !ok { + _, ok = v.data.([]Map) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + return ok +} + +// EachMSI calls the specified callback for each object +// in the []map[string]interface{}. +// +// Panics if the object is the wrong type. +func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { + for index, val := range v.MustMSISlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereMSI uses the specified decider function to select items +// from the []map[string]interface{}. The object contained in the result will contain +// only the selected items. +func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { + var selected []map[string]interface{} + v.EachMSI(func(index int, val map[string]interface{}) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupMSI uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][]map[string]interface{}. 
+func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { + groups := make(map[string][]map[string]interface{}) + v.EachMSI(func(index int, val map[string]interface{}) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([]map[string]interface{}, 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceMSI uses the specified function to replace each map[string]interface{}s +// by iterating each item. The data in the returned result will be a +// []map[string]interface{} containing the replaced items. +func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { + arr := v.MustMSISlice() + replaced := make([]map[string]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectMSI uses the specified collector function to collect a value +// for each of the map[string]interface{}s in the slice. The data returned will be a +// []interface{}. +func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { + arr := v.MustMSISlice() + collected := make([]interface{}, len(arr)) + v.EachMSI(func(index int, val map[string]interface{}) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} + +/* + ObjxMap ((Map) and [](Map)) +*/ + +// ObjxMap gets the value as a (Map), returns the optionalDefault +// value or a system default object if the value is the wrong type. +func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { + if s, ok := v.data.((Map)); ok { + return s + } + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return New(nil) +} + +// MustObjxMap gets the value as a (Map). 
+// +// Panics if the object is not a (Map). +func (v *Value) MustObjxMap() Map { + if s, ok := v.data.(map[string]interface{}); ok { + return s + } + return v.data.((Map)) +} + +// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault +// value or nil if the value is not a [](Map). +func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { + if s, ok := v.data.([]Map); ok { + return s + } + + if s, ok := v.data.([]map[string]interface{}); ok { + result := make([]Map, len(s)) + for i := range s { + result[i] = s[i] + } + return result + } + + s, ok := v.data.([]interface{}) + if !ok { + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + return nil + } + + result := make([]Map, len(s)) + for i := range s { + switch s[i].(type) { + case Map: + result[i] = s[i].(Map) + case map[string]interface{}: + result[i] = New(s[i]) + default: + return nil + } + } + return result +} + +// MustObjxMapSlice gets the value as a [](Map). +// +// Panics if the object is not a [](Map). +func (v *Value) MustObjxMapSlice() [](Map) { + if s := v.ObjxMapSlice(); s != nil { + return s + } + return v.data.([](Map)) +} + +// IsObjxMap gets whether the object contained is a (Map) or not. +func (v *Value) IsObjxMap() bool { + _, ok := v.data.((Map)) + if !ok { + _, ok = v.data.(map[string]interface{}) + } + return ok +} + +// IsObjxMapSlice gets whether the object contained is a [](Map) or not. +func (v *Value) IsObjxMapSlice() bool { + _, ok := v.data.([](Map)) + if !ok { + _, ok = v.data.([]map[string]interface{}) + if !ok { + s, ok := v.data.([]interface{}) + if ok { + for i := range s { + switch s[i].(type) { + case Map: + case map[string]interface{}: + default: + return false + } + } + return true + } + } + } + + return ok +} + +// EachObjxMap calls the specified callback for each object +// in the [](Map). +// +// Panics if the object is the wrong type. 
+func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { + for index, val := range v.MustObjxMapSlice() { + carryon := callback(index, val) + if !carryon { + break + } + } + return v +} + +// WhereObjxMap uses the specified decider function to select items +// from the [](Map). The object contained in the result will contain +// only the selected items. +func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { + var selected [](Map) + v.EachObjxMap(func(index int, val Map) bool { + shouldSelect := decider(index, val) + if !shouldSelect { + selected = append(selected, val) + } + return true + }) + return &Value{data: selected} +} + +// GroupObjxMap uses the specified grouper function to group the items +// keyed by the return of the grouper. The object contained in the +// result will contain a map[string][](Map). +func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { + groups := make(map[string][](Map)) + v.EachObjxMap(func(index int, val Map) bool { + group := grouper(index, val) + if _, ok := groups[group]; !ok { + groups[group] = make([](Map), 0) + } + groups[group] = append(groups[group], val) + return true + }) + return &Value{data: groups} +} + +// ReplaceObjxMap uses the specified function to replace each (Map)s +// by iterating each item. The data in the returned result will be a +// [](Map) containing the replaced items. +func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { + arr := v.MustObjxMapSlice() + replaced := make([](Map), len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + replaced[index] = replacer(index, val) + return true + }) + return &Value{data: replaced} +} + +// CollectObjxMap uses the specified collector function to collect a value +// for each of the (Map)s in the slice. The data returned will be a +// []interface{}. 
+func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { + arr := v.MustObjxMapSlice() + collected := make([]interface{}, len(arr)) + v.EachObjxMap(func(index int, val Map) bool { + collected[index] = collector(index, val) + return true + }) + return &Value{data: collected} +} diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen.go b/vendor/github.com/stretchr/objx/type_specific_codegen.go index f3ecb29b9503..9859b407f029 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen.go @@ -1,8 +1,7 @@ package objx /* - Inter (interface{} and []interface{}) - -------------------------------------------------- + Inter (interface{} and []interface{}) */ // Inter gets the value as a interface{}, returns the optionalDefault @@ -60,44 +59,35 @@ func (v *Value) IsInterSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInter(callback func(int, interface{}) bool) *Value { - for index, val := range v.MustInterSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInter uses the specified decider function to select items // from the []interface{}. The object contained in the result will contain // only the selected items. func (v *Value) WhereInter(decider func(int, interface{}) bool) *Value { - var selected []interface{} - v.EachInter(func(index int, val interface{}) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInter uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]interface{}. 
func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { - groups := make(map[string][]interface{}) - v.EachInter(func(index int, val interface{}) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -106,335 +96,37 @@ func (v *Value) GroupInter(grouper func(int, interface{}) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInter uses the specified function to replace each interface{}s // by iterating each item. The data in the returned result will be a // []interface{} containing the replaced items. func (v *Value) ReplaceInter(replacer func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() replaced := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInter uses the specified collector function to collect a value // for each of the interface{}s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInter(collector func(int, interface{}) interface{}) *Value { - arr := v.MustInterSlice() collected := make([]interface{}, len(arr)) - v.EachInter(func(index int, val interface{}) bool { collected[index] = collector(index, val) return true }) - - return &Value{data: collected} -} - -/* - MSI (map[string]interface{} and []map[string]interface{}) - -------------------------------------------------- -*/ - -// MSI gets the value as a map[string]interface{}, returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) MSI(optionalDefault ...map[string]interface{}) map[string]interface{} { - if s, ok := v.data.(map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSI gets the value as a map[string]interface{}. 
-// -// Panics if the object is not a map[string]interface{}. -func (v *Value) MustMSI() map[string]interface{} { - return v.data.(map[string]interface{}) -} - -// MSISlice gets the value as a []map[string]interface{}, returns the optionalDefault -// value or nil if the value is not a []map[string]interface{}. -func (v *Value) MSISlice(optionalDefault ...[]map[string]interface{}) []map[string]interface{} { - if s, ok := v.data.([]map[string]interface{}); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustMSISlice gets the value as a []map[string]interface{}. -// -// Panics if the object is not a []map[string]interface{}. -func (v *Value) MustMSISlice() []map[string]interface{} { - return v.data.([]map[string]interface{}) -} - -// IsMSI gets whether the object contained is a map[string]interface{} or not. -func (v *Value) IsMSI() bool { - _, ok := v.data.(map[string]interface{}) - return ok -} - -// IsMSISlice gets whether the object contained is a []map[string]interface{} or not. -func (v *Value) IsMSISlice() bool { - _, ok := v.data.([]map[string]interface{}) - return ok -} - -// EachMSI calls the specified callback for each object -// in the []map[string]interface{}. -// -// Panics if the object is the wrong type. -func (v *Value) EachMSI(callback func(int, map[string]interface{}) bool) *Value { - - for index, val := range v.MustMSISlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereMSI uses the specified decider function to select items -// from the []map[string]interface{}. The object contained in the result will contain -// only the selected items. 
-func (v *Value) WhereMSI(decider func(int, map[string]interface{}) bool) *Value { - - var selected []map[string]interface{} - - v.EachMSI(func(index int, val map[string]interface{}) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupMSI uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][]map[string]interface{}. -func (v *Value) GroupMSI(grouper func(int, map[string]interface{}) string) *Value { - - groups := make(map[string][]map[string]interface{}) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([]map[string]interface{}, 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceMSI uses the specified function to replace each map[string]interface{}s -// by iterating each item. The data in the returned result will be a -// []map[string]interface{} containing the replaced items. -func (v *Value) ReplaceMSI(replacer func(int, map[string]interface{}) map[string]interface{}) *Value { - - arr := v.MustMSISlice() - replaced := make([]map[string]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectMSI uses the specified collector function to collect a value -// for each of the map[string]interface{}s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectMSI(collector func(int, map[string]interface{}) interface{}) *Value { - - arr := v.MustMSISlice() - collected := make([]interface{}, len(arr)) - - v.EachMSI(func(index int, val map[string]interface{}) bool { - collected[index] = collector(index, val) - return true - }) - return &Value{data: collected} } /* - ObjxMap ((Map) and [](Map)) - -------------------------------------------------- -*/ - -// ObjxMap gets the value as a (Map), returns the optionalDefault -// value or a system default object if the value is the wrong type. -func (v *Value) ObjxMap(optionalDefault ...(Map)) Map { - if s, ok := v.data.((Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return New(nil) -} - -// MustObjxMap gets the value as a (Map). -// -// Panics if the object is not a (Map). -func (v *Value) MustObjxMap() Map { - return v.data.((Map)) -} - -// ObjxMapSlice gets the value as a [](Map), returns the optionalDefault -// value or nil if the value is not a [](Map). -func (v *Value) ObjxMapSlice(optionalDefault ...[](Map)) [](Map) { - if s, ok := v.data.([](Map)); ok { - return s - } - if len(optionalDefault) == 1 { - return optionalDefault[0] - } - return nil -} - -// MustObjxMapSlice gets the value as a [](Map). -// -// Panics if the object is not a [](Map). -func (v *Value) MustObjxMapSlice() [](Map) { - return v.data.([](Map)) -} - -// IsObjxMap gets whether the object contained is a (Map) or not. -func (v *Value) IsObjxMap() bool { - _, ok := v.data.((Map)) - return ok -} - -// IsObjxMapSlice gets whether the object contained is a [](Map) or not. -func (v *Value) IsObjxMapSlice() bool { - _, ok := v.data.([](Map)) - return ok -} - -// EachObjxMap calls the specified callback for each object -// in the [](Map). -// -// Panics if the object is the wrong type. 
-func (v *Value) EachObjxMap(callback func(int, Map) bool) *Value { - - for index, val := range v.MustObjxMapSlice() { - carryon := callback(index, val) - if carryon == false { - break - } - } - - return v - -} - -// WhereObjxMap uses the specified decider function to select items -// from the [](Map). The object contained in the result will contain -// only the selected items. -func (v *Value) WhereObjxMap(decider func(int, Map) bool) *Value { - - var selected [](Map) - - v.EachObjxMap(func(index int, val Map) bool { - shouldSelect := decider(index, val) - if shouldSelect == false { - selected = append(selected, val) - } - return true - }) - - return &Value{data: selected} - -} - -// GroupObjxMap uses the specified grouper function to group the items -// keyed by the return of the grouper. The object contained in the -// result will contain a map[string][](Map). -func (v *Value) GroupObjxMap(grouper func(int, Map) string) *Value { - - groups := make(map[string][](Map)) - - v.EachObjxMap(func(index int, val Map) bool { - group := grouper(index, val) - if _, ok := groups[group]; !ok { - groups[group] = make([](Map), 0) - } - groups[group] = append(groups[group], val) - return true - }) - - return &Value{data: groups} - -} - -// ReplaceObjxMap uses the specified function to replace each (Map)s -// by iterating each item. The data in the returned result will be a -// [](Map) containing the replaced items. -func (v *Value) ReplaceObjxMap(replacer func(int, Map) Map) *Value { - - arr := v.MustObjxMapSlice() - replaced := make([](Map), len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - replaced[index] = replacer(index, val) - return true - }) - - return &Value{data: replaced} - -} - -// CollectObjxMap uses the specified collector function to collect a value -// for each of the (Map)s in the slice. The data returned will be a -// []interface{}. 
-func (v *Value) CollectObjxMap(collector func(int, Map) interface{}) *Value { - - arr := v.MustObjxMapSlice() - collected := make([]interface{}, len(arr)) - - v.EachObjxMap(func(index int, val Map) bool { - collected[index] = collector(index, val) - return true - }) - - return &Value{data: collected} -} - -/* - Bool (bool and []bool) - -------------------------------------------------- + Bool (bool and []bool) */ // Bool gets the value as a bool, returns the optionalDefault @@ -492,44 +184,35 @@ func (v *Value) IsBoolSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachBool(callback func(int, bool) bool) *Value { - for index, val := range v.MustBoolSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereBool uses the specified decider function to select items // from the []bool. The object contained in the result will contain // only the selected items. func (v *Value) WhereBool(decider func(int, bool) bool) *Value { - var selected []bool - v.EachBool(func(index int, val bool) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupBool uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]bool. func (v *Value) GroupBool(grouper func(int, bool) string) *Value { - groups := make(map[string][]bool) - v.EachBool(func(index int, val bool) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -538,47 +221,37 @@ func (v *Value) GroupBool(grouper func(int, bool) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceBool uses the specified function to replace each bools // by iterating each item. 
The data in the returned result will be a // []bool containing the replaced items. func (v *Value) ReplaceBool(replacer func(int, bool) bool) *Value { - arr := v.MustBoolSlice() replaced := make([]bool, len(arr)) - v.EachBool(func(index int, val bool) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectBool uses the specified collector function to collect a value // for each of the bools in the slice. The data returned will be a // []interface{}. func (v *Value) CollectBool(collector func(int, bool) interface{}) *Value { - arr := v.MustBoolSlice() collected := make([]interface{}, len(arr)) - v.EachBool(func(index int, val bool) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Str (string and []string) - -------------------------------------------------- + Str (string and []string) */ // Str gets the value as a string, returns the optionalDefault @@ -636,44 +309,35 @@ func (v *Value) IsStrSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachStr(callback func(int, string) bool) *Value { - for index, val := range v.MustStrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereStr uses the specified decider function to select items // from the []string. The object contained in the result will contain // only the selected items. func (v *Value) WhereStr(decider func(int, string) bool) *Value { - var selected []string - v.EachStr(func(index int, val string) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupStr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]string. 
func (v *Value) GroupStr(grouper func(int, string) string) *Value { - groups := make(map[string][]string) - v.EachStr(func(index int, val string) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -682,47 +346,37 @@ func (v *Value) GroupStr(grouper func(int, string) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceStr uses the specified function to replace each strings // by iterating each item. The data in the returned result will be a // []string containing the replaced items. func (v *Value) ReplaceStr(replacer func(int, string) string) *Value { - arr := v.MustStrSlice() replaced := make([]string, len(arr)) - v.EachStr(func(index int, val string) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectStr uses the specified collector function to collect a value // for each of the strings in the slice. The data returned will be a // []interface{}. func (v *Value) CollectStr(collector func(int, string) interface{}) *Value { - arr := v.MustStrSlice() collected := make([]interface{}, len(arr)) - v.EachStr(func(index int, val string) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Int (int and []int) - -------------------------------------------------- + Int (int and []int) */ // Int gets the value as a int, returns the optionalDefault @@ -780,44 +434,35 @@ func (v *Value) IsIntSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt(callback func(int, int) bool) *Value { - for index, val := range v.MustIntSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt uses the specified decider function to select items // from the []int. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereInt(decider func(int, int) bool) *Value { - var selected []int - v.EachInt(func(index int, val int) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int. func (v *Value) GroupInt(grouper func(int, int) string) *Value { - groups := make(map[string][]int) - v.EachInt(func(index int, val int) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -826,47 +471,37 @@ func (v *Value) GroupInt(grouper func(int, int) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt uses the specified function to replace each ints // by iterating each item. The data in the returned result will be a // []int containing the replaced items. func (v *Value) ReplaceInt(replacer func(int, int) int) *Value { - arr := v.MustIntSlice() replaced := make([]int, len(arr)) - v.EachInt(func(index int, val int) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt uses the specified collector function to collect a value // for each of the ints in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectInt(collector func(int, int) interface{}) *Value { - arr := v.MustIntSlice() collected := make([]interface{}, len(arr)) - v.EachInt(func(index int, val int) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Int8 (int8 and []int8) - -------------------------------------------------- + Int8 (int8 and []int8) */ // Int8 gets the value as a int8, returns the optionalDefault @@ -924,44 +559,35 @@ func (v *Value) IsInt8Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt8(callback func(int, int8) bool) *Value { - for index, val := range v.MustInt8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt8 uses the specified decider function to select items // from the []int8. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt8(decider func(int, int8) bool) *Value { - var selected []int8 - v.EachInt8(func(index int, val int8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int8. func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { - groups := make(map[string][]int8) - v.EachInt8(func(index int, val int8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -970,47 +596,37 @@ func (v *Value) GroupInt8(grouper func(int, int8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt8 uses the specified function to replace each int8s // by iterating each item. The data in the returned result will be a // []int8 containing the replaced items. 
func (v *Value) ReplaceInt8(replacer func(int, int8) int8) *Value { - arr := v.MustInt8Slice() replaced := make([]int8, len(arr)) - v.EachInt8(func(index int, val int8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt8 uses the specified collector function to collect a value // for each of the int8s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt8(collector func(int, int8) interface{}) *Value { - arr := v.MustInt8Slice() collected := make([]interface{}, len(arr)) - v.EachInt8(func(index int, val int8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Int16 (int16 and []int16) - -------------------------------------------------- + Int16 (int16 and []int16) */ // Int16 gets the value as a int16, returns the optionalDefault @@ -1068,44 +684,35 @@ func (v *Value) IsInt16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt16(callback func(int, int16) bool) *Value { - for index, val := range v.MustInt16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt16 uses the specified decider function to select items // from the []int16. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt16(decider func(int, int16) bool) *Value { - var selected []int16 - v.EachInt16(func(index int, val int16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int16. 
func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { - groups := make(map[string][]int16) - v.EachInt16(func(index int, val int16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1114,47 +721,37 @@ func (v *Value) GroupInt16(grouper func(int, int16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt16 uses the specified function to replace each int16s // by iterating each item. The data in the returned result will be a // []int16 containing the replaced items. func (v *Value) ReplaceInt16(replacer func(int, int16) int16) *Value { - arr := v.MustInt16Slice() replaced := make([]int16, len(arr)) - v.EachInt16(func(index int, val int16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt16 uses the specified collector function to collect a value // for each of the int16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt16(collector func(int, int16) interface{}) *Value { - arr := v.MustInt16Slice() collected := make([]interface{}, len(arr)) - v.EachInt16(func(index int, val int16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Int32 (int32 and []int32) - -------------------------------------------------- + Int32 (int32 and []int32) */ // Int32 gets the value as a int32, returns the optionalDefault @@ -1212,44 +809,35 @@ func (v *Value) IsInt32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt32(callback func(int, int32) bool) *Value { - for index, val := range v.MustInt32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt32 uses the specified decider function to select items // from the []int32. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereInt32(decider func(int, int32) bool) *Value { - var selected []int32 - v.EachInt32(func(index int, val int32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int32. func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { - groups := make(map[string][]int32) - v.EachInt32(func(index int, val int32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1258,47 +846,37 @@ func (v *Value) GroupInt32(grouper func(int, int32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt32 uses the specified function to replace each int32s // by iterating each item. The data in the returned result will be a // []int32 containing the replaced items. func (v *Value) ReplaceInt32(replacer func(int, int32) int32) *Value { - arr := v.MustInt32Slice() replaced := make([]int32, len(arr)) - v.EachInt32(func(index int, val int32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt32 uses the specified collector function to collect a value // for each of the int32s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectInt32(collector func(int, int32) interface{}) *Value { - arr := v.MustInt32Slice() collected := make([]interface{}, len(arr)) - v.EachInt32(func(index int, val int32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Int64 (int64 and []int64) - -------------------------------------------------- + Int64 (int64 and []int64) */ // Int64 gets the value as a int64, returns the optionalDefault @@ -1356,44 +934,35 @@ func (v *Value) IsInt64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachInt64(callback func(int, int64) bool) *Value { - for index, val := range v.MustInt64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereInt64 uses the specified decider function to select items // from the []int64. The object contained in the result will contain // only the selected items. func (v *Value) WhereInt64(decider func(int, int64) bool) *Value { - var selected []int64 - v.EachInt64(func(index int, val int64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupInt64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]int64. func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { - groups := make(map[string][]int64) - v.EachInt64(func(index int, val int64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1402,47 +971,37 @@ func (v *Value) GroupInt64(grouper func(int, int64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceInt64 uses the specified function to replace each int64s // by iterating each item. 
The data in the returned result will be a // []int64 containing the replaced items. func (v *Value) ReplaceInt64(replacer func(int, int64) int64) *Value { - arr := v.MustInt64Slice() replaced := make([]int64, len(arr)) - v.EachInt64(func(index int, val int64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectInt64 uses the specified collector function to collect a value // for each of the int64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectInt64(collector func(int, int64) interface{}) *Value { - arr := v.MustInt64Slice() collected := make([]interface{}, len(arr)) - v.EachInt64(func(index int, val int64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uint (uint and []uint) - -------------------------------------------------- + Uint (uint and []uint) */ // Uint gets the value as a uint, returns the optionalDefault @@ -1500,44 +1059,35 @@ func (v *Value) IsUintSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint(callback func(int, uint) bool) *Value { - for index, val := range v.MustUintSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint uses the specified decider function to select items // from the []uint. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint(decider func(int, uint) bool) *Value { - var selected []uint - v.EachUint(func(index int, val uint) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint. 
func (v *Value) GroupUint(grouper func(int, uint) string) *Value { - groups := make(map[string][]uint) - v.EachUint(func(index int, val uint) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1546,47 +1096,37 @@ func (v *Value) GroupUint(grouper func(int, uint) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint uses the specified function to replace each uints // by iterating each item. The data in the returned result will be a // []uint containing the replaced items. func (v *Value) ReplaceUint(replacer func(int, uint) uint) *Value { - arr := v.MustUintSlice() replaced := make([]uint, len(arr)) - v.EachUint(func(index int, val uint) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint uses the specified collector function to collect a value // for each of the uints in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint(collector func(int, uint) interface{}) *Value { - arr := v.MustUintSlice() collected := make([]interface{}, len(arr)) - v.EachUint(func(index int, val uint) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uint8 (uint8 and []uint8) - -------------------------------------------------- + Uint8 (uint8 and []uint8) */ // Uint8 gets the value as a uint8, returns the optionalDefault @@ -1644,44 +1184,35 @@ func (v *Value) IsUint8Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint8(callback func(int, uint8) bool) *Value { - for index, val := range v.MustUint8Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint8 uses the specified decider function to select items // from the []uint8. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereUint8(decider func(int, uint8) bool) *Value { - var selected []uint8 - v.EachUint8(func(index int, val uint8) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint8 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint8. func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { - groups := make(map[string][]uint8) - v.EachUint8(func(index int, val uint8) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1690,47 +1221,37 @@ func (v *Value) GroupUint8(grouper func(int, uint8) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint8 uses the specified function to replace each uint8s // by iterating each item. The data in the returned result will be a // []uint8 containing the replaced items. func (v *Value) ReplaceUint8(replacer func(int, uint8) uint8) *Value { - arr := v.MustUint8Slice() replaced := make([]uint8, len(arr)) - v.EachUint8(func(index int, val uint8) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint8 uses the specified collector function to collect a value // for each of the uint8s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectUint8(collector func(int, uint8) interface{}) *Value { - arr := v.MustUint8Slice() collected := make([]interface{}, len(arr)) - v.EachUint8(func(index int, val uint8) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uint16 (uint16 and []uint16) - -------------------------------------------------- + Uint16 (uint16 and []uint16) */ // Uint16 gets the value as a uint16, returns the optionalDefault @@ -1788,44 +1309,35 @@ func (v *Value) IsUint16Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint16(callback func(int, uint16) bool) *Value { - for index, val := range v.MustUint16Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint16 uses the specified decider function to select items // from the []uint16. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint16(decider func(int, uint16) bool) *Value { - var selected []uint16 - v.EachUint16(func(index int, val uint16) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint16 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint16. func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { - groups := make(map[string][]uint16) - v.EachUint16(func(index int, val uint16) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1834,47 +1346,37 @@ func (v *Value) GroupUint16(grouper func(int, uint16) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint16 uses the specified function to replace each uint16s // by iterating each item. 
The data in the returned result will be a // []uint16 containing the replaced items. func (v *Value) ReplaceUint16(replacer func(int, uint16) uint16) *Value { - arr := v.MustUint16Slice() replaced := make([]uint16, len(arr)) - v.EachUint16(func(index int, val uint16) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint16 uses the specified collector function to collect a value // for each of the uint16s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint16(collector func(int, uint16) interface{}) *Value { - arr := v.MustUint16Slice() collected := make([]interface{}, len(arr)) - v.EachUint16(func(index int, val uint16) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uint32 (uint32 and []uint32) - -------------------------------------------------- + Uint32 (uint32 and []uint32) */ // Uint32 gets the value as a uint32, returns the optionalDefault @@ -1932,44 +1434,35 @@ func (v *Value) IsUint32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint32(callback func(int, uint32) bool) *Value { - for index, val := range v.MustUint32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint32 uses the specified decider function to select items // from the []uint32. The object contained in the result will contain // only the selected items. func (v *Value) WhereUint32(decider func(int, uint32) bool) *Value { - var selected []uint32 - v.EachUint32(func(index int, val uint32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint32 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint32. 
func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { - groups := make(map[string][]uint32) - v.EachUint32(func(index int, val uint32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -1978,47 +1471,37 @@ func (v *Value) GroupUint32(grouper func(int, uint32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint32 uses the specified function to replace each uint32s // by iterating each item. The data in the returned result will be a // []uint32 containing the replaced items. func (v *Value) ReplaceUint32(replacer func(int, uint32) uint32) *Value { - arr := v.MustUint32Slice() replaced := make([]uint32, len(arr)) - v.EachUint32(func(index int, val uint32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint32 uses the specified collector function to collect a value // for each of the uint32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUint32(collector func(int, uint32) interface{}) *Value { - arr := v.MustUint32Slice() collected := make([]interface{}, len(arr)) - v.EachUint32(func(index int, val uint32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uint64 (uint64 and []uint64) - -------------------------------------------------- + Uint64 (uint64 and []uint64) */ // Uint64 gets the value as a uint64, returns the optionalDefault @@ -2076,44 +1559,35 @@ func (v *Value) IsUint64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUint64(callback func(int, uint64) bool) *Value { - for index, val := range v.MustUint64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUint64 uses the specified decider function to select items // from the []uint64. The object contained in the result will contain // only the selected items. 
func (v *Value) WhereUint64(decider func(int, uint64) bool) *Value { - var selected []uint64 - v.EachUint64(func(index int, val uint64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUint64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uint64. func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { - groups := make(map[string][]uint64) - v.EachUint64(func(index int, val uint64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2122,47 +1596,37 @@ func (v *Value) GroupUint64(grouper func(int, uint64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUint64 uses the specified function to replace each uint64s // by iterating each item. The data in the returned result will be a // []uint64 containing the replaced items. func (v *Value) ReplaceUint64(replacer func(int, uint64) uint64) *Value { - arr := v.MustUint64Slice() replaced := make([]uint64, len(arr)) - v.EachUint64(func(index int, val uint64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUint64 uses the specified collector function to collect a value // for each of the uint64s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectUint64(collector func(int, uint64) interface{}) *Value { - arr := v.MustUint64Slice() collected := make([]interface{}, len(arr)) - v.EachUint64(func(index int, val uint64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Uintptr (uintptr and []uintptr) - -------------------------------------------------- + Uintptr (uintptr and []uintptr) */ // Uintptr gets the value as a uintptr, returns the optionalDefault @@ -2220,44 +1684,35 @@ func (v *Value) IsUintptrSlice() bool { // // Panics if the object is the wrong type. func (v *Value) EachUintptr(callback func(int, uintptr) bool) *Value { - for index, val := range v.MustUintptrSlice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereUintptr uses the specified decider function to select items // from the []uintptr. The object contained in the result will contain // only the selected items. func (v *Value) WhereUintptr(decider func(int, uintptr) bool) *Value { - var selected []uintptr - v.EachUintptr(func(index int, val uintptr) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupUintptr uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]uintptr. func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { - groups := make(map[string][]uintptr) - v.EachUintptr(func(index int, val uintptr) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2266,47 +1721,37 @@ func (v *Value) GroupUintptr(grouper func(int, uintptr) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceUintptr uses the specified function to replace each uintptrs // by iterating each item. 
The data in the returned result will be a // []uintptr containing the replaced items. func (v *Value) ReplaceUintptr(replacer func(int, uintptr) uintptr) *Value { - arr := v.MustUintptrSlice() replaced := make([]uintptr, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectUintptr uses the specified collector function to collect a value // for each of the uintptrs in the slice. The data returned will be a // []interface{}. func (v *Value) CollectUintptr(collector func(int, uintptr) interface{}) *Value { - arr := v.MustUintptrSlice() collected := make([]interface{}, len(arr)) - v.EachUintptr(func(index int, val uintptr) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Float32 (float32 and []float32) - -------------------------------------------------- + Float32 (float32 and []float32) */ // Float32 gets the value as a float32, returns the optionalDefault @@ -2364,44 +1809,35 @@ func (v *Value) IsFloat32Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachFloat32(callback func(int, float32) bool) *Value { - for index, val := range v.MustFloat32Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat32 uses the specified decider function to select items // from the []float32. The object contained in the result will contain // only the selected items. func (v *Value) WhereFloat32(decider func(int, float32) bool) *Value { - var selected []float32 - v.EachFloat32(func(index int, val float32) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat32 uses the specified grouper function to group the items // keyed by the return of the grouper. 
The object contained in the // result will contain a map[string][]float32. func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { - groups := make(map[string][]float32) - v.EachFloat32(func(index int, val float32) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2410,47 +1846,37 @@ func (v *Value) GroupFloat32(grouper func(int, float32) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat32 uses the specified function to replace each float32s // by iterating each item. The data in the returned result will be a // []float32 containing the replaced items. func (v *Value) ReplaceFloat32(replacer func(int, float32) float32) *Value { - arr := v.MustFloat32Slice() replaced := make([]float32, len(arr)) - v.EachFloat32(func(index int, val float32) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat32 uses the specified collector function to collect a value // for each of the float32s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectFloat32(collector func(int, float32) interface{}) *Value { - arr := v.MustFloat32Slice() collected := make([]interface{}, len(arr)) - v.EachFloat32(func(index int, val float32) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Float64 (float64 and []float64) - -------------------------------------------------- + Float64 (float64 and []float64) */ // Float64 gets the value as a float64, returns the optionalDefault @@ -2508,44 +1934,35 @@ func (v *Value) IsFloat64Slice() bool { // // Panics if the object is the wrong type. 
func (v *Value) EachFloat64(callback func(int, float64) bool) *Value { - for index, val := range v.MustFloat64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereFloat64 uses the specified decider function to select items // from the []float64. The object contained in the result will contain // only the selected items. func (v *Value) WhereFloat64(decider func(int, float64) bool) *Value { - var selected []float64 - v.EachFloat64(func(index int, val float64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupFloat64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]float64. func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { - groups := make(map[string][]float64) - v.EachFloat64(func(index int, val float64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2554,47 +1971,37 @@ func (v *Value) GroupFloat64(grouper func(int, float64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceFloat64 uses the specified function to replace each float64s // by iterating each item. The data in the returned result will be a // []float64 containing the replaced items. func (v *Value) ReplaceFloat64(replacer func(int, float64) float64) *Value { - arr := v.MustFloat64Slice() replaced := make([]float64, len(arr)) - v.EachFloat64(func(index int, val float64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectFloat64 uses the specified collector function to collect a value // for each of the float64s in the slice. The data returned will be a // []interface{}. 
func (v *Value) CollectFloat64(collector func(int, float64) interface{}) *Value { - arr := v.MustFloat64Slice() collected := make([]interface{}, len(arr)) - v.EachFloat64(func(index int, val float64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Complex64 (complex64 and []complex64) - -------------------------------------------------- + Complex64 (complex64 and []complex64) */ // Complex64 gets the value as a complex64, returns the optionalDefault @@ -2652,44 +2059,35 @@ func (v *Value) IsComplex64Slice() bool { // // Panics if the object is the wrong type. func (v *Value) EachComplex64(callback func(int, complex64) bool) *Value { - for index, val := range v.MustComplex64Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex64 uses the specified decider function to select items // from the []complex64. The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex64(decider func(int, complex64) bool) *Value { - var selected []complex64 - v.EachComplex64(func(index int, val complex64) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex64 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex64. 
func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { - groups := make(map[string][]complex64) - v.EachComplex64(func(index int, val complex64) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2698,47 +2096,37 @@ func (v *Value) GroupComplex64(grouper func(int, complex64) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex64 uses the specified function to replace each complex64s // by iterating each item. The data in the returned result will be a // []complex64 containing the replaced items. func (v *Value) ReplaceComplex64(replacer func(int, complex64) complex64) *Value { - arr := v.MustComplex64Slice() replaced := make([]complex64, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex64 uses the specified collector function to collect a value // for each of the complex64s in the slice. The data returned will be a // []interface{}. func (v *Value) CollectComplex64(collector func(int, complex64) interface{}) *Value { - arr := v.MustComplex64Slice() collected := make([]interface{}, len(arr)) - v.EachComplex64(func(index int, val complex64) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } /* - Complex128 (complex128 and []complex128) - -------------------------------------------------- + Complex128 (complex128 and []complex128) */ // Complex128 gets the value as a complex128, returns the optionalDefault @@ -2796,44 +2184,35 @@ func (v *Value) IsComplex128Slice() bool { // // Panics if the object is the wrong type. 
func (v *Value) EachComplex128(callback func(int, complex128) bool) *Value { - for index, val := range v.MustComplex128Slice() { carryon := callback(index, val) - if carryon == false { + if !carryon { break } } - return v - } // WhereComplex128 uses the specified decider function to select items // from the []complex128. The object contained in the result will contain // only the selected items. func (v *Value) WhereComplex128(decider func(int, complex128) bool) *Value { - var selected []complex128 - v.EachComplex128(func(index int, val complex128) bool { shouldSelect := decider(index, val) - if shouldSelect == false { + if !shouldSelect { selected = append(selected, val) } return true }) - return &Value{data: selected} - } // GroupComplex128 uses the specified grouper function to group the items // keyed by the return of the grouper. The object contained in the // result will contain a map[string][]complex128. func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { - groups := make(map[string][]complex128) - v.EachComplex128(func(index int, val complex128) bool { group := grouper(index, val) if _, ok := groups[group]; !ok { @@ -2842,40 +2221,31 @@ func (v *Value) GroupComplex128(grouper func(int, complex128) string) *Value { groups[group] = append(groups[group], val) return true }) - return &Value{data: groups} - } // ReplaceComplex128 uses the specified function to replace each complex128s // by iterating each item. The data in the returned result will be a // []complex128 containing the replaced items. func (v *Value) ReplaceComplex128(replacer func(int, complex128) complex128) *Value { - arr := v.MustComplex128Slice() replaced := make([]complex128, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { replaced[index] = replacer(index, val) return true }) - return &Value{data: replaced} - } // CollectComplex128 uses the specified collector function to collect a value // for each of the complex128s in the slice. 
The data returned will be a // []interface{}. func (v *Value) CollectComplex128(collector func(int, complex128) interface{}) *Value { - arr := v.MustComplex128Slice() collected := make([]interface{}, len(arr)) - v.EachComplex128(func(index int, val complex128) bool { collected[index] = collector(index, val) return true }) - return &Value{data: collected} } diff --git a/vendor/github.com/stretchr/objx/type_specific_codegen_test.go b/vendor/github.com/stretchr/objx/type_specific_codegen_test.go index f7a4fceea3b0..72fa8c8d73de 100644 --- a/vendor/github.com/stretchr/objx/type_specific_codegen_test.go +++ b/vendor/github.com/stretchr/objx/type_specific_codegen_test.go @@ -1,123 +1,106 @@ -package objx +package objx_test import ( "fmt" - "github.com/stretchr/testify/assert" "testing" -) -// ************************************************************ -// TESTS -// ************************************************************ + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) +/* + Tests for Inter (interface{} and []interface{}) +*/ func TestInter(t *testing.T) { - val := interface{}("something") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Inter()) - assert.Equal(t, val, New(m).Get("value").MustInter()) - assert.Equal(t, interface{}(nil), New(m).Get("nothing").Inter()) - assert.Equal(t, val, New(m).Get("nothing").Inter("something")) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Inter()) + assert.Equal(t, val, m.Get("value").MustInter()) + assert.Equal(t, interface{}(nil), m.Get("nothing").Inter()) + assert.Equal(t, val, m.Get("nothing").Inter("something")) assert.Panics(t, func() { - New(m).Get("age").MustInter() + m.Get("age").MustInter() }) - } func TestInterSlice(t *testing.T) { - val := interface{}("something") - m := map[string]interface{}{"value": []interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").InterSlice()[0]) - 
assert.Equal(t, val, New(m).Get("value").MustInterSlice()[0]) - assert.Equal(t, []interface{}(nil), New(m).Get("nothing").InterSlice()) - assert.Equal(t, val, New(m).Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) + m := objx.Map{"value": []interface{}{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").InterSlice()[0]) + assert.Equal(t, val, m.Get("value").MustInterSlice()[0]) + assert.Equal(t, []interface{}(nil), m.Get("nothing").InterSlice()) + assert.Equal(t, val, m.Get("nothing").InterSlice([]interface{}{interface{}("something")})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustInterSlice() + m.Get("nothing").MustInterSlice() }) - } func TestIsInter(t *testing.T) { + m := objx.Map{"data": interface{}("something")} - var v *Value - - v = &Value{data: interface{}("something")} - assert.True(t, v.IsInter()) + assert.True(t, m.Get("data").IsInter()) +} - v = &Value{data: []interface{}{interface{}("something")}} - assert.True(t, v.IsInterSlice()) +func TestIsInterSlice(t *testing.T) { + m := objx.Map{"data": []interface{}{interface{}("something")}} + assert.True(t, m.Get("data").IsInterSlice()) } func TestEachInter(t *testing.T) { - - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + m := objx.Map{"data": []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} count := 0 replacedVals := make([]interface{}, 0) - assert.Equal(t, v, v.EachInter(func(i int, val interface{}) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInter(func(i int, val interface{}) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInterSlice()[0]) - assert.Equal(t, replacedVals[1], 
v.MustInterSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustInterSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustInterSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustInterSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustInterSlice()[2]) } func TestWhereInter(t *testing.T) { + m := objx.Map{"data": []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - selected := v.WhereInter(func(i int, val interface{}) bool { + selected := m.Get("data").WhereInter(func(i int, val interface{}) bool { return i%2 == 0 }).MustInterSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupInter(t *testing.T) { + m := objx.Map{"data": []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - grouped := v.GroupInter(func(i int, val interface{}) string { + grouped := m.Get("data").GroupInter(func(i int, val interface{}) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]interface{}) + }).Data().(map[string][]interface{}) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInter(t *testing.T) { + m := objx.Map{"data": []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} + rawArr := 
m.Get("data").MustInterSlice() - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - rawArr := v.MustInterSlice() - - replaced := v.ReplaceInter(func(index int, val interface{}) interface{} { + replaced := m.Get("data").ReplaceInter(func(index int, val interface{}) interface{} { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustInterSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -126,161 +109,16 @@ func TestReplaceInter(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInter(t *testing.T) { + m := objx.Map{"data": []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - v := &Value{data: []interface{}{interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something"), interface{}("something")}} - - collected := v.CollectInter(func(index int, val interface{}) interface{} { + collected := m.Get("data").CollectInter(func(index int, val interface{}) interface{} { return index }) - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestMSI(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": 
"Tyler"}) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSI()) - assert.Equal(t, val, New(m).Get("value").MustMSI()) - assert.Equal(t, map[string]interface{}(nil), New(m).Get("nothing").MSI()) - assert.Equal(t, val, New(m).Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) - - assert.Panics(t, func() { - New(m).Get("age").MustMSI() - }) - -} - -func TestMSISlice(t *testing.T) { - - val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) - m := map[string]interface{}{"value": []map[string]interface{}{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").MSISlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustMSISlice()[0]) - assert.Equal(t, []map[string]interface{}(nil), New(m).Get("nothing").MSISlice()) - assert.Equal(t, val, New(m).Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustMSISlice() - }) -} - -func TestIsMSI(t *testing.T) { - - var v *Value - - v = &Value{data: map[string]interface{}(map[string]interface{}{"name": "Tyler"})} - assert.True(t, v.IsMSI()) - - v = &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - assert.True(t, v.IsMSISlice()) - -} - -func TestEachMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - count := 0 - replacedVals := make([]map[string]interface{}, 0) - assert.Equal(t, v, v.EachMSI(func(i int, val map[string]interface{}) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - 
return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustMSISlice()[0]) - assert.Equal(t, replacedVals[1], v.MustMSISlice()[1]) - assert.Equal(t, replacedVals[2], v.MustMSISlice()[2]) - -} - -func TestWhereMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - selected := v.WhereMSI(func(i int, val map[string]interface{}) bool { - return i%2 == 0 - }).MustMSISlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - grouped := v.GroupMSI(func(i int, val map[string]interface{}) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]map[string]interface{}) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), 
map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - rawArr := v.MustMSISlice() - - replaced := v.ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustMSISlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectMSI(t *testing.T) { - - v := &Value{data: []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} - - collected := v.CollectMSI(func(index int, val map[string]interface{}) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -289,264 +127,101 @@ func TestCollectMSI(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - -func TestObjxMap(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMap()) - assert.Equal(t, val, New(m).Get("value").MustObjxMap()) - assert.Equal(t, 
(Map)(New(nil)), New(m).Get("nothing").ObjxMap()) - assert.Equal(t, val, New(m).Get("nothing").ObjxMap(New(1))) - - assert.Panics(t, func() { - New(m).Get("age").MustObjxMap() - }) - } -func TestObjxMapSlice(t *testing.T) { - - val := (Map)(New(1)) - m := map[string]interface{}{"value": [](Map){val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").ObjxMapSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustObjxMapSlice()[0]) - assert.Equal(t, [](Map)(nil), New(m).Get("nothing").ObjxMapSlice()) - assert.Equal(t, val, New(m).Get("nothing").ObjxMapSlice([](Map){(Map)(New(1))})[0]) - - assert.Panics(t, func() { - New(m).Get("nothing").MustObjxMapSlice() - }) - -} - -func TestIsObjxMap(t *testing.T) { - - var v *Value - - v = &Value{data: (Map)(New(1))} - assert.True(t, v.IsObjxMap()) - - v = &Value{data: [](Map){(Map)(New(1))}} - assert.True(t, v.IsObjxMapSlice()) - -} - -func TestEachObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - count := 0 - replacedVals := make([](Map), 0) - assert.Equal(t, v, v.EachObjxMap(func(i int, val Map) bool { - - count++ - replacedVals = append(replacedVals, val) - - // abort early - if i == 2 { - return false - } - - return true - - })) - - assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustObjxMapSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustObjxMapSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustObjxMapSlice()[2]) - -} - -func TestWhereObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - selected := v.WhereObjxMap(func(i int, val Map) bool { - return i%2 == 0 - }).MustObjxMapSlice() - - assert.Equal(t, 3, len(selected)) - -} - -func TestGroupObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - grouped := 
v.GroupObjxMap(func(i int, val Map) string { - return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][](Map)) - - assert.Equal(t, 2, len(grouped)) - assert.Equal(t, 3, len(grouped["true"])) - assert.Equal(t, 3, len(grouped["false"])) - -} - -func TestReplaceObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - rawArr := v.MustObjxMapSlice() - - replaced := v.ReplaceObjxMap(func(index int, val Map) Map { - if index < len(rawArr)-1 { - return rawArr[index+1] - } - return rawArr[0] - }) - - replacedArr := replaced.MustObjxMapSlice() - if assert.Equal(t, 6, len(replacedArr)) { - assert.Equal(t, replacedArr[0], rawArr[1]) - assert.Equal(t, replacedArr[1], rawArr[2]) - assert.Equal(t, replacedArr[2], rawArr[3]) - assert.Equal(t, replacedArr[3], rawArr[4]) - assert.Equal(t, replacedArr[4], rawArr[5]) - assert.Equal(t, replacedArr[5], rawArr[0]) - } - -} - -func TestCollectObjxMap(t *testing.T) { - - v := &Value{data: [](Map){(Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1)), (Map)(New(1))}} - - collected := v.CollectObjxMap(func(index int, val Map) interface{} { - return index - }) - - collectedArr := collected.MustInterSlice() - if assert.Equal(t, 6, len(collectedArr)) { - assert.Equal(t, collectedArr[0], 0) - assert.Equal(t, collectedArr[1], 1) - assert.Equal(t, collectedArr[2], 2) - assert.Equal(t, collectedArr[3], 3) - assert.Equal(t, collectedArr[4], 4) - assert.Equal(t, collectedArr[5], 5) - } - -} - -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Bool (bool and []bool) +*/ func TestBool(t *testing.T) { - val := bool(true) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Bool()) - assert.Equal(t, val, New(m).Get("value").MustBool()) - assert.Equal(t, bool(false), 
New(m).Get("nothing").Bool()) - assert.Equal(t, val, New(m).Get("nothing").Bool(true)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Bool()) + assert.Equal(t, val, m.Get("value").MustBool()) + assert.Equal(t, bool(false), m.Get("nothing").Bool()) + assert.Equal(t, val, m.Get("nothing").Bool(true)) assert.Panics(t, func() { - New(m).Get("age").MustBool() + m.Get("age").MustBool() }) - } func TestBoolSlice(t *testing.T) { - val := bool(true) - m := map[string]interface{}{"value": []bool{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").BoolSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustBoolSlice()[0]) - assert.Equal(t, []bool(nil), New(m).Get("nothing").BoolSlice()) - assert.Equal(t, val, New(m).Get("nothing").BoolSlice([]bool{bool(true)})[0]) + m := objx.Map{"value": []bool{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").BoolSlice()[0]) + assert.Equal(t, val, m.Get("value").MustBoolSlice()[0]) + assert.Equal(t, []bool(nil), m.Get("nothing").BoolSlice()) + assert.Equal(t, val, m.Get("nothing").BoolSlice([]bool{bool(true)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustBoolSlice() + m.Get("nothing").MustBoolSlice() }) - } func TestIsBool(t *testing.T) { + m := objx.Map{"data": bool(true)} - var v *Value - - v = &Value{data: bool(true)} - assert.True(t, v.IsBool()) + assert.True(t, m.Get("data").IsBool()) +} - v = &Value{data: []bool{bool(true)}} - assert.True(t, v.IsBoolSlice()) +func TestIsBoolSlice(t *testing.T) { + m := objx.Map{"data": []bool{bool(true)}} + assert.True(t, m.Get("data").IsBoolSlice()) } func TestEachBool(t *testing.T) { - - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} + m := objx.Map{"data": []bool{bool(true), bool(true), bool(true), bool(true), bool(true)}} count := 0 replacedVals := make([]bool, 0) - assert.Equal(t, v, v.EachBool(func(i int, val bool) bool { - + assert.Equal(t, m.Get("data"), 
m.Get("data").EachBool(func(i int, val bool) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustBoolSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustBoolSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustBoolSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustBoolSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustBoolSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustBoolSlice()[2]) } func TestWhereBool(t *testing.T) { + m := objx.Map{"data": []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - selected := v.WhereBool(func(i int, val bool) bool { + selected := m.Get("data").WhereBool(func(i int, val bool) bool { return i%2 == 0 }).MustBoolSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupBool(t *testing.T) { + m := objx.Map{"data": []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - grouped := v.GroupBool(func(i int, val bool) string { + grouped := m.Get("data").GroupBool(func(i int, val bool) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]bool) + }).Data().(map[string][]bool) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceBool(t *testing.T) { + m := objx.Map{"data": []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} + rawArr := m.Get("data").MustBoolSlice() - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - rawArr := v.MustBoolSlice() - - replaced := v.ReplaceBool(func(index int, val bool) bool { + 
replaced := m.Get("data").ReplaceBool(func(index int, val bool) bool { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustBoolSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -555,18 +230,16 @@ func TestReplaceBool(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectBool(t *testing.T) { + m := objx.Map{"data": []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - v := &Value{data: []bool{bool(true), bool(true), bool(true), bool(true), bool(true), bool(true)}} - - collected := v.CollectBool(func(index int, val bool) interface{} { + collected := m.Get("data").CollectBool(func(index int, val bool) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -575,121 +248,101 @@ func TestCollectBool(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Str (string and []string) +*/ func TestStr(t *testing.T) { - val := string("hello") - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Str()) - assert.Equal(t, val, New(m).Get("value").MustStr()) - assert.Equal(t, string(""), New(m).Get("nothing").Str()) - assert.Equal(t, val, New(m).Get("nothing").Str("hello")) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Str()) + assert.Equal(t, val, m.Get("value").MustStr()) + assert.Equal(t, string(""), m.Get("nothing").Str()) + assert.Equal(t, val, m.Get("nothing").Str("hello")) assert.Panics(t, func() { - 
New(m).Get("age").MustStr() + m.Get("age").MustStr() }) - } func TestStrSlice(t *testing.T) { - val := string("hello") - m := map[string]interface{}{"value": []string{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").StrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustStrSlice()[0]) - assert.Equal(t, []string(nil), New(m).Get("nothing").StrSlice()) - assert.Equal(t, val, New(m).Get("nothing").StrSlice([]string{string("hello")})[0]) + m := objx.Map{"value": []string{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").StrSlice()[0]) + assert.Equal(t, val, m.Get("value").MustStrSlice()[0]) + assert.Equal(t, []string(nil), m.Get("nothing").StrSlice()) + assert.Equal(t, val, m.Get("nothing").StrSlice([]string{string("hello")})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustStrSlice() + m.Get("nothing").MustStrSlice() }) - } func TestIsStr(t *testing.T) { + m := objx.Map{"data": string("hello")} - var v *Value - - v = &Value{data: string("hello")} - assert.True(t, v.IsStr()) + assert.True(t, m.Get("data").IsStr()) +} - v = &Value{data: []string{string("hello")}} - assert.True(t, v.IsStrSlice()) +func TestIsStrSlice(t *testing.T) { + m := objx.Map{"data": []string{string("hello")}} + assert.True(t, m.Get("data").IsStrSlice()) } func TestEachStr(t *testing.T) { - - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + m := objx.Map{"data": []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} count := 0 replacedVals := make([]string, 0) - assert.Equal(t, v, v.EachStr(func(i int, val string) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachStr(func(i int, val string) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustStrSlice()[0]) - assert.Equal(t, 
replacedVals[1], v.MustStrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustStrSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustStrSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustStrSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustStrSlice()[2]) } func TestWhereStr(t *testing.T) { + m := objx.Map{"data": []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - selected := v.WhereStr(func(i int, val string) bool { + selected := m.Get("data").WhereStr(func(i int, val string) bool { return i%2 == 0 }).MustStrSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupStr(t *testing.T) { + m := objx.Map{"data": []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - grouped := v.GroupStr(func(i int, val string) string { + grouped := m.Get("data").GroupStr(func(i int, val string) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]string) + }).Data().(map[string][]string) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceStr(t *testing.T) { + m := objx.Map{"data": []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} + rawArr := m.Get("data").MustStrSlice() - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - rawArr := v.MustStrSlice() - - replaced := v.ReplaceStr(func(index int, val string) string { + replaced := m.Get("data").ReplaceStr(func(index int, val string) string { if index < len(rawArr)-1 { 
return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustStrSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -698,18 +351,16 @@ func TestReplaceStr(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectStr(t *testing.T) { + m := objx.Map{"data": []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - v := &Value{data: []string{string("hello"), string("hello"), string("hello"), string("hello"), string("hello"), string("hello")}} - - collected := v.CollectStr(func(index int, val string) interface{} { + collected := m.Get("data").CollectStr(func(index int, val string) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -718,121 +369,101 @@ func TestCollectStr(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Int (int and []int) +*/ func TestInt(t *testing.T) { - val := int(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int()) - assert.Equal(t, val, New(m).Get("value").MustInt()) - assert.Equal(t, int(0), New(m).Get("nothing").Int()) - assert.Equal(t, val, New(m).Get("nothing").Int(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int()) + assert.Equal(t, val, m.Get("value").MustInt()) + assert.Equal(t, int(0), m.Get("nothing").Int()) + assert.Equal(t, val, m.Get("nothing").Int(1)) assert.Panics(t, func() { - New(m).Get("age").MustInt() + m.Get("age").MustInt() }) - } func TestIntSlice(t *testing.T) { 
- val := int(1) - m := map[string]interface{}{"value": []int{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").IntSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustIntSlice()[0]) - assert.Equal(t, []int(nil), New(m).Get("nothing").IntSlice()) - assert.Equal(t, val, New(m).Get("nothing").IntSlice([]int{int(1)})[0]) + m := objx.Map{"value": []int{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").IntSlice()[0]) + assert.Equal(t, val, m.Get("value").MustIntSlice()[0]) + assert.Equal(t, []int(nil), m.Get("nothing").IntSlice()) + assert.Equal(t, val, m.Get("nothing").IntSlice([]int{int(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustIntSlice() + m.Get("nothing").MustIntSlice() }) - } func TestIsInt(t *testing.T) { + m := objx.Map{"data": int(1)} - var v *Value - - v = &Value{data: int(1)} - assert.True(t, v.IsInt()) + assert.True(t, m.Get("data").IsInt()) +} - v = &Value{data: []int{int(1)}} - assert.True(t, v.IsIntSlice()) +func TestIsIntSlice(t *testing.T) { + m := objx.Map{"data": []int{int(1)}} + assert.True(t, m.Get("data").IsIntSlice()) } func TestEachInt(t *testing.T) { - - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1)}} + m := objx.Map{"data": []int{int(1), int(1), int(1), int(1), int(1)}} count := 0 replacedVals := make([]int, 0) - assert.Equal(t, v, v.EachInt(func(i int, val int) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInt(func(i int, val int) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustIntSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustIntSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustIntSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustIntSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustIntSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustIntSlice()[2]) } 
func TestWhereInt(t *testing.T) { + m := objx.Map{"data": []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - selected := v.WhereInt(func(i int, val int) bool { + selected := m.Get("data").WhereInt(func(i int, val int) bool { return i%2 == 0 }).MustIntSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupInt(t *testing.T) { + m := objx.Map{"data": []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - grouped := v.GroupInt(func(i int, val int) string { + grouped := m.Get("data").GroupInt(func(i int, val int) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int) + }).Data().(map[string][]int) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInt(t *testing.T) { + m := objx.Map{"data": []int{int(1), int(1), int(1), int(1), int(1), int(1)}} + rawArr := m.Get("data").MustIntSlice() - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - rawArr := v.MustIntSlice() - - replaced := v.ReplaceInt(func(index int, val int) int { + replaced := m.Get("data").ReplaceInt(func(index int, val int) int { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustIntSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -841,18 +472,16 @@ func TestReplaceInt(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInt(t *testing.T) { + m := objx.Map{"data": []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - v := &Value{data: []int{int(1), int(1), int(1), int(1), int(1), int(1)}} - - collected := v.CollectInt(func(index int, val int) interface{} { + collected := m.Get("data").CollectInt(func(index 
int, val int) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -861,121 +490,101 @@ func TestCollectInt(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Int8 (int8 and []int8) +*/ func TestInt8(t *testing.T) { - val := int8(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8()) - assert.Equal(t, val, New(m).Get("value").MustInt8()) - assert.Equal(t, int8(0), New(m).Get("nothing").Int8()) - assert.Equal(t, val, New(m).Get("nothing").Int8(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int8()) + assert.Equal(t, val, m.Get("value").MustInt8()) + assert.Equal(t, int8(0), m.Get("nothing").Int8()) + assert.Equal(t, val, m.Get("nothing").Int8(1)) assert.Panics(t, func() { - New(m).Get("age").MustInt8() + m.Get("age").MustInt8() }) - } func TestInt8Slice(t *testing.T) { - val := int8(1) - m := map[string]interface{}{"value": []int8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt8Slice()[0]) - assert.Equal(t, []int8(nil), New(m).Get("nothing").Int8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int8Slice([]int8{int8(1)})[0]) + m := objx.Map{"value": []int8{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int8Slice()[0]) + assert.Equal(t, val, m.Get("value").MustInt8Slice()[0]) + assert.Equal(t, []int8(nil), m.Get("nothing").Int8Slice()) + assert.Equal(t, val, m.Get("nothing").Int8Slice([]int8{int8(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustInt8Slice() + m.Get("nothing").MustInt8Slice() }) - } func TestIsInt8(t 
*testing.T) { + m := objx.Map{"data": int8(1)} - var v *Value - - v = &Value{data: int8(1)} - assert.True(t, v.IsInt8()) + assert.True(t, m.Get("data").IsInt8()) +} - v = &Value{data: []int8{int8(1)}} - assert.True(t, v.IsInt8Slice()) +func TestIsInt8Slice(t *testing.T) { + m := objx.Map{"data": []int8{int8(1)}} + assert.True(t, m.Get("data").IsInt8Slice()) } func TestEachInt8(t *testing.T) { - - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} + m := objx.Map{"data": []int8{int8(1), int8(1), int8(1), int8(1), int8(1)}} count := 0 replacedVals := make([]int8, 0) - assert.Equal(t, v, v.EachInt8(func(i int, val int8) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInt8(func(i int, val int8) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt8Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustInt8Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustInt8Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustInt8Slice()[2]) } func TestWhereInt8(t *testing.T) { + m := objx.Map{"data": []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - selected := v.WhereInt8(func(i int, val int8) bool { + selected := m.Get("data").WhereInt8(func(i int, val int8) bool { return i%2 == 0 }).MustInt8Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupInt8(t *testing.T) { + m := objx.Map{"data": []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - grouped := v.GroupInt8(func(i int, val int8) string { + grouped := m.Get("data").GroupInt8(func(i int, val 
int8) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int8) + }).Data().(map[string][]int8) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInt8(t *testing.T) { + m := objx.Map{"data": []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} + rawArr := m.Get("data").MustInt8Slice() - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - rawArr := v.MustInt8Slice() - - replaced := v.ReplaceInt8(func(index int, val int8) int8 { + replaced := m.Get("data").ReplaceInt8(func(index int, val int8) int8 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustInt8Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -984,18 +593,16 @@ func TestReplaceInt8(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInt8(t *testing.T) { + m := objx.Map{"data": []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - v := &Value{data: []int8{int8(1), int8(1), int8(1), int8(1), int8(1), int8(1)}} - - collected := v.CollectInt8(func(index int, val int8) interface{} { + collected := m.Get("data").CollectInt8(func(index int, val int8) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1004,121 +611,101 @@ func TestCollectInt8(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Int16 (int16 and []int16) +*/ func TestInt16(t *testing.T) { - val := int16(1) - m := map[string]interface{}{"value": val, 
"nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16()) - assert.Equal(t, val, New(m).Get("value").MustInt16()) - assert.Equal(t, int16(0), New(m).Get("nothing").Int16()) - assert.Equal(t, val, New(m).Get("nothing").Int16(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int16()) + assert.Equal(t, val, m.Get("value").MustInt16()) + assert.Equal(t, int16(0), m.Get("nothing").Int16()) + assert.Equal(t, val, m.Get("nothing").Int16(1)) assert.Panics(t, func() { - New(m).Get("age").MustInt16() + m.Get("age").MustInt16() }) - } func TestInt16Slice(t *testing.T) { - val := int16(1) - m := map[string]interface{}{"value": []int16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt16Slice()[0]) - assert.Equal(t, []int16(nil), New(m).Get("nothing").Int16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int16Slice([]int16{int16(1)})[0]) + m := objx.Map{"value": []int16{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int16Slice()[0]) + assert.Equal(t, val, m.Get("value").MustInt16Slice()[0]) + assert.Equal(t, []int16(nil), m.Get("nothing").Int16Slice()) + assert.Equal(t, val, m.Get("nothing").Int16Slice([]int16{int16(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustInt16Slice() + m.Get("nothing").MustInt16Slice() }) - } func TestIsInt16(t *testing.T) { + m := objx.Map{"data": int16(1)} - var v *Value - - v = &Value{data: int16(1)} - assert.True(t, v.IsInt16()) + assert.True(t, m.Get("data").IsInt16()) +} - v = &Value{data: []int16{int16(1)}} - assert.True(t, v.IsInt16Slice()) +func TestIsInt16Slice(t *testing.T) { + m := objx.Map{"data": []int16{int16(1)}} + assert.True(t, m.Get("data").IsInt16Slice()) } func TestEachInt16(t *testing.T) { - - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} + m := objx.Map{"data": []int16{int16(1), int16(1), int16(1), int16(1), int16(1)}} count := 0 
replacedVals := make([]int16, 0) - assert.Equal(t, v, v.EachInt16(func(i int, val int16) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInt16(func(i int, val int16) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt16Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt16Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustInt16Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustInt16Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustInt16Slice()[2]) } func TestWhereInt16(t *testing.T) { + m := objx.Map{"data": []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - selected := v.WhereInt16(func(i int, val int16) bool { + selected := m.Get("data").WhereInt16(func(i int, val int16) bool { return i%2 == 0 }).MustInt16Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupInt16(t *testing.T) { + m := objx.Map{"data": []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - grouped := v.GroupInt16(func(i int, val int16) string { + grouped := m.Get("data").GroupInt16(func(i int, val int16) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int16) + }).Data().(map[string][]int16) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInt16(t *testing.T) { + m := objx.Map{"data": []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} + rawArr := m.Get("data").MustInt16Slice() - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - rawArr := 
v.MustInt16Slice() - - replaced := v.ReplaceInt16(func(index int, val int16) int16 { + replaced := m.Get("data").ReplaceInt16(func(index int, val int16) int16 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustInt16Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1127,18 +714,16 @@ func TestReplaceInt16(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInt16(t *testing.T) { + m := objx.Map{"data": []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - v := &Value{data: []int16{int16(1), int16(1), int16(1), int16(1), int16(1), int16(1)}} - - collected := v.CollectInt16(func(index int, val int16) interface{} { + collected := m.Get("data").CollectInt16(func(index int, val int16) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1147,121 +732,101 @@ func TestCollectInt16(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Int32 (int32 and []int32) +*/ func TestInt32(t *testing.T) { - val := int32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32()) - assert.Equal(t, val, New(m).Get("value").MustInt32()) - assert.Equal(t, int32(0), New(m).Get("nothing").Int32()) - assert.Equal(t, val, New(m).Get("nothing").Int32(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int32()) + assert.Equal(t, val, m.Get("value").MustInt32()) + assert.Equal(t, int32(0), m.Get("nothing").Int32()) + assert.Equal(t, val, 
m.Get("nothing").Int32(1)) assert.Panics(t, func() { - New(m).Get("age").MustInt32() + m.Get("age").MustInt32() }) - } func TestInt32Slice(t *testing.T) { - val := int32(1) - m := map[string]interface{}{"value": []int32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt32Slice()[0]) - assert.Equal(t, []int32(nil), New(m).Get("nothing").Int32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Int32Slice([]int32{int32(1)})[0]) + m := objx.Map{"value": []int32{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int32Slice()[0]) + assert.Equal(t, val, m.Get("value").MustInt32Slice()[0]) + assert.Equal(t, []int32(nil), m.Get("nothing").Int32Slice()) + assert.Equal(t, val, m.Get("nothing").Int32Slice([]int32{int32(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustInt32Slice() + m.Get("nothing").MustInt32Slice() }) - } func TestIsInt32(t *testing.T) { + m := objx.Map{"data": int32(1)} - var v *Value - - v = &Value{data: int32(1)} - assert.True(t, v.IsInt32()) + assert.True(t, m.Get("data").IsInt32()) +} - v = &Value{data: []int32{int32(1)}} - assert.True(t, v.IsInt32Slice()) +func TestIsInt32Slice(t *testing.T) { + m := objx.Map{"data": []int32{int32(1)}} + assert.True(t, m.Get("data").IsInt32Slice()) } func TestEachInt32(t *testing.T) { - - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} + m := objx.Map{"data": []int32{int32(1), int32(1), int32(1), int32(1), int32(1)}} count := 0 replacedVals := make([]int32, 0) - assert.Equal(t, v, v.EachInt32(func(i int, val int32) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInt32(func(i int, val int32) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt32Slice()[1]) - 
assert.Equal(t, replacedVals[2], v.MustInt32Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustInt32Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustInt32Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustInt32Slice()[2]) } func TestWhereInt32(t *testing.T) { + m := objx.Map{"data": []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - selected := v.WhereInt32(func(i int, val int32) bool { + selected := m.Get("data").WhereInt32(func(i int, val int32) bool { return i%2 == 0 }).MustInt32Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupInt32(t *testing.T) { + m := objx.Map{"data": []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - grouped := v.GroupInt32(func(i int, val int32) string { + grouped := m.Get("data").GroupInt32(func(i int, val int32) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int32) + }).Data().(map[string][]int32) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInt32(t *testing.T) { + m := objx.Map{"data": []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} + rawArr := m.Get("data").MustInt32Slice() - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - rawArr := v.MustInt32Slice() - - replaced := v.ReplaceInt32(func(index int, val int32) int32 { + replaced := m.Get("data").ReplaceInt32(func(index int, val int32) int32 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustInt32Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1270,18 +835,16 @@ func TestReplaceInt32(t *testing.T) 
{ assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInt32(t *testing.T) { + m := objx.Map{"data": []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - v := &Value{data: []int32{int32(1), int32(1), int32(1), int32(1), int32(1), int32(1)}} - - collected := v.CollectInt32(func(index int, val int32) interface{} { + collected := m.Get("data").CollectInt32(func(index int, val int32) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1290,121 +853,101 @@ func TestCollectInt32(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Int64 (int64 and []int64) +*/ func TestInt64(t *testing.T) { - val := int64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64()) - assert.Equal(t, val, New(m).Get("value").MustInt64()) - assert.Equal(t, int64(0), New(m).Get("nothing").Int64()) - assert.Equal(t, val, New(m).Get("nothing").Int64(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int64()) + assert.Equal(t, val, m.Get("value").MustInt64()) + assert.Equal(t, int64(0), m.Get("nothing").Int64()) + assert.Equal(t, val, m.Get("nothing").Int64(1)) assert.Panics(t, func() { - New(m).Get("age").MustInt64() + m.Get("age").MustInt64() }) - } func TestInt64Slice(t *testing.T) { - val := int64(1) - m := map[string]interface{}{"value": []int64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Int64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustInt64Slice()[0]) - assert.Equal(t, []int64(nil), New(m).Get("nothing").Int64Slice()) - assert.Equal(t, val, 
New(m).Get("nothing").Int64Slice([]int64{int64(1)})[0]) + m := objx.Map{"value": []int64{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Int64Slice()[0]) + assert.Equal(t, val, m.Get("value").MustInt64Slice()[0]) + assert.Equal(t, []int64(nil), m.Get("nothing").Int64Slice()) + assert.Equal(t, val, m.Get("nothing").Int64Slice([]int64{int64(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustInt64Slice() + m.Get("nothing").MustInt64Slice() }) - } func TestIsInt64(t *testing.T) { + m := objx.Map{"data": int64(1)} - var v *Value - - v = &Value{data: int64(1)} - assert.True(t, v.IsInt64()) + assert.True(t, m.Get("data").IsInt64()) +} - v = &Value{data: []int64{int64(1)}} - assert.True(t, v.IsInt64Slice()) +func TestIsInt64Slice(t *testing.T) { + m := objx.Map{"data": []int64{int64(1)}} + assert.True(t, m.Get("data").IsInt64Slice()) } func TestEachInt64(t *testing.T) { - - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} + m := objx.Map{"data": []int64{int64(1), int64(1), int64(1), int64(1), int64(1)}} count := 0 replacedVals := make([]int64, 0) - assert.Equal(t, v, v.EachInt64(func(i int, val int64) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachInt64(func(i int, val int64) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustInt64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustInt64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustInt64Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustInt64Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustInt64Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustInt64Slice()[2]) } func TestWhereInt64(t *testing.T) { + m := objx.Map{"data": []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - v := &Value{data: []int64{int64(1), int64(1), int64(1), 
int64(1), int64(1), int64(1)}} - - selected := v.WhereInt64(func(i int, val int64) bool { + selected := m.Get("data").WhereInt64(func(i int, val int64) bool { return i%2 == 0 }).MustInt64Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupInt64(t *testing.T) { + m := objx.Map{"data": []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - grouped := v.GroupInt64(func(i int, val int64) string { + grouped := m.Get("data").GroupInt64(func(i int, val int64) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]int64) + }).Data().(map[string][]int64) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceInt64(t *testing.T) { + m := objx.Map{"data": []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} + rawArr := m.Get("data").MustInt64Slice() - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - rawArr := v.MustInt64Slice() - - replaced := v.ReplaceInt64(func(index int, val int64) int64 { + replaced := m.Get("data").ReplaceInt64(func(index int, val int64) int64 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustInt64Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1413,18 +956,16 @@ func TestReplaceInt64(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectInt64(t *testing.T) { + m := objx.Map{"data": []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - v := &Value{data: []int64{int64(1), int64(1), int64(1), int64(1), int64(1), int64(1)}} - - collected := v.CollectInt64(func(index int, val int64) interface{} { + collected := m.Get("data").CollectInt64(func(index int, val int64) 
interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1433,121 +974,101 @@ func TestCollectInt64(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uint (uint and []uint) +*/ func TestUint(t *testing.T) { - val := uint(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint()) - assert.Equal(t, val, New(m).Get("value").MustUint()) - assert.Equal(t, uint(0), New(m).Get("nothing").Uint()) - assert.Equal(t, val, New(m).Get("nothing").Uint(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint()) + assert.Equal(t, val, m.Get("value").MustUint()) + assert.Equal(t, uint(0), m.Get("nothing").Uint()) + assert.Equal(t, val, m.Get("nothing").Uint(1)) assert.Panics(t, func() { - New(m).Get("age").MustUint() + m.Get("age").MustUint() }) - } func TestUintSlice(t *testing.T) { - val := uint(1) - m := map[string]interface{}{"value": []uint{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintSlice()[0]) - assert.Equal(t, []uint(nil), New(m).Get("nothing").UintSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintSlice([]uint{uint(1)})[0]) + m := objx.Map{"value": []uint{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").UintSlice()[0]) + assert.Equal(t, val, m.Get("value").MustUintSlice()[0]) + assert.Equal(t, []uint(nil), m.Get("nothing").UintSlice()) + assert.Equal(t, val, m.Get("nothing").UintSlice([]uint{uint(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUintSlice() + m.Get("nothing").MustUintSlice() }) - } func TestIsUint(t *testing.T) { + 
m := objx.Map{"data": uint(1)} - var v *Value - - v = &Value{data: uint(1)} - assert.True(t, v.IsUint()) + assert.True(t, m.Get("data").IsUint()) +} - v = &Value{data: []uint{uint(1)}} - assert.True(t, v.IsUintSlice()) +func TestIsUintSlice(t *testing.T) { + m := objx.Map{"data": []uint{uint(1)}} + assert.True(t, m.Get("data").IsUintSlice()) } func TestEachUint(t *testing.T) { - - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} + m := objx.Map{"data": []uint{uint(1), uint(1), uint(1), uint(1), uint(1)}} count := 0 replacedVals := make([]uint, 0) - assert.Equal(t, v, v.EachUint(func(i int, val uint) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUint(func(i int, val uint) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUintSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUintSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustUintSlice()[2]) } func TestWhereUint(t *testing.T) { + m := objx.Map{"data": []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - selected := v.WhereUint(func(i int, val uint) bool { + selected := m.Get("data").WhereUint(func(i int, val uint) bool { return i%2 == 0 }).MustUintSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupUint(t *testing.T) { + m := objx.Map{"data": []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - grouped := v.GroupUint(func(i int, val uint) string { + grouped := m.Get("data").GroupUint(func(i int, val uint) string { 
return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint) + }).Data().(map[string][]uint) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUint(t *testing.T) { + m := objx.Map{"data": []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} + rawArr := m.Get("data").MustUintSlice() - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - rawArr := v.MustUintSlice() - - replaced := v.ReplaceUint(func(index int, val uint) uint { + replaced := m.Get("data").ReplaceUint(func(index int, val uint) uint { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUintSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1556,18 +1077,16 @@ func TestReplaceUint(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUint(t *testing.T) { + m := objx.Map{"data": []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - v := &Value{data: []uint{uint(1), uint(1), uint(1), uint(1), uint(1), uint(1)}} - - collected := v.CollectUint(func(index int, val uint) interface{} { + collected := m.Get("data").CollectUint(func(index int, val uint) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1576,121 +1095,101 @@ func TestCollectUint(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uint8 (uint8 and []uint8) +*/ func TestUint8(t *testing.T) { - val := uint8(1) - m := map[string]interface{}{"value": val, "nothing": 
nil} - assert.Equal(t, val, New(m).Get("value").Uint8()) - assert.Equal(t, val, New(m).Get("value").MustUint8()) - assert.Equal(t, uint8(0), New(m).Get("nothing").Uint8()) - assert.Equal(t, val, New(m).Get("nothing").Uint8(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint8()) + assert.Equal(t, val, m.Get("value").MustUint8()) + assert.Equal(t, uint8(0), m.Get("nothing").Uint8()) + assert.Equal(t, val, m.Get("nothing").Uint8(1)) assert.Panics(t, func() { - New(m).Get("age").MustUint8() + m.Get("age").MustUint8() }) - } func TestUint8Slice(t *testing.T) { - val := uint8(1) - m := map[string]interface{}{"value": []uint8{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint8Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint8Slice()[0]) - assert.Equal(t, []uint8(nil), New(m).Get("nothing").Uint8Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) + m := objx.Map{"value": []uint8{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint8Slice()[0]) + assert.Equal(t, val, m.Get("value").MustUint8Slice()[0]) + assert.Equal(t, []uint8(nil), m.Get("nothing").Uint8Slice()) + assert.Equal(t, val, m.Get("nothing").Uint8Slice([]uint8{uint8(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUint8Slice() + m.Get("nothing").MustUint8Slice() }) - } func TestIsUint8(t *testing.T) { + m := objx.Map{"data": uint8(1)} - var v *Value - - v = &Value{data: uint8(1)} - assert.True(t, v.IsUint8()) + assert.True(t, m.Get("data").IsUint8()) +} - v = &Value{data: []uint8{uint8(1)}} - assert.True(t, v.IsUint8Slice()) +func TestIsUint8Slice(t *testing.T) { + m := objx.Map{"data": []uint8{uint8(1)}} + assert.True(t, m.Get("data").IsUint8Slice()) } func TestEachUint8(t *testing.T) { - - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + m := objx.Map{"data": []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} count := 0 replacedVals := 
make([]uint8, 0) - assert.Equal(t, v, v.EachUint8(func(i int, val uint8) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUint8(func(i int, val uint8) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint8Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint8Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint8Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUint8Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUint8Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustUint8Slice()[2]) } func TestWhereUint8(t *testing.T) { + m := objx.Map{"data": []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - selected := v.WhereUint8(func(i int, val uint8) bool { + selected := m.Get("data").WhereUint8(func(i int, val uint8) bool { return i%2 == 0 }).MustUint8Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupUint8(t *testing.T) { + m := objx.Map{"data": []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - grouped := v.GroupUint8(func(i int, val uint8) string { + grouped := m.Get("data").GroupUint8(func(i int, val uint8) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint8) + }).Data().(map[string][]uint8) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUint8(t *testing.T) { + m := objx.Map{"data": []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} + rawArr := m.Get("data").MustUint8Slice() - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - rawArr := 
v.MustUint8Slice() - - replaced := v.ReplaceUint8(func(index int, val uint8) uint8 { + replaced := m.Get("data").ReplaceUint8(func(index int, val uint8) uint8 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUint8Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1699,18 +1198,16 @@ func TestReplaceUint8(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUint8(t *testing.T) { + m := objx.Map{"data": []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - v := &Value{data: []uint8{uint8(1), uint8(1), uint8(1), uint8(1), uint8(1), uint8(1)}} - - collected := v.CollectUint8(func(index int, val uint8) interface{} { + collected := m.Get("data").CollectUint8(func(index int, val uint8) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1719,121 +1216,101 @@ func TestCollectUint8(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uint16 (uint16 and []uint16) +*/ func TestUint16(t *testing.T) { - val := uint16(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16()) - assert.Equal(t, val, New(m).Get("value").MustUint16()) - assert.Equal(t, uint16(0), New(m).Get("nothing").Uint16()) - assert.Equal(t, val, New(m).Get("nothing").Uint16(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint16()) + assert.Equal(t, val, m.Get("value").MustUint16()) + assert.Equal(t, uint16(0), m.Get("nothing").Uint16()) + 
assert.Equal(t, val, m.Get("nothing").Uint16(1)) assert.Panics(t, func() { - New(m).Get("age").MustUint16() + m.Get("age").MustUint16() }) - } func TestUint16Slice(t *testing.T) { - val := uint16(1) - m := map[string]interface{}{"value": []uint16{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint16Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint16Slice()[0]) - assert.Equal(t, []uint16(nil), New(m).Get("nothing").Uint16Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) + m := objx.Map{"value": []uint16{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint16Slice()[0]) + assert.Equal(t, val, m.Get("value").MustUint16Slice()[0]) + assert.Equal(t, []uint16(nil), m.Get("nothing").Uint16Slice()) + assert.Equal(t, val, m.Get("nothing").Uint16Slice([]uint16{uint16(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUint16Slice() + m.Get("nothing").MustUint16Slice() }) - } func TestIsUint16(t *testing.T) { + m := objx.Map{"data": uint16(1)} - var v *Value - - v = &Value{data: uint16(1)} - assert.True(t, v.IsUint16()) + assert.True(t, m.Get("data").IsUint16()) +} - v = &Value{data: []uint16{uint16(1)}} - assert.True(t, v.IsUint16Slice()) +func TestIsUint16Slice(t *testing.T) { + m := objx.Map{"data": []uint16{uint16(1)}} + assert.True(t, m.Get("data").IsUint16Slice()) } func TestEachUint16(t *testing.T) { - - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + m := objx.Map{"data": []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} count := 0 replacedVals := make([]uint16, 0) - assert.Equal(t, v, v.EachUint16(func(i int, val uint16) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUint16(func(i int, val uint16) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], 
v.MustUint16Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint16Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint16Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUint16Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUint16Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustUint16Slice()[2]) } func TestWhereUint16(t *testing.T) { + m := objx.Map{"data": []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - selected := v.WhereUint16(func(i int, val uint16) bool { + selected := m.Get("data").WhereUint16(func(i int, val uint16) bool { return i%2 == 0 }).MustUint16Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupUint16(t *testing.T) { + m := objx.Map{"data": []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - grouped := v.GroupUint16(func(i int, val uint16) string { + grouped := m.Get("data").GroupUint16(func(i int, val uint16) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint16) + }).Data().(map[string][]uint16) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUint16(t *testing.T) { + m := objx.Map{"data": []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} + rawArr := m.Get("data").MustUint16Slice() - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - rawArr := v.MustUint16Slice() - - replaced := v.ReplaceUint16(func(index int, val uint16) uint16 { + replaced := m.Get("data").ReplaceUint16(func(index int, val uint16) uint16 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUint16Slice() + if assert.Equal(t, 6, 
len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1842,18 +1319,16 @@ func TestReplaceUint16(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUint16(t *testing.T) { + m := objx.Map{"data": []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - v := &Value{data: []uint16{uint16(1), uint16(1), uint16(1), uint16(1), uint16(1), uint16(1)}} - - collected := v.CollectUint16(func(index int, val uint16) interface{} { + collected := m.Get("data").CollectUint16(func(index int, val uint16) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -1862,121 +1337,101 @@ func TestCollectUint16(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uint32 (uint32 and []uint32) +*/ func TestUint32(t *testing.T) { - val := uint32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint32()) - assert.Equal(t, val, New(m).Get("value").MustUint32()) - assert.Equal(t, uint32(0), New(m).Get("nothing").Uint32()) - assert.Equal(t, val, New(m).Get("nothing").Uint32(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint32()) + assert.Equal(t, val, m.Get("value").MustUint32()) + assert.Equal(t, uint32(0), m.Get("nothing").Uint32()) + assert.Equal(t, val, m.Get("nothing").Uint32(1)) assert.Panics(t, func() { - New(m).Get("age").MustUint32() + m.Get("age").MustUint32() }) - } func TestUint32Slice(t *testing.T) { - val := uint32(1) - m := map[string]interface{}{"value": []uint32{val}, "nothing": nil} - assert.Equal(t, 
val, New(m).Get("value").Uint32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint32Slice()[0]) - assert.Equal(t, []uint32(nil), New(m).Get("nothing").Uint32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) + m := objx.Map{"value": []uint32{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint32Slice()[0]) + assert.Equal(t, val, m.Get("value").MustUint32Slice()[0]) + assert.Equal(t, []uint32(nil), m.Get("nothing").Uint32Slice()) + assert.Equal(t, val, m.Get("nothing").Uint32Slice([]uint32{uint32(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUint32Slice() + m.Get("nothing").MustUint32Slice() }) - } func TestIsUint32(t *testing.T) { + m := objx.Map{"data": uint32(1)} - var v *Value - - v = &Value{data: uint32(1)} - assert.True(t, v.IsUint32()) + assert.True(t, m.Get("data").IsUint32()) +} - v = &Value{data: []uint32{uint32(1)}} - assert.True(t, v.IsUint32Slice()) +func TestIsUint32Slice(t *testing.T) { + m := objx.Map{"data": []uint32{uint32(1)}} + assert.True(t, m.Get("data").IsUint32Slice()) } func TestEachUint32(t *testing.T) { - - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + m := objx.Map{"data": []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} count := 0 replacedVals := make([]uint32, 0) - assert.Equal(t, v, v.EachUint32(func(i int, val uint32) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUint32(func(i int, val uint32) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint32Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUint32Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUint32Slice()[1]) + 
assert.Equal(t, replacedVals[2], m.Get("data").MustUint32Slice()[2]) } func TestWhereUint32(t *testing.T) { + m := objx.Map{"data": []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - selected := v.WhereUint32(func(i int, val uint32) bool { + selected := m.Get("data").WhereUint32(func(i int, val uint32) bool { return i%2 == 0 }).MustUint32Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupUint32(t *testing.T) { + m := objx.Map{"data": []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - grouped := v.GroupUint32(func(i int, val uint32) string { + grouped := m.Get("data").GroupUint32(func(i int, val uint32) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint32) + }).Data().(map[string][]uint32) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUint32(t *testing.T) { + m := objx.Map{"data": []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} + rawArr := m.Get("data").MustUint32Slice() - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - rawArr := v.MustUint32Slice() - - replaced := v.ReplaceUint32(func(index int, val uint32) uint32 { + replaced := m.Get("data").ReplaceUint32(func(index int, val uint32) uint32 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUint32Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -1985,18 +1440,16 @@ func TestReplaceUint32(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUint32(t *testing.T) { 
+ m := objx.Map{"data": []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - v := &Value{data: []uint32{uint32(1), uint32(1), uint32(1), uint32(1), uint32(1), uint32(1)}} - - collected := v.CollectUint32(func(index int, val uint32) interface{} { + collected := m.Get("data").CollectUint32(func(index int, val uint32) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2005,121 +1458,101 @@ func TestCollectUint32(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uint64 (uint64 and []uint64) +*/ func TestUint64(t *testing.T) { - val := uint64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64()) - assert.Equal(t, val, New(m).Get("value").MustUint64()) - assert.Equal(t, uint64(0), New(m).Get("nothing").Uint64()) - assert.Equal(t, val, New(m).Get("nothing").Uint64(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uint64()) + assert.Equal(t, val, m.Get("value").MustUint64()) + assert.Equal(t, uint64(0), m.Get("nothing").Uint64()) + assert.Equal(t, val, m.Get("nothing").Uint64(1)) assert.Panics(t, func() { - New(m).Get("age").MustUint64() + m.Get("age").MustUint64() }) - } func TestUint64Slice(t *testing.T) { - val := uint64(1) - m := map[string]interface{}{"value": []uint64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uint64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUint64Slice()[0]) - assert.Equal(t, []uint64(nil), New(m).Get("nothing").Uint64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) + m := objx.Map{"value": []uint64{val}, 
"nothing": nil} + assert.Equal(t, val, m.Get("value").Uint64Slice()[0]) + assert.Equal(t, val, m.Get("value").MustUint64Slice()[0]) + assert.Equal(t, []uint64(nil), m.Get("nothing").Uint64Slice()) + assert.Equal(t, val, m.Get("nothing").Uint64Slice([]uint64{uint64(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUint64Slice() + m.Get("nothing").MustUint64Slice() }) - } func TestIsUint64(t *testing.T) { + m := objx.Map{"data": uint64(1)} - var v *Value - - v = &Value{data: uint64(1)} - assert.True(t, v.IsUint64()) + assert.True(t, m.Get("data").IsUint64()) +} - v = &Value{data: []uint64{uint64(1)}} - assert.True(t, v.IsUint64Slice()) +func TestIsUint64Slice(t *testing.T) { + m := objx.Map{"data": []uint64{uint64(1)}} + assert.True(t, m.Get("data").IsUint64Slice()) } func TestEachUint64(t *testing.T) { - - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + m := objx.Map{"data": []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} count := 0 replacedVals := make([]uint64, 0) - assert.Equal(t, v, v.EachUint64(func(i int, val uint64) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUint64(func(i int, val uint64) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUint64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustUint64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustUint64Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUint64Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUint64Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustUint64Slice()[2]) } func TestWhereUint64(t *testing.T) { + m := objx.Map{"data": []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - 
selected := v.WhereUint64(func(i int, val uint64) bool { + selected := m.Get("data").WhereUint64(func(i int, val uint64) bool { return i%2 == 0 }).MustUint64Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupUint64(t *testing.T) { + m := objx.Map{"data": []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - grouped := v.GroupUint64(func(i int, val uint64) string { + grouped := m.Get("data").GroupUint64(func(i int, val uint64) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uint64) + }).Data().(map[string][]uint64) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUint64(t *testing.T) { + m := objx.Map{"data": []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} + rawArr := m.Get("data").MustUint64Slice() - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - rawArr := v.MustUint64Slice() - - replaced := v.ReplaceUint64(func(index int, val uint64) uint64 { + replaced := m.Get("data").ReplaceUint64(func(index int, val uint64) uint64 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUint64Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2128,18 +1561,16 @@ func TestReplaceUint64(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUint64(t *testing.T) { + m := objx.Map{"data": []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - v := &Value{data: []uint64{uint64(1), uint64(1), uint64(1), uint64(1), uint64(1), uint64(1)}} - - collected := v.CollectUint64(func(index int, val uint64) interface{} { + collected := 
m.Get("data").CollectUint64(func(index int, val uint64) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2148,121 +1579,101 @@ func TestCollectUint64(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Uintptr (uintptr and []uintptr) +*/ func TestUintptr(t *testing.T) { - val := uintptr(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Uintptr()) - assert.Equal(t, val, New(m).Get("value").MustUintptr()) - assert.Equal(t, uintptr(0), New(m).Get("nothing").Uintptr()) - assert.Equal(t, val, New(m).Get("nothing").Uintptr(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Uintptr()) + assert.Equal(t, val, m.Get("value").MustUintptr()) + assert.Equal(t, uintptr(0), m.Get("nothing").Uintptr()) + assert.Equal(t, val, m.Get("nothing").Uintptr(1)) assert.Panics(t, func() { - New(m).Get("age").MustUintptr() + m.Get("age").MustUintptr() }) - } func TestUintptrSlice(t *testing.T) { - val := uintptr(1) - m := map[string]interface{}{"value": []uintptr{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").UintptrSlice()[0]) - assert.Equal(t, val, New(m).Get("value").MustUintptrSlice()[0]) - assert.Equal(t, []uintptr(nil), New(m).Get("nothing").UintptrSlice()) - assert.Equal(t, val, New(m).Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) + m := objx.Map{"value": []uintptr{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").UintptrSlice()[0]) + assert.Equal(t, val, m.Get("value").MustUintptrSlice()[0]) + assert.Equal(t, []uintptr(nil), m.Get("nothing").UintptrSlice()) + assert.Equal(t, val, 
m.Get("nothing").UintptrSlice([]uintptr{uintptr(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustUintptrSlice() + m.Get("nothing").MustUintptrSlice() }) - } func TestIsUintptr(t *testing.T) { + m := objx.Map{"data": uintptr(1)} - var v *Value - - v = &Value{data: uintptr(1)} - assert.True(t, v.IsUintptr()) + assert.True(t, m.Get("data").IsUintptr()) +} - v = &Value{data: []uintptr{uintptr(1)}} - assert.True(t, v.IsUintptrSlice()) +func TestIsUintptrSlice(t *testing.T) { + m := objx.Map{"data": []uintptr{uintptr(1)}} + assert.True(t, m.Get("data").IsUintptrSlice()) } func TestEachUintptr(t *testing.T) { - - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + m := objx.Map{"data": []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} count := 0 replacedVals := make([]uintptr, 0) - assert.Equal(t, v, v.EachUintptr(func(i int, val uintptr) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachUintptr(func(i int, val uintptr) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustUintptrSlice()[0]) - assert.Equal(t, replacedVals[1], v.MustUintptrSlice()[1]) - assert.Equal(t, replacedVals[2], v.MustUintptrSlice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustUintptrSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustUintptrSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustUintptrSlice()[2]) } func TestWhereUintptr(t *testing.T) { + m := objx.Map{"data": []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - selected := v.WhereUintptr(func(i int, val uintptr) bool { + selected := m.Get("data").WhereUintptr(func(i int, val uintptr) bool { return i%2 == 0 
}).MustUintptrSlice() assert.Equal(t, 3, len(selected)) - } func TestGroupUintptr(t *testing.T) { + m := objx.Map{"data": []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - grouped := v.GroupUintptr(func(i int, val uintptr) string { + grouped := m.Get("data").GroupUintptr(func(i int, val uintptr) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]uintptr) + }).Data().(map[string][]uintptr) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceUintptr(t *testing.T) { + m := objx.Map{"data": []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} + rawArr := m.Get("data").MustUintptrSlice() - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - rawArr := v.MustUintptrSlice() - - replaced := v.ReplaceUintptr(func(index int, val uintptr) uintptr { + replaced := m.Get("data").ReplaceUintptr(func(index int, val uintptr) uintptr { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustUintptrSlice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2271,18 +1682,16 @@ func TestReplaceUintptr(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectUintptr(t *testing.T) { + m := objx.Map{"data": []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - v := &Value{data: []uintptr{uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1), uintptr(1)}} - - collected := v.CollectUintptr(func(index int, val uintptr) interface{} { + collected := m.Get("data").CollectUintptr(func(index int, val uintptr) interface{} { return index }) - 
collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2291,121 +1700,101 @@ func TestCollectUintptr(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Float32 (float32 and []float32) +*/ func TestFloat32(t *testing.T) { - val := float32(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32()) - assert.Equal(t, val, New(m).Get("value").MustFloat32()) - assert.Equal(t, float32(0), New(m).Get("nothing").Float32()) - assert.Equal(t, val, New(m).Get("nothing").Float32(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Float32()) + assert.Equal(t, val, m.Get("value").MustFloat32()) + assert.Equal(t, float32(0), m.Get("nothing").Float32()) + assert.Equal(t, val, m.Get("nothing").Float32(1)) assert.Panics(t, func() { - New(m).Get("age").MustFloat32() + m.Get("age").MustFloat32() }) - } func TestFloat32Slice(t *testing.T) { - val := float32(1) - m := map[string]interface{}{"value": []float32{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float32Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat32Slice()[0]) - assert.Equal(t, []float32(nil), New(m).Get("nothing").Float32Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float32Slice([]float32{float32(1)})[0]) + m := objx.Map{"value": []float32{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Float32Slice()[0]) + assert.Equal(t, val, m.Get("value").MustFloat32Slice()[0]) + assert.Equal(t, []float32(nil), m.Get("nothing").Float32Slice()) + assert.Equal(t, val, m.Get("nothing").Float32Slice([]float32{float32(1)})[0]) assert.Panics(t, func() { - 
New(m).Get("nothing").MustFloat32Slice() + m.Get("nothing").MustFloat32Slice() }) - } func TestIsFloat32(t *testing.T) { + m := objx.Map{"data": float32(1)} - var v *Value - - v = &Value{data: float32(1)} - assert.True(t, v.IsFloat32()) + assert.True(t, m.Get("data").IsFloat32()) +} - v = &Value{data: []float32{float32(1)}} - assert.True(t, v.IsFloat32Slice()) +func TestIsFloat32Slice(t *testing.T) { + m := objx.Map{"data": []float32{float32(1)}} + assert.True(t, m.Get("data").IsFloat32Slice()) } func TestEachFloat32(t *testing.T) { - - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} + m := objx.Map{"data": []float32{float32(1), float32(1), float32(1), float32(1), float32(1)}} count := 0 replacedVals := make([]float32, 0) - assert.Equal(t, v, v.EachFloat32(func(i int, val float32) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachFloat32(func(i int, val float32) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat32Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat32Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat32Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustFloat32Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustFloat32Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustFloat32Slice()[2]) } func TestWhereFloat32(t *testing.T) { + m := objx.Map{"data": []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - selected := v.WhereFloat32(func(i int, val float32) bool { + selected := m.Get("data").WhereFloat32(func(i int, val float32) bool { return i%2 == 0 }).MustFloat32Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupFloat32(t *testing.T) { + m := 
objx.Map{"data": []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - grouped := v.GroupFloat32(func(i int, val float32) string { + grouped := m.Get("data").GroupFloat32(func(i int, val float32) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]float32) + }).Data().(map[string][]float32) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceFloat32(t *testing.T) { + m := objx.Map{"data": []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} + rawArr := m.Get("data").MustFloat32Slice() - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - rawArr := v.MustFloat32Slice() - - replaced := v.ReplaceFloat32(func(index int, val float32) float32 { + replaced := m.Get("data").ReplaceFloat32(func(index int, val float32) float32 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustFloat32Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2414,18 +1803,16 @@ func TestReplaceFloat32(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectFloat32(t *testing.T) { + m := objx.Map{"data": []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - v := &Value{data: []float32{float32(1), float32(1), float32(1), float32(1), float32(1), float32(1)}} - - collected := v.CollectFloat32(func(index int, val float32) interface{} { + collected := m.Get("data").CollectFloat32(func(index int, val float32) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, 
collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2434,121 +1821,101 @@ func TestCollectFloat32(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Float64 (float64 and []float64) +*/ func TestFloat64(t *testing.T) { - val := float64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64()) - assert.Equal(t, val, New(m).Get("value").MustFloat64()) - assert.Equal(t, float64(0), New(m).Get("nothing").Float64()) - assert.Equal(t, val, New(m).Get("nothing").Float64(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Float64()) + assert.Equal(t, val, m.Get("value").MustFloat64()) + assert.Equal(t, float64(0), m.Get("nothing").Float64()) + assert.Equal(t, val, m.Get("nothing").Float64(1)) assert.Panics(t, func() { - New(m).Get("age").MustFloat64() + m.Get("age").MustFloat64() }) - } func TestFloat64Slice(t *testing.T) { - val := float64(1) - m := map[string]interface{}{"value": []float64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Float64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustFloat64Slice()[0]) - assert.Equal(t, []float64(nil), New(m).Get("nothing").Float64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Float64Slice([]float64{float64(1)})[0]) + m := objx.Map{"value": []float64{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Float64Slice()[0]) + assert.Equal(t, val, m.Get("value").MustFloat64Slice()[0]) + assert.Equal(t, []float64(nil), m.Get("nothing").Float64Slice()) + assert.Equal(t, val, m.Get("nothing").Float64Slice([]float64{float64(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustFloat64Slice() + m.Get("nothing").MustFloat64Slice() }) - } func TestIsFloat64(t *testing.T) { + m := 
objx.Map{"data": float64(1)} - var v *Value - - v = &Value{data: float64(1)} - assert.True(t, v.IsFloat64()) + assert.True(t, m.Get("data").IsFloat64()) +} - v = &Value{data: []float64{float64(1)}} - assert.True(t, v.IsFloat64Slice()) +func TestIsFloat64Slice(t *testing.T) { + m := objx.Map{"data": []float64{float64(1)}} + assert.True(t, m.Get("data").IsFloat64Slice()) } func TestEachFloat64(t *testing.T) { - - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} + m := objx.Map{"data": []float64{float64(1), float64(1), float64(1), float64(1), float64(1)}} count := 0 replacedVals := make([]float64, 0) - assert.Equal(t, v, v.EachFloat64(func(i int, val float64) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachFloat64(func(i int, val float64) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustFloat64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustFloat64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustFloat64Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustFloat64Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustFloat64Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustFloat64Slice()[2]) } func TestWhereFloat64(t *testing.T) { + m := objx.Map{"data": []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - selected := v.WhereFloat64(func(i int, val float64) bool { + selected := m.Get("data").WhereFloat64(func(i int, val float64) bool { return i%2 == 0 }).MustFloat64Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupFloat64(t *testing.T) { + m := objx.Map{"data": []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - v := &Value{data: 
[]float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - grouped := v.GroupFloat64(func(i int, val float64) string { + grouped := m.Get("data").GroupFloat64(func(i int, val float64) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]float64) + }).Data().(map[string][]float64) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceFloat64(t *testing.T) { + m := objx.Map{"data": []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} + rawArr := m.Get("data").MustFloat64Slice() - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - rawArr := v.MustFloat64Slice() - - replaced := v.ReplaceFloat64(func(index int, val float64) float64 { + replaced := m.Get("data").ReplaceFloat64(func(index int, val float64) float64 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustFloat64Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2557,18 +1924,16 @@ func TestReplaceFloat64(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectFloat64(t *testing.T) { + m := objx.Map{"data": []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - v := &Value{data: []float64{float64(1), float64(1), float64(1), float64(1), float64(1), float64(1)}} - - collected := v.CollectFloat64(func(index int, val float64) interface{} { + collected := m.Get("data").CollectFloat64(func(index int, val float64) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2577,121 +1942,101 @@ func TestCollectFloat64(t *testing.T) { 
assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Complex64 (complex64 and []complex64) +*/ func TestComplex64(t *testing.T) { - val := complex64(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64()) - assert.Equal(t, val, New(m).Get("value").MustComplex64()) - assert.Equal(t, complex64(0), New(m).Get("nothing").Complex64()) - assert.Equal(t, val, New(m).Get("nothing").Complex64(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Complex64()) + assert.Equal(t, val, m.Get("value").MustComplex64()) + assert.Equal(t, complex64(0), m.Get("nothing").Complex64()) + assert.Equal(t, val, m.Get("nothing").Complex64(1)) assert.Panics(t, func() { - New(m).Get("age").MustComplex64() + m.Get("age").MustComplex64() }) - } func TestComplex64Slice(t *testing.T) { - val := complex64(1) - m := map[string]interface{}{"value": []complex64{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex64Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustComplex64Slice()[0]) - assert.Equal(t, []complex64(nil), New(m).Get("nothing").Complex64Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) + m := objx.Map{"value": []complex64{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Complex64Slice()[0]) + assert.Equal(t, val, m.Get("value").MustComplex64Slice()[0]) + assert.Equal(t, []complex64(nil), m.Get("nothing").Complex64Slice()) + assert.Equal(t, val, m.Get("nothing").Complex64Slice([]complex64{complex64(1)})[0]) assert.Panics(t, func() { - New(m).Get("nothing").MustComplex64Slice() + m.Get("nothing").MustComplex64Slice() }) - } func TestIsComplex64(t *testing.T) { + m := objx.Map{"data": complex64(1)} - var v *Value - - v = 
&Value{data: complex64(1)} - assert.True(t, v.IsComplex64()) + assert.True(t, m.Get("data").IsComplex64()) +} - v = &Value{data: []complex64{complex64(1)}} - assert.True(t, v.IsComplex64Slice()) +func TestIsComplex64Slice(t *testing.T) { + m := objx.Map{"data": []complex64{complex64(1)}} + assert.True(t, m.Get("data").IsComplex64Slice()) } func TestEachComplex64(t *testing.T) { - - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + m := objx.Map{"data": []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} count := 0 replacedVals := make([]complex64, 0) - assert.Equal(t, v, v.EachComplex64(func(i int, val complex64) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachComplex64(func(i int, val complex64) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustComplex64Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex64Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex64Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustComplex64Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustComplex64Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustComplex64Slice()[2]) } func TestWhereComplex64(t *testing.T) { + m := objx.Map{"data": []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - selected := v.WhereComplex64(func(i int, val complex64) bool { + selected := m.Get("data").WhereComplex64(func(i int, val complex64) bool { return i%2 == 0 }).MustComplex64Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupComplex64(t *testing.T) { + m := objx.Map{"data": []complex64{complex64(1), complex64(1), complex64(1), 
complex64(1), complex64(1), complex64(1)}} - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - grouped := v.GroupComplex64(func(i int, val complex64) string { + grouped := m.Get("data").GroupComplex64(func(i int, val complex64) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex64) + }).Data().(map[string][]complex64) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceComplex64(t *testing.T) { + m := objx.Map{"data": []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} + rawArr := m.Get("data").MustComplex64Slice() - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - rawArr := v.MustComplex64Slice() - - replaced := v.ReplaceComplex64(func(index int, val complex64) complex64 { + replaced := m.Get("data").ReplaceComplex64(func(index int, val complex64) complex64 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustComplex64Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2700,18 +2045,16 @@ func TestReplaceComplex64(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectComplex64(t *testing.T) { + m := objx.Map{"data": []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - v := &Value{data: []complex64{complex64(1), complex64(1), complex64(1), complex64(1), complex64(1), complex64(1)}} - - collected := v.CollectComplex64(func(index int, val complex64) interface{} { + collected := m.Get("data").CollectComplex64(func(index int, val complex64) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if 
assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2720,121 +2063,101 @@ func TestCollectComplex64(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } -// ************************************************************ -// TESTS -// ************************************************************ - +/* + Tests for Complex128 (complex128 and []complex128) +*/ func TestComplex128(t *testing.T) { - val := complex128(1) - m := map[string]interface{}{"value": val, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex128()) - assert.Equal(t, val, New(m).Get("value").MustComplex128()) - assert.Equal(t, complex128(0), New(m).Get("nothing").Complex128()) - assert.Equal(t, val, New(m).Get("nothing").Complex128(1)) + m := objx.Map{"value": val, "nothing": nil} + assert.Equal(t, val, m.Get("value").Complex128()) + assert.Equal(t, val, m.Get("value").MustComplex128()) + assert.Equal(t, complex128(0), m.Get("nothing").Complex128()) + assert.Equal(t, val, m.Get("nothing").Complex128(1)) assert.Panics(t, func() { - New(m).Get("age").MustComplex128() + m.Get("age").MustComplex128() }) - } func TestComplex128Slice(t *testing.T) { - val := complex128(1) - m := map[string]interface{}{"value": []complex128{val}, "nothing": nil} - assert.Equal(t, val, New(m).Get("value").Complex128Slice()[0]) - assert.Equal(t, val, New(m).Get("value").MustComplex128Slice()[0]) - assert.Equal(t, []complex128(nil), New(m).Get("nothing").Complex128Slice()) - assert.Equal(t, val, New(m).Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) + m := objx.Map{"value": []complex128{val}, "nothing": nil} + assert.Equal(t, val, m.Get("value").Complex128Slice()[0]) + assert.Equal(t, val, m.Get("value").MustComplex128Slice()[0]) + assert.Equal(t, []complex128(nil), m.Get("nothing").Complex128Slice()) + assert.Equal(t, val, m.Get("nothing").Complex128Slice([]complex128{complex128(1)})[0]) 
assert.Panics(t, func() { - New(m).Get("nothing").MustComplex128Slice() + m.Get("nothing").MustComplex128Slice() }) - } func TestIsComplex128(t *testing.T) { + m := objx.Map{"data": complex128(1)} - var v *Value - - v = &Value{data: complex128(1)} - assert.True(t, v.IsComplex128()) + assert.True(t, m.Get("data").IsComplex128()) +} - v = &Value{data: []complex128{complex128(1)}} - assert.True(t, v.IsComplex128Slice()) +func TestIsComplex128Slice(t *testing.T) { + m := objx.Map{"data": []complex128{complex128(1)}} + assert.True(t, m.Get("data").IsComplex128Slice()) } func TestEachComplex128(t *testing.T) { - - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + m := objx.Map{"data": []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} count := 0 replacedVals := make([]complex128, 0) - assert.Equal(t, v, v.EachComplex128(func(i int, val complex128) bool { - + assert.Equal(t, m.Get("data"), m.Get("data").EachComplex128(func(i int, val complex128) bool { count++ replacedVals = append(replacedVals, val) // abort early - if i == 2 { - return false - } - - return true - + return i != 2 })) assert.Equal(t, count, 3) - assert.Equal(t, replacedVals[0], v.MustComplex128Slice()[0]) - assert.Equal(t, replacedVals[1], v.MustComplex128Slice()[1]) - assert.Equal(t, replacedVals[2], v.MustComplex128Slice()[2]) - + assert.Equal(t, replacedVals[0], m.Get("data").MustComplex128Slice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustComplex128Slice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustComplex128Slice()[2]) } func TestWhereComplex128(t *testing.T) { + m := objx.Map{"data": []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - selected := v.WhereComplex128(func(i int, val complex128) bool { + 
selected := m.Get("data").WhereComplex128(func(i int, val complex128) bool { return i%2 == 0 }).MustComplex128Slice() assert.Equal(t, 3, len(selected)) - } func TestGroupComplex128(t *testing.T) { + m := objx.Map{"data": []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - grouped := v.GroupComplex128(func(i int, val complex128) string { + grouped := m.Get("data").GroupComplex128(func(i int, val complex128) string { return fmt.Sprintf("%v", i%2 == 0) - }).data.(map[string][]complex128) + }).Data().(map[string][]complex128) assert.Equal(t, 2, len(grouped)) assert.Equal(t, 3, len(grouped["true"])) assert.Equal(t, 3, len(grouped["false"])) - } func TestReplaceComplex128(t *testing.T) { + m := objx.Map{"data": []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} + rawArr := m.Get("data").MustComplex128Slice() - v := &Value{data: []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - rawArr := v.MustComplex128Slice() - - replaced := v.ReplaceComplex128(func(index int, val complex128) complex128 { + replaced := m.Get("data").ReplaceComplex128(func(index int, val complex128) complex128 { if index < len(rawArr)-1 { return rawArr[index+1] } return rawArr[0] }) - replacedArr := replaced.MustComplex128Slice() + if assert.Equal(t, 6, len(replacedArr)) { assert.Equal(t, replacedArr[0], rawArr[1]) assert.Equal(t, replacedArr[1], rawArr[2]) @@ -2843,18 +2166,16 @@ func TestReplaceComplex128(t *testing.T) { assert.Equal(t, replacedArr[4], rawArr[5]) assert.Equal(t, replacedArr[5], rawArr[0]) } - } func TestCollectComplex128(t *testing.T) { + m := objx.Map{"data": []complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - v := &Value{data: 
[]complex128{complex128(1), complex128(1), complex128(1), complex128(1), complex128(1), complex128(1)}} - - collected := v.CollectComplex128(func(index int, val complex128) interface{} { + collected := m.Get("data").CollectComplex128(func(index int, val complex128) interface{} { return index }) - collectedArr := collected.MustInterSlice() + if assert.Equal(t, 6, len(collectedArr)) { assert.Equal(t, collectedArr[0], 0) assert.Equal(t, collectedArr[1], 1) @@ -2863,5 +2184,4 @@ func TestCollectComplex128(t *testing.T) { assert.Equal(t, collectedArr[4], 4) assert.Equal(t, collectedArr[5], 5) } - } diff --git a/vendor/github.com/stretchr/objx/type_specific_test.go b/vendor/github.com/stretchr/objx/type_specific_test.go new file mode 100644 index 000000000000..7b66847565aa --- /dev/null +++ b/vendor/github.com/stretchr/objx/type_specific_test.go @@ -0,0 +1,459 @@ +package objx_test + +import ( + "fmt" + "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" +) + +/* + Tests for MSI (map[string]interface{} and []map[string]interface{}) +*/ +func TestMSI(t *testing.T) { + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := objx.Map{"value": val, "nothing": nil} + mVal := map[string]interface{}{"value": val, "nothing": nil} + + assert.Equal(t, mVal, m.Value().MSI()) + assert.Equal(t, val, m.Get("value").MSI()) + assert.Equal(t, mVal, m.Value().MustMSI()) + assert.Equal(t, val, m.Get("value").MustMSI()) + assert.Equal(t, map[string]interface{}(nil), m.Get("nothing").MSI()) + assert.Equal(t, val, m.Get("nothing").MSI(map[string]interface{}{"name": "Tyler"})) + assert.Panics(t, func() { + m.Get("age").MustMSI() + }) +} + +func TestMSISlice(t *testing.T) { + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := objx.Map{ + "value": []map[string]interface{}{val}, + "value2": []objx.Map{val}, + "value3": []interface{}{val}, + "nothing": nil, + } + + assert.Equal(t, val, m.Get("value").MSISlice()[0]) + 
assert.Equal(t, val, m.Get("value2").MSISlice()[0]) + assert.Equal(t, val, m.Get("value3").MSISlice()[0]) + assert.Equal(t, val, m.Get("value").MustMSISlice()[0]) + assert.Equal(t, val, m.Get("value2").MustMSISlice()[0]) + assert.Equal(t, val, m.Get("value3").MustMSISlice()[0]) + assert.Equal(t, []map[string]interface{}(nil), m.Get("nothing").MSISlice()) + assert.Equal(t, val, m.Get("nothing").MSISlice([]map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"})})[0]) + assert.Panics(t, func() { + m.Get("nothing").MustMSISlice() + }) + + o := objx.MustFromJSON(`{"d":[{"author":{"displayName":"DemoUser3","id":2},"classes":null,"id":9879,"v":{"code":"","created":"2013-09-19T09:38:50+02:00","published":"0001-01-01T00:00:00Z","updated":"2013-09-19T09:38:50+02:00"}}],"s":200}`) + assert.Equal(t, 9879, o.Get("d").MustMSISlice()[0]["id"]) + assert.Equal(t, 1, len(o.Get("d").MSISlice())) + + i := objx.MustFromJSON(`{"d":[{"author":"abc"},[1]]}`) + assert.Nil(t, i.Get("d").MSISlice()) +} + +func TestIsMSI(t *testing.T) { + m := objx.Map{"data": map[string]interface{}(map[string]interface{}{"name": "Tyler"})} + + assert.True(t, m.Get("data").IsMSI()) + assert.True(t, m.Value().IsMSI()) +} + +func TestIsMSISlice(t *testing.T) { + val := map[string]interface{}(map[string]interface{}{"name": "Tyler"}) + m := objx.Map{"data": []map[string]interface{}{val}, "data2": []objx.Map{val}} + + assert.True(t, m.Get("data").IsMSISlice()) + assert.True(t, m.Get("data2").IsMSISlice()) + + o := objx.MustFromJSON(`{"d":[{"author":{"displayName":"DemoUser3","id":2},"classes":null,"id":9879,"v":{"code":"","created":"2013-09-19T09:38:50+02:00","published":"0001-01-01T00:00:00Z","updated":"2013-09-19T09:38:50+02:00"}}],"s":200}`) + assert.True(t, o.Has("d")) + assert.True(t, o.Get("d").IsMSISlice()) + + o = objx.MustFromJSON(`{"d":[{"author":"abc"},[1]]}`) + assert.True(t, o.Has("d")) + assert.False(t, o.Get("d").IsMSISlice()) +} + +func TestEachMSI(t *testing.T) { + m 
:= objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + count := 0 + replacedVals := make([]map[string]interface{}, 0) + assert.Equal(t, m.Get("data"), m.Get("data").EachMSI(func(i int, val map[string]interface{}) bool { + count++ + replacedVals = append(replacedVals, val) + + // abort early + return i != 2 + })) + + m2 := objx.Map{"data": []objx.Map{{"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}}} + assert.Equal(t, m2.Get("data"), m2.Get("data").EachMSI(func(i int, val map[string]interface{}) bool { + count++ + replacedVals = append(replacedVals, val) + + // abort early + return i != 2 + })) + + assert.Equal(t, count, 6) + assert.Equal(t, replacedVals[0], m.Get("data").MustMSISlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustMSISlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustMSISlice()[2]) + assert.Equal(t, replacedVals[3], m2.Get("data").MustMSISlice()[0]) + assert.Equal(t, replacedVals[4], m2.Get("data").MustMSISlice()[1]) + assert.Equal(t, replacedVals[5], m2.Get("data").MustMSISlice()[2]) +} + +func TestWhereMSI(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := m.Get("data").WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + 
}).MustMSISlice() + + assert.Equal(t, 3, len(selected)) +} + +func TestWhereMSI2(t *testing.T) { + m := objx.Map{"data": []objx.Map{{"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}}} + + selected := m.Get("data").WhereMSI(func(i int, val map[string]interface{}) bool { + return i%2 == 0 + }).MustMSISlice() + + assert.Equal(t, 2, len(selected)) +} + +func TestGroupMSI(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := m.Get("data").GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).Data().(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) +} + +func TestGroupMSI2(t *testing.T) { + m := objx.Map{"data": []objx.Map{{"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}}} + + grouped := m.Get("data").GroupMSI(func(i int, val map[string]interface{}) string { + return fmt.Sprintf("%v", i%2 == 0) + }).Data().(map[string][]map[string]interface{}) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 2, len(grouped["false"])) +} + +func TestReplaceMSI(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), 
map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + rawArr := m.Get("data").MustMSISlice() + + replaced := m.Get("data").ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + replacedArr := replaced.MustMSISlice() + + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } +} + +func TestReplaceMSI2(t *testing.T) { + m := objx.Map{"data": []objx.Map{{"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}}} + rawArr := m.Get("data").MustMSISlice() + + replaced := m.Get("data").ReplaceMSI(func(index int, val map[string]interface{}) map[string]interface{} { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + replacedArr := replaced.MustMSISlice() + + if assert.Equal(t, 5, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[0]) + } +} + +func TestCollectMSI(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := m.Get("data").CollectMSI(func(index int, val 
map[string]interface{}) interface{} { + return index + }) + collectedArr := collected.MustInterSlice() + + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } +} + +func TestCollectMSI2(t *testing.T) { + m := objx.Map{"data": []objx.Map{{"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}, {"name": "Taylor"}}} + + collected := m.Get("data").CollectMSI(func(index int, val map[string]interface{}) interface{} { + return index + }) + collectedArr := collected.MustInterSlice() + + if assert.Equal(t, 5, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + } +} + +/* + Tests for ObjxMap ((objx.Map) and [](objx.Map)) +*/ +func TestObjxMap(t *testing.T) { + val := (objx.Map)(objx.New(1)) + m := objx.Map{"value": val, "value2": map[string]interface{}{"name": "Taylor"}, "nothing": nil} + valMSI := objx.Map{"name": "Taylor"} + + assert.Equal(t, val, m.Get("value").ObjxMap()) + assert.Equal(t, valMSI, m.Get("value2").ObjxMap()) + assert.Equal(t, val, m.Get("value").MustObjxMap()) + assert.Equal(t, valMSI, m.Get("value2").MustObjxMap()) + assert.Equal(t, (objx.Map)(objx.New(nil)), m.Get("nothing").ObjxMap()) + assert.Equal(t, val, m.Get("nothing").ObjxMap(objx.New(1))) + assert.Panics(t, func() { + m.Get("age").MustObjxMap() + }) +} + +func TestObjxMapSlice(t *testing.T) { + val := (objx.Map)(objx.New(1)) + m := objx.Map{ + "value": [](objx.Map){val}, + "value2": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Taylor"})}, + "value3": []interface{}{val}, + "value4": []interface{}{map[string]interface{}(map[string]interface{}{"name": "Taylor"})}, + 
"nothing": nil, + } + valMSI := objx.Map{"name": "Taylor"} + + assert.Equal(t, val, m.Get("value").ObjxMapSlice()[0]) + assert.Equal(t, valMSI, m.Get("value2").ObjxMapSlice()[0]) + assert.Equal(t, val, m.Get("value3").ObjxMapSlice()[0]) + assert.Equal(t, valMSI, m.Get("value4").ObjxMapSlice()[0]) + assert.Equal(t, val, m.Get("value").MustObjxMapSlice()[0]) + assert.Equal(t, valMSI, m.Get("value2").MustObjxMapSlice()[0]) + assert.Equal(t, val, m.Get("value3").MustObjxMapSlice()[0]) + assert.Equal(t, valMSI, m.Get("value4").MustObjxMapSlice()[0]) + assert.Equal(t, [](objx.Map)(nil), m.Get("nothing").ObjxMapSlice()) + assert.Equal(t, val, m.Get("nothing").ObjxMapSlice([](objx.Map){(objx.Map)(objx.New(1))})[0]) + assert.Panics(t, func() { + m.Get("nothing").MustObjxMapSlice() + }) + + o := objx.MustFromJSON(`{"d":[{"author":{"displayName":"DemoUser3","id":2},"classes":null,"id":9879,"v":{"code":"","created":"2013-09-19T09:38:50+02:00","published":"0001-01-01T00:00:00Z","updated":"2013-09-19T09:38:50+02:00"}}],"s":200}`) + assert.Equal(t, 9879, o.Get("d").MustObjxMapSlice()[0].Get("id").Int()) + assert.Equal(t, 1, len(o.Get("d").ObjxMapSlice())) + + i := objx.MustFromJSON(`{"d":[{"author":"abc"},[1]]}`) + assert.Nil(t, i.Get("d").ObjxMapSlice()) +} + +func TestIsObjxMap(t *testing.T) { + m := objx.Map{"data": (objx.Map)(objx.New(1)), "data2": map[string]interface{}{"name": "Taylor"}} + + assert.True(t, m.Get("data").IsObjxMap()) + assert.True(t, m.Get("data2").IsObjxMap()) +} + +func TestIsObjxMapSlice(t *testing.T) { + m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1))}, "data2": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Taylor"})}} + + assert.True(t, m.Get("data").IsObjxMapSlice()) + assert.True(t, m.Get("data2").IsObjxMapSlice()) + + o := 
objx.MustFromJSON(`{"d":[{"author":{"displayName":"DemoUser3","id":2},"classes":null,"id":9879,"v":{"code":"","created":"2013-09-19T09:38:50+02:00","published":"0001-01-01T00:00:00Z","updated":"2013-09-19T09:38:50+02:00"}}],"s":200}`) + assert.True(t, o.Has("d")) + assert.True(t, o.Get("d").IsObjxMapSlice()) + + //Valid json but not MSI slice + o = objx.MustFromJSON(`{"d":[{"author":"abc"},[1]]}`) + assert.True(t, o.Has("d")) + assert.False(t, o.Get("d").IsObjxMapSlice()) +} + +func TestEachObjxMap(t *testing.T) { + m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1))}} + count := 0 + replacedVals := make([](objx.Map), 0) + assert.Equal(t, m.Get("data"), m.Get("data").EachObjxMap(func(i int, val objx.Map) bool { + count++ + replacedVals = append(replacedVals, val) + + // abort early + return i != 2 + })) + + m2 := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + assert.Equal(t, m2.Get("data"), m2.Get("data").EachObjxMap(func(i int, val objx.Map) bool { + count++ + replacedVals = append(replacedVals, val) + + // abort early + return i != 2 + })) + + assert.Equal(t, count, 6) + assert.Equal(t, replacedVals[0], m.Get("data").MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[1], m.Get("data").MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[2], m.Get("data").MustObjxMapSlice()[2]) + assert.Equal(t, replacedVals[3], m2.Get("data").MustObjxMapSlice()[0]) + assert.Equal(t, replacedVals[4], m2.Get("data").MustObjxMapSlice()[1]) + assert.Equal(t, replacedVals[5], m2.Get("data").MustObjxMapSlice()[2]) +} + +func TestWhereObjxMap(t *testing.T) { + 
m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1))}} + + selected := m.Get("data").WhereObjxMap(func(i int, val objx.Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 3, len(selected)) +} + +func TestWhereObjxMap2(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + selected := m.Get("data").WhereObjxMap(func(i int, val objx.Map) bool { + return i%2 == 0 + }).MustObjxMapSlice() + + assert.Equal(t, 2, len(selected)) +} + +func TestGroupObjxMap(t *testing.T) { + m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1))}} + + grouped := m.Get("data").GroupObjxMap(func(i int, val objx.Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).Data().(map[string][](objx.Map)) + + assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 3, len(grouped["false"])) +} + +func TestGroupObjxMap2(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + grouped := m.Get("data").GroupObjxMap(func(i int, val objx.Map) string { + return fmt.Sprintf("%v", i%2 == 0) + }).Data().(map[string][](objx.Map)) + 
+ assert.Equal(t, 2, len(grouped)) + assert.Equal(t, 3, len(grouped["true"])) + assert.Equal(t, 2, len(grouped["false"])) +} + +func TestReplaceObjxMap(t *testing.T) { + m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1))}} + rawArr := m.Get("data").MustObjxMapSlice() + + replaced := m.Get("data").ReplaceObjxMap(func(index int, val objx.Map) objx.Map { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + replacedArr := replaced.MustObjxMapSlice() + + if assert.Equal(t, 6, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[5]) + assert.Equal(t, replacedArr[5], rawArr[0]) + } +} + +func TestReplaceObjxMap2(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + rawArr := m.Get("data").MustObjxMapSlice() + + replaced := m.Get("data").ReplaceObjxMap(func(index int, val objx.Map) objx.Map { + if index < len(rawArr)-1 { + return rawArr[index+1] + } + return rawArr[0] + }) + replacedArr := replaced.MustObjxMapSlice() + + if assert.Equal(t, 5, len(replacedArr)) { + assert.Equal(t, replacedArr[0], rawArr[1]) + assert.Equal(t, replacedArr[1], rawArr[2]) + assert.Equal(t, replacedArr[2], rawArr[3]) + assert.Equal(t, replacedArr[3], rawArr[4]) + assert.Equal(t, replacedArr[4], rawArr[0]) + } +} + +func TestCollectObjxMap(t *testing.T) { + m := objx.Map{"data": [](objx.Map){(objx.Map)(objx.New(1)), 
(objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1)), (objx.Map)(objx.New(1))}} + + collected := m.Get("data").CollectObjxMap(func(index int, val objx.Map) interface{} { + return index + }) + collectedArr := collected.MustInterSlice() + + if assert.Equal(t, 6, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + assert.Equal(t, collectedArr[5], 5) + } +} + +func TestCollectObjxMap2(t *testing.T) { + m := objx.Map{"data": []map[string]interface{}{map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"}), map[string]interface{}(map[string]interface{}{"name": "Tyler"})}} + + collected := m.Get("data").CollectObjxMap(func(index int, val objx.Map) interface{} { + return index + }) + collectedArr := collected.MustInterSlice() + + if assert.Equal(t, 5, len(collectedArr)) { + assert.Equal(t, collectedArr[0], 0) + assert.Equal(t, collectedArr[1], 1) + assert.Equal(t, collectedArr[2], 2) + assert.Equal(t, collectedArr[3], 3) + assert.Equal(t, collectedArr[4], 4) + } +} diff --git a/vendor/github.com/stretchr/objx/value.go b/vendor/github.com/stretchr/objx/value.go index 956a2211d4d5..4e5f9b77e69c 100644 --- a/vendor/github.com/stretchr/objx/value.go +++ b/vendor/github.com/stretchr/objx/value.go @@ -20,6 +20,8 @@ func (v *Value) Data() interface{} { // String returns the value always as a string func (v *Value) String() string { switch { + case v.IsNil(): + return "" case v.IsStr(): return v.Str() case v.IsBool(): @@ -30,8 +32,6 @@ func (v *Value) String() string { return strconv.FormatFloat(v.Float64(), 'f', -1, 64) case v.IsInt(): return strconv.FormatInt(int64(v.Int()), 
10) - case v.IsInt(): - return strconv.FormatInt(int64(v.Int()), 10) case v.IsInt8(): return strconv.FormatInt(int64(v.Int8()), 10) case v.IsInt16(): @@ -51,6 +51,109 @@ func (v *Value) String() string { case v.IsUint64(): return strconv.FormatUint(v.Uint64(), 10) } - return fmt.Sprintf("%#v", v.Data()) } + +// StringSlice returns the value always as a []string +func (v *Value) StringSlice(optionalDefault ...[]string) []string { + switch { + case v.IsStrSlice(): + return v.MustStrSlice() + case v.IsBoolSlice(): + slice := v.MustBoolSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatBool(iv) + } + return vals + case v.IsFloat32Slice(): + slice := v.MustFloat32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(float64(iv), 'f', -1, 32) + } + return vals + case v.IsFloat64Slice(): + slice := v.MustFloat64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatFloat(iv, 'f', -1, 64) + } + return vals + case v.IsIntSlice(): + slice := v.MustIntSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt8Slice(): + slice := v.MustInt8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt16Slice(): + slice := v.MustInt16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt32Slice(): + slice := v.MustInt32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(int64(iv), 10) + } + return vals + case v.IsInt64Slice(): + slice := v.MustInt64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatInt(iv, 10) + } + return vals + case v.IsUintSlice(): + slice := 
v.MustUintSlice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint8Slice(): + slice := v.MustUint8Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint16Slice(): + slice := v.MustUint16Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint32Slice(): + slice := v.MustUint32Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(uint64(iv), 10) + } + return vals + case v.IsUint64Slice(): + slice := v.MustUint64Slice() + vals := make([]string, len(slice)) + for i, iv := range slice { + vals[i] = strconv.FormatUint(iv, 10) + } + return vals + } + if len(optionalDefault) == 1 { + return optionalDefault[0] + } + + return []string{} +} diff --git a/vendor/github.com/stretchr/objx/value_test.go b/vendor/github.com/stretchr/objx/value_test.go index 521405801be4..898a59af7d86 100644 --- a/vendor/github.com/stretchr/objx/value_test.go +++ b/vendor/github.com/stretchr/objx/value_test.go @@ -1,28 +1,36 @@ -package objx +package objx_test import ( - "github.com/stretchr/testify/assert" "testing" + + "github.com/stretchr/objx" + "github.com/stretchr/testify/assert" ) func TestStringTypeString(t *testing.T) { - m := New(map[string]interface{}{"string": "foo"}) + m := objx.Map{ + "string": "foo", + } + assert.Equal(t, "foo", m.Get("string").String()) } func TestStringTypeBool(t *testing.T) { - m := New(map[string]interface{}{"bool": true}) + m := objx.Map{ + "bool": true, + } + assert.Equal(t, "true", m.Get("bool").String()) } func TestStringTypeInt(t *testing.T) { - m := New(map[string]interface{}{ + m := objx.Map{ "int": int(1), "int8": int8(8), "int16": int16(16), "int32": int32(32), "int64": int64(64), - }) + } assert.Equal(t, "1", 
m.Get("int").String()) assert.Equal(t, "8", m.Get("int8").String()) @@ -32,13 +40,13 @@ func TestStringTypeInt(t *testing.T) { } func TestStringTypeUint(t *testing.T) { - m := New(map[string]interface{}{ + m := objx.Map{ "uint": uint(1), "uint8": uint8(8), "uint16": uint16(16), "uint32": uint32(32), "uint64": uint64(64), - }) + } assert.Equal(t, "1", m.Get("uint").String()) assert.Equal(t, "8", m.Get("uint8").String()) @@ -48,19 +56,88 @@ func TestStringTypeUint(t *testing.T) { } func TestStringTypeFloat(t *testing.T) { - m := New(map[string]interface{}{ + m := objx.Map{ "float32": float32(32.32), "float64": float64(64.64), - }) + } assert.Equal(t, "32.32", m.Get("float32").String()) assert.Equal(t, "64.64", m.Get("float64").String()) } func TestStringTypeOther(t *testing.T) { - m := New(map[string]interface{}{ - "other": []string{"foo", "bar"}, - }) + m := objx.Map{ + "other": []string{"foo", "bar"}, + "nilValue": nil, + } assert.Equal(t, "[]string{\"foo\", \"bar\"}", m.Get("other").String()) + assert.Equal(t, "", m.Get("nilValue").String()) +} + +func TestStringSliceTypeString(t *testing.T) { + m := objx.Map{ + "string": []string{"foo", "bar"}, + } + + assert.Equal(t, []string{"foo", "bar"}, m.Get("string").StringSlice()) +} + +func TestStringSliceTypeBool(t *testing.T) { + m := objx.Map{ + "bool": []bool{true, false}, + } + + assert.Equal(t, []string{"true", "false"}, m.Get("bool").StringSlice()) +} + +func TestStringSliceTypeInt(t *testing.T) { + m := objx.Map{ + "int": []int{1, 2}, + "int8": []int8{8, 9}, + "int16": []int16{16, 17}, + "int32": []int32{32, 33}, + "int64": []int64{64, 65}, + } + + assert.Equal(t, []string{"1", "2"}, m.Get("int").StringSlice()) + assert.Equal(t, []string{"8", "9"}, m.Get("int8").StringSlice()) + assert.Equal(t, []string{"16", "17"}, m.Get("int16").StringSlice()) + assert.Equal(t, []string{"32", "33"}, m.Get("int32").StringSlice()) + assert.Equal(t, []string{"64", "65"}, m.Get("int64").StringSlice()) +} + +func 
TestStringSliceTypeUint(t *testing.T) { + m := objx.Map{ + "uint": []uint{1, 2}, + "uint8": []uint8{8, 9}, + "uint16": []uint16{16, 17}, + "uint32": []uint32{32, 33}, + "uint64": []uint64{64, 65}, + } + + assert.Equal(t, []string{"1", "2"}, m.Get("uint").StringSlice()) + assert.Equal(t, []string{"8", "9"}, m.Get("uint8").StringSlice()) + assert.Equal(t, []string{"16", "17"}, m.Get("uint16").StringSlice()) + assert.Equal(t, []string{"32", "33"}, m.Get("uint32").StringSlice()) + assert.Equal(t, []string{"64", "65"}, m.Get("uint64").StringSlice()) +} + +func TestStringSliceTypeFloat(t *testing.T) { + m := objx.Map{ + "float32": []float32{32.32, 33.33}, + "float64": []float64{64.64, 65.65}, + } + + assert.Equal(t, []string{"32.32", "33.33"}, m.Get("float32").StringSlice()) + assert.Equal(t, []string{"64.64", "65.65"}, m.Get("float64").StringSlice()) +} + +func TestStringSliceTypeOther(t *testing.T) { + m := objx.Map{ + "other": "foo", + } + + assert.Equal(t, []string{}, m.Get("other").StringSlice()) + assert.Equal(t, []string{"bar"}, m.Get("other").StringSlice([]string{"bar"})) } diff --git a/vendor/k8s.io/kubernetes/CHANGELOG-1.16.md b/vendor/k8s.io/kubernetes/CHANGELOG-1.16.md index 86265a898cff..22cd57cd5cb2 100644 --- a/vendor/k8s.io/kubernetes/CHANGELOG-1.16.md +++ b/vendor/k8s.io/kubernetes/CHANGELOG-1.16.md @@ -1,55 +1,970 @@ -- [v1.16.0-rc.1](#v1160-rc1) - - [Downloads for v1.16.0-rc.1](#downloads-for-v1160-rc1) +- [v1.16.1](#v1161) + - [Downloads for v1.16.1](#downloads-for-v1161) - [Client Binaries](#client-binaries) - [Server Binaries](#server-binaries) - [Node Binaries](#node-binaries) - - [Changelog since v1.16.0-beta.2](#changelog-since-v1160-beta2) + - [Changelog since v1.16.0](#changelog-since-v1160) - [Other notable changes](#other-notable-changes) -- [v1.16.0-beta.2](#v1160-beta2) - - [Downloads for v1.16.0-beta.2](#downloads-for-v1160-beta2) +- [v1.16.0](#v1160) + - [Downloads for v1.16.0](#downloads-for-v1160) - [Client Binaries](#client-binaries-1) 
- [Server Binaries](#server-binaries-1) - [Node Binaries](#node-binaries-1) - - [Changelog since v1.16.0-beta.1](#changelog-since-v1160-beta1) - - [Other notable changes](#other-notable-changes-1) -- [v1.16.0-beta.1](#v1160-beta1) - - [Downloads for v1.16.0-beta.1](#downloads-for-v1160-beta1) +- [Kubernetes v1.16.0 Release Notes](#kubernetes-v1160-release-notes) + - [What’s New (Major Themes)](#whats-new-major-themes) + - [Additional Notable Feature Updates](#additional-notable-feature-updates) + - [Known Issues](#known-issues) + - [Urgent Upgrade Notes](#urgent-upgrade-notes) + - [(No, really, you MUST read this before you upgrade)](#no-really-you-must-read-this-before-you-upgrade) + - [Cluster Lifecycle](#cluster-lifecycle) + - [Storage](#storage) + - [Deprecations and Removals](#deprecations-and-removals) + - [Metrics Changes](#metrics-changes) + - [Added metrics](#added-metrics) + - [Removed metrics](#removed-metrics) + - [Deprecated/changed metrics](#deprecatedchanged-metrics) + - [Notable Features](#notable-features) + - [Beta](#beta) + - [Alpha](#alpha) + - [CLI Improvements](#cli-improvements) + - [Misc](#misc) + - [API Changes](#api-changes) + - [Other notable changes](#other-notable-changes-1) + - [API Machinery](#api-machinery) + - [Apps](#apps) + - [Auth](#auth) + - [CLI](#cli) + - [Cloud Provider](#cloud-provider) + - [Cluster Lifecycle](#cluster-lifecycle-1) + - [Instrumentation](#instrumentation) + - [Network](#network) + - [Node](#node) + - [Scheduling](#scheduling) + - [Storage](#storage-1) + - [Testing](#testing) + - [Windows](#windows) + - [Dependencies](#dependencies) + - [Changed](#changed) + - [Unchanged](#unchanged) + - [Removed](#removed) + - [Detailed go Dependency Changes](#detailed-go-dependency-changes) + - [Added](#added) + - [Changed](#changed-1) + - [Removed](#removed-1) +- [v1.16.0-rc.2](#v1160-rc2) + - [Downloads for v1.16.0-rc.2](#downloads-for-v1160-rc2) - [Client Binaries](#client-binaries-2) - [Server 
Binaries](#server-binaries-2) - [Node Binaries](#node-binaries-2) - - [Changelog since v1.16.0-alpha.3](#changelog-since-v1160-alpha3) - - [Action Required](#action-required) + - [Changelog since v1.16.0-rc.1](#changelog-since-v1160-rc1) - [Other notable changes](#other-notable-changes-2) -- [v1.16.0-alpha.3](#v1160-alpha3) - - [Downloads for v1.16.0-alpha.3](#downloads-for-v1160-alpha3) +- [v1.16.0-rc.1](#v1160-rc1) + - [Downloads for v1.16.0-rc.1](#downloads-for-v1160-rc1) - [Client Binaries](#client-binaries-3) - [Server Binaries](#server-binaries-3) - [Node Binaries](#node-binaries-3) - - [Changelog since v1.16.0-alpha.2](#changelog-since-v1160-alpha2) - - [Action Required](#action-required-1) + - [Changelog since v1.16.0-beta.2](#changelog-since-v1160-beta2) - [Other notable changes](#other-notable-changes-3) -- [v1.16.0-alpha.2](#v1160-alpha2) - - [Downloads for v1.16.0-alpha.2](#downloads-for-v1160-alpha2) +- [v1.16.0-beta.2](#v1160-beta2) + - [Downloads for v1.16.0-beta.2](#downloads-for-v1160-beta2) - [Client Binaries](#client-binaries-4) - [Server Binaries](#server-binaries-4) - [Node Binaries](#node-binaries-4) - - [Changelog since v1.16.0-alpha.1](#changelog-since-v1160-alpha1) - - [Action Required](#action-required-2) + - [Changelog since v1.16.0-beta.1](#changelog-since-v1160-beta1) - [Other notable changes](#other-notable-changes-4) -- [v1.16.0-alpha.1](#v1160-alpha1) - - [Downloads for v1.16.0-alpha.1](#downloads-for-v1160-alpha1) +- [v1.16.0-beta.1](#v1160-beta1) + - [Downloads for v1.16.0-beta.1](#downloads-for-v1160-beta1) - [Client Binaries](#client-binaries-5) - [Server Binaries](#server-binaries-5) - [Node Binaries](#node-binaries-5) + - [Changelog since v1.16.0-alpha.3](#changelog-since-v1160-alpha3) + - [Action Required](#action-required) + - [Other notable changes](#other-notable-changes-5) +- [v1.16.0-alpha.3](#v1160-alpha3) + - [Downloads for v1.16.0-alpha.3](#downloads-for-v1160-alpha3) + - [Client Binaries](#client-binaries-6) + - 
[Server Binaries](#server-binaries-6) + - [Node Binaries](#node-binaries-6) + - [Changelog since v1.16.0-alpha.2](#changelog-since-v1160-alpha2) + - [Action Required](#action-required-1) + - [Other notable changes](#other-notable-changes-6) +- [v1.16.0-alpha.2](#v1160-alpha2) + - [Downloads for v1.16.0-alpha.2](#downloads-for-v1160-alpha2) + - [Client Binaries](#client-binaries-7) + - [Server Binaries](#server-binaries-7) + - [Node Binaries](#node-binaries-7) + - [Changelog since v1.16.0-alpha.1](#changelog-since-v1160-alpha1) + - [Action Required](#action-required-2) + - [Other notable changes](#other-notable-changes-7) +- [v1.16.0-alpha.1](#v1160-alpha1) + - [Downloads for v1.16.0-alpha.1](#downloads-for-v1160-alpha1) + - [Client Binaries](#client-binaries-8) + - [Server Binaries](#server-binaries-8) + - [Node Binaries](#node-binaries-8) - [Changelog since v1.15.0](#changelog-since-v1150) - [Action Required](#action-required-3) - - [Other notable changes](#other-notable-changes-5) + - [Other notable changes](#other-notable-changes-8) +# v1.16.1 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.16.1 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes.tar.gz) | `3dc4f0f2a208d3f235b4dc579418c503ba64fec261294c3683268259117e5c82f56db3f0c4ad63199f0f35f0de21df0b13ca00a74efe7b3142b275f0733e9219` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-src.tar.gz) | `2d79b1e4e43c57c0e24da07f1312d0e49f8c43ebba0231dd5d628a52c25e929fed34c3d34b2ab739c0348c2f42c88feea8f1a4c2d821ace170f451bf74903ae3` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-darwin-386.tar.gz) | `f7139df5f06f3fc9d1944058e7ba40a482621d6a169bc2f09a50dfa8f903866706051cded14fbd39d8d03c576f9e5219b0887f029ee1cac45919dc5ff232cdcd` 
+[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-darwin-amd64.tar.gz) | `c4d458a0c3ef2dc9e4527daecf6b34da065d1ae7fd245f8b8aceca43467f03073f538e65e504f9800d3db0c64d88fc41f687b493be4d9a8283f39dd001a86a7d` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-386.tar.gz) | `82d79bc833ae5317c4a1d0d397ccaa9aef616e0c2e5ff2166da8c66a4f55851f4bfa3b57708f75dc12eabefdee35b08d8f7bee74cfaf38a0339ee927f90e1655` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-amd64.tar.gz) | `e355a74a17d96785b0b217673e67fa0f02daa1939f10d410602ac0a0d061a4db71d727b67f75aa886007dab95dd5c7f8cc38253d291dc4d2504ce673df69fb32` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-arm.tar.gz) | `b9fba5995a7a02d001bead0305ae2a0c8ee7d2a98069aad6e57612988e298972cb6f69267afb79df292195958871c56a2cc5ec7a8e180a8d56c428f82bbbece8` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-arm64.tar.gz) | `64917bd6c4b277f4cb676b2e3d69cb12148bc90e2b04011d11985e492d525646fd13021b74f93914d37849fba87c07c513fafc1bd07edc7d3e4415fe8bd9ea37` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-ppc64le.tar.gz) | `258cc1b13f3ef90c52d401190f4d59c8c0d10367a6d960c6273522a15b879de2df4d141461d80338144e83995422da802a417a55911d7a4d8aeeec5aecd2adec` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-linux-s390x.tar.gz) | `adaa34bde363a030f099a96417e257688e6280d86bdd3e04f7c06b6bccce639f599d494b5181a78e91c7f50207c53fa1a04b3b2fbd65be91512e51441e66a2e8` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-windows-386.tar.gz) | `3ed99240a99a2d9f4193264f14168896a2418f296e5a2372e4e6aac44b65cd6a531ceec71d24333a7c825ba02acec63fdb51b97aa9fb4665048e929b7e4b774d` 
+[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-client-windows-amd64.tar.gz) | `cbf8363147e44bfadee4da9ced9ab7bfc3c88ba7dcfa3f64803376efa5adc6dbb38519f73253aa9ea8b12bb42a92f5ea78e8b63fb082e4ace04d3ee3e285f549` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-server-linux-amd64.tar.gz) | `c43795fc2bfaee240f1e32e0b523cbbc8a888a40efa85371e682433d6f60dd1f3225ca34484fc9db54e68e250acbb5c2c388071bcd033fed0d8a47d044708b0b` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-server-linux-arm.tar.gz) | `3a6c38db6d6c0950f6d3cd4c158ba9c816dcbf1b1959a74d55ead53b1df2cf9131f56d5b0f5720f9c2000085ada1fd4d48439ea20f4ba35764e1a54f94848697` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-server-linux-arm64.tar.gz) | `3cbde62bb5ef0af033d643cea800e891c199f9d6e9f0a3e1131bebe6759fd72b2c8d1f86d92ab5d1c352297b57320b47c70a6422768656b2c44c4ef4b7e3c721` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-server-linux-ppc64le.tar.gz) | `a60a25a346de61e705d416b50f8eac582a36faa622c6c45d4e3af92c46f48409b4c2a9bcbfd9297cc77d747cf1a96721146a24a48500a55b806c6fc0d9008e21` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-server-linux-s390x.tar.gz) | `1ee9bbb36245825604d7cf4c9bdb2e43d0d755958413809ae7d3c9448ada567c0ab510f68318055dd71ec73d2ccce5b8d23faa698fd42772e3bd3c9592006d80` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-linux-amd64.tar.gz) | `4ef686de6aa5a2bdcb8034b3def008995fc4474f34f939620c657ae115c61403c8d5698c9af68d3fe7015beaa8d578edc9f1a62b08dea92d99a3c73c9fe9e7a6` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-linux-arm.tar.gz) | 
`58607b1cf68e8420bacc8dc3f95b19b7f90fe4fda1255814c76e66f67638b8db150eb464f85e1b625e13a74b0d86a973e54da87e2fb41985eff14b25a7761b1d` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-linux-arm64.tar.gz) | `f02417299b5b6f3ee4b325d4a23b0ca19ee3c10dbe871a716470e68a3e89f4b84232eac1048dc9dc88a00ed069a8fd3b4f7817664e296e8842322379702e67e4` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-linux-ppc64le.tar.gz) | `fa3b44b668d0a341847601a18c463698810457e113459d6f8ddbce6f9305b613c4f61f9831e16d8bab128efdd69524c86981785f9703e248eff11f95e9330e74` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-linux-s390x.tar.gz) | `afe166d9245bd35fbc21763b9ef24bf174396c0611f75ddf93f1e4f3fb3e1cf1d4e7998d963e26fe3761bf4984ea33a5d6656035092c9b0d35e94f97882a5595` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.1/kubernetes-node-windows-amd64.tar.gz) | `93ebe21c147dcb38cb1ee222975f35f7c8c8175255cfce09381c8852734d054def58bc630cf868d99baa7f90bd2c201ccaa42dbae3ef360c3135c825ec2c74b1` + +## Changelog since v1.16.0 + +### Other notable changes + +* Limit the body length of exec readiness/liveness probes. remote CRIs and Docker shim read a max of 16MB output of which the exec probe itself inspects 10kb. ([#82514](https://github.com/kubernetes/kubernetes/pull/82514), [@dims](https://github.com/dims)) +* Fix possible fd leak and closing of dirs when using openstack ([#82873](https://github.com/kubernetes/kubernetes/pull/82873), [@odinuge](https://github.com/odinuge)) +* Update to go 1.12.10 ([#83139](https://github.com/kubernetes/kubernetes/pull/83139), [@cblecker](https://github.com/cblecker)) +* Use ipv4 in wincat port forward. 
([#83036](https://github.com/kubernetes/kubernetes/pull/83036), [@liyanhui1228](https://github.com/liyanhui1228)) +* Fixes a panic in kube-controller-manager cleaning up bootstrap tokens ([#82887](https://github.com/kubernetes/kubernetes/pull/82887), [@tedyu](https://github.com/tedyu)) +* Resolves bottleneck in internal API server communication that can cause increased goroutines and degrade API Server performance ([#80465](https://github.com/kubernetes/kubernetes/pull/80465), [@answer1991](https://github.com/answer1991)) +* Resolves regression generating informers for packages whose names contain `.` characters ([#82410](https://github.com/kubernetes/kubernetes/pull/82410), [@nikhita](https://github.com/nikhita)) +* Fixed a scheduler panic when using PodAffinity. ([#82841](https://github.com/kubernetes/kubernetes/pull/82841), [@Huang-Wei](https://github.com/Huang-Wei)) +* Update Cluster Autoscaler version to 1.16.1 (release notes: https://github.com/kubernetes/autoscaler/releases/tag/cluster-autoscaler-1.16.1) ([#83052](https://github.com/kubernetes/kubernetes/pull/83052), [@losipiuk](https://github.com/losipiuk)) +* Resolves issue with /readyz and /livez not including etcd and kms health checks ([#82713](https://github.com/kubernetes/kubernetes/pull/82713), [@logicalhan](https://github.com/logicalhan)) +* fix: azure disk detach failure if node not exists ([#82640](https://github.com/kubernetes/kubernetes/pull/82640), [@andyzhangx](https://github.com/andyzhangx)) + + + +# v1.16.0 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.16.0 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes.tar.gz) | `99aa74225dd999d112ebc3e7b7d586a2312ec9c99de7a7fef8bbbfb198a5b4cf740baa57ea262995303e2a5060d26397775d928a086acd926042a41ef00f200b` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-src.tar.gz) | 
`0be7d1d6564385cc20ff4d26bab55b71cc8657cf795429d04caa5db133a6725108d6a116553bf55081ccd854a4078e84d26366022634cdbfffd1a34a10b566cf` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-darwin-386.tar.gz) | `a5fb80d26c2a75741ad0efccdacd5d5869fbc303ae4bb1920a6883ebd93a6b41969f898d177f2602faf23a7462867e1235edeb0ba0675041d0c8d5ab266ec62d` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-darwin-amd64.tar.gz) | `47a9a78fada4b840d9ae4dac2b469a36d0812ac83d22fd798c4cb0f1673fb65c6558383c19a7268ed7101ac9fa32d53d79498407bdf94923f4f8f019ea39e912` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-386.tar.gz) | `916e4dd98f5ed8ee111eeb6c2cf5c5f313e1d98f3531b40a5a777240ddb96b9cc53df101daa077ffff52cf01167fdcc39d38a8655631bac846641308634e127a` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-amd64.tar.gz) | `fccf152588edbaaa21ca94c67408b8754f8bc55e49470380e10cf987be27495a8411d019d807df2b2c1c7620f8535e8f237848c3c1ac3791b91da8df59dea5aa` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-arm.tar.gz) | `066c55fabbe3434604c46574c51c324336a02a5bfaed2e4d83b67012d26bf98354928c9c12758b53ece16b8567e2b5ce6cb88d5cf3008c7baf3c5df02611a610` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-arm64.tar.gz) | `e41be74cc36240a64ecc962a066988b5ef7c3f3112977efd4e307b35dd78688f41d6c5b376a6d1152d843182bbbe75d179de75675548bb846f8c1e28827e0e0c` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-ppc64le.tar.gz) | `08783eb3bb2e35b48dab3481e17d6e345d43bab8b8dee25bb5ff184ba46cb632750d4c38e9982366050aecce6e121c67bb6812dbfd607216acd3a2d19e05f5a1` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-linux-s390x.tar.gz) | 
`bcb6eb9cd3d8c92dfaf4f102ff2dc7517f632b1e955be6a02e7f223b15fc09c4ca2d6d9cd5b23871168cf6b455e2368daf17025c9cd61bf43d2ea72676db913a` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-windows-386.tar.gz) | `efbc764d8e2889ce13c9eaaa61f685a8714563ddc20464523140d6f5bef0dfd51b745c3bd3fe2093258db242bf9b3207f8e9f451b0484de64f18cdb7162ec30e` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-client-windows-amd64.tar.gz) | `b34bce694c6a0e4c8c5ddabcecb6adcb4d35f8c126b4b5ced7e44ef39cd45982dd9f6483a38e04430846f4da592dc74b475c37da7fe08444ef4eb5efde85e0b2` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-server-linux-amd64.tar.gz) | `a6bdac1eba1b87dc98b2bf5bf3690758960ecb50ed067736459b757fca0c3b01dd01fd215b4c06a653964048c6a81ea80b61ee8c7e4c98241409c091faf0cee1` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-server-linux-arm.tar.gz) | `0560e1e893fe175d74465065d43081ee7f40ba7e7d7cafa53e5d7491f89c61957cf0d3abfa4620cd0f33b6e44911b43184199761005d20b72e3cd2ddc1224f9f` +[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-server-linux-arm64.tar.gz) | `4d5dd001fa3ac2b28bfee64e85dbedab0706302ffd634c34330617674e7a90e0108710f4248a2145676bd72f0bbc3598ed61e1e739c64147ea00d3b6a4ba4604` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-server-linux-ppc64le.tar.gz) | `cc642fca57e22bf6edd371e61e254b369b760c67fa00cac50e34464470f7eea624953deff800fa1e4f7791fe06791c48dbba3ed47e789297ead889c2aa7b2bbf` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-server-linux-s390x.tar.gz) | `1f480ba6f593a3aa20203e82e9e34ac206e35839fd9135f495c5d154480c57d1118673dcb5a6b112c18025fb4a847f65dc7aac470f01d2f06ad3da6aa63d98a3` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- 
+[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-linux-amd64.tar.gz) | `e987f141bc0a248e99a371ce220403b78678c739a39dad1c1612e63a0bee4525fbca5ee8c2b5e5332a553cc5f63bce9ec95645589298f41fe83e1fd41faa538e` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-linux-arm.tar.gz) | `8b084c1063beda2dd4000e8004634d82e580f05cc300c2ee13ad84bb884987b2c7fd1f033fb2ed46941dfc311249acef06efe5044fb72dc4b6089c66388e1f61` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-linux-arm64.tar.gz) | `365bdf9759e24d22cf507a0a5a507895ed44723496985e6d8f0bd10b03ffe7c78198732ee39873912147f2dd840d2e284118fc6fc1e3876d8f4c2c3a441def0b` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-linux-ppc64le.tar.gz) | `ff54d83dd0fd3c447cdd76cdffd253598f6800045d2b6b91b513849d15b0b602590002e7fe2a55dc25ed5a05787f4973c480126491d24be7c5fce6ce98d0b6b6` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-linux-s390x.tar.gz) | `527cd9bf4bf392c3f097f232264c0f0e096ac410b5211b0f308c9d964f86900f5875012353b0b787efc9104f51ad90880f118efb1da54eba5c7675c1840eae5f` +[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.0/kubernetes-node-windows-amd64.tar.gz) | `4f76a94c70481dd1d57941f156f395df008835b5d1cc17708945e8f560234dbd426f3cff7586f10fd4c24e14e3dfdce28e90c8ec213c23d6ed726aec94e9b0ff` + +# Kubernetes v1.16.0 Release Notes + +A complete changelog for the release notes is now hosted in a customizable format at [relnotes.k8s.io](https://relnotes.k8s.io/?releaseVersions=1.16.0). Check it out and please give us your feedback! + +## What’s New (Major Themes) + +We’re pleased to announce the delivery of Kubernetes 1.16, our third release of 2019! Kubernetes 1.16 consists of 31 enhancements: 8 enhancements moving to stable, 8 enhancements in beta, and 15 enhancements in alpha. 
+ +The main themes of this release are: + +- **Custom resources:** CRDs are in widespread use as a way to extend Kubernetes to persist and serve new resource types, and have been available in beta since the 1.7 release. The 1.16 release marks the graduation of CRDs to general availability (GA). +- **Admission webhooks:** Admission webhooks are in widespread use as a Kubernetes extensibility mechanism and have been available in beta since the 1.9 release. The 1.16 release marks the graduation of admission webhooks to general availability (GA). +- **Overhauled metrics**: Kubernetes has previously made extensive use of a global metrics registry to register metrics to be exposed. By implementing a metrics registry, metrics are registered in more transparent means. Previously, Kubernetes metrics have been excluded from any kind of stability requirements. +- **Volume Extension**: There are quite a few enhancements in this release that pertain to volumes and volume modifications. Volume resizing support in CSI specs is moving to beta which allows for any CSI spec volume plugin to be resizable. + +### Additional Notable Feature Updates + +- [Topology Manager](https://github.com/kubernetes/enhancements/issues/693), a new Kubelet component, aims to co-ordinate resource assignment decisions to provide optimized resource allocations. +- [IPv4/IPv6 dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack) enables the allocation of both IPv4 and IPv6 addresses to Pods and Services. +- [API Server Network Proxy](https://github.com/kubernetes/enhancements/blob/master/keps/sig-api-machinery/20190226-network-proxy.md) going alpha in 1.16. +- [Extensions](https://github.com/kubernetes/enhancements/blob/master/keps/sig-cloud-provider/20190422-cloud-controller-manager-migration.md) for Cloud Controller Manager Migration. +- Continued deprecation of extensions/v1beta1, apps/v1beta1, and apps/v1beta2 APIs; these extensions will be retired in 1.16! 
+ +## Known Issues + +- The etcd and KMS plugin health checks are not exposed in the new `livez` and `readyz` endpoints. This will be fixed in 1.16.1. +- Systems running `iptables` 1.8.0 or newer should start it in legacy mode. Please note that this affects all versions of Kubernetes and not only v1.16.0. For more detailed information about the issue and how to apply a workaround, please refer to the official documentation +- Generating informers for packages in directories containing dots in their name is broken. This will be fixed in v1.16.1. ([#82860](https://github.com/kubernetes/kubernetes/issues/82860)) +- kube-scheduler won't be able to report scheduling Events if `events.k8s.io/v1beta1` API is disabled. We are targeting the fix for v1.16.2 ([#83203](https://github.com/kubernetes/kubernetes/issues/83203)) + +## Urgent Upgrade Notes + +### (No, really, you MUST read this before you upgrade) + +#### Cluster Lifecycle + +- Container image tar files for `amd64` will now contain the architecture in the RepoTags manifest.json section. + If you are using docker manifests there are not visible changes. ([#80266](https://github.com/kubernetes/kubernetes/pull/80266), [@javier-b-perez](https://github.com/javier-b-perez)) +- kubeadm now deletes the bootstrap-kubelet.conf file after TLS bootstrap + User relying on bootstrap-kubelet.conf should switch to kubelet.conf that contains node credentials ([#80676](https://github.com/kubernetes/kubernetes/pull/80676), [@fabriziopandini](https://github.com/fabriziopandini)) +- Node labels `beta.kubernetes.io/metadata-proxy-ready`, `beta.kubernetes.io/masq-agent-ds-ready` and `beta.kubernetes.io/kube-proxy-ds-ready` are no longer added on new nodes. + - ip-masq-agent addon starts to use the label `node.kubernetes.io/masq-agent-ds-ready` instead of `beta.kubernetes.io/masq-agent-ds-ready` as its node selector.
+ - kube-proxy addon starts to use the label `node.kubernetes.io/kube-proxy-ds-ready` instead of `beta.kubernetes.io/kube-proxy-ds-ready` as its node selector. + - metadata-proxy addon starts to use the label `cloud.google.com/metadata-proxy-ready` instead of `beta.kubernetes.io/metadata-proxy-ready` as its node selector. + +#### Storage + +- When PodInfoOnMount is enabled for a CSI driver, the new csi.storage.k8s.io/ephemeral parameter in the volume context allows a driver's NodePublishVolume implementation to determine on a case-by-case basis whether the volume is ephemeral or a normal persistent volume ([#79983](https://github.com/kubernetes/kubernetes/pull/79983), [@pohly](https://github.com/pohly)) +- Add CSI Migration Shim for VerifyVolumesAreAttached and BulkVolumeVerify ([#80443](https://github.com/kubernetes/kubernetes/pull/80443), [@davidz627](https://github.com/davidz627)) +- Promotes VolumePVCDataSource (Cloning) feature to beta for 1.16 release ([#81792](https://github.com/kubernetes/kubernetes/pull/81792), [@j-griffith](https://github.com/j-griffith)) +- Integrated volume limits for in-tree and CSI volumes into one scheduler predicate. ([#77595](https://github.com/kubernetes/kubernetes/pull/77595), [@bertinatto](https://github.com/bertinatto)) + +## Deprecations and Removals + +- API + + - The following APIs are no longer served by default: + + - All resources under `apps/v1beta1` and `apps/v1beta2` - use `apps/v1` instead + - `daemonsets`, `deployments`, `replicasets` resources under `extensions/v1beta1` - use `apps/v1` instead + - `networkpolicies` resources under `extensions/v1beta1` - use `networking.k8s.io/v1` instead + - `podsecuritypolicies` resources under `extensions/v1beta1` - use `policy/v1beta1` instead + + Serving these resources can be temporarily re-enabled using the `--runtime-config` apiserver flag. 
+ + - `apps/v1beta1=true` + - `apps/v1beta2=true` + - `extensions/v1beta1/daemonsets=true,extensions/v1beta1/deployments=true,extensions/v1beta1/replicasets=true,extensions/v1beta1/networkpolicies=true,extensions/v1beta1/podsecuritypolicies=true` + + The ability to serve these resources will be completely removed in v1.18. ([#70672](https://github.com/kubernetes/kubernetes/pull/70672), [@liggitt](https://github.com/liggitt)) + + - Ingress resources will no longer be served from `extensions/v1beta1` in v1.20. Migrate use to the `networking.k8s.io/v1beta1` API, available since v1.14. Existing persisted data can be retrieved via the `networking.k8s.io/v1beta1` API. + - PriorityClass resources will no longer be served from `scheduling.k8s.io/v1beta1` and `scheduling.k8s.io/v1alpha1` in v1.17. Migrate to the `scheduling.k8s.io/v1` API, available since v1.14. Existing persisted data can be retrieved via the `scheduling.k8s.io/v1` API. + - The `export` query parameter for list API calls, deprecated since v1.14, will be removed in v1.18. + - The `series.state` field in the events.k8s.io/v1beta1 Event API is deprecated and will be removed in v1.18 ([#75987](https://github.com/kubernetes/kubernetes/pull/75987), [@yastij](https://github.com/yastij)) + - The `apiextensions.k8s.io/v1beta1` version of `CustomResourceDefinition` is deprecated and will no longer be served in v1.19. Use `apiextensions.k8s.io/v1` instead. ([#79604](https://github.com/kubernetes/kubernetes/pull/79604), [@liggitt](https://github.com/liggitt)) + - The `admissionregistration.k8s.io/v1beta1` versions of `MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration` are deprecated and will no longer be served in v1.19. Use `admissionregistration.k8s.io/v1` instead. ([#79549](https://github.com/kubernetes/kubernetes/pull/79549), [@liggitt](https://github.com/liggitt)) + - The alpha `metadata.initializers` field, deprecated in 1.13, has been removed. 
([#79504](https://github.com/kubernetes/kubernetes/pull/79504), [@yue9944882](https://github.com/yue9944882)) + - The deprecated node condition type `OutOfDisk` has been removed. Use the `DiskPressure` condition instead. ([#72420](https://github.com/kubernetes/kubernetes/pull/72420), [@Pingan2017](https://github.com/Pingan2017)) + - The `metadata.selfLink` field is deprecated in individual and list objects. It will no longer be returned starting in v1.20, and the field will be removed entirely in v1.21. ([#80978](https://github.com/kubernetes/kubernetes/pull/80978), [@wojtek-t](https://github.com/wojtek-t)) + - The deprecated cloud providers `ovirt`, `cloudstack` and `photon` have been removed ([#72178](https://github.com/kubernetes/kubernetes/pull/72178), [@dims](https://github.com/dims)) + - The `Cinder` and `ScaleIO` volume providers have been deprecated and will be removed in a future release. ([#80099](https://github.com/kubernetes/kubernetes/pull/80099), [@dims](https://github.com/dims)) + - The GA `PodPriority` feature gate is now on by default and cannot be disabled. The feature gate will be removed in v1.18. ([#79262](https://github.com/kubernetes/kubernetes/pull/79262), [@draveness](https://github.com/draveness)) + - Aggregated discovery requests can now timeout. Aggregated API servers must complete discovery calls within 5 seconds (other requests can take longer). Use the feature gate `EnableAggregatedDiscoveryTimeout=false` to temporarily revert behavior to the previous 30 second timeout if required (the temporary `EnableAggregatedDiscoveryTimeout` feature gate will be removed in v1.17). ([#82146](https://github.com/kubernetes/kubernetes/pull/82146), [@deads2k](https://github.com/deads2k)) + - the `scheduler.alpha.kubernetes.io/critical-pod` annotation is removed. Pod priority (`spec.priorityClassName`) should be used instead to mark pods as critical. 
([#80342](https://github.com/kubernetes/kubernetes/pull/80342), [@draveness](https://github.com/draveness)) + - the NormalizeScore plugin set is removed from scheduler framework config API. Use ScorePlugin only. ([#80930](https://github.com/kubernetes/kubernetes/pull/80930), [@liu-cong](https://github.com/liu-cong)) + +- Features: + + - The following features are now GA, and the associated feature gates are deprecated and will be removed in v1.17: + - `GCERegionalPersistentDisk` (since 1.15.0) + - `CustomResourcePublishOpenAPI` + - `CustomResourceSubresources` + - `CustomResourceValidation` + - `CustomResourceWebhookConversion` + - The feature flags `HugePages`, `VolumeScheduling`, `CustomPodDNS` and `PodReadinessGates` have been removed ([#79307](https://github.com/kubernetes/kubernetes/pull/79307), [@draveness](https://github.com/draveness)) + +- hyperkube + + - the `--make-symlinks` flag, deprecated in v1.14, has been removed. ([#80017](https://github.com/kubernetes/kubernetes/pull/80017), [@Pothulapati](https://github.com/Pothulapati)) + +- kube-apiserver + + - the `--basic-auth-file` flag and authentication mode is deprecated and will be removed in a future release. It is not recommended for production environments. ([#81152](https://github.com/kubernetes/kubernetes/pull/81152), [@tedyu](https://github.com/tedyu)) + - the `--cloud-provider-gce-lb-src-cidrs` flag has been deprecated. This flag will be removed once the GCE Cloud Provider is removed from kube-apiserver. ([#81094](https://github.com/kubernetes/kubernetes/pull/81094), [@andrewsykim](https://github.com/andrewsykim)) + - the `--enable-logs-handler` flag and log-serving functionality is deprecated since v1.15, and scheduled to be removed in v1.19. ([#77611](https://github.com/kubernetes/kubernetes/pull/77611), [@rohitsardesai83](https://github.com/rohitsardesai83)) + - Deprecate the default service IP CIDR. The previous default was `10.0.0.0/24` which will be removed in 6 months/2 releases. 
Cluster admins must specify their own desired value, by using `--service-cluster-ip-range` on kube-apiserver. ([#81668](https://github.com/kubernetes/kubernetes/pull/81668), [@darshanime](https://github.com/darshanime)) + +- kube-proxy + + - the `--resource-container` flag has been removed from kube-proxy, and specifying it will now cause an error. The behavior is now as if you specified `--resource-container=""`. If you previously specified a non-empty `--resource-container`, you can no longer do so as of kubernetes 1.16. ([#78294](https://github.com/kubernetes/kubernetes/pull/78294), [@vllry](https://github.com/vllry)) + +- kube-scheduler + + - Migrate scheduler to use v1beta1 Event API. any tool targeting scheduler events needs to use v1beta1 Event API ([#78447](https://github.com/kubernetes/kubernetes/pull/78447), [@yastij](https://github.com/yastij)) + +- kubeadm + + - The CoreDNS Deployment now checks readiness via the `ready` plugin. + - The `proxy` plugin has been deprecated. The `forward` plugin is to be used instead. + - `kubernetes` plugin removes the `resyncperiod` option. + - The `upstream` option is deprecated and ignored if included. + ([#82127](https://github.com/kubernetes/kubernetes/pull/82127), [@rajansandeep](https://github.com/rajansandeep)) + +- kubectl + + - `kubectl convert`, deprecated since v1.14, will be removed in v1.17. + - The `--export` flag for the `kubectl get` command, deprecated since v1.14, will be removed in v1.18. + - `kubectl cp` no longer supports copying symbolic links from containers; to support this use case, see `kubectl exec --help` for examples using `tar` directly ([#82143](https://github.com/kubernetes/kubernetes/pull/82143), [@soltysh](https://github.com/soltysh)) + - Removed deprecated flag `--include-uninitialized`. 
([#80337](https://github.com/kubernetes/kubernetes/pull/80337), [@draveness](https://github.com/draveness)) + +- kubelet + + - the `--containerized` flag was deprecated in 1.14 and has been removed ([#80043](https://github.com/kubernetes/kubernetes/pull/80043), [@dims](https://github.com/dims)) + - the `beta.kubernetes.io/os` and `beta.kubernetes.io/arch` labels, deprecated since v1.14, are targeted for removal in v1.18. + - cAdvisor json endpoints have been deprecated since 1.15. ([#78504](https://github.com/kubernetes/kubernetes/pull/78504), [@dashpole](https://github.com/dashpole)) + - removed the ability to set `kubernetes.io`- or `k8s.io`-prefixed labels via `--node-labels`, other than the [specifically allowed labels/prefixes](https://github.com/kubernetes/enhancements/blob/master/keps/sig-auth/0000-20170814-bounding-self-labeling-kubelets.md#proposal). ([#79305](https://github.com/kubernetes/kubernetes/pull/79305), [@paivagustavo](https://github.com/paivagustavo)) + +- client-go + - Remove `DirectCodecFactory` (replaced with `serializer.WithoutConversionCodecFactory`), `DirectEncoder` (replaced with `runtime.WithVersionEncoder`) and `DirectDecoder` (replaced with `runtime.WithoutVersionDecoder`). ([#79263](https://github.com/kubernetes/kubernetes/pull/79263), [@draveness](https://github.com/draveness)) + +## Metrics Changes + +### Added metrics + +- Added metrics `aggregator_openapi_v2_regeneration_count`, `aggregator_openapi_v2_regeneration_gauge` and `apiextension_openapi_v2_regeneration_count` counting the triggering APIService and CRDs and the reason (add, update, delete) when kube-apiserver regenerates the OpenAPI spec. ([#81786](https://github.com/kubernetes/kubernetes/pull/81786), [@sttts](https://github.com/sttts)) +- Added metrics `authentication_attempts` that can be used to understand the attempts of authentication. 
([#81509](https://github.com/kubernetes/kubernetes/pull/81509), [@RainbowMango](https://github.com/RainbowMango)) +- Add a new counter metrics `apiserver_admission_webhook_rejection_count` with details about the causing for a webhook rejection. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw)) +- NFS Drivers are now enabled to collect metrics, StatFS metrics provider is used to collect the metrics. (@brahmaroutu) ([#75805](https://github.com/kubernetes/kubernetes/pull/75805), [@brahmaroutu](https://github.com/brahmaroutu)) +- Add `container_sockets`, `container_threads`, and `container_threads_max` metrics ([#81972](https://github.com/kubernetes/kubernetes/pull/81972), [@dashpole](https://github.com/dashpole)) +- Add `container_state` label to `running_container_count` kubelet metrics, to get count of containers based on their state(running/exited/created/unknown) ([#81573](https://github.com/kubernetes/kubernetes/pull/81573), [@irajdeep](https://github.com/irajdeep)) +- Added metric `apiserver_watch_events_total` that can be used to understand the number of watch events in the system. ([#78732](https://github.com/kubernetes/kubernetes/pull/78732), [@mborsz](https://github.com/mborsz)) +- Added metric `apiserver_watch_events_sizes` that can be used to estimate sizes of watch events in the system. 
([#80477](https://github.com/kubernetes/kubernetes/pull/80477), [@mborsz](https://github.com/mborsz)) +- Added a new Prometheus counter metric `sync_proxy_rules_iptables_restore_failures_total` for kube-proxy iptables-restore failures (both ipvs and iptables modes) + ([#81210](https://github.com/kubernetes/kubernetes/pull/81210), [@figo](https://github.com/figo)) +- kubelet now exports an `kubelet_evictions` metric that counts the number of pod evictions carried out by the kubelet to reclaim resources ([#81377](https://github.com/kubernetes/kubernetes/pull/81377), [@sjenning](https://github.com/sjenning)) + +### Removed metrics + +- Removed cadvisor metric labels `pod_name` and `container_name` to match instrumentation guidelines. Any Prometheus queries that match `pod_name` and `container_name` labels (e.g. cadvisor or kubelet probe metrics) must be updated to use `pod` and `container` instead. ([#80376](https://github.com/kubernetes/kubernetes/pull/80376), [@ehashman](https://github.com/ehashman)) + +### Deprecated/changed metrics + +- kube-controller-manager and cloud-controller-manager metrics are now marked as with the ALPHA stability level. ([#81624](https://github.com/kubernetes/kubernetes/pull/81624), [@logicalhan](https://github.com/logicalhan)) +- kube-proxy metrics are now marked as with the ALPHA stability level. ([#81626](https://github.com/kubernetes/kubernetes/pull/81626), [@logicalhan](https://github.com/logicalhan)) +- kube-apiserver metrics are now marked as with the ALPHA stability level. ([#81531](https://github.com/kubernetes/kubernetes/pull/81531), [@logicalhan](https://github.com/logicalhan)) +- kubelet metrics for /metrics and /metrics/probes are now marked as with the ALPHA stability level. ([#81534](https://github.com/kubernetes/kubernetes/pull/81534), [@logicalhan](https://github.com/logicalhan)) +- Scheduler metrics are now marked as with the ALPHA stability level. 
([#81576](https://github.com/kubernetes/kubernetes/pull/81576), [@logicalhan](https://github.com/logicalhan)) +- The `rejected` label in `apiserver_admission_webhook_admission_duration_seconds` metrics now properly indicates if the request was rejected. ([#81399](https://github.com/kubernetes/kubernetes/pull/81399), [@roycaihw](https://github.com/roycaihw)) +- Fixed a bug in the CSI metrics that prevented a "not supported" error from being returned when a CSI driver does not support metrics. ([#79851](https://github.com/kubernetes/kubernetes/pull/79851), [@jparklab](https://github.com/jparklab)) +- Fix disk stats in LXD using ZFS storage pool and CRI-O missing network metrics bug ([#81972](https://github.com/kubernetes/kubernetes/pull/81972), [@dashpole](https://github.com/dashpole)) + +## Notable Features + +### Beta + +- Promote WatchBookmark feature to beta and enable it by default. + With WatchBookmark feature, clients are able to request watch events with BOOKMARK type. Clients should not assume bookmarks are returned at any specific interval, nor may they assume the server will send any BOOKMARK event during a session. ([#79786](https://github.com/kubernetes/kubernetes/pull/79786), [@wojtek-t](https://github.com/wojtek-t)) +- The server-side apply feature is now beta ([#81956](https://github.com/kubernetes/kubernetes/pull/81956), [@apelisse](https://github.com/apelisse)) +- Server-side apply will now use the openapi provided in the CRD validation field to help figure out how to correctly merge objects and update ownership. ([#77354](https://github.com/kubernetes/kubernetes/pull/77354), [@jennybuckley](https://github.com/jennybuckley)) +- The `CustomResourceDefaulting` feature is promoted to beta and enabled by default. Defaults may be specified in structural schemas via the `apiextensions.k8s.io/v1` API. See https://kubernetes.io/docs/tasks/access-kubernetes-api/custom-resources/custom-resource-definitions/#specifying-a-structural-schema for details. 
([#81872](https://github.com/kubernetes/kubernetes/pull/81872), [@sttts](https://github.com/sttts)) +- Finalizer Protection for Service LoadBalancers is now in beta (enabled by default). This feature ensures the Service resource is not fully deleted until the correlating load balancer resources are deleted. ([#81691](https://github.com/kubernetes/kubernetes/pull/81691), [@MrHohn](https://github.com/MrHohn)) +- Graduating Windows GMSA support from alpha to beta ([#82110](https://github.com/kubernetes/kubernetes/pull/82110), [@wk8](https://github.com/wk8)) + +### Alpha + +- Introduce a new admission controller for RuntimeClass. Initially, RuntimeClass will be used to apply the pod overhead associated with a given RuntimeClass to the Pod `spec` if a corresponding RuntimeClassName is specified. PodOverhead is an alpha feature as of Kubernetes 1.16. ([#78484](https://github.com/kubernetes/kubernetes/pull/78484), [@egernst](https://github.com/egernst)) +- Introduction of the pod overhead feature to the scheduler. This functionality is alpha-level as of + Kubernetes v1.16, and is only honored by servers that enable the PodOverhead feature.gate. ([#78319](https://github.com/kubernetes/kubernetes/pull/78319), [@egernst](https://github.com/egernst)) +- Ephemeral containers have been added in alpha. These temporary containers can be added to running pods for purposes such as debugging, similar to how `kubectl exec` runs a process in an existing container. Also like `kubectl exec`, no resources are reserved for ephemeral containers and they are not restarted when they exit. Note that container namespace targeting is not yet implemented, so [process namespace sharing](https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/) must be enabled to view process from other containers in the pod. ([#59484](https://github.com/kubernetes/kubernetes/pull/59484), [@verb](https://github.com/verb)) +- Pod spread constraints have been added in alpha. 
You can use these constraints to control how Pods are spread across the cluster among failure-domains. ([#77327](https://github.com/kubernetes/kubernetes/pull/77327), [#77760](https://github.com/kubernetes/kubernetes/pull/77760), [#77828](https://github.com/kubernetes/kubernetes/pull/77828), [#79062](https://github.com/kubernetes/kubernetes/pull/79062), [#80011](https://github.com/kubernetes/kubernetes/pull/80011), [#81068](https://github.com/kubernetes/kubernetes/pull/81068), [@Huang-Wei](https://github.com/Huang-Wei)) + +### CLI Improvements + +- the new flag `--endpoint-updates-batch-period` in kube-controller-manager can be used to reduce the number of endpoints updates generated by pod changes. ([#80509](https://github.com/kubernetes/kubernetes/pull/80509), [@mborsz](https://github.com/mborsz)) +- the kubectl `--all-namespaces` flag is now honored by `kubectl wait` ([#81468](https://github.com/kubernetes/kubernetes/pull/81468), [@ashutoshgngwr](https://github.com/ashutoshgngwr)) +- `kubectl get -w` now takes an `--output-watch-events` flag to indicate the event type (ADDED, MODIFIED, DELETED) ([#72416](https://github.com/kubernetes/kubernetes/pull/72416), [@liggitt](https://github.com/liggitt)) +- Adds Endpoint Slice support for kubectl when discovery API group is enabled. ([#81795](https://github.com/kubernetes/kubernetes/pull/81795), [@robscott](https://github.com/robscott)) + +### Misc + +- Add `--shutdown-delay-duration` to kube-apiserver in order to delay a graceful shutdown. `/healthz` will keep returning success during this time and requests are normally served, but `/readyz` will return failure immediately. This delay can be used to allow the SDN to update iptables on all nodes and stop sending traffic. ([#74416](https://github.com/kubernetes/kubernetes/pull/74416), [@sttts](https://github.com/sttts)) + Kubeadm now seamlessly migrates the CoreDNS Configuration when upgrading CoreDNS. 
([#78033](https://github.com/kubernetes/kubernetes/pull/78033), [@rajansandeep](https://github.com/rajansandeep)) +- Add Endpoint Slice Controller for managing new EndpointSlice resource, disabled by default. ([#81048](https://github.com/kubernetes/kubernetes/pull/81048), [@robscott](https://github.com/robscott)) +- Adds `/livez` for liveness health checking for kube-apiserver. Using the parameter `--maximum-startup-sequence-duration` will allow the liveness endpoint to defer boot-sequence failures for the specified duration period. ([#81969](https://github.com/kubernetes/kubernetes/pull/81969), [@logicalhan](https://github.com/logicalhan)) +- Adds EndpointSlice integration to kube-proxy, can be enabled with EndpointSlice feature gate. ([#81430](https://github.com/kubernetes/kubernetes/pull/81430), [@robscott](https://github.com/robscott)) +- Add status condition to namespace resource ([#73405](https://github.com/kubernetes/kubernetes/pull/73405), [@wozniakjan](https://github.com/wozniakjan)) +- Enhance Azure cloud provider code to support both AAD and ADFS authentication. 
([#80841](https://github.com/kubernetes/kubernetes/pull/80841), [@rjaini](https://github.com/rjaini)) +- kubeadm: implement support for concurrent add/remove of stacked etcd members ([#79677](https://github.com/kubernetes/kubernetes/pull/79677), [@neolit123](https://github.com/neolit123)) +- kubeadm: support any Linux kernel version newer than 3.10 ([#81623](https://github.com/kubernetes/kubernetes/pull/81623), [@neolit123](https://github.com/neolit123)) +- Volume expansion is enabled in the default GCE storageclass ([#78672](https://github.com/kubernetes/kubernetes/pull/78672), [@msau42](https://github.com/msau42)) +- kubeadm ClusterConfiguration now supports featureGates: IPv6DualStack: true ([#80145](https://github.com/kubernetes/kubernetes/pull/80145), [@Arvinderpal](https://github.com/Arvinderpal)) +- In order to enable dual-stack support within kubeadm and kubernetes components, as part of the init config file, the user should set feature-gate `IPv6DualStack=true` in the ClusterConfiguration. Additionally, for each worker node, the user should set the feature-gate for kubelet using either `nodeRegistration.kubeletExtraArgs` or `KUBELET_EXTRA_ARGS`. ([#80531](https://github.com/kubernetes/kubernetes/pull/80531), [@Arvinderpal](https://github.com/Arvinderpal)) +- Add possibility to configure controller manager to use IPv6 dual stack: + use `--cluster-cidr=","`. + Notes: + 1. Only the first two CIDRs are used (soft limits for Alpha, might be lifted later on). + 2. Only the "RangeAllocator" (default) is allowed as a value for `--cidr-allocator-type`. Cloud allocators are not compatible with IPv6 dual stack + ([#73977](https://github.com/kubernetes/kubernetes/pull/73977), [@khenidak](https://github.com/khenidak)) +- Add scheduling support for RuntimeClasses. RuntimeClasses can now specify nodeSelector constraints & tolerations, which are merged into the PodSpec for pods using that RuntimeClass. 
([#80825](https://github.com/kubernetes/kubernetes/pull/80825), [@tallclair](https://github.com/tallclair)) +- When specifying `--(kube|system)-reserved-cgroup`, with `--cgroup-driver=systemd`, it is now possible to use the fully qualified cgroupfs name (i.e. `/test-cgroup.slice`). ([#78793](https://github.com/kubernetes/kubernetes/pull/78793), [@mattjmcnaughton](https://github.com/mattjmcnaughton)) +- Adds support for vSphere volumes on Windows ([#80911](https://github.com/kubernetes/kubernetes/pull/80911), [@gab-satchi](https://github.com/gab-satchi)) + +## API Changes + +- The `MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration` APIs have been promoted to `admissionregistration.k8s.io/v1`: + - `failurePolicy` default changed from `Ignore` to `Fail` for v1 + - `matchPolicy` default changed from `Exact` to `Equivalent` for v1 + - `timeout` default changed from `30s` to `10s` for v1 + - `sideEffects` default value is removed, and the field made required, and only `None` and `NoneOnDryRun` are permitted for v1 + - `admissionReviewVersions` default value is removed and the field made required for v1 (supported versions for AdmissionReview are `v1` and `v1beta1`) + - The `name` field for specified webhooks must be unique for `MutatingWebhookConfiguration` and `ValidatingWebhookConfiguration` objects created via `admissionregistration.k8s.io/v1` +- The `AdmissionReview` API sent to and received from admission webhooks has been promoted to `admission.k8s.io/v1`. Webhooks can specify a preference for receiving `v1` AdmissionReview objects with `admissionReviewVersions: ["v1","v1beta1"]`, and must respond with an API object in the same `apiVersion` they are sent. 
When webhooks use `admission.k8s.io/v1`, the following additional validation is performed on their responses: + - `response.patch` and `response.patchType` are not permitted from validating admission webhooks + - `apiVersion: "admission.k8s.io/v1"` is required + - `kind: "AdmissionReview"` is required + - `response.uid: ""` is required + - `response.patchType: "JSONPatch"` is required (if `response.patch` is set) ([#80231](https://github.com/kubernetes/kubernetes/pull/80231), [@liggitt](https://github.com/liggitt)) +- The `CustomResourceDefinition` API type is promoted to `apiextensions.k8s.io/v1` with the following changes: + - Use of the new `default` feature in validation schemas is limited to v1 + - `spec.scope` is no longer defaulted to `Namespaced` and must be explicitly specified + - `spec.version` is removed in v1; use `spec.versions` instead + - `spec.validation` is removed in v1; use `spec.versions[*].schema` instead + - `spec.subresources` is removed in v1; use `spec.versions[*].subresources` instead + - `spec.additionalPrinterColumns` is removed in v1; use `spec.versions[*].additionalPrinterColumns` instead + - `spec.conversion.webhookClientConfig` is moved to `spec.conversion.webhook.clientConfig` in v1 + - `spec.conversion.conversionReviewVersions` is moved to `spec.conversion.webhook.conversionReviewVersions` in v1 + - `spec.versions[*].schema.openAPIV3Schema` is now required when creating v1 CustomResourceDefinitions + - `spec.preserveUnknownFields: true` is disallowed when creating v1 CustomResourceDefinitions; it must be specified within schema definitions as `x-kubernetes-preserve-unknown-fields: true` + - In `additionalPrinterColumns` items, the `JSONPath` field was renamed to `jsonPath` in v1 (fixes https://github.com/kubernetes/kubernetes/issues/66531) + The `apiextensions.k8s.io/v1beta1` version of `CustomResourceDefinition` is deprecated and will no longer be served in v1.19. 
([#79604](https://github.com/kubernetes/kubernetes/pull/79604), [@liggitt](https://github.com/liggitt)) +- The `ConversionReview` API sent to and received from custom resource CustomResourceDefinition conversion webhooks has been promoted to `apiextensions.k8s.io/v1`. CustomResourceDefinition conversion webhooks can now indicate they support receiving and responding with `ConversionReview` API objects in the `apiextensions.k8s.io/v1` version by including `v1` in the `conversionReviewVersions` list in their CustomResourceDefinition. Conversion webhooks must respond with a ConversionReview object in the same apiVersion they receive. `apiextensions.k8s.io/v1` `ConversionReview` responses must specify a `response.uid` that matches the `request.uid` of the object they were sent. ([#81476](https://github.com/kubernetes/kubernetes/pull/81476), [@liggitt](https://github.com/liggitt)) +- Add scheduling support for RuntimeClasses. RuntimeClasses can now specify nodeSelector constraints & tolerations, which are merged into the PodSpec for pods using that RuntimeClass. ([#80825](https://github.com/kubernetes/kubernetes/pull/80825), [@tallclair](https://github.com/tallclair)) +- Kubelet should now more reliably report the same primary node IP even if the set of node IPs reported by the CloudProvider changes. ([#79391](https://github.com/kubernetes/kubernetes/pull/79391), [@danwinship](https://github.com/danwinship)) +- Omit nil or empty field when calculating container hash value to avoid hash changed. For a new field with a non-nil default value in the container spec, the hash would still get changed. ([#57741](https://github.com/kubernetes/kubernetes/pull/57741), [@dixudx](https://github.com/dixudx)) +- Property `conditions` in `apiextensions.v1beta1.CustomResourceDefinitionStatus` and `apiextensions.v1.CustomResourceDefinitionStatus` is now optional instead of required. 
([#64996](https://github.com/kubernetes/kubernetes/pull/64996), [@roycaihw](https://github.com/roycaihw)) +- When the status of a CustomResourceDefinition condition changes, its corresponding `lastTransitionTime` is now updated. ([#69655](https://github.com/kubernetes/kubernetes/pull/69655), [@CaoShuFeng](https://github.com/CaoShuFeng)) + +## Other notable changes + +### API Machinery + +- Remove `GetReference()` and `GetPartialReference()` function from `pkg/api/ref`, as the same function exists also in `staging/src/k8s.io/client-go/tools/ref` ([#80361](https://github.com/kubernetes/kubernetes/pull/80361), [@wojtek-t](https://github.com/wojtek-t)) +- Verify that CRD default values in OpenAPI specs are pruned, with the exceptions of values under `metadata`. ([#78829](https://github.com/kubernetes/kubernetes/pull/78829), [@sttts](https://github.com/sttts)) +- Fixes a bug that when there is a "connection refused" error, the reflector's ListAndWatch func will return directly but what expected is that sleep 1 second and rewatch since the specified resourceVersion. + ([#81634](https://github.com/kubernetes/kubernetes/pull/81634), [@likakuli](https://github.com/likakuli)) +- Resolves an issue serving aggregated APIs backed by services that respond to requests to `/` with non-2xx HTTP responses ([#79895](https://github.com/kubernetes/kubernetes/pull/79895), [@deads2k](https://github.com/deads2k)) +- The CRD handler now properly re-creates stale CR storage to reflect CRD update. 
([#79114](https://github.com/kubernetes/kubernetes/pull/79114), [@roycaihw](https://github.com/roycaihw)) +- Fix CVE-2019-11247: API server allows access to custom resources via wrong scope ([#80750](https://github.com/kubernetes/kubernetes/pull/80750), [@sttts](https://github.com/sttts)) +- Fixed a bug with the openAPI definition for `io.k8s.apimachinery.pkg.runtime.RawExtension`, which previously required a field `raw` to be specified ([#80773](https://github.com/kubernetes/kubernetes/pull/80773), [@jennybuckley](https://github.com/jennybuckley)) +- Property `conditions` in `apiextensions.v1beta1.CustomResourceDefinitionStatus` and `apiextensions.v1.CustomResourceDefinitionStatus` is now optional instead of required. ([#64996](https://github.com/kubernetes/kubernetes/pull/64996), [@roycaihw](https://github.com/roycaihw)) +- Resolves a transient 404 response to custom resource requests during server startup ([#81244](https://github.com/kubernetes/kubernetes/pull/81244), [@liggitt](https://github.com/liggitt)) +- OpenAPI now advertises correctly supported patch types for custom resources ([#81515](https://github.com/kubernetes/kubernetes/pull/81515), [@liggitt](https://github.com/liggitt)) +- When the status of a CRD Condition changes, it's corresponding `LastTransitionTime` is now updated. ([#69655](https://github.com/kubernetes/kubernetes/pull/69655), [@CaoShuFeng](https://github.com/CaoShuFeng)) +- Add `metadata.generation=1` to old CustomResources. ([#82005](https://github.com/kubernetes/kubernetes/pull/82005), [@sttts](https://github.com/sttts)) +- Fix a bug in the apiserver that could cause a valid update request to be rejected with a precondition check failure. 
([#82303](https://github.com/kubernetes/kubernetes/pull/82303), [@roycaihw](https://github.com/roycaihw)) +- Fixes regression in logging spurious stack traces when proxied connections are closed by the backend ([#82588](https://github.com/kubernetes/kubernetes/pull/82588), [@liggitt](https://github.com/liggitt)) +- RateLimiter add a context-aware method, fix client-go request goruntine backlog in async timeout scene. ([#79375](https://github.com/kubernetes/kubernetes/pull/79375), [@answer1991](https://github.com/answer1991)) +- Add a `Patch` method to `ScaleInterface` ([#80699](https://github.com/kubernetes/kubernetes/pull/80699), [@knight42](https://github.com/knight42)) +- CRDs under k8s.io and kubernetes.io must have the `api-approved.kubernetes.io` set to either `unapproved.*` or a link to the pull request approving the schema. See https://github.com/kubernetes/enhancements/pull/1111 for more details. ([#79992](https://github.com/kubernetes/kubernetes/pull/79992), [@deads2k](https://github.com/deads2k)) +- KMS Providers will install a healthz check for the status of kms-plugin in kube-apiservers' encryption config. 
([#78540](https://github.com/kubernetes/kubernetes/pull/78540), [@immutableT](https://github.com/immutableT)) +- Improves validation errors for custom resources ([#81212](https://github.com/kubernetes/kubernetes/pull/81212), [@liggitt](https://github.com/liggitt)) +- Populate object name for admission attributes when CREATE ([#53185](https://github.com/kubernetes/kubernetes/pull/53185), [@dixudx](https://github.com/dixudx)) +- Add Overhead field to the PodSpec and RuntimeClass types as part of the Pod Overhead KEP ([#76968](https://github.com/kubernetes/kubernetes/pull/76968), [@egernst](https://github.com/egernst)) + +### Apps + +- Fix a bug that pods not be deleted from unmatched nodes by daemon controller ([#78974](https://github.com/kubernetes/kubernetes/pull/78974), [@DaiHao](https://github.com/DaiHao)) +- Fix a bug that causes DaemonSet rolling update hang when there exist failed pods. ([#78170](https://github.com/kubernetes/kubernetes/pull/78170), [@DaiHao](https://github.com/DaiHao)) + +### Auth + +- Service account tokens now include the JWT Key ID field in their header. ([#78502](https://github.com/kubernetes/kubernetes/pull/78502), [@ahmedtd](https://github.com/ahmedtd)) +- The nbf (not before) claim, if present in ID token, is now enforced. ([#81413](https://github.com/kubernetes/kubernetes/pull/81413), [@anderseknert](https://github.com/anderseknert)) + +### CLI + +- Fix CVE-2019-11249: Incomplete fixes for CVE-2019-1002101 and CVE-2019-11246, kubectl cp potential directory traversal ([#80436](https://github.com/kubernetes/kubernetes/pull/80436), [@M00nF1sh](https://github.com/M00nF1sh)) +- Fix the bash completion error with override flags. ([#80802](https://github.com/kubernetes/kubernetes/pull/80802), [@dtaniwaki](https://github.com/dtaniwaki)) +- Fix a bug in server printer that could cause kube-apiserver to panic. 
([#79349](https://github.com/kubernetes/kubernetes/pull/79349), [@roycaihw](https://github.com/roycaihw)) +- Fix invalid "time stamp is the future" error when kubectl cp-ing a file ([#73982](https://github.com/kubernetes/kubernetes/pull/73982), [@tanshanshan](https://github.com/tanshanshan)) +- Fix a bug where `kubectl set config` hangs and uses 100% CPU on some invalid property names ([#79000](https://github.com/kubernetes/kubernetes/pull/79000), [@pswica](https://github.com/pswica)) +- Fix output of `kubectl get --watch-only` when watching a single resource ([#79345](https://github.com/kubernetes/kubernetes/pull/79345), [@liggitt](https://github.com/liggitt)) +- Make kubectl get `--ignore-not-found` continue processing when encountering error. ([#82120](https://github.com/kubernetes/kubernetes/pull/82120), [@soltysh](https://github.com/soltysh)) +- Correct a reference to a not/no longer used kustomize subcommand in the documentation ([#82535](https://github.com/kubernetes/kubernetes/pull/82535), [@demobox](https://github.com/demobox)) +- kubectl could scale custom resource again ([#81342](https://github.com/kubernetes/kubernetes/pull/81342), [@knight42](https://github.com/knight42)) +- Add PodOverhead awareness to kubectl ([#81929](https://github.com/kubernetes/kubernetes/pull/81929), [@egernst](https://github.com/egernst)) + +### Cloud Provider + +- When a load balancer type service is created in a k8s cluster that is backed by Azure Standard Load Balancer, the corresponding load balancer rule added in the Azure Standard Load Balancer would now have the "EnableTcpReset" property set to true. 
([#80624](https://github.com/kubernetes/kubernetes/pull/80624), [@xuto2](https://github.com/xuto2)) +- Switch to VM Update call in attach/detach disk operation, original CreateOrUpdate call may lead to orphaned VMs or blocked resources ([#81208](https://github.com/kubernetes/kubernetes/pull/81208), [@andyzhangx](https://github.com/andyzhangx)) +- Fix azure disk naming matching issue due to case sensitive comparison ([#81720](https://github.com/kubernetes/kubernetes/pull/81720), [@andyzhangx](https://github.com/andyzhangx)) +- Fix retry issues when the nodes are under deleting on Azure ([#80419](https://github.com/kubernetes/kubernetes/pull/80419), [@feiskyer](https://github.com/feiskyer)) +- Fix conflicted cache when the requests are canceled by other Azure operations. ([#81282](https://github.com/kubernetes/kubernetes/pull/81282), [@feiskyer](https://github.com/feiskyer)) +- Fix make azure disk URI as case insensitive ([#79020](https://github.com/kubernetes/kubernetes/pull/79020), [@andyzhangx](https://github.com/andyzhangx)) +- Fix VMSS LoadBalancer backend pools so that the network won't be broken when instances are upgraded to latest model ([#81411](https://github.com/kubernetes/kubernetes/pull/81411), [@nilo19](https://github.com/nilo19)) +- Default resourceGroup should be used when the value of annotation azure-load-balancer-resource-group is an empty string. ([#79514](https://github.com/kubernetes/kubernetes/pull/79514), [@feiskyer](https://github.com/feiskyer)) +- Kubelet could be run with no Azure identity without subscriptionId configured now. + A sample cloud provider configure is: '{"vmType": "vmss", "useInstanceMetadata": true}'. 
([#81500](https://github.com/kubernetes/kubernetes/pull/81500), [@feiskyer](https://github.com/feiskyer)) +- Fix public IP not found issues for VMSS nodes ([#80703](https://github.com/kubernetes/kubernetes/pull/80703), [@feiskyer](https://github.com/feiskyer)) +- Fix Azure client requests stuck issues on http.StatusTooManyRequests (HTTP Code 429). ([#81279](https://github.com/kubernetes/kubernetes/pull/81279), [@feiskyer](https://github.com/feiskyer)) +- Add a service annotation `service.beta.kubernetes.io/azure-pip-name` to specify the public IP name for Azure load balancer. ([#81213](https://github.com/kubernetes/kubernetes/pull/81213), [@nilo19](https://github.com/nilo19)) +- Optimize EC2 DescribeInstances API calls in aws cloud provider library by querying instance ID instead of EC2 filters when possible ([#78140](https://github.com/kubernetes/kubernetes/pull/78140), [@zhan849](https://github.com/zhan849)) +- Creates an annotation `service.beta.kubernetes.io/aws-load-balancer-eip-allocations` to assign AWS EIP to the newly created Network Load Balancer. Number of allocations and subnets must match. ([#69263](https://github.com/kubernetes/kubernetes/pull/69263), [@brooksgarrett](https://github.com/brooksgarrett)) +- Add an azure cloud configuration `LoadBalancerName` and `LoadBalancerResourceGroup` to allow the corresponding customizations of azure load balancer. 
([#81054](https://github.com/kubernetes/kubernetes/pull/81054), [@nilo19](https://github.com/nilo19)) + +### Cluster Lifecycle + +- Fix error handling and potential go null pointer exception in kubeadm upgrade diff ([#80648](https://github.com/kubernetes/kubernetes/pull/80648), [@odinuge](https://github.com/odinuge)) +- kubeadm: fall back to client version in case of certain HTTP errors ([#80024](https://github.com/kubernetes/kubernetes/pull/80024), [@RainbowMango](https://github.com/RainbowMango)) +- kubeadm: fix a potential panic if kubeadm discovers an invalid, existing kubeconfig file ([#79165](https://github.com/kubernetes/kubernetes/pull/79165), [@neolit123](https://github.com/neolit123)) +- kubeadm: treat non-fatal errors as warnings when doing reset ([#80862](https://github.com/kubernetes/kubernetes/pull/80862), [@drpaneas](https://github.com/drpaneas)) +- kubeadm: prevent PSP blocking of upgrade image prepull by using a non-root user ([#77792](https://github.com/kubernetes/kubernetes/pull/77792), [@neolit123](https://github.com/neolit123)) +- kubeadm: fix "certificate-authority" files not being pre-loaded when using file discovery ([#80966](https://github.com/kubernetes/kubernetes/pull/80966), [@neolit123](https://github.com/neolit123)) +- Add instruction to setup "Application Default Credentials" to run GCE Windows e2e tests locally. ([#81337](https://github.com/kubernetes/kubernetes/pull/81337), [@YangLu1031](https://github.com/YangLu1031)) +- Fix error in `kubeadm join --discovery-file` when using discovery files with embedded credentials ([#80675](https://github.com/kubernetes/kubernetes/pull/80675), [@fabriziopandini](https://github.com/fabriziopandini)) +- Fix remove the etcd member from the cluster during a kubeadm reset. 
([#79326](https://github.com/kubernetes/kubernetes/pull/79326), [@bradbeam](https://github.com/bradbeam)) +- kubeadm: the permissions of generated CSR files are changed from 0644 to 0600 ([#81217](https://github.com/kubernetes/kubernetes/pull/81217), [@SataQiu](https://github.com/SataQiu)) +- kubeadm: avoid double deletion of the upgrade prepull DaemonSet ([#80798](https://github.com/kubernetes/kubernetes/pull/80798), [@xlgao-zju](https://github.com/xlgao-zju)) +- kubeadm: introduce deterministic ordering for the certificates generation in the phase command `kubeadm init phase certs`. ([#78556](https://github.com/kubernetes/kubernetes/pull/78556), [@neolit123](https://github.com/neolit123)) +- kubeadm: implement retry logic for certain ConfigMap failures when joining nodes ([#78915](https://github.com/kubernetes/kubernetes/pull/78915), [@ereslibre](https://github.com/ereslibre)) +- kubeadm: use etcd's /health endpoint for a HTTP liveness probe on localhost instead of having a custom health check using etcdctl ([#81385](https://github.com/kubernetes/kubernetes/pull/81385), [@neolit123](https://github.com/neolit123)) +- kubeadm reset: unmount directories under `/var/lib/kubelet` for Linux only ([#81494](https://github.com/kubernetes/kubernetes/pull/81494), [@Klaven](https://github.com/Klaven)) +- kubeadm: fix the bug that `--cri-socket` flag does not work for `kubeadm reset` ([#79498](https://github.com/kubernetes/kubernetes/pull/79498), [@SataQiu](https://github.com/SataQiu)) +- kubeadm: produce errors if they occur when resetting cluster status for a control-plane node ([#80573](https://github.com/kubernetes/kubernetes/pull/80573), [@bart0sh](https://github.com/bart0sh)) +- Fix an error when using external etcd but storing etcd certificates in the same folder with the same name used by kubeadm for local etcd certificates; for an older version of kubeadm, the workaround is to avoid file name used by kubeadm for local etcd. 
([#80867](https://github.com/kubernetes/kubernetes/pull/80867), [@fabriziopandini](https://github.com/fabriziopandini)) +- `kubeadm join` fails if file-based discovery is too long, with a default timeout of 5 minutes. ([#80804](https://github.com/kubernetes/kubernetes/pull/80804), [@olivierlemasle](https://github.com/olivierlemasle)) +- kubeadm: fixed ignoring errors when pulling control plane images ([#80529](https://github.com/kubernetes/kubernetes/pull/80529), [@bart0sh](https://github.com/bart0sh)) +- Fix a bug in kube-addon-manager's leader election logic that made all replicas active. ([#80575](https://github.com/kubernetes/kubernetes/pull/80575), [@mborsz](https://github.com/mborsz)) +- kubeadm: prevent overriding of certain kubelet security configuration parameters if the user wished to modify them ([#81903](https://github.com/kubernetes/kubernetes/pull/81903), [@jfbai](https://github.com/jfbai)) +- kubeadm no longer performs IPVS checks as part of its preflight checks ([#81791](https://github.com/kubernetes/kubernetes/pull/81791), [@yastij](https://github.com/yastij)) +- kubeadm: fix for HTTPProxy check for IPv6 addresses ([#82267](https://github.com/kubernetes/kubernetes/pull/82267), [@kad](https://github.com/kad)) +- kubeadm: Allow users to skip the kube-proxy init addon phase during init and still be able to join a cluster and perform some other minor operations (but not upgrade). ([#82248](https://github.com/kubernetes/kubernetes/pull/82248), [@rosti](https://github.com/rosti)) +- Mounts `/home/kubernetes/bin/nvidia/vulkan/icd.d` on the host to `/etc/vulkan/icd.d` inside containers requesting GPU. ([#78868](https://github.com/kubernetes/kubernetes/pull/78868), [@chardch](https://github.com/chardch)) +- kubeadm: use the `--pod-network-cidr` flag to init or use the podSubnet field in the kubeadm config to pass a comma separated list of pod CIDRs. 
([#79033](https://github.com/kubernetes/kubernetes/pull/79033), [@Arvinderpal](https://github.com/Arvinderpal)) +- kubeadm: provide `--control-plane-endpoint` flag for `controlPlaneEndpoint` ([#79270](https://github.com/kubernetes/kubernetes/pull/79270), [@SataQiu](https://github.com/SataQiu)) +- kubeadm: enable secure serving for the kube-scheduler ([#80951](https://github.com/kubernetes/kubernetes/pull/80951), [@neolit123](https://github.com/neolit123)) +- kubeadm: print the stack trace of an error for klog level `--v>=5` ([#80937](https://github.com/kubernetes/kubernetes/pull/80937), [@neolit123](https://github.com/neolit123)) +- Add `--kubernetes-version` to `kubeadm init phase certs ca` and `kubeadm init phase kubeconfig` ([#80115](https://github.com/kubernetes/kubernetes/pull/80115), [@gyuho](https://github.com/gyuho)) +- kubeadm: support fetching configuration from the original cluster for `upgrade diff` ([#80025](https://github.com/kubernetes/kubernetes/pull/80025), [@SataQiu](https://github.com/SataQiu)) +- When using the conformance test image, a new environment variable `E2E_USE_GO_RUNNER` will cause the tests to be run with the new golang-based test runner rather than the current bash wrapper. ([#79284](https://github.com/kubernetes/kubernetes/pull/79284), [@johnSchnake](https://github.com/johnSchnake)) +- Implement a new feature that allows applying kustomize patches to static pod manifests generated by kubeadm. ([#80905](https://github.com/kubernetes/kubernetes/pull/80905), [@fabriziopandini](https://github.com/fabriziopandini)) +- The 404 request handler for the GCE Ingress load balancer now exports prometheus metrics, including: + + - `http_404_request_total` (the number of 404 requests handled) + - `http_404_request_duration_ms` (the amount of time the server took to respond in ms) + + Also includes percentile groupings. The directory for the default 404 handler includes instructions on how to enable prometheus for monitoring and setting alerts. 
+ ([#79106](https://github.com/kubernetes/kubernetes/pull/79106), [@vbannai](https://github.com/vbannai)) + +### Instrumentation + +- Kibana has been slightly revamped/improved in the latest version ([#80421](https://github.com/kubernetes/kubernetes/pull/80421), [@lostick](https://github.com/lostick)) + +### Network + +- Fix a string comparison bug in IPVS graceful termination where UDP real servers are not deleted. ([#78999](https://github.com/kubernetes/kubernetes/pull/78999), [@andrewsykim](https://github.com/andrewsykim)) +- `kube-proxy --cleanup` will return the correct exit code if the cleanup was successful ([#78775](https://github.com/kubernetes/kubernetes/pull/78775), [@johscheuer](https://github.com/johscheuer)) +- Fix a bug in the IPVS proxier where virtual servers are not cleaned up even though the corresponding Service object was deleted. ([#80942](https://github.com/kubernetes/kubernetes/pull/80942), [@gongguan](https://github.com/gongguan)) +- kube-proxy waits for some duration for the node to be defined. ([#77167](https://github.com/kubernetes/kubernetes/pull/77167), [@paulsubrata55](https://github.com/paulsubrata55)) +- Increase log level for graceful termination to `v=5` ([#80100](https://github.com/kubernetes/kubernetes/pull/80100), [@andrewsykim](https://github.com/andrewsykim)) +- Reduce kube-proxy CPU usage in IPVS mode when a large number of nodePort services exist. ([#79444](https://github.com/kubernetes/kubernetes/pull/79444), [@cezarsa](https://github.com/cezarsa)) +- Fix in kube-proxy for SCTP nodeport service which only works for node's InternalIP, but doesn't work for other IPs present in the node when ipvs is enabled. ([#81477](https://github.com/kubernetes/kubernetes/pull/81477), [@paulsubrata55](https://github.com/paulsubrata55)) +- Ensure the `KUBE-MARK-DROP` chain in kube-proxy IPVS mode. The chain is ensured for both IPv4 and IPv6 in dual-stack operation. 
([#82214](https://github.com/kubernetes/kubernetes/pull/82214), [@uablrek](https://github.com/uablrek)) +- Introduce `node.kubernetes.io/exclude-balancer` and `node.kubernetes.io/exclude-disruption` labels in alpha to prevent cluster deployers from being dependent on the optional `node-role` labels which not all clusters may provide. ([#80238](https://github.com/kubernetes/kubernetes/pull/80238), [@smarterclayton](https://github.com/smarterclayton)) +- If targetPort is changed that will process by service controller ([#77712](https://github.com/kubernetes/kubernetes/pull/77712), [@Sn0rt](https://github.com/Sn0rt)) + +### Node + +- Remove PIDs cgroup controller requirement when related feature gates are disabled + ([#79073](https://github.com/kubernetes/kubernetes/pull/79073), [@rafatio](https://github.com/rafatio)) +- Fix kubelet NodeLease potential performance issues. Kubelet now will try to update lease using cached one instead of get from API Server every time. ([#81174](https://github.com/kubernetes/kubernetes/pull/81174), [@answer1991](https://github.com/answer1991)) +- Passing an invalid policy name in the `--cpu-manager-policy` flag will now cause the kubelet to fail instead of simply ignoring the flag and running the `cpumanager`’s default policy instead. ([#80294](https://github.com/kubernetes/kubernetes/pull/80294), [@klueska](https://github.com/klueska)) +- Make node lease renew interval more heuristic based on node-status-update-frequency in kubelet ([#80173](https://github.com/kubernetes/kubernetes/pull/80173), [@gaorong](https://github.com/gaorong)) +- Kubelet should now more reliably report the same primary node IP even if the set of node IPs reported by the CloudProvider changes. ([#79391](https://github.com/kubernetes/kubernetes/pull/79391), [@danwinship](https://github.com/danwinship)) +- Omit `nil` or empty field when calculating container hash value to avoid hash changed. 
For a new field with a non-nil default value in the container spec, the hash would still get changed. ([#57741](https://github.com/kubernetes/kubernetes/pull/57741), [@dixudx](https://github.com/dixudx)) +- Fix a bug where kubelet would not retry pod sandbox creation when the restart policy of the pod is Never ([#79451](https://github.com/kubernetes/kubernetes/pull/79451), [@yujuhong](https://github.com/yujuhong)) +- Limit the body length of exec readiness/liveness probes. remote CRIs and Docker shim read a max of 16MB output of which the exec probe itself inspects 10kb. ([#82514](https://github.com/kubernetes/kubernetes/pull/82514), [@dims](https://github.com/dims)) +- Single static pod files and pod files from http endpoints cannot be larger than 10 MB. HTTP probe payloads are now truncated to 10KB. ([#82669](https://github.com/kubernetes/kubernetes/pull/82669), [@rphillips](https://github.com/rphillips)) +- Introduce support for applying pod overhead to pod cgroups, if the PodOverhead feature is enabled. ([#79247](https://github.com/kubernetes/kubernetes/pull/79247), [@egernst](https://github.com/egernst)) +- Node-Problem-Detector v0.7.1 is used on GCI ([#80726](https://github.com/kubernetes/kubernetes/pull/80726), [@wangzhen127](https://github.com/wangzhen127)) +- Node-Problem-Detector v0.7.1 is used for addon daemonset. ([#82140](https://github.com/kubernetes/kubernetes/pull/82140), [@wangzhen127](https://github.com/wangzhen127)) +- Enable cAdvisor ProcessMetrics collecting. 
([#79002](https://github.com/kubernetes/kubernetes/pull/79002), [@jiayingz](https://github.com/jiayingz)) +- kubelet: change `node-lease-renew-interval` to 0.25 of lease-renew-duration ([#80429](https://github.com/kubernetes/kubernetes/pull/80429), [@gaorong](https://github.com/gaorong)) +- Attempt to set the kubelet's hostname & internal IP if `--cloud-provider=external` and no node addresses exist ([#75229](https://github.com/kubernetes/kubernetes/pull/75229), [@andrewsykim](https://github.com/andrewsykim)) + +### Scheduling + +- Scheduler should terminate when it loses leader lock. ([#81306](https://github.com/kubernetes/kubernetes/pull/81306), [@ravisantoshgudimetla](https://github.com/ravisantoshgudimetla)) +- If scheduler extender filtered a not found node, current scheduling round for this pod will just be skipped. + ([#79641](https://github.com/kubernetes/kubernetes/pull/79641), [@yqwang-ms](https://github.com/yqwang-ms)) +- Extender bind should respect IsInterested ([#79804](https://github.com/kubernetes/kubernetes/pull/79804), [@yqwang-ms](https://github.com/yqwang-ms)) +- Fix an issue with toleration merging & whitelist checking in the PodTolerationRestriction admission controller. ([#81732](https://github.com/kubernetes/kubernetes/pull/81732), [@tallclair](https://github.com/tallclair)) +- Add a helper function to decode scheduler plugin args. ([#80696](https://github.com/kubernetes/kubernetes/pull/80696), [@hex108](https://github.com/hex108)) +- Fix an issue where filter plugins were not called during preemption ([#81876](https://github.com/kubernetes/kubernetes/pull/81876), [@wgliang](https://github.com/wgliang)) +- Fix an issue that the correct PluginConfig.Args is not passed to the corresponding PluginFactory in kube-scheduler when multiple PluginConfig items are defined. ([#82483](https://github.com/kubernetes/kubernetes/pull/82483), [@everpeace](https://github.com/everpeace)) +- Take the context as the first argument of Schedule. 
([#82119](https://github.com/kubernetes/kubernetes/pull/82119), [@wgliang](https://github.com/wgliang)) +- Implement `post-filter` extension point for scheduling framework ([#78097](https://github.com/kubernetes/kubernetes/pull/78097), [@draveness](https://github.com/draveness)) +- Add Bind extension point of the scheduling framework ([#78513](https://github.com/kubernetes/kubernetes/pull/78513), [@chenchun](https://github.com/chenchun)) +- Add Filter extension point to the scheduling framework. ([#78477](https://github.com/kubernetes/kubernetes/pull/78477), [@YoubingLi](https://github.com/YoubingLi)) +- Return error when the scoring plugin returns score out of range `[0, 100]`. ([#81015](https://github.com/kubernetes/kubernetes/pull/81015), [@draveness](https://github.com/draveness)) +- Use a named array instead of a score array in normalizing-score phase. ([#80901](https://github.com/kubernetes/kubernetes/pull/80901), [@draveness](https://github.com/draveness)) +- Updates the `requestedToCapacityRatioArguments` to add resources parameter that allows the users to specify the resource name along with weights for each resource to score nodes based on the request to capacity ratio. ([#77688](https://github.com/kubernetes/kubernetes/pull/77688), [@sudeshsh](https://github.com/sudeshsh)) +- Add `UnschedulableAndUnresolvable` status code for scheduling framework ([#82034](https://github.com/kubernetes/kubernetes/pull/82034), [@alculquicondor](https://github.com/alculquicondor)) +- Add normalize plugin extension point for the scheduling framework. + ([#80383](https://github.com/kubernetes/kubernetes/pull/80383), [@liu-cong](https://github.com/liu-cong)) +- Add Bind extension point to the scheduling framework. ([#79313](https://github.com/kubernetes/kubernetes/pull/79313), [@chenchun](https://github.com/chenchun)) +- Add Score extension point to the scheduling framework. 
([#79109](https://github.com/kubernetes/kubernetes/pull/79109), [@ahg-g](https://github.com/ahg-g)) +- Add Pre-filter extension point to the scheduling framework. ([#78005](https://github.com/kubernetes/kubernetes/pull/78005), [@ahg-g](https://github.com/ahg-g)) +- Add support for writing out of tree custom scheduler plugins. ([#78162](https://github.com/kubernetes/kubernetes/pull/78162), [@hex108](https://github.com/hex108)) + +### Storage + +- Fix possible file descriptor leak and closing of dirs in `doSafeMakeDir` ([#79534](https://github.com/kubernetes/kubernetes/pull/79534), [@odinuge](https://github.com/odinuge)) +- Azure disks of shared kind will no longer fail if they do not contain `skuname` or `storageaccounttype`. ([#80837](https://github.com/kubernetes/kubernetes/pull/80837), [@rmweir](https://github.com/rmweir)) +- Fix CSI plugin supporting raw block that does not need attach mounted failed ([#79920](https://github.com/kubernetes/kubernetes/pull/79920), [@cwdsuzhou](https://github.com/cwdsuzhou)) +- Reduces GCE PD Node Attach Limits by 1 since the node boot disk is considered an attachable disk ([#80923](https://github.com/kubernetes/kubernetes/pull/80923), [@davidz627](https://github.com/davidz627)) +- Remove iSCSI volume storage cleartext secrets in logs ([#81215](https://github.com/kubernetes/kubernetes/pull/81215), [@zouyee](https://github.com/zouyee)) +- Fixes validation of VolumeAttachment API objects created with inline volume sources. 
([#80945](https://github.com/kubernetes/kubernetes/pull/80945), [@tedyu](https://github.com/tedyu)) +- Changes timeout value in csi plugin from 15s to 2min which fixes the timeout issue ([#79529](https://github.com/kubernetes/kubernetes/pull/79529), [@andyzhangx](https://github.com/andyzhangx)) +- Fix kubelet fail to delete orphaned pod directory when the kubelet's pods directory (default is `/var/lib/kubelet/pods`) symbolically links to another disk device's directory ([#79094](https://github.com/kubernetes/kubernetes/pull/79094), [@gaorong](https://github.com/gaorong)) + +## Testing + +- Fix pod list return value of `framework.WaitForPodsWithLabelRunningReady` ([#78687](https://github.com/kubernetes/kubernetes/pull/78687), [@pohly](https://github.com/pohly)) +- Adding `TerminationGracePeriodSeconds` to the test framework API ([#82170](https://github.com/kubernetes/kubernetes/pull/82170), [@vivekbagade](https://github.com/vivekbagade)) +- `/test/e2e/framework`: Adds a flag `non-blocking-taints` which allows tests to run in environments with tainted nodes. String value should be a comma-separated list. ([#81043](https://github.com/kubernetes/kubernetes/pull/81043), [@johnSchnake](https://github.com/johnSchnake)) +- Move CSI volume expansion to beta. ([#81467](https://github.com/kubernetes/kubernetes/pull/81467), [@bertinatto](https://github.com/bertinatto)) +- Added E2E tests validating WindowsOptions.RunAsUserName. ([#79539](https://github.com/kubernetes/kubernetes/pull/79539), [@bclau](https://github.com/bclau)) +- `framework.ExpectNoError` no longer logs the error and instead relies on using the new `log.Fail` as gomega fail handler. ([#80253](https://github.com/kubernetes/kubernetes/pull/80253), [@pohly](https://github.com/pohly)) + +### Windows + +- On Windows systems, `%USERPROFILE%` is now preferred over `%HOMEDRIVE%\%HOMEPATH%` as the home folder if `%HOMEDRIVE%\%HOMEPATH%` does not contain a `.kube\config` file, and `%USERPROFILE%` exists and is writable. 
([#73923](https://github.com/kubernetes/kubernetes/pull/73923), [@liggitt](https://github.com/liggitt)) +- Add support for AWS EBS on windows ([#79552](https://github.com/kubernetes/kubernetes/pull/79552), [@wongma7](https://github.com/wongma7)) +- Support Kubelet plugin watcher on Windows nodes. ([#81397](https://github.com/kubernetes/kubernetes/pull/81397), [@ddebroy](https://github.com/ddebroy)) + +## Dependencies + +### Changed + +- the default Go version was updated to v1.12.9. ([#78958](https://github.com/kubernetes/kubernetes/pull/78958), [#79966](https://github.com/kubernetes/kubernetes/pull/79966), [#81390](https://github.com/kubernetes/kubernetes/pull/81390), [#81489](https://github.com/kubernetes/kubernetes/pull/81489)) +- etcd has been updated to v3.3.15 ([#82199](https://github.com/kubernetes/kubernetes/pull/82199), [@dims](https://github.com/dims)) +- CoreDNS for kubeadm and kube-up has been updated to v1.6.2 ([#82127](https://github.com/kubernetes/kubernetes/pull/82127)) +- Cluster Autoscaler has been updated to v1.16.0 ([#82501](https://github.com/kubernetes/kubernetes/pull/82501), [@losipiuk](https://github.com/losipiuk)) +- fluentd has been updated to v1.5.1 ([#79014](https://github.com/kubernetes/kubernetes/pull/79014)) +- fluentd-elasticsearch plugin has been updated to v3.5.3 ([#79014](https://github.com/kubernetes/kubernetes/pull/79014)) +- elasticsearch has been updated to v7.1.1 ([#79014](https://github.com/kubernetes/kubernetes/pull/79014)) +- kibana has been updated to v7.1.1 ([#79014](https://github.com/kubernetes/kubernetes/pull/79014)) +- Azure SDK and go-autorest API versions have been updated ([#79574](https://github.com/kubernetes/kubernetes/pull/79574)) +- Azure API versions have been updated (container registry to 2018-09-01, network to 2018-08-01) ([#79583](https://github.com/kubernetes/kubernetes/pull/79583)) +- kube-addon-manager has been updated to v9.0.2 ([#80861](https://github.com/kubernetes/kubernetes/pull/80861)) +- 
golang/x/net has been updated to bring in fixes for CVE-2019-9512, CVE-2019-9514 ([#81394](https://github.com/kubernetes/kubernetes/pull/81394)) +- GCE windows node image has been updated. ([#81106](https://github.com/kubernetes/kubernetes/pull/81106)) +- portworx plugin has been updated on libopenstorage/openstorage to v1.0.0 ([#80495](https://github.com/kubernetes/kubernetes/pull/80495)) +- metrics-server has been updated to v0.3.4 ([#82322](https://github.com/kubernetes/kubernetes/pull/82322), [@olagacek](https://github.com/olagacek)) +- klog has been updated to v0.4.0 ([#81164](https://github.com/kubernetes/kubernetes/pull/81164)) + +### Unchanged + +- The list of validated docker versions remains unchanged. + - The current list is 1.13.1, 17.03, 17.06, 17.09, 18.06, 18.09. ([#72823](https://github.com/kubernetes/kubernetes/pull/72823), [#72831](https://github.com/kubernetes/kubernetes/pull/72831)) +- CNI remains unchanged at v0.7.5. ([#75455](https://github.com/kubernetes/kubernetes/pull/75455)) +- cri-tools remains unchanged at v1.14.0. ([#75658](https://github.com/kubernetes/kubernetes/pull/75658)) +- CAdvisor remains unchanged at v0.33.2. ([#76291](https://github.com/kubernetes/kubernetes/pull/76291)) +- event-exporter remains unchanged at v0.2.5. ([#77815](https://github.com/kubernetes/kubernetes/pull/77815)) +- ip-masq-agent remains unchanged at v2.4.1. ([#77844](https://github.com/kubernetes/kubernetes/pull/77844)) +- k8s-dns-node-cache remains unchanged at v1.15.1 ([#76640](https://github.com/kubernetes/kubernetes/pull/76640), [@george-angel](https://github.com/george-angel)) +- CSI remains unchanged at v1.1.0. ([#75391](https://github.com/kubernetes/kubernetes/pull/75391)) +- The dashboard add-on remains unchanged at v1.10.1. ([#72495](https://github.com/kubernetes/kubernetes/pull/72495)) +- kube-dns is unchanged at v1.14.13 as of Kubernetes 1.12. 
([#68900](https://github.com/kubernetes/kubernetes/pull/68900)) +- Influxdb is unchanged at v1.3.3 as of Kubernetes 1.10. ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- Grafana is unchanged at v4.4.3 as of Kubernetes 1.10. ([#53319](https://github.com/kubernetes/kubernetes/pull/53319)) +- The fluent-plugin-kubernetes_metadata_filter plugin in fluentd-elasticsearch is unchanged at v2.1.6. ([#71180](https://github.com/kubernetes/kubernetes/pull/71180)) +- fluentd-gcp is unchanged at v3.2.0 as of Kubernetes 1.13. ([#70954](https://github.com/kubernetes/kubernetes/pull/70954)) +- OIDC authentication is unchanged at coreos/go-oidc v2 as of Kubernetes 1.10. ([#58544](https://github.com/kubernetes/kubernetes/pull/58544)) +- Calico is unchanged at v3.3.1 as of Kubernetes 1.13. ([#70932](https://github.com/kubernetes/kubernetes/pull/70932)) +- GLBC remains unchanged at v1.2.3 as of Kubernetes 1.12. ([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) +- Ingress-gce remains unchanged at v1.2.3 as of Kubernetes 1.12. 
([#66793](https://github.com/kubernetes/kubernetes/pull/66793)) + +### Removed + +- Remove deprecated github.com/kardianos/osext dependency ([#80142](https://github.com/kubernetes/kubernetes/pull/80142)) + +### Detailed go Dependency Changes + +#### Added + +- github.com/Azure/go-autorest/autorest/adal: [v0.5.0](https://github.com/Azure/go-autorest/autorest/adal/tree/v0.5.0) +- github.com/Azure/go-autorest/autorest/date: [v0.1.0](https://github.com/Azure/go-autorest/autorest/date/tree/v0.1.0) +- github.com/Azure/go-autorest/autorest/mocks: [v0.2.0](https://github.com/Azure/go-autorest/autorest/mocks/tree/v0.2.0) +- github.com/Azure/go-autorest/autorest/to: [v0.2.0](https://github.com/Azure/go-autorest/autorest/to/tree/v0.2.0) +- github.com/Azure/go-autorest/autorest/validation: [v0.1.0](https://github.com/Azure/go-autorest/autorest/validation/tree/v0.1.0) +- github.com/Azure/go-autorest/autorest: [v0.9.0](https://github.com/Azure/go-autorest/autorest/tree/v0.9.0) +- github.com/Azure/go-autorest/logger: [v0.1.0](https://github.com/Azure/go-autorest/logger/tree/v0.1.0) +- github.com/Azure/go-autorest/tracing: [v0.5.0](https://github.com/Azure/go-autorest/tracing/tree/v0.5.0) +- github.com/armon/consul-api: [eb2c6b5](https://github.com/armon/consul-api/tree/eb2c6b5) +- github.com/bifurcation/mint: [93c51c6](https://github.com/bifurcation/mint/tree/93c51c6) +- github.com/caddyserver/caddy: [v1.0.3](https://github.com/caddyserver/caddy/tree/v1.0.3) +- github.com/cenkalti/backoff: [v2.1.1+incompatible](https://github.com/cenkalti/backoff/tree/v2.1.1) +- github.com/checkpoint-restore/go-criu: [bdb7599](https://github.com/checkpoint-restore/go-criu/tree/bdb7599) +- github.com/cheekybits/genny: [9127e81](https://github.com/cheekybits/genny/tree/9127e81) +- github.com/coredns/corefile-migration: [v1.0.2](https://github.com/coredns/corefile-migration/tree/v1.0.2) +- github.com/coreos/go-etcd: [v2.0.0+incompatible](https://github.com/coreos/go-etcd/tree/v2.0.0) +- 
github.com/dustin/go-humanize: [v1.0.0](https://github.com/dustin/go-humanize/tree/v1.0.0) +- github.com/fatih/color: [v1.6.0](https://github.com/fatih/color/tree/v1.6.0) +- github.com/flynn/go-shlex: [3f9db97](https://github.com/flynn/go-shlex/tree/3f9db97) +- github.com/go-acme/lego: [v2.5.0+incompatible](https://github.com/go-acme/lego/tree/v2.5.0) +- github.com/go-bindata/go-bindata: [v3.1.1+incompatible](https://github.com/go-bindata/go-bindata/tree/v3.1.1) +- github.com/go-logr/logr: [v0.1.0](https://github.com/go-logr/logr/tree/v0.1.0) +- github.com/google/martian: [v2.1.0+incompatible](https://github.com/google/martian/tree/v2.1.0) +- github.com/google/pprof: [3ea8567](https://github.com/google/pprof/tree/3ea8567) +- github.com/google/renameio: [v0.1.0](https://github.com/google/renameio/tree/v0.1.0) +- github.com/googleapis/gax-go/v2: [v2.0.4](https://github.com/googleapis/gax-go/v2/tree/v2.0.4) +- github.com/hashicorp/go-syslog: [v1.0.0](https://github.com/hashicorp/go-syslog/tree/v1.0.0) +- github.com/jimstudt/http-authentication: [3eca13d](https://github.com/jimstudt/http-authentication/tree/3eca13d) +- github.com/kisielk/errcheck: [v1.2.0](https://github.com/kisielk/errcheck/tree/v1.2.0) +- github.com/kisielk/gotool: [v1.0.0](https://github.com/kisielk/gotool/tree/v1.0.0) +- github.com/klauspost/cpuid: [v1.2.0](https://github.com/klauspost/cpuid/tree/v1.2.0) +- github.com/kr/pty: [v1.1.5](https://github.com/kr/pty/tree/v1.1.5) +- github.com/kylelemons/godebug: [d65d576](https://github.com/kylelemons/godebug/tree/d65d576) +- github.com/lucas-clemente/aes12: [cd47fb3](https://github.com/lucas-clemente/aes12/tree/cd47fb3) +- github.com/lucas-clemente/quic-clients: [v0.1.0](https://github.com/lucas-clemente/quic-clients/tree/v0.1.0) +- github.com/lucas-clemente/quic-go-certificates: [d2f8652](https://github.com/lucas-clemente/quic-go-certificates/tree/d2f8652) +- github.com/lucas-clemente/quic-go: 
[v0.10.2](https://github.com/lucas-clemente/quic-go/tree/v0.10.2) +- github.com/marten-seemann/qtls: [v0.2.3](https://github.com/marten-seemann/qtls/tree/v0.2.3) +- github.com/mattn/go-colorable: [v0.0.9](https://github.com/mattn/go-colorable/tree/v0.0.9) +- github.com/mattn/go-isatty: [v0.0.3](https://github.com/mattn/go-isatty/tree/v0.0.3) +- github.com/mholt/certmagic: [6a42ef9](https://github.com/mholt/certmagic/tree/6a42ef9) +- github.com/mitchellh/go-homedir: [v1.1.0](https://github.com/mitchellh/go-homedir/tree/v1.1.0) +- github.com/naoina/go-stringutil: [v0.1.0](https://github.com/naoina/go-stringutil/tree/v0.1.0) +- github.com/naoina/toml: [v0.1.1](https://github.com/naoina/toml/tree/v0.1.1) +- github.com/rogpeppe/go-internal: [v1.3.0](https://github.com/rogpeppe/go-internal/tree/v1.3.0) +- github.com/thecodeteam/goscaleio: [v0.1.0](https://github.com/thecodeteam/goscaleio/tree/v0.1.0) +- github.com/ugorji/go/codec: [d75b2dc](https://github.com/ugorji/go/codec/tree/d75b2dc) +- github.com/xordataexchange/crypt: [b2862e3](https://github.com/xordataexchange/crypt/tree/b2862e3) +- go.opencensus.io: v0.21.0 +- golang.org/x/mod: 4bf6d31 +- gopkg.in/airbrake/gobrake.v2: v2.0.9 +- gopkg.in/errgo.v2: v2.1.0 +- gopkg.in/gemnasium/logrus-airbrake-hook.v2: v2.1.2 +- gopkg.in/mcuadros/go-syslog.v2: v2.2.1 +- gotest.tools/gotestsum: v0.3.5 +- honnef.co/go/tools: v0.0.1-2019.2.2 + +#### Changed + +- cloud.google.com/go: v0.34.0 → v0.38.0 +- github.com/Azure/azure-sdk-for-go: [v21.4.0+incompatible → v32.5.0+incompatible](https://github.com/Azure/azure-sdk-for-go/compare/v21.4.0...v32.5.0) +- github.com/BurntSushi/toml: [v0.3.0 → v0.3.1](https://github.com/BurntSushi/toml/compare/v0.3.0...v0.3.1) +- github.com/GoogleCloudPlatform/k8s-cloud-provider: [f8e9959 → 27a4ced](https://github.com/GoogleCloudPlatform/k8s-cloud-provider/compare/f8e9959...27a4ced) +- github.com/PuerkitoBio/purell: [v1.1.0 → v1.1.1](https://github.com/PuerkitoBio/purell/compare/v1.1.0...v1.1.1) +- 
github.com/asaskevich/govalidator: [f9ffefc → f61b66f](https://github.com/asaskevich/govalidator/compare/f9ffefc...f61b66f) +- github.com/client9/misspell: [9ce5d97 → v0.3.4](https://github.com/client9/misspell/compare/9ce5d97...v0.3.4) +- github.com/containernetworking/cni: [v0.6.0 → v0.7.1](https://github.com/containernetworking/cni/compare/v0.6.0...v0.7.1) +- github.com/coreos/etcd: [v3.3.13+incompatible → v3.3.15+incompatible](https://github.com/coreos/etcd/compare/v3.3.13...v3.3.15) +- github.com/coreos/go-oidc: [065b426 → v2.1.0+incompatible](https://github.com/coreos/go-oidc/compare/065b426...v2.1.0) +- github.com/coreos/go-semver: [e214231 → v0.3.0](https://github.com/coreos/go-semver/compare/e214231...v0.3.0) +- github.com/cpuguy83/go-md2man: [v1.0.4 → v1.0.10](https://github.com/cpuguy83/go-md2man/compare/v1.0.4...v1.0.10) +- github.com/cyphar/filepath-securejoin: [ae69057 → v0.2.2](https://github.com/cyphar/filepath-securejoin/compare/ae69057...v0.2.2) +- github.com/dgrijalva/jwt-go: [01aeca5 → v3.2.0+incompatible](https://github.com/dgrijalva/jwt-go/compare/01aeca5...v3.2.0) +- github.com/docker/distribution: [edc3ab2 → v2.7.1+incompatible](https://github.com/docker/distribution/compare/edc3ab2...v2.7.1) +- github.com/emicklei/go-restful: [ff4f55a → v2.9.5+incompatible](https://github.com/emicklei/go-restful/compare/ff4f55a...v2.9.5) +- github.com/evanphx/json-patch: [5858425 → v4.2.0+incompatible](https://github.com/evanphx/json-patch/compare/5858425...v4.2.0) +- github.com/fatih/camelcase: [f6a740d → v1.0.0](https://github.com/fatih/camelcase/compare/f6a740d...v1.0.0) +- github.com/go-openapi/analysis: [v0.17.2 → v0.19.2](https://github.com/go-openapi/analysis/compare/v0.17.2...v0.19.2) +- github.com/go-openapi/errors: [v0.17.2 → v0.19.2](https://github.com/go-openapi/errors/compare/v0.17.2...v0.19.2) +- github.com/go-openapi/jsonpointer: [v0.19.0 → v0.19.2](https://github.com/go-openapi/jsonpointer/compare/v0.19.0...v0.19.2) +- 
github.com/go-openapi/jsonreference: [v0.19.0 → v0.19.2](https://github.com/go-openapi/jsonreference/compare/v0.19.0...v0.19.2) +- github.com/go-openapi/loads: [v0.17.2 → v0.19.2](https://github.com/go-openapi/loads/compare/v0.17.2...v0.19.2) +- github.com/go-openapi/runtime: [v0.17.2 → v0.19.0](https://github.com/go-openapi/runtime/compare/v0.17.2...v0.19.0) +- github.com/go-openapi/spec: [v0.17.2 → v0.19.2](https://github.com/go-openapi/spec/compare/v0.17.2...v0.19.2) +- github.com/go-openapi/strfmt: [v0.17.0 → v0.19.0](https://github.com/go-openapi/strfmt/compare/v0.17.0...v0.19.0) +- github.com/go-openapi/swag: [v0.17.2 → v0.19.2](https://github.com/go-openapi/swag/compare/v0.17.2...v0.19.2) +- github.com/go-openapi/validate: [v0.18.0 → v0.19.2](https://github.com/go-openapi/validate/compare/v0.18.0...v0.19.2) +- github.com/godbus/dbus: [c7fdd8b → v4.1.0+incompatible](https://github.com/godbus/dbus/compare/c7fdd8b...v4.1.0) +- github.com/gogo/protobuf: [342cbe0 → 65acae2](https://github.com/gogo/protobuf/compare/342cbe0...65acae2) +- github.com/golang/mock: [bd3c8e8 → v1.2.0](https://github.com/golang/mock/compare/bd3c8e8...v1.2.0) +- github.com/golang/protobuf: [v1.2.0 → v1.3.1](https://github.com/golang/protobuf/compare/v1.2.0...v1.3.1) +- github.com/google/btree: [7d79101 → 4030bb1](https://github.com/google/btree/compare/7d79101...4030bb1) +- github.com/google/cadvisor: [9db8c7d → v0.34.0](https://github.com/google/cadvisor/compare/9db8c7d...v0.34.0) +- github.com/google/gofuzz: [24818f7 → v1.0.0](https://github.com/google/gofuzz/compare/24818f7...v1.0.0) +- github.com/google/uuid: [v1.0.0 → v1.1.1](https://github.com/google/uuid/compare/v1.0.0...v1.1.1) +- github.com/gophercloud/gophercloud: [c818fa6 → v0.1.0](https://github.com/gophercloud/gophercloud/compare/c818fa6...v0.1.0) +- github.com/gorilla/websocket: [4201258 → v1.4.0](https://github.com/gorilla/websocket/compare/4201258...v1.4.0) +- github.com/grpc-ecosystem/go-grpc-prometheus: [2500245 → 
v1.2.0](https://github.com/grpc-ecosystem/go-grpc-prometheus/compare/2500245...v1.2.0) +- github.com/hashicorp/golang-lru: [v0.5.0 → v0.5.1](https://github.com/hashicorp/golang-lru/compare/v0.5.0...v0.5.1) +- github.com/hashicorp/hcl: [d8c773c → v1.0.0](https://github.com/hashicorp/hcl/compare/d8c773c...v1.0.0) +- github.com/heketi/heketi: [558b292 → v9.0.0+incompatible](https://github.com/heketi/heketi/compare/558b292...v9.0.0) +- github.com/jonboulle/clockwork: [72f9bd7 → v0.1.0](https://github.com/jonboulle/clockwork/compare/72f9bd7...v0.1.0) +- github.com/json-iterator/go: [ab8a2e0 → v1.1.7](https://github.com/json-iterator/go/compare/ab8a2e0...v1.1.7) +- github.com/kr/pretty: [f31442d → v0.1.0](https://github.com/kr/pretty/compare/f31442d...v0.1.0) +- github.com/kr/text: [6807e77 → v0.1.0](https://github.com/kr/text/compare/6807e77...v0.1.0) +- github.com/libopenstorage/openstorage: [093a0c3 → v1.0.0](https://github.com/libopenstorage/openstorage/compare/093a0c3...v1.0.0) +- github.com/magiconair/properties: [61b492c → v1.8.1](https://github.com/magiconair/properties/compare/61b492c...v1.8.1) +- github.com/mailru/easyjson: [60711f1 → 94de47d](https://github.com/mailru/easyjson/compare/60711f1...94de47d) +- github.com/mattn/go-shellwords: [f8471b0 → v1.0.5](https://github.com/mattn/go-shellwords/compare/f8471b0...v1.0.5) +- github.com/miekg/dns: [5d001d0 → v1.1.4](https://github.com/miekg/dns/compare/5d001d0...v1.1.4) +- github.com/mistifyio/go-zfs: [1b4ae6f → v2.1.1+incompatible](https://github.com/mistifyio/go-zfs/compare/1b4ae6f...v2.1.1) +- github.com/mitchellh/go-wordwrap: [ad45545 → v1.0.0](https://github.com/mitchellh/go-wordwrap/compare/ad45545...v1.0.0) +- github.com/mvdan/xurls: [1b768d7 → v1.1.0](https://github.com/mvdan/xurls/compare/1b768d7...v1.1.0) +- github.com/onsi/ginkgo: [v1.6.0 → v1.8.0](https://github.com/onsi/ginkgo/compare/v1.6.0...v1.8.0) +- github.com/onsi/gomega: [5533ce8 → 
v1.5.0](https://github.com/onsi/gomega/compare/5533ce8...v1.5.0) +- github.com/opencontainers/go-digest: [a6d0ee4 → v1.0.0-rc1](https://github.com/opencontainers/go-digest/compare/a6d0ee4...v1.0.0-rc1) +- github.com/opencontainers/image-spec: [372ad78 → v1.0.1](https://github.com/opencontainers/image-spec/compare/372ad78...v1.0.1) +- github.com/opencontainers/runc: [f000fe1 → 6cc5158](https://github.com/opencontainers/runc/compare/f000fe1...6cc5158) +- github.com/opencontainers/selinux: [4a2974b → v1.2.2](https://github.com/opencontainers/selinux/compare/4a2974b...v1.2.2) +- github.com/robfig/cron: [df38d32 → v1.1.0](https://github.com/robfig/cron/compare/df38d32...v1.1.0) +- github.com/russross/blackfriday: [300106c → v1.5.2](https://github.com/russross/blackfriday/compare/300106c...v1.5.2) +- github.com/seccomp/libseccomp-golang: [1b506fc → v0.9.1](https://github.com/seccomp/libseccomp-golang/compare/1b506fc...v0.9.1) +- github.com/sirupsen/logrus: [v1.2.0 → v1.4.2](https://github.com/sirupsen/logrus/compare/v1.2.0...v1.4.2) +- github.com/spf13/afero: [b28a7ef → v1.2.2](https://github.com/spf13/afero/compare/b28a7ef...v1.2.2) +- github.com/spf13/cast: [e31f36f → v1.3.0](https://github.com/spf13/cast/compare/e31f36f...v1.3.0) +- github.com/spf13/cobra: [c439c4f → v0.0.5](https://github.com/spf13/cobra/compare/c439c4f...v0.0.5) +- github.com/spf13/jwalterweatherman: [33c24e7 → v1.1.0](https://github.com/spf13/jwalterweatherman/compare/33c24e7...v1.1.0) +- github.com/spf13/pflag: [v1.0.1 → v1.0.3](https://github.com/spf13/pflag/compare/v1.0.1...v1.0.3) +- github.com/spf13/viper: [7fb2782 → v1.3.2](https://github.com/spf13/viper/compare/7fb2782...v1.3.2) +- github.com/stretchr/objx: [v0.1.1 → v0.2.0](https://github.com/stretchr/objx/compare/v0.1.1...v0.2.0) +- github.com/stretchr/testify: [v1.2.2 → v1.3.0](https://github.com/stretchr/testify/compare/v1.2.2...v1.3.0) +- golang.org/x/net: 65e2d4e → cdfb69a +- golang.org/x/tools: aa82965 → 6e04913 +- 
google.golang.org/api: 583d854 → 5213b80 +- google.golang.org/genproto: 09f6ed2 → 54afdca +- google.golang.org/grpc: v1.13.0 → v1.23.0 +- gopkg.in/check.v1: 20d25e2 → 788fd78 +- gopkg.in/natefinch/lumberjack.v2: 20b71e5 → v2.0.0 +- gopkg.in/square/go-jose.v2: 89060de → v2.2.2 +- gopkg.in/yaml.v2: v2.2.1 → v2.2.2 +- k8s.io/gengo: f8a0810 → 26a6646 +- k8s.io/klog: v0.3.1 → v0.4.0 +- k8s.io/kube-openapi: b3a7cee → 743ec37 +- k8s.io/utils: c2654d5 → 581e001 +- sigs.k8s.io/structured-merge-diff: e85c7b2 → 6149e45 + +#### Removed + +- github.com/Azure/go-autorest: [v11.1.2+incompatible](https://github.com/Azure/go-autorest/tree/v11.1.2) +- github.com/codedellemc/goscaleio: [20e2ce2](https://github.com/codedellemc/goscaleio/tree/20e2ce2) +- github.com/d2g/dhcp4: [a1d1b6c](https://github.com/d2g/dhcp4/tree/a1d1b6c) +- github.com/d2g/dhcp4client: [6e570ed](https://github.com/d2g/dhcp4client/tree/6e570ed) +- github.com/jteeuwen/go-bindata: [a0ff256](https://github.com/jteeuwen/go-bindata/tree/a0ff256) +- github.com/kardianos/osext: [8fef92e](https://github.com/kardianos/osext/tree/8fef92e) +- github.com/kr/fs: [2788f0d](https://github.com/kr/fs/tree/2788f0d) +- github.com/marstr/guid: [8bdf7d1](https://github.com/marstr/guid/tree/8bdf7d1) +- github.com/mholt/caddy: [2de4950](https://github.com/mholt/caddy/tree/2de4950) +- github.com/natefinch/lumberjack: [v2.0.0+incompatible](https://github.com/natefinch/lumberjack/tree/v2.0.0) +- github.com/pkg/sftp: [4d0e916](https://github.com/pkg/sftp/tree/4d0e916) +- github.com/shurcooL/sanitized_anchor_name: [10ef21a](https://github.com/shurcooL/sanitized_anchor_name/tree/10ef21a) +- github.com/sigma/go-inotify: [c87b6cf](https://github.com/sigma/go-inotify/tree/c87b6cf) +- github.com/vmware/photon-controller-go-sdk: [4a435da](https://github.com/vmware/photon-controller-go-sdk/tree/4a435da) +- github.com/xanzy/go-cloudstack: [1e2cbf6](https://github.com/xanzy/go-cloudstack/tree/1e2cbf6) +- gopkg.in/yaml.v1: 9f9df34 +- 
[v1.16.0-rc.2](#v1160-rc2) +- [v1.16.0-rc.1](#v1160-rc1) +- [v1.16.0-beta.2](#v1160-beta2) +- [v1.16.0-beta.1](#v1160-beta1) +- [v1.16.0-alpha.3](#v1160-alpha3) +- [v1.16.0-alpha.2](#v1160-alpha2) +- [v1.16.0-alpha.1](#v1160-alpha1) + + + +# v1.16.0-rc.2 + +[Documentation](https://docs.k8s.io) + +## Downloads for v1.16.0-rc.2 + + +filename | sha512 hash +-------- | ----------- +[kubernetes.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes.tar.gz) | `68837f83bcf380e22b50f145fb64404584e96e5714a6c0cbc1ba76e290dc267f6b53194e2b51f19c1145ae7c3e5874124d35ff430cda15f67b0f9c954803389c` +[kubernetes-src.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-src.tar.gz) | `922552ed60d425fa6d126ffb34db6a7f123e1b9104e751edaed57b4992826620383446e6cf4f8a9fd55aac72f95a69b45e53274a41aaa838c2c2ae15ff4ddad2` + +### Client Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-client-darwin-386.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-darwin-386.tar.gz) | `d0df8f57f4d9c2822badc507345f82f87d0e8e49c79ca907a0e4e4dd634db964b84572f88b8ae7eaf50a20965378d464e0d1e7f588e84e926edfb741b859e7d2` +[kubernetes-client-darwin-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-darwin-amd64.tar.gz) | `0bc7daaf1165189b57dcdbe59f402731830b6f4db53b853350056822602579d52fe43ce5ac6b7d4b6d89d81036ae94eab6b7167e78011a96792acfbf6892fa39` +[kubernetes-client-linux-386.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-386.tar.gz) | `7735c607bb99b47924140a6a3e794912b2b97b6b54024af1de5db6765b8cc518cba6b145c25dc67c8d8f827805d9a61f676b4ae67b8ef86cfda2fe76de822c6a` +[kubernetes-client-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-amd64.tar.gz) | `d35f70cea4780a80c24588bc760c38c138d73e5f80f9fe89d952075c24cbf179dd504c2bd7ddb1756c2632ffbcc69a334684710a2d702443043998f66bec4a25` +[kubernetes-client-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-arm.tar.gz) | 
`e1fc50b6884c42e92649a231db60e35d4e13e58728e4af7f6eca8b0baa719108cdd960db1f1dbd623085610dbccf7f17df733de1faf10ebf6cd1977ecd7f6213` +[kubernetes-client-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-arm64.tar.gz) | `defc25fe403c20ef322b2149be28a5b44c28c7284f11bcf193a07d7f45110ce2bd6227d3a4aa48859aaeb67796809962785651ca9f76121fb9534366b40c4b7d` +[kubernetes-client-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-ppc64le.tar.gz) | `e87b16c948d09ddbc5d6e3fab05ad3c5a58aa7836d4f42c59edab640465531869c92ecdfa2845ec3eecd95b8ccba3dafdd9337f4c313763c6e5105b8740f2dca` +[kubernetes-client-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-linux-s390x.tar.gz) | `2c25a1860fa81cea05a1840d6a200a3a794cc50cfe45a4efec57d7122208b1354e86f698437bbe5c915d6fb70ef9525f844edc0fa63387ab8c1586a6b22008a5` +[kubernetes-client-windows-386.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-windows-386.tar.gz) | `267654a7ecfa37c800c1c94ea78343f5466783881cfac62091cfbd8c62489f04bd74a7a39a08253cb51d7ba52c207f56da371f992f61c1468b595c094f0e080f` +[kubernetes-client-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-client-windows-amd64.tar.gz) | `bd4c25b80e54f9fc0c07f64550d020878f899e4e3a28ca57dd532fdbab9ab700d296d2890185591ac27bce6fde336ab90f3102a6797e174d233db76f24f5ac1b` + +### Server Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-server-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-server-linux-amd64.tar.gz) | `13a93bb9bd5599b669af7bd25537ee81cefd6d8c73bedfbac845703c01950c70b2aa39f94f2346d935bc167bae435dbcd6e1758341b634102265657e1b1c1259` +[kubernetes-server-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-server-linux-arm.tar.gz) | `781d127f32d8479bc21beed855ec73e383702e6e982854138adce8edb0ee4d1d4b0c6e723532bc761689d17512c18b1945d05b0e4adb3fe4b98428cce40d52c8` 
+[kubernetes-server-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-server-linux-arm64.tar.gz) | `6d6dfa49288e4a4ce77ca4f7e83a51c78a2b1844dd95df10cb12fff5a104e750d8e4e117b631448e066487c4c71648e822c87ed83a213f17f27f8c7ecb328ca4` +[kubernetes-server-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-server-linux-ppc64le.tar.gz) | `97804d87ea984167fdbdedcfb38380bd98bb2ef150c1a631c6822905ce5270931a907226d5ddefc8d98d5326610daa79a08964fc4d7e8b438832beb966efd214` +[kubernetes-server-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-server-linux-s390x.tar.gz) | `d45bd651c7f4b6e62ceb661c2ec70afca06a8d1fde1e50bb7783d05401c37823cf21b9f0d3ac87e6b91eeec9d03fc539c3713fd46beff6207e8ebac1bf9d1dd5` + +### Node Binaries + +filename | sha512 hash +-------- | ----------- +[kubernetes-node-linux-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-linux-amd64.tar.gz) | `42c57b59ce43f8961e427d622ee9cfa85cc23468779945262d59aa8cd31afd495c7abaaef7263b9db60ec939ba5e9898ebc3281e8ec81298237123ce4739cbff` +[kubernetes-node-linux-arm.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-linux-arm.tar.gz) | `034a5611909df462ef6408f5ba5ff5ebfb4e1178b2ad06a59097560040c4fcdb163faec48ab4297ca6c21282d7b146f9a5eebd3f2573f7d6d7189d6d29f2cf34` +[kubernetes-node-linux-arm64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-linux-arm64.tar.gz) | `df1493fa2d67b59eaf02096889223bbf0d71797652d3cbd89e8a3106ff6012ea17d25daaa4baf9f26c2e061afb4b69e3e6814ba66e9c4744f04230c922fbc251` +[kubernetes-node-linux-ppc64le.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-linux-ppc64le.tar.gz) | `812a5057bbf832c93f741cc39d04fc0087e36b81b6b123ec5ef02465f7ab145c5152cfc1f7c76032240695c7d7ab71ddb9a2a4f5e1f1a2abb63f32afa3fb6c7c` +[kubernetes-node-linux-s390x.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-linux-s390x.tar.gz) | `2a58a4b201631789d4309ddc665829aedcc05ec4fe6ad6e4d965ef3283a381b8a4980b4b728cfe9a38368dac49921f61ac6938f0208b671afd2327f2013db22a` 
+[kubernetes-node-windows-amd64.tar.gz](https://dl.k8s.io/v1.16.0-rc.2/kubernetes-node-windows-amd64.tar.gz) | `7fb09e7667715f539766398fc1bbbc4bf17c64913ca09d4e3535dfc4d1ba2bf6f1a3fcc6d81dbf473ba3f10fd29c537ce5debc17268698048ce7b378802a6c46` + +## Changelog since v1.16.0-rc.1 + +### Other notable changes + +* Single static pod files and pod files from http endpoints cannot be larger than 10 MB. HTTP probe payloads are now truncated to 10KB. ([#82669](https://github.com/kubernetes/kubernetes/pull/82669), [@rphillips](https://github.com/rphillips)) +* Restores compatibility with <=1.15.x custom resources by not publishing OpenAPI for non-structural custom resource definitions ([#82653](https://github.com/kubernetes/kubernetes/pull/82653), [@liggitt](https://github.com/liggitt)) +* Fixes regression in logging spurious stack traces when proxied connections are closed by the backend ([#82588](https://github.com/kubernetes/kubernetes/pull/82588), [@liggitt](https://github.com/liggitt)) + + + # v1.16.0-rc.1 [Documentation](https://docs.k8s.io) @@ -776,7 +1691,7 @@ filename | sha512 hash * - http_404_request_total (the number of 404 requests handled) * - http_404_request_duration_ms (the amount of time the server took to respond in ms) * Also includes percentile groupings. The directory for the default 404 handler includes instructions on how to enable prometheus for monitoring and setting alerts. -* The kube-apiserver has improved behavior for both startup and shutdown sequences and also now exposes ` eadyz` for readiness checking. Readyz includes all existing healthz checks but also adds a shutdown check. When a cluster admin initiates a shutdown, the kube-apiserver will try to process existing requests (for the duration of request timeout) before killing the apiserver process. 
([#78458](https://github.com/kubernetes/kubernetes/pull/78458), [@logicalhan](https://github.com/logicalhan)) +* The kube-apiserver has improved behavior for both startup and shutdown sequences and also now exposes `/readyz` for readiness checking. Readyz includes all existing healthz checks but also adds a shutdown check. When a cluster admin initiates a shutdown, the kube-apiserver will try to process existing requests (for the duration of request timeout) before killing the apiserver process. ([#78458](https://github.com/kubernetes/kubernetes/pull/78458), [@logicalhan](https://github.com/logicalhan)) * The apiserver also now takes an optional flag "--maximum-startup-sequence-duration". This allows you to explicitly define an upper bound on the apiserver startup sequences before healthz begins to fail. By keeping the kubelet liveness initial delay short, this can enable quick kubelet recovery as soon as we have a boot sequence which has not completed in our expected time frame, despite lack of completion from longer boot sequences (like RBAC). Kube-apiserver behavior when the value of this flag is zero is backwards compatible (this is as the defaulted value of the flag). * fix: make azure disk URI as case insensitive ([#79020](https://github.com/kubernetes/kubernetes/pull/79020), [@andyzhangx](https://github.com/andyzhangx)) * Enable cadvisor ProcessMetrics collecting. 
([#79002](https://github.com/kubernetes/kubernetes/pull/79002), [@jiayingz](https://github.com/jiayingz)) diff --git a/vendor/k8s.io/kubernetes/api/api-rules/codegen_violation_exceptions.list b/vendor/k8s.io/kubernetes/api/api-rules/codegen_violation_exceptions.list index 96607faab07b..a20637beff27 100644 --- a/vendor/k8s.io/kubernetes/api/api-rules/codegen_violation_exceptions.list +++ b/vendor/k8s.io/kubernetes/api/api-rules/codegen_violation_exceptions.list @@ -28,6 +28,7 @@ API rule violation: list_type_missing,k8s.io/apimachinery/pkg/runtime,RawExtensi API rule violation: list_type_missing,k8s.io/apimachinery/pkg/runtime,Unknown,Raw API rule violation: list_type_missing,k8s.io/code-generator/_examples/apiserver/apis/example/v1,TestTypeList,Items API rule violation: list_type_missing,k8s.io/code-generator/_examples/apiserver/apis/example2/v1,TestTypeList,Items +API rule violation: list_type_missing,k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1,TestTypeList,Items API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,APIResourceList,APIResources API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,Duration,Duration API rule violation: names_match,k8s.io/apimachinery/pkg/apis/meta/v1,InternalEvent,Object @@ -40,3 +41,4 @@ API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,ContentT API rule violation: names_match,k8s.io/apimachinery/pkg/runtime,Unknown,Raw API rule violation: names_match,k8s.io/code-generator/_examples/apiserver/apis/example/v1,TestTypeStatus,Blah API rule violation: names_match,k8s.io/code-generator/_examples/apiserver/apis/example2/v1,TestTypeStatus,Blah +API rule violation: names_match,k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1,TestTypeStatus,Blah diff --git a/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json b/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json index ec090c453a3b..a959e0f09653 100644 --- 
a/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json +++ b/vendor/k8s.io/kubernetes/api/openapi-spec/swagger.json @@ -20106,7 +20106,7 @@ }, "info": { "title": "Kubernetes", - "version": "v1.16.0" + "version": "v1.16.2" }, "paths": { "/api/": { diff --git a/vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile b/vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile index be3191deedbf..efcc54f1b533 100644 --- a/vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile +++ b/vendor/k8s.io/kubernetes/build/build-image/cross/Dockerfile @@ -15,7 +15,7 @@ # This file creates a standard build environment for building cross # platform go binary for the architecture kubernetes cares about. -FROM golang:1.12.9 +FROM golang:1.12.10 ENV GOARM 7 ENV KUBE_DYNAMIC_CROSSPLATFORMS \ diff --git a/vendor/k8s.io/kubernetes/build/build-image/cross/VERSION b/vendor/k8s.io/kubernetes/build/build-image/cross/VERSION index 57d32abae28f..22276e2bf8c5 100644 --- a/vendor/k8s.io/kubernetes/build/build-image/cross/VERSION +++ b/vendor/k8s.io/kubernetes/build/build-image/cross/VERSION @@ -1 +1 @@ -v1.12.9-1 +v1.12.10-1 diff --git a/vendor/k8s.io/kubernetes/build/dependencies.yaml b/vendor/k8s.io/kubernetes/build/dependencies.yaml index e917359713c0..7a5310e3c4e0 100644 --- a/vendor/k8s.io/kubernetes/build/dependencies.yaml +++ b/vendor/k8s.io/kubernetes/build/dependencies.yaml @@ -34,7 +34,7 @@ dependencies: - name: "golang" - version: 1.12.9 + version: 1.12.10 refPaths: - path: build/build-image/cross/Dockerfile match: "golang:" diff --git a/vendor/k8s.io/kubernetes/build/root/WORKSPACE b/vendor/k8s.io/kubernetes/build/root/WORKSPACE index 89a9ffca9858..7b3cafc1c532 100644 --- a/vendor/k8s.io/kubernetes/build/root/WORKSPACE +++ b/vendor/k8s.io/kubernetes/build/root/WORKSPACE @@ -44,8 +44,8 @@ http_archive( http_archive( name = "io_bazel_rules_go", - sha256 = "f635b285d7e902ac7327637edbba98a4f110e8202c8f4fb49d2f6ecd837f704a", - urls = 
mirror("https://github.com/bazelbuild/rules_go/releases/download/0.18.9/rules_go-0.18.9.tar.gz"), + sha256 = "5c02dd4436332f8efa982ce2102daf7c041ae95c2bc7f2d26aceed20dc47def8", + urls = mirror("https://github.com/bazelbuild/rules_go/releases/download/v0.18.11/rules_go-v0.18.11.tar.gz"), ) load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_dependencies") @@ -53,7 +53,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe go_rules_dependencies() go_register_toolchains( - go_version = "1.12.9", + go_version = "1.12.10", ) http_archive( diff --git a/vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest b/vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest index fb66e5b9ba48..c8544548fde5 100644 --- a/vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest +++ b/vendor/k8s.io/kubernetes/cluster/gce/manifests/cluster-autoscaler.manifest @@ -17,7 +17,7 @@ "containers": [ { "name": "cluster-autoscaler", - "image": "k8s.gcr.io/cluster-autoscaler:v1.16.0", + "image": "k8s.gcr.io/cluster-autoscaler:v1.16.1", "livenessProbe": { "httpGet": { "path": "/health-check", diff --git a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs/defaults.go b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs/defaults.go index 516911bfb997..a1a571e24bc8 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs/defaults.go +++ b/vendor/k8s.io/kubernetes/cmd/kubeadm/app/componentconfigs/defaults.go @@ -58,7 +58,7 @@ const ( // DefaultKubeProxyConfiguration assigns default values for the kube-proxy ComponentConfig func DefaultKubeProxyConfiguration(internalcfg *kubeadmapi.ClusterConfiguration) { - externalproxycfg := &kubeproxyconfigv1alpha1.KubeProxyConfiguration{} + externalproxycfg := &kubeproxyconfigv1alpha1.KubeProxyConfiguration{FeatureGates: make(map[string]bool)} kind := "KubeProxyConfiguration" // Do a roundtrip to the external version for 
defaulting diff --git a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go index 1038bb738cb0..607c4bb4b857 100644 --- a/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go +++ b/vendor/k8s.io/kubernetes/cmd/kubelet/app/server.go @@ -596,14 +596,6 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan } } - // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops - if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 && - kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { - if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil { - return err - } - } - if kubeDeps.Auth == nil { auth, err := BuildAuth(nodeName, kubeDeps.KubeClient, s.KubeletConfiguration) if err != nil { @@ -728,6 +720,14 @@ func run(s *options.KubeletServer, kubeDeps *kubelet.Dependencies, stopCh <-chan return err } + // If the kubelet config controller is available, and dynamic config is enabled, start the config and status sync loops + if utilfeature.DefaultFeatureGate.Enabled(features.DynamicKubeletConfig) && len(s.DynamicConfigDir.Value()) > 0 && + kubeDeps.KubeletConfigController != nil && !standaloneMode && !s.RunOnce { + if err := kubeDeps.KubeletConfigController.StartSync(kubeDeps.KubeClient, kubeDeps.EventClient, string(nodeName)); err != nil { + return err + } + } + if s.HealthzPort > 0 { mux := http.NewServeMux() healthz.InstallHandler(mux) diff --git a/vendor/k8s.io/kubernetes/go.mod b/vendor/k8s.io/kubernetes/go.mod index 646ebeab5e50..4802162b401b 100644 --- a/vendor/k8s.io/kubernetes/go.mod +++ b/vendor/k8s.io/kubernetes/go.mod @@ -16,7 +16,7 @@ require ( github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20190822182118-27a4ced34534 github.com/JeffAshton/win_pdh 
v0.0.0-20161109143554-76bb4ee9f0ab github.com/Microsoft/go-winio v0.4.11 - github.com/Microsoft/hcsshim v0.8.6 + github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d github.com/PuerkitoBio/purell v1.1.1 github.com/Rican7/retry v0.1.0 // indirect github.com/armon/circbuf v0.0.0-20150827004946-bbbad097214e @@ -147,7 +147,7 @@ require ( gopkg.in/gcfg.v1 v1.2.0 gopkg.in/ldap.v2 v2.5.1 // indirect gopkg.in/square/go-jose.v2 v2.2.2 - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible gotest.tools/gotestsum v0.3.5 honnef.co/go/tools v0.0.1-2019.2.2 @@ -203,7 +203,7 @@ replace ( github.com/JeffAshton/win_pdh => github.com/JeffAshton/win_pdh v0.0.0-20161109143554-76bb4ee9f0ab github.com/MakeNowJust/heredoc => github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd github.com/Microsoft/go-winio => github.com/Microsoft/go-winio v0.4.11 - github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.8.6 + github.com/Microsoft/hcsshim => github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d github.com/NYTimes/gziphandler => github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 github.com/PuerkitoBio/purell => github.com/PuerkitoBio/purell v1.1.1 github.com/PuerkitoBio/urlesc => github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 @@ -459,7 +459,7 @@ replace ( gopkg.in/square/go-jose.v2 => gopkg.in/square/go-jose.v2 v2.2.2 gopkg.in/tomb.v1 => gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 gopkg.in/warnings.v0 => gopkg.in/warnings.v0 v0.1.1 - gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 => gopkg.in/yaml.v2 v2.2.4 gotest.tools => gotest.tools v2.2.0+incompatible gotest.tools/gotestsum => gotest.tools/gotestsum v0.3.5 honnef.co/go/tools => honnef.co/go/tools v0.0.1-2019.2.2 diff --git a/vendor/k8s.io/kubernetes/go.sum b/vendor/k8s.io/kubernetes/go.sum index 4cf6fcfb5924..1d4613e7f1b6 100644 --- a/vendor/k8s.io/kubernetes/go.sum +++ b/vendor/k8s.io/kubernetes/go.sum @@ 
-35,8 +35,8 @@ github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd h1:sjQovDkwrZp github.com/MakeNowJust/heredoc v0.0.0-20170808103936-bb23615498cd/go.mod h1:64YHyfSL2R96J44Nlwm39UHepQbyR5q10x7iYa1ks2E= github.com/Microsoft/go-winio v0.4.11 h1:zoIOcVf0xPN1tnMVbTtEdI+P8OofVk3NObnwOQ6nK2Q= github.com/Microsoft/go-winio v0.4.11/go.mod h1:VhR8bwka0BXejwEJY73c50VrPtXAaKcyvVC4A4RozmA= -github.com/Microsoft/hcsshim v0.8.6 h1:ZfF0+zZeYdzMIVMZHKtDKJvLHj76XCuVae/jNkjj0IA= -github.com/Microsoft/hcsshim v0.8.6/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d h1:u64+IetywsPQ0gJ/4cXBJ/KiXV9xTKRMoaCOzW9PI3g= +github.com/Microsoft/hcsshim v0.0.0-20190417211021-672e52e9209d/go.mod h1:Op3hHsoHPAvb6lceZHDtd9OkTew38wNoXnJs8iY7rUg= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46 h1:lsxEuwrXEAokXB9qhlbKWPpo3KMLZQ5WB5WLQRW1uq0= github.com/NYTimes/gziphandler v0.0.0-20170623195520-56545f4a5d46/go.mod h1:3wb06e3pkSAbeQ52E9H9iFoQsEEwGN64994WTCIhntQ= github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI= @@ -506,8 +506,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.1 h1:XM28wIgFzaBmeZ5dNHIpWLQpt/9DGKxk+rCg/22nnYE= gopkg.in/warnings.v0 v0.1.1/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= gotest.tools/gotestsum v0.3.5 
h1:VePOWRsuWFYpfp/G8mbmOZKxO5T3501SEGQRUdvq7h0= diff --git a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go index d41552d70cef..48998dca1755 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner.go @@ -202,6 +202,11 @@ func (tc *TokenCleaner) evalSecret(o interface{}) { klog.V(3).Infof("Error deleting Secret: %v", err) } } else if ttl > 0 { - tc.queue.AddAfter(o, ttl) + key, err := controller.KeyFunc(o) + if err != nil { + utilruntime.HandleError(err) + return + } + tc.queue.AddAfter(key, ttl) } } diff --git a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner_test.go b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner_test.go index 6eea6321ba07..df56ea8a86cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/bootstrap/tokencleaner_test.go @@ -110,10 +110,11 @@ func TestCleanerExpiredAt(t *testing.T) { secret := newTokenSecret("tokenID", "tokenSecret") addSecretExpiration(secret, timeString(2*time.Second)) + secrets.Informer().GetIndexer().Add(secret) + cleaner.enqueueSecrets(secret) expected := []core.Action{} verifyFunc := func() { - secrets.Informer().GetIndexer().Add(secret) - cleaner.evalSecret(secret) + cleaner.processNextWorkItem() verifyActions(t, expected, cl.Actions()) } // token has not expired currently diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go index 17fcf98f9a71..025bf9d5ace0 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils.go @@ -771,94 +771,6 @@ func (s ActivePods) Less(i, j int) bool { return false } -// ActivePodsWithRanks is a sortable list of pods and a list of corresponding -// ranks which 
will be considered during sorting. The two lists must have equal -// length. After sorting, the pods will be ordered as follows, applying each -// rule in turn until one matches: -// -// 1. If only one of the pods is assigned to a node, the pod that is not -// assigned comes before the pod that is. -// 2. If the pods' phases differ, a pending pod comes before a pod whose phase -// is unknown, and a pod whose phase is unknown comes before a running pod. -// 3. If exactly one of the pods is ready, the pod that is not ready comes -// before the ready pod. -// 4. If the pods' ranks differ, the pod with greater rank comes before the pod -// with lower rank. -// 5. If both pods are ready but have not been ready for the same amount of -// time, the pod that has been ready for a shorter amount of time comes -// before the pod that has been ready for longer. -// 6. If one pod has a container that has restarted more than any container in -// the other pod, the pod with the container with more restarts comes -// before the other pod. -// 7. If the pods' creation times differ, the pod that was created more recently -// comes before the older pod. -// -// If none of these rules matches, the second pod comes before the first pod. -// -// The intention of this ordering is to put pods that should be preferred for -// deletion first in the list. -type ActivePodsWithRanks struct { - // Pods is a list of pods. - Pods []*v1.Pod - - // Rank is a ranking of pods. This ranking is used during sorting when - // comparing two pods that are both scheduled, in the same phase, and - // having the same ready status. - Rank []int -} - -func (s ActivePodsWithRanks) Len() int { - return len(s.Pods) -} - -func (s ActivePodsWithRanks) Swap(i, j int) { - s.Pods[i], s.Pods[j] = s.Pods[j], s.Pods[i] - s.Rank[i], s.Rank[j] = s.Rank[j], s.Rank[i] -} - -// Less compares two pods with corresponding ranks and returns true if the first -// one should be preferred for deletion. 
-func (s ActivePodsWithRanks) Less(i, j int) bool { - // 1. Unassigned < assigned - // If only one of the pods is unassigned, the unassigned one is smaller - if s.Pods[i].Spec.NodeName != s.Pods[j].Spec.NodeName && (len(s.Pods[i].Spec.NodeName) == 0 || len(s.Pods[j].Spec.NodeName) == 0) { - return len(s.Pods[i].Spec.NodeName) == 0 - } - // 2. PodPending < PodUnknown < PodRunning - m := map[v1.PodPhase]int{v1.PodPending: 0, v1.PodUnknown: 1, v1.PodRunning: 2} - if m[s.Pods[i].Status.Phase] != m[s.Pods[j].Status.Phase] { - return m[s.Pods[i].Status.Phase] < m[s.Pods[j].Status.Phase] - } - // 3. Not ready < ready - // If only one of the pods is not ready, the not ready one is smaller - if podutil.IsPodReady(s.Pods[i]) != podutil.IsPodReady(s.Pods[j]) { - return !podutil.IsPodReady(s.Pods[i]) - } - // 4. Doubled up < not doubled up - // If one of the two pods is on the same node as one or more additional - // ready pods that belong to the same replicaset, whichever pod has more - // colocated ready pods is less - if s.Rank[i] != s.Rank[j] { - return s.Rank[i] > s.Rank[j] - } - // TODO: take availability into account when we push minReadySeconds information from deployment into pods, - // see https://github.com/kubernetes/kubernetes/issues/22065 - // 5. Been ready for empty time < less time < more time - // If both pods are ready, the latest ready one is smaller - if podutil.IsPodReady(s.Pods[i]) && podutil.IsPodReady(s.Pods[j]) && !podReadyTime(s.Pods[i]).Equal(podReadyTime(s.Pods[j])) { - return afterOrZero(podReadyTime(s.Pods[i]), podReadyTime(s.Pods[j])) - } - // 6. Pods with containers with higher restart counts < lower restart counts - if maxContainerRestarts(s.Pods[i]) != maxContainerRestarts(s.Pods[j]) { - return maxContainerRestarts(s.Pods[i]) > maxContainerRestarts(s.Pods[j]) - } - // 7. 
Empty creation time pods < newer pods < older pods - if !s.Pods[i].CreationTimestamp.Equal(&s.Pods[j].CreationTimestamp) { - return afterOrZero(&s.Pods[i].CreationTimestamp, &s.Pods[j].CreationTimestamp) - } - return false -} - // afterOrZero checks if time t1 is after time t2; if one of them // is zero, the zero time is seen as after non-zero time. func afterOrZero(t1, t2 *metav1.Time) bool { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go index 837a10b22a8e..3bdda84a662a 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/controller_utils_test.go @@ -429,96 +429,6 @@ func TestSortingActivePods(t *testing.T) { } } -func TestSortingActivePodsWithRanks(t *testing.T) { - now := metav1.Now() - then := metav1.Time{Time: now.AddDate(0, -1, 0)} - zeroTime := metav1.Time{} - pod := func(podName, nodeName string, phase v1.PodPhase, ready bool, restarts int32, readySince metav1.Time, created metav1.Time) *v1.Pod { - var conditions []v1.PodCondition - var containerStatuses []v1.ContainerStatus - if ready { - conditions = []v1.PodCondition{{Type: v1.PodReady, Status: v1.ConditionTrue, LastTransitionTime: readySince}} - containerStatuses = []v1.ContainerStatus{{RestartCount: restarts}} - } - return &v1.Pod{ - ObjectMeta: metav1.ObjectMeta{ - CreationTimestamp: created, - Name: podName, - }, - Spec: v1.PodSpec{NodeName: nodeName}, - Status: v1.PodStatus{ - Conditions: conditions, - ContainerStatuses: containerStatuses, - Phase: phase, - }, - } - } - var ( - unscheduledPod = pod("unscheduled", "", v1.PodPending, false, 0, zeroTime, zeroTime) - scheduledPendingPod = pod("pending", "node", v1.PodPending, false, 0, zeroTime, zeroTime) - unknownPhasePod = pod("unknown-phase", "node", v1.PodUnknown, false, 0, zeroTime, zeroTime) - runningNotReadyPod = pod("not-ready", "node", v1.PodRunning, false, 0, zeroTime, 
zeroTime) - runningReadyNoLastTransitionTimePod = pod("ready-no-last-transition-time", "node", v1.PodRunning, true, 0, zeroTime, zeroTime) - runningReadyNow = pod("ready-now", "node", v1.PodRunning, true, 0, now, now) - runningReadyThen = pod("ready-then", "node", v1.PodRunning, true, 0, then, then) - runningReadyNowHighRestarts = pod("ready-high-restarts", "node", v1.PodRunning, true, 9001, now, now) - runningReadyNowCreatedThen = pod("ready-now-created-then", "node", v1.PodRunning, true, 0, now, then) - ) - equalityTests := []*v1.Pod{ - unscheduledPod, - scheduledPendingPod, - unknownPhasePod, - runningNotReadyPod, - runningReadyNowCreatedThen, - runningReadyNow, - runningReadyThen, - runningReadyNowHighRestarts, - runningReadyNowCreatedThen, - } - for _, pod := range equalityTests { - podsWithRanks := ActivePodsWithRanks{ - Pods: []*v1.Pod{pod, pod}, - Rank: []int{1, 1}, - } - if podsWithRanks.Less(0, 1) || podsWithRanks.Less(1, 0) { - t.Errorf("expected pod %q not to be less than than itself", pod.Name) - } - } - type podWithRank struct { - pod *v1.Pod - rank int - } - inequalityTests := []struct { - lesser, greater podWithRank - }{ - {podWithRank{unscheduledPod, 1}, podWithRank{scheduledPendingPod, 2}}, - {podWithRank{unscheduledPod, 2}, podWithRank{scheduledPendingPod, 1}}, - {podWithRank{scheduledPendingPod, 1}, podWithRank{unknownPhasePod, 2}}, - {podWithRank{unknownPhasePod, 1}, podWithRank{runningNotReadyPod, 2}}, - {podWithRank{runningNotReadyPod, 1}, podWithRank{runningReadyNoLastTransitionTimePod, 1}}, - {podWithRank{runningReadyNoLastTransitionTimePod, 1}, podWithRank{runningReadyNow, 1}}, - {podWithRank{runningReadyNow, 2}, podWithRank{runningReadyNoLastTransitionTimePod, 1}}, - {podWithRank{runningReadyNow, 1}, podWithRank{runningReadyThen, 1}}, - {podWithRank{runningReadyNow, 2}, podWithRank{runningReadyThen, 1}}, - {podWithRank{runningReadyNowHighRestarts, 1}, podWithRank{runningReadyNow, 1}}, - {podWithRank{runningReadyNow, 2}, 
podWithRank{runningReadyNowHighRestarts, 1}}, - {podWithRank{runningReadyNow, 1}, podWithRank{runningReadyNowCreatedThen, 1}}, - {podWithRank{runningReadyNowCreatedThen, 2}, podWithRank{runningReadyNow, 1}}, - } - for _, test := range inequalityTests { - podsWithRanks := ActivePodsWithRanks{ - Pods: []*v1.Pod{test.lesser.pod, test.greater.pod}, - Rank: []int{test.lesser.rank, test.greater.rank}, - } - if !podsWithRanks.Less(0, 1) { - t.Errorf("expected pod %q with rank %v to be less than %q with rank %v", podsWithRanks.Pods[0].Name, podsWithRanks.Rank[0], podsWithRanks.Pods[1].Name, podsWithRanks.Rank[1]) - } - if podsWithRanks.Less(1, 0) { - t.Errorf("expected pod %q with rank %v not to be less than %v with rank %v", podsWithRanks.Pods[1].Name, podsWithRanks.Rank[1], podsWithRanks.Pods[0].Name, podsWithRanks.Rank[0]) - } - } -} - func TestActiveReplicaSetsFiltering(t *testing.T) { var replicaSets []*apps.ReplicaSet replicaSets = append(replicaSets, newReplicaSet("zero", 0)) diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD index 00da56d365eb..c3c879f8b34b 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/BUILD @@ -24,7 +24,6 @@ go_library( "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/labels:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library", - "//staging/src/k8s.io/apimachinery/pkg/types:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library", "//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library", "//staging/src/k8s.io/client-go/informers/apps/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go index 1940062a61d0..b61330afd579 100644 
--- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set.go @@ -41,7 +41,6 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/labels" "k8s.io/apimachinery/pkg/runtime/schema" - "k8s.io/apimachinery/pkg/types" utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apimachinery/pkg/util/wait" appsinformers "k8s.io/client-go/informers/apps/v1" @@ -194,43 +193,6 @@ func (rsc *ReplicaSetController) Run(workers int, stopCh <-chan struct{}) { <-stopCh } -// getReplicaSetsWithSameController returns a list of ReplicaSets with the same -// owner as the given ReplicaSet. -func (rsc *ReplicaSetController) getReplicaSetsWithSameController(rs *apps.ReplicaSet) []*apps.ReplicaSet { - controllerRef := metav1.GetControllerOf(rs) - if controllerRef == nil { - utilruntime.HandleError(fmt.Errorf("ReplicaSet has no controller: %v", rs)) - return nil - } - - allRSs, err := rsc.rsLister.ReplicaSets(rs.Namespace).List(labels.Everything()) - if err != nil { - utilruntime.HandleError(err) - return nil - } - - var relatedRSs []*apps.ReplicaSet - for _, r := range allRSs { - if ref := metav1.GetControllerOf(r); ref != nil && ref.UID == controllerRef.UID { - relatedRSs = append(relatedRSs, r) - } - } - - if klog.V(2) { - var related string - if len(relatedRSs) > 0 { - var relatedNames []string - for _, r := range relatedRSs { - relatedNames = append(relatedNames, r.Name) - } - related = ": " + strings.Join(relatedNames, ", ") - } - klog.Infof("Found %d related %vs for %v %s/%s%s", len(relatedRSs), rsc.Kind, rsc.Kind, rs.Namespace, rs.Name, related) - } - - return relatedRSs -} - // getPodReplicaSets returns a list of ReplicaSets matching the given pod. 
func (rsc *ReplicaSetController) getPodReplicaSets(pod *v1.Pod) []*apps.ReplicaSet { rss, err := rsc.rsLister.GetPodReplicaSets(pod) @@ -553,11 +515,8 @@ func (rsc *ReplicaSetController) manageReplicas(filteredPods []*v1.Pod, rs *apps } klog.V(2).Infof("Too many replicas for %v %s/%s, need %d, deleting %d", rsc.Kind, rs.Namespace, rs.Name, *(rs.Spec.Replicas), diff) - relatedPods, err := rsc.getIndirectlyRelatedPods(rs) - utilruntime.HandleError(err) - // Choose which Pods to delete, preferring those in earlier phases of startup. - podsToDelete := getPodsToDelete(filteredPods, relatedPods, diff) + podsToDelete := getPodsToDelete(filteredPods, diff) // Snapshot the UIDs (ns/name) of the pods we're expecting to see // deleted, so we know to record their expectations exactly once either @@ -722,67 +681,18 @@ func slowStartBatch(count int, initialBatchSize int, fn func() error) (int, erro return successes, nil } -// getIndirectlyRelatedPods returns all pods that are owned by any ReplicaSet -// that is owned by the given ReplicaSet's owner. 
-func (rsc *ReplicaSetController) getIndirectlyRelatedPods(rs *apps.ReplicaSet) ([]*v1.Pod, error) { - var relatedPods []*v1.Pod - seen := make(map[types.UID]*apps.ReplicaSet) - for _, relatedRS := range rsc.getReplicaSetsWithSameController(rs) { - selector, err := metav1.LabelSelectorAsSelector(relatedRS.Spec.Selector) - if err != nil { - return nil, err - } - pods, err := rsc.podLister.Pods(relatedRS.Namespace).List(selector) - if err != nil { - return nil, err - } - for _, pod := range pods { - if otherRS, found := seen[pod.UID]; found { - klog.V(5).Infof("Pod %s/%s is owned by both %v %s/%s and %v %s/%s", pod.Namespace, pod.Name, rsc.Kind, otherRS.Namespace, otherRS.Name, rsc.Kind, relatedRS.Namespace, relatedRS.Name) - continue - } - seen[pod.UID] = relatedRS - relatedPods = append(relatedPods, pod) - } - } - if klog.V(4) { - var relatedNames []string - for _, related := range relatedPods { - relatedNames = append(relatedNames, related.Name) - } - klog.Infof("Found %d related pods for %v %s/%s: %v", len(relatedPods), rsc.Kind, rs.Namespace, rs.Name, strings.Join(relatedNames, ", ")) - } - return relatedPods, nil -} - -func getPodsToDelete(filteredPods, relatedPods []*v1.Pod, diff int) []*v1.Pod { +func getPodsToDelete(filteredPods []*v1.Pod, diff int) []*v1.Pod { // No need to sort pods if we are about to delete all of them. // diff will always be <= len(filteredPods), so not need to handle > case. if diff < len(filteredPods) { - podsWithRanks := getPodsRankedByRelatedPodsOnSameNode(filteredPods, relatedPods) - sort.Sort(podsWithRanks) + // Sort the pods in the order such that not-ready < ready, unscheduled + // < scheduled, and pending < running. This ensures that we delete pods + // in the earlier stages whenever possible. 
+ sort.Sort(controller.ActivePods(filteredPods)) } return filteredPods[:diff] } -// getPodsRankedByRelatedPodsOnSameNode returns an ActivePodsWithRanks value -// that wraps podsToRank and assigns each pod a rank equal to the number of -// active pods in relatedPods that are colocated on the same node with the pod. -// relatedPods generally should be a superset of podsToRank. -func getPodsRankedByRelatedPodsOnSameNode(podsToRank, relatedPods []*v1.Pod) controller.ActivePodsWithRanks { - podsOnNode := make(map[string]int) - for _, pod := range relatedPods { - if controller.IsPodActive(pod) { - podsOnNode[pod.Spec.NodeName]++ - } - } - ranks := make([]int, len(podsToRank)) - for i, pod := range podsToRank { - ranks[i] = podsOnNode[pod.Spec.NodeName] - } - return controller.ActivePodsWithRanks{Pods: podsToRank, Rank: ranks} -} - func getPodKeys(pods []*v1.Pod) []string { podKeys := make([]string, 0, len(pods)) for _, pod := range pods { diff --git a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go index 4f6dafe3ebaa..99847bc7a92d 100644 --- a/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go +++ b/vendor/k8s.io/kubernetes/pkg/controller/replicaset/replica_set_test.go @@ -23,7 +23,6 @@ import ( "net/http/httptest" "net/url" "reflect" - "sort" "strings" "sync" "testing" @@ -81,16 +80,12 @@ func skipListerFunc(verb string, url url.URL) bool { var alwaysReady = func() bool { return true } func newReplicaSet(replicas int, selectorMap map[string]string) *apps.ReplicaSet { - isController := true rs := &apps.ReplicaSet{ TypeMeta: metav1.TypeMeta{APIVersion: "v1", Kind: "ReplicaSet"}, ObjectMeta: metav1.ObjectMeta{ - UID: uuid.NewUUID(), - Name: "foobar", - Namespace: metav1.NamespaceDefault, - OwnerReferences: []metav1.OwnerReference{ - {UID: "123", Controller: &isController}, - }, + UID: uuid.NewUUID(), + Name: "foobar", + Namespace: metav1.NamespaceDefault, 
ResourceVersion: "18", }, Spec: apps.ReplicaSetSpec{ @@ -141,7 +136,6 @@ func newPod(name string, rs *apps.ReplicaSet, status v1.PodPhase, lastTransition } return &v1.Pod{ ObjectMeta: metav1.ObjectMeta{ - UID: uuid.NewUUID(), Name: name, Namespace: rs.Namespace, Labels: rs.Spec.Selector.MatchLabels, @@ -348,68 +342,6 @@ func TestSyncReplicaSetDormancy(t *testing.T) { fakeHandler.ValidateRequestCount(t, 2) } -func TestGetReplicaSetsWithSameController(t *testing.T) { - someRS := newReplicaSet(1, map[string]string{"foo": "bar"}) - someRS.Name = "rs1" - relatedRS := newReplicaSet(1, map[string]string{"foo": "baz"}) - relatedRS.Name = "rs2" - unrelatedRS := newReplicaSet(1, map[string]string{"foo": "quux"}) - unrelatedRS.Name = "rs3" - unrelatedRS.ObjectMeta.OwnerReferences[0].UID = "456" - pendingDeletionRS := newReplicaSet(1, map[string]string{"foo": "xyzzy"}) - pendingDeletionRS.Name = "rs4" - pendingDeletionRS.ObjectMeta.OwnerReferences[0].UID = "789" - now := metav1.Now() - pendingDeletionRS.DeletionTimestamp = &now - - stopCh := make(chan struct{}) - defer close(stopCh) - manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas) - testCases := []struct { - name string - rss []*apps.ReplicaSet - rs *apps.ReplicaSet - expectedRSs []*apps.ReplicaSet - }{ - { - name: "expect to get back a ReplicaSet that is pending deletion", - rss: []*apps.ReplicaSet{pendingDeletionRS, unrelatedRS}, - rs: pendingDeletionRS, - expectedRSs: []*apps.ReplicaSet{pendingDeletionRS}, - }, - { - name: "expect to get back only the given ReplicaSet if there is no related ReplicaSet", - rss: []*apps.ReplicaSet{someRS, unrelatedRS}, - rs: someRS, - expectedRSs: []*apps.ReplicaSet{someRS}, - }, - { - name: "expect to get back the given ReplicaSet as well as any related ReplicaSet but not an unrelated ReplicaSet", 
- rss: []*apps.ReplicaSet{someRS, relatedRS, unrelatedRS}, - rs: someRS, - expectedRSs: []*apps.ReplicaSet{someRS, relatedRS}, - }, - } - for _, c := range testCases { - for _, r := range c.rss { - informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r) - } - actualRSs := manager.getReplicaSetsWithSameController(c.rs) - var actualRSNames, expectedRSNames []string - for _, r := range actualRSs { - actualRSNames = append(actualRSNames, r.Name) - } - for _, r := range c.expectedRSs { - expectedRSNames = append(expectedRSNames, r.Name) - } - sort.Strings(actualRSNames) - sort.Strings(expectedRSNames) - if !reflect.DeepEqual(actualRSNames, expectedRSNames) { - t.Errorf("Got [%s]; expected [%s]", strings.Join(actualRSNames, ", "), strings.Join(expectedRSNames, ", ")) - } - } -} - func TestPodControllerLookup(t *testing.T) { stopCh := make(chan struct{}) defer close(stopCh) @@ -476,87 +408,6 @@ func TestPodControllerLookup(t *testing.T) { } } -// byName sorts pods by their names. -type byName []*v1.Pod - -func (pods byName) Len() int { return len(pods) } -func (pods byName) Swap(i, j int) { pods[i], pods[j] = pods[j], pods[i] } -func (pods byName) Less(i, j int) bool { return pods[i].Name < pods[j].Name } - -func TestRelatedPodsLookup(t *testing.T) { - someRS := newReplicaSet(1, map[string]string{"foo": "bar"}) - someRS.Name = "foo1" - relatedRS := newReplicaSet(1, map[string]string{"foo": "baz"}) - relatedRS.Name = "foo2" - unrelatedRS := newReplicaSet(1, map[string]string{"foo": "quux"}) - unrelatedRS.Name = "bar1" - unrelatedRS.ObjectMeta.OwnerReferences[0].UID = "456" - pendingDeletionRS := newReplicaSet(1, map[string]string{"foo": "xyzzy"}) - pendingDeletionRS.Name = "foo3" - pendingDeletionRS.ObjectMeta.OwnerReferences[0].UID = "789" - now := metav1.Now() - pendingDeletionRS.DeletionTimestamp = &now - pod1 := newPod("pod1", someRS, v1.PodRunning, nil, true) - pod2 := newPod("pod2", someRS, v1.PodRunning, nil, true) - pod3 := newPod("pod3", relatedRS, 
v1.PodRunning, nil, true) - pod4 := newPod("pod4", unrelatedRS, v1.PodRunning, nil, true) - - stopCh := make(chan struct{}) - defer close(stopCh) - manager, informers := testNewReplicaSetControllerFromClient(clientset.NewForConfigOrDie(&restclient.Config{Host: "", ContentConfig: restclient.ContentConfig{GroupVersion: &schema.GroupVersion{Group: "", Version: "v1"}}}), stopCh, BurstReplicas) - testCases := []struct { - name string - rss []*apps.ReplicaSet - pods []*v1.Pod - rs *apps.ReplicaSet - expectedPodNames []string - }{ - { - name: "expect to get a pod even if its owning ReplicaSet is pending deletion", - rss: []*apps.ReplicaSet{pendingDeletionRS, unrelatedRS}, - rs: pendingDeletionRS, - pods: []*v1.Pod{newPod("pod", pendingDeletionRS, v1.PodRunning, nil, true)}, - expectedPodNames: []string{"pod"}, - }, - { - name: "expect to get only the ReplicaSet's own pods if there is no related ReplicaSet", - rss: []*apps.ReplicaSet{someRS, unrelatedRS}, - rs: someRS, - pods: []*v1.Pod{pod1, pod2, pod4}, - expectedPodNames: []string{"pod1", "pod2"}, - }, - { - name: "expect to get own pods as well as any related ReplicaSet's but not an unrelated ReplicaSet's", - rss: []*apps.ReplicaSet{someRS, relatedRS, unrelatedRS}, - rs: someRS, - pods: []*v1.Pod{pod1, pod2, pod3, pod4}, - expectedPodNames: []string{"pod1", "pod2", "pod3"}, - }, - } - for _, c := range testCases { - for _, r := range c.rss { - informers.Apps().V1().ReplicaSets().Informer().GetIndexer().Add(r) - } - for _, pod := range c.pods { - informers.Core().V1().Pods().Informer().GetIndexer().Add(pod) - manager.addPod(pod) - } - actualPods, err := manager.getIndirectlyRelatedPods(c.rs) - if err != nil { - t.Errorf("Unexpected error from getIndirectlyRelatedPods: %v", err) - } - var actualPodNames []string - for _, pod := range actualPods { - actualPodNames = append(actualPodNames, pod.Name) - } - sort.Strings(actualPodNames) - sort.Strings(c.expectedPodNames) - if !reflect.DeepEqual(actualPodNames, 
c.expectedPodNames) { - t.Errorf("Got [%s]; expected [%s]", strings.Join(actualPodNames, ", "), strings.Join(c.expectedPodNames, ", ")) - } - } -} - func TestWatchControllers(t *testing.T) { fakeWatch := watch.NewFake() client := fake.NewSimpleClientset() @@ -1594,19 +1445,10 @@ func TestGetPodsToDelete(t *testing.T) { Status: v1.ConditionFalse, }, } - // a scheduled, running, ready pod on fake-node-1 - scheduledRunningReadyPodOnNode1 := newPod("scheduled-running-ready-pod-on-node-1", rs, v1.PodRunning, nil, true) - scheduledRunningReadyPodOnNode1.Spec.NodeName = "fake-node-1" - scheduledRunningReadyPodOnNode1.Status.Conditions = []v1.PodCondition{ - { - Type: v1.PodReady, - Status: v1.ConditionTrue, - }, - } - // a scheduled, running, ready pod on fake-node-2 - scheduledRunningReadyPodOnNode2 := newPod("scheduled-running-ready-pod-on-node-2", rs, v1.PodRunning, nil, true) - scheduledRunningReadyPodOnNode2.Spec.NodeName = "fake-node-2" - scheduledRunningReadyPodOnNode2.Status.Conditions = []v1.PodCondition{ + // a scheduled, running, ready pod + scheduledRunningReadyPod := newPod("scheduled-running-ready-pod", rs, v1.PodRunning, nil, true) + scheduledRunningReadyPod.Spec.NodeName = "fake-node" + scheduledRunningReadyPod.Status.Conditions = []v1.PodCondition{ { Type: v1.PodReady, Status: v1.ConditionTrue, @@ -1614,10 +1456,8 @@ func TestGetPodsToDelete(t *testing.T) { } tests := []struct { - name string - pods []*v1.Pod - // related defaults to pods if nil. 
- related []*v1.Pod + name string + pods []*v1.Pod diff int expectedPodsToDelete []*v1.Pod }{ @@ -1625,136 +1465,93 @@ func TestGetPodsToDelete(t *testing.T) { // an unscheduled, pending pod // a scheduled, pending pod // a scheduled, running, not-ready pod - // a scheduled, running, ready pod on same node as a related pod - // a scheduled, running, ready pod not on node with related pods + // a scheduled, running, ready pod // Note that a pending pod cannot be ready { - name: "len(pods) = 0 (i.e., diff = 0 too)", - pods: []*v1.Pod{}, - diff: 0, - expectedPodsToDelete: []*v1.Pod{}, - }, - { - name: "diff = len(pods)", - pods: []*v1.Pod{ - scheduledRunningNotReadyPod, - scheduledRunningReadyPodOnNode1, - }, - diff: 2, - expectedPodsToDelete: []*v1.Pod{scheduledRunningNotReadyPod, scheduledRunningReadyPodOnNode1}, + "len(pods) = 0 (i.e., diff = 0 too)", + []*v1.Pod{}, + 0, + []*v1.Pod{}, }, { - name: "diff < len(pods)", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, + "diff = len(pods)", + []*v1.Pod{ scheduledRunningNotReadyPod, + scheduledRunningReadyPod, }, - diff: 1, - expectedPodsToDelete: []*v1.Pod{scheduledRunningNotReadyPod}, + 2, + []*v1.Pod{scheduledRunningNotReadyPod, scheduledRunningReadyPod}, }, { - name: "various pod phases and conditions, diff = len(pods)", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, - scheduledRunningNotReadyPod, - scheduledPendingPod, - unscheduledPendingPod, - }, - diff: 6, - expectedPodsToDelete: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, + "diff < len(pods)", + []*v1.Pod{ + scheduledRunningReadyPod, scheduledRunningNotReadyPod, - scheduledPendingPod, - unscheduledPendingPod, }, + 1, + []*v1.Pod{scheduledRunningNotReadyPod}, }, { - name: "various pod phases and conditions, diff = len(pods), relatedPods empty", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - 
scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, + "various pod phases and conditions, diff = len(pods)", + []*v1.Pod{ + scheduledRunningReadyPod, scheduledRunningNotReadyPod, scheduledPendingPod, unscheduledPendingPod, }, - related: []*v1.Pod{}, - diff: 6, - expectedPodsToDelete: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, + 4, + []*v1.Pod{ + scheduledRunningReadyPod, scheduledRunningNotReadyPod, scheduledPendingPod, unscheduledPendingPod, }, }, { - name: "scheduled vs unscheduled, diff < len(pods)", - pods: []*v1.Pod{ + "scheduled vs unscheduled, diff < len(pods)", + []*v1.Pod{ scheduledPendingPod, unscheduledPendingPod, }, - diff: 1, - expectedPodsToDelete: []*v1.Pod{ + 1, + []*v1.Pod{ unscheduledPendingPod, }, }, { - name: "ready vs not-ready, diff < len(pods)", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, + "ready vs not-ready, diff < len(pods)", + []*v1.Pod{ + scheduledRunningReadyPod, scheduledRunningNotReadyPod, scheduledRunningNotReadyPod, }, - diff: 2, - expectedPodsToDelete: []*v1.Pod{ + 2, + []*v1.Pod{ scheduledRunningNotReadyPod, scheduledRunningNotReadyPod, }, }, { - name: "ready and colocated with another ready pod vs not colocated, diff < len(pods)", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, - }, - related: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - scheduledRunningReadyPodOnNode2, - scheduledRunningReadyPodOnNode2, - }, - diff: 1, - expectedPodsToDelete: []*v1.Pod{ - scheduledRunningReadyPodOnNode2, - }, - }, - { - name: "pending vs running, diff < len(pods)", - pods: []*v1.Pod{ + "pending vs running, diff < len(pods)", + []*v1.Pod{ scheduledPendingPod, scheduledRunningNotReadyPod, }, - diff: 1, - expectedPodsToDelete: []*v1.Pod{ + 1, + []*v1.Pod{ scheduledPendingPod, }, }, { - name: "various pod phases and conditions, diff < len(pods)", - pods: []*v1.Pod{ - scheduledRunningReadyPodOnNode1, - 
scheduledRunningReadyPodOnNode2, + "various pod phases and conditions, diff < len(pods)", + []*v1.Pod{ + scheduledRunningReadyPod, scheduledRunningNotReadyPod, scheduledPendingPod, unscheduledPendingPod, }, - diff: 3, - expectedPodsToDelete: []*v1.Pod{ + 3, + []*v1.Pod{ unscheduledPendingPod, scheduledPendingPod, scheduledRunningNotReadyPod, @@ -1763,11 +1560,7 @@ func TestGetPodsToDelete(t *testing.T) { } for _, test := range tests { - related := test.related - if related == nil { - related = test.pods - } - podsToDelete := getPodsToDelete(test.pods, related, test.diff) + podsToDelete := getPodsToDelete(test.pods, test.diff) if len(podsToDelete) != len(test.expectedPodsToDelete) { t.Errorf("%s: unexpected pods to delete, expected %v, got %v", test.name, test.expectedPodsToDelete, podsToDelete) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go index c5fd99019da6..54a5394503fa 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/cm/cpumanager/topology/topology.go @@ -293,7 +293,7 @@ func GetNUMANodeInfo() (NUMANodeInfo, error) { // nil NUMANodeInfo, indicating that no NUMA information is available // on this machine. This should implicitly be interpreted as having a // single NUMA node with id 0 for all CPUs. 
- nodelist, err := ioutil.ReadFile("/sys/devices/system/node/possible") + nodelist, err := ioutil.ReadFile("/sys/devices/system/node/online") if err != nil { return nil, nil } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go index 2df59c6b05db..76b2fecd11b8 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming.go @@ -42,6 +42,8 @@ type streamingRuntime struct { var _ streaming.Runtime = &streamingRuntime{} +const maxMsgSize = 1024 * 1024 * 16 + func (r *streamingRuntime) Exec(containerID string, cmd []string, in io.Reader, out, err io.WriteCloser, tty bool, resize <-chan remotecommand.TerminalSize) error { return r.exec(containerID, cmd, in, out, err, tty, resize, 0) } @@ -78,8 +80,8 @@ func (ds *dockerService) ExecSync(_ context.Context, req *runtimeapi.ExecSyncReq var stdoutBuffer, stderrBuffer bytes.Buffer err := ds.streamingRuntime.exec(req.ContainerId, req.Cmd, nil, // in - ioutils.WriteCloserWrapper(&stdoutBuffer), - ioutils.WriteCloserWrapper(&stderrBuffer), + ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stdoutBuffer, maxMsgSize)), + ioutils.WriteCloserWrapper(ioutils.LimitWriter(&stderrBuffer, maxMsgSize)), false, // tty nil, // resize timeout) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go index 3d78d183bc81..151bd86066cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/docker_streaming_windows.go @@ -28,7 +28,7 @@ import ( func (r *streamingRuntime) portForward(podSandboxID string, port int32, stream io.ReadWriteCloser) error { stderr := new(bytes.Buffer) - err := r.exec(podSandboxID, []string{"wincat.exe", "localhost", fmt.Sprint(port)}, stream, 
stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0) + err := r.exec(podSandboxID, []string{"wincat.exe", "127.0.0.1", fmt.Sprint(port)}, stream, stream, ioutils.WriteCloserWrapper(stderr), false, nil, 0) if err != nil { return fmt.Errorf("%v: %s", err, stderr.String()) } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go index 8e8dc4accac0..0575a434df86 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux.go @@ -80,10 +80,7 @@ const ( "ipam": { "type": "host-local", "ranges": [%s], - "routes": [ - { "dst": "%s" }, - { "dst": "%s" } - ] + "routes": [%s] } }` ) @@ -283,7 +280,7 @@ func (plugin *kubenetNetworkPlugin) Event(name string, details map[string]interf //setup hairpinMode setHairpin := plugin.hairpinMode == kubeletconfig.HairpinVeth - json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), zeroCIDRv4, zeroCIDRv6) + json := fmt.Sprintf(NET_CONFIG_TEMPLATE, BridgeName, plugin.mtu, network.DefaultInterfaceName, setHairpin, plugin.getRangesConfig(), plugin.getRoutesConfig()) klog.V(4).Infof("CNI network config set to %v", json) plugin.netConfig, err = libcni.ConfFromBytes([]byte(json)) if err != nil { @@ -844,6 +841,29 @@ func (plugin *kubenetNetworkPlugin) getRangesConfig() string { return strings.Join(ranges[:], ",") } +// given a n cidrs assigned to nodes, +// create bridge routes configuration that conforms to them +func (plugin *kubenetNetworkPlugin) getRoutesConfig() string { + var ( + routes []string + hasV4, hasV6 bool + ) + for _, thisCIDR := range plugin.podCIDRs { + if thisCIDR.IP.To4() != nil { + hasV4 = true + } else { + hasV6 = true + } + } + if hasV4 { + routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv4)) 
+ } + if hasV6 { + routes = append(routes, fmt.Sprintf(`{"dst": "%s"}`, zeroCIDRv6)) + } + return strings.Join(routes, ",") +} + func (plugin *kubenetNetworkPlugin) addPodIP(id kubecontainer.ContainerID, ip string) { plugin.mu.Lock() defer plugin.mu.Unlock() diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go index bfe5eadf1709..2ef3475bff3c 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/dockershim/network/kubenet/kubenet_linux_test.go @@ -18,6 +18,7 @@ package kubenet import ( "fmt" + "net" "strings" "testing" @@ -312,4 +313,33 @@ func TestTearDownWithoutRuntime(t *testing.T) { } } +func TestGetRoutesConifg(t *testing.T) { + for _, test := range []struct { + cidrs []string + routes string + }{ + { + cidrs: []string{"10.0.0.1/24"}, + routes: `{"dst": "0.0.0.0/0"}`, + }, + { + cidrs: []string{"2001:4860:4860::8888/32"}, + routes: `{"dst": "::/0"}`, + }, + { + cidrs: []string{"2001:4860:4860::8888/32", "10.0.0.1/24"}, + routes: `{"dst": "0.0.0.0/0"},{"dst": "::/0"}`, + }, + } { + var cidrs []*net.IPNet + for _, c := range test.cidrs { + _, cidr, err := net.ParseCIDR(c) + assert.NoError(t, err) + cidrs = append(cidrs, cidr) + } + fakeKubenet := &kubenetNetworkPlugin{podCIDRs: cidrs} + assert.Equal(t, test.routes, fakeKubenet.getRoutesConfig()) + } +} + //TODO: add unit test for each implementation of network plugin interface diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go index 4f2bf5385e34..7dfa78829569 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/metrics/metrics.go @@ -46,7 +46,6 @@ const ( PLEGRelistIntervalKey = "pleg_relist_interval_seconds" EvictionsKey = "evictions" EvictionStatsAgeKey = 
"eviction_stats_age_seconds" - PreemptionsKey = "preemptions" DeprecatedPodWorkerLatencyKey = "pod_worker_latency_microseconds" DeprecatedPodStartLatencyKey = "pod_start_latency_microseconds" DeprecatedCgroupManagerOperationsKey = "cgroup_manager_latency_microseconds" @@ -244,18 +243,6 @@ var ( }, []string{"eviction_signal"}, ) - // Preemptions is a Counter that tracks the cumulative number of pod preemptions initiated by the kubelet. - // Broken down by preemption signal. A preemption is only recorded for one resource, the sum of all signals - // is the number of preemptions on the given node. - Preemptions = metrics.NewCounterVec( - &metrics.CounterOpts{ - Subsystem: KubeletSubsystem, - Name: PreemptionsKey, - Help: "Cumulative number of pod preemptions by preemption resource", - StabilityLevel: metrics.ALPHA, - }, - []string{"preemption_signal"}, - ) // DevicePluginRegistrationCount is a Counter that tracks the cumulative number of device plugin registrations. // Broken down by resource name. 
DevicePluginRegistrationCount = metrics.NewCounterVec( @@ -516,7 +503,6 @@ func Register(containerCache kubecontainer.RuntimeCache, collectors ...prometheu legacyregistry.MustRegister(RuntimeOperationsErrors) legacyregistry.MustRegister(Evictions) legacyregistry.MustRegister(EvictionStatsAge) - legacyregistry.MustRegister(Preemptions) legacyregistry.MustRegister(DevicePluginRegistrationCount) legacyregistry.MustRegister(DevicePluginAllocationDuration) legacyregistry.MustRegister(DeprecatedPodWorkerLatency) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go index 81eeff357bb2..86e3333a68ce 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/preemption/preemption.go @@ -28,7 +28,6 @@ import ( "k8s.io/kubernetes/pkg/kubelet/events" "k8s.io/kubernetes/pkg/kubelet/eviction" "k8s.io/kubernetes/pkg/kubelet/lifecycle" - "k8s.io/kubernetes/pkg/kubelet/metrics" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/scheduler/algorithm/predicates" @@ -112,11 +111,6 @@ func (c *CriticalPodAdmissionHandler) evictPodsToFreeRequests(admitPod *v1.Pod, // In future syncPod loops, the kubelet will retry the pod deletion steps that it was stuck on. 
continue } - if len(insufficientResources) > 0 { - metrics.Preemptions.WithLabelValues(insufficientResources[0].resourceName.String()).Inc() - } else { - metrics.Preemptions.WithLabelValues("").Inc() - } klog.Infof("preemption: pod %s evicted successfully", format.Pod(pod)) } return nil diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD index 90ccc7d0bbe8..a9d2e14b8eba 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/BUILD @@ -58,6 +58,7 @@ go_test( "//pkg/kubelet/prober/results:go_default_library", "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/status/testing:go_default_library", + "//pkg/kubelet/util/ioutils:go_default_library", "//pkg/probe:go_default_library", "//pkg/probe/exec:go_default_library", "//staging/src/k8s.io/api/core/v1:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go index 173a23e77f29..dd94f1705b23 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober.go @@ -252,63 +252,68 @@ func formatURL(scheme string, host string, port int, path string) *url.URL { type execInContainer struct { // run executes a command in a container. Combined stdout and stderr output is always returned. An // error is returned if one occurred. 
- run func() ([]byte, error) + run func() ([]byte, error) + writer io.Writer } func (pb *prober) newExecInContainer(container v1.Container, containerID kubecontainer.ContainerID, cmd []string, timeout time.Duration) exec.Cmd { - return execInContainer{func() ([]byte, error) { + return &execInContainer{run: func() ([]byte, error) { return pb.runner.RunInContainer(containerID, cmd, timeout) }} } -func (eic execInContainer) Run() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Run() error { + return nil } -func (eic execInContainer) CombinedOutput() ([]byte, error) { +func (eic *execInContainer) CombinedOutput() ([]byte, error) { return eic.run() } -func (eic execInContainer) Output() ([]byte, error) { +func (eic *execInContainer) Output() ([]byte, error) { return nil, fmt.Errorf("unimplemented") } -func (eic execInContainer) SetDir(dir string) { +func (eic *execInContainer) SetDir(dir string) { //unimplemented } -func (eic execInContainer) SetStdin(in io.Reader) { +func (eic *execInContainer) SetStdin(in io.Reader) { //unimplemented } -func (eic execInContainer) SetStdout(out io.Writer) { - //unimplemented +func (eic *execInContainer) SetStdout(out io.Writer) { + eic.writer = out } -func (eic execInContainer) SetStderr(out io.Writer) { - //unimplemented +func (eic *execInContainer) SetStderr(out io.Writer) { + eic.writer = out } -func (eic execInContainer) SetEnv(env []string) { +func (eic *execInContainer) SetEnv(env []string) { //unimplemented } -func (eic execInContainer) Stop() { +func (eic *execInContainer) Stop() { //unimplemented } -func (eic execInContainer) Start() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Start() error { + data, err := eic.run() + if eic.writer != nil { + eic.writer.Write(data) + } + return err } -func (eic execInContainer) Wait() error { - return fmt.Errorf("unimplemented") +func (eic *execInContainer) Wait() error { + return nil } -func (eic execInContainer) StdoutPipe() 
(io.ReadCloser, error) { +func (eic *execInContainer) StdoutPipe() (io.ReadCloser, error) { return nil, fmt.Errorf("unimplemented") } -func (eic execInContainer) StderrPipe() (io.ReadCloser, error) { +func (eic *execInContainer) StderrPipe() (io.ReadCloser, error) { return nil, fmt.Errorf("unimplemented") } diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go index dab63587059e..41b4ddbc9712 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/prober/prober_test.go @@ -17,10 +17,12 @@ limitations under the License. package prober import ( + "bytes" "errors" "fmt" "net/http" "reflect" + "strings" "testing" "k8s.io/api/core/v1" @@ -29,6 +31,7 @@ import ( kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" containertest "k8s.io/kubernetes/pkg/kubelet/container/testing" "k8s.io/kubernetes/pkg/kubelet/prober/results" + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/probe" execprobe "k8s.io/kubernetes/pkg/probe/exec" ) @@ -329,23 +332,38 @@ func TestProbe(t *testing.T) { } func TestNewExecInContainer(t *testing.T) { + limit := 1024 + tenKilobyte := strings.Repeat("logs-123", 128*10) + tests := []struct { - name string - err error + name string + stdout string + expected string + err error }{ { - name: "no error", - err: nil, + name: "no error", + stdout: "foo", + expected: "foo", + err: nil, + }, + { + name: "no error", + stdout: tenKilobyte, + expected: tenKilobyte[0:limit], + err: nil, }, { - name: "error - make sure we get output", - err: errors.New("bad"), + name: "error - make sure we get output", + stdout: "foo", + expected: "foo", + err: errors.New("bad"), }, } for _, test := range tests { runner := &containertest.FakeContainerCommandRunner{ - Stdout: "foo", + Stdout: test.stdout, Err: test.err, } prober := &prober{ @@ -357,7 +375,16 @@ func TestNewExecInContainer(t *testing.T) { cmd := 
[]string{"/foo", "bar"} exec := prober.newExecInContainer(container, containerID, cmd, 0) - actualOutput, err := exec.CombinedOutput() + var dataBuffer bytes.Buffer + writer := ioutils.LimitWriter(&dataBuffer, int64(limit)) + exec.SetStderr(writer) + exec.SetStdout(writer) + err := exec.Start() + if err == nil { + err = exec.Wait() + } + actualOutput := dataBuffer.Bytes() + if e, a := containerID, runner.ContainerID; e != a { t.Errorf("%s: container id: expected %v, got %v", test.name, e, a) } @@ -365,7 +392,7 @@ func TestNewExecInContainer(t *testing.T) { t.Errorf("%s: cmd: expected %v, got %v", test.name, e, a) } // this isn't 100% foolproof as a bug in a real ContainerCommandRunner where it fails to copy to stdout/stderr wouldn't be caught by this test - if e, a := "foo", string(actualOutput); e != a { + if e, a := test.expected, string(actualOutput); e != a { t.Errorf("%s: output: expected %q, got %q", test.name, e, a) } if e, a := fmt.Sprintf("%v", test.err), fmt.Sprintf("%v", err); e != a { diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD index e84deec931ec..43bd26260847 100644 --- a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/BUILD @@ -1,9 +1,6 @@ package(default_visibility = ["//visibility:public"]) -load( - "@io_bazel_rules_go//go:def.bzl", - "go_library", -) +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", @@ -23,3 +20,10 @@ filegroup( srcs = [":package-srcs"], tags = ["automanaged"], ) + +go_test( + name = "go_default_test", + srcs = ["ioutils_test.go"], + embed = [":go_default_library"], + deps = ["//vendor/github.com/stretchr/testify/assert:go_default_library"], +) diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go index 42f1998c794e..1b2b5a6d5ddb 100644 --- 
a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils.go @@ -35,3 +35,36 @@ func (w *writeCloserWrapper) Close() error { func WriteCloserWrapper(w io.Writer) io.WriteCloser { return &writeCloserWrapper{w} } + +// LimitWriter is a copy of the standard library ioutils.LimitReader, +// applied to the writer interface. +// LimitWriter returns a Writer that writes to w +// but stops with EOF after n bytes. +// The underlying implementation is a *LimitedWriter. +func LimitWriter(w io.Writer, n int64) io.Writer { return &LimitedWriter{w, n} } + +// A LimitedWriter writes to W but limits the amount of +// data returned to just N bytes. Each call to Write +// updates N to reflect the new amount remaining. +// Write returns EOF when N <= 0 or when the underlying W returns EOF. +type LimitedWriter struct { + W io.Writer // underlying writer + N int64 // max bytes remaining +} + +func (l *LimitedWriter) Write(p []byte) (n int, err error) { + if l.N <= 0 { + return 0, io.ErrShortWrite + } + truncated := false + if int64(len(p)) > l.N { + p = p[0:l.N] + truncated = true + } + n, err = l.W.Write(p) + l.N -= int64(n) + if err == nil && truncated { + err = io.ErrShortWrite + } + return +} diff --git a/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils_test.go b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils_test.go new file mode 100644 index 000000000000..524a4aed67d7 --- /dev/null +++ b/vendor/k8s.io/kubernetes/pkg/kubelet/util/ioutils/ioutils_test.go @@ -0,0 +1,93 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package ioutils + +import ( + "bytes" + "fmt" + "math/rand" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestLimitWriter(t *testing.T) { + r := rand.New(rand.NewSource(1234)) // Fixed source to prevent flakes. + + tests := []struct { + inputSize, limit, writeSize int64 + }{ + // Single write tests + {100, 101, 100}, + {100, 100, 100}, + {100, 99, 100}, + {1, 1, 1}, + {100, 10, 100}, + {100, 0, 100}, + {100, -1, 100}, + // Multi write tests + {100, 101, 10}, + {100, 100, 10}, + {100, 99, 10}, + {100, 10, 10}, + {100, 0, 10}, + {100, -1, 10}, + } + + for _, test := range tests { + t.Run(fmt.Sprintf("inputSize=%d limit=%d writes=%d", test.inputSize, test.limit, test.writeSize), func(t *testing.T) { + input := make([]byte, test.inputSize) + r.Read(input) + output := &bytes.Buffer{} + w := LimitWriter(output, test.limit) + + var ( + err error + written int64 + n int + ) + for written < test.inputSize && err == nil { + n, err = w.Write(input[written : written+test.writeSize]) + written += int64(n) + } + + expectWritten := bounded(0, test.inputSize, test.limit) + assert.EqualValues(t, expectWritten, written) + if expectWritten <= 0 { + assert.Empty(t, output) + } else { + assert.Equal(t, input[:expectWritten], output.Bytes()) + } + + if test.limit < test.inputSize { + assert.Error(t, err) + } else { + assert.NoError(t, err) + } + }) + } +} + +func bounded(min, val, max int64) int64 { + if max < val { + val = max + } + if val < min { + val = min + } + return val +} diff --git a/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD 
b/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD index 317dc0fc0cda..a22faf032c28 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD +++ b/vendor/k8s.io/kubernetes/pkg/probe/exec/BUILD @@ -11,6 +11,7 @@ go_library( srcs = ["exec.go"], importpath = "k8s.io/kubernetes/pkg/probe/exec", deps = [ + "//pkg/kubelet/util/ioutils:go_default_library", "//pkg/probe:go_default_library", "//vendor/k8s.io/klog:go_default_library", "//vendor/k8s.io/utils/exec:go_default_library", diff --git a/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go b/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go index a6ae523aa629..b8cfe0d2ad7e 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go +++ b/vendor/k8s.io/kubernetes/pkg/probe/exec/exec.go @@ -17,10 +17,17 @@ limitations under the License. package exec import ( + "bytes" + + "k8s.io/kubernetes/pkg/kubelet/util/ioutils" "k8s.io/kubernetes/pkg/probe" - "k8s.io/utils/exec" "k8s.io/klog" + "k8s.io/utils/exec" +) + +const ( + maxReadLength = 10 * 1 << 10 // 10KB ) // New creates a Prober. @@ -39,7 +46,17 @@ type execProber struct{} // from executing a command. Returns the Result status, command output, and // errors if any. 
func (pr execProber) Probe(e exec.Cmd) (probe.Result, string, error) { - data, err := e.CombinedOutput() + var dataBuffer bytes.Buffer + writer := ioutils.LimitWriter(&dataBuffer, maxReadLength) + + e.SetStderr(writer) + e.SetStdout(writer) + err := e.Start() + if err == nil { + err = e.Wait() + } + data := dataBuffer.Bytes() + klog.V(4).Infof("Exec probe response: %q", string(data)) if err != nil { exit, ok := err.(exec.ExitError) diff --git a/vendor/k8s.io/kubernetes/pkg/probe/exec/exec_test.go b/vendor/k8s.io/kubernetes/pkg/probe/exec/exec_test.go index 1d8eebb06343..300b3d8746cc 100644 --- a/vendor/k8s.io/kubernetes/pkg/probe/exec/exec_test.go +++ b/vendor/k8s.io/kubernetes/pkg/probe/exec/exec_test.go @@ -19,6 +19,7 @@ package exec import ( "fmt" "io" + "strings" "testing" "k8s.io/kubernetes/pkg/probe" @@ -28,6 +29,7 @@ type FakeCmd struct { out []byte stdout []byte err error + writer io.Writer } func (f *FakeCmd) Run() error { @@ -46,15 +48,25 @@ func (f *FakeCmd) SetDir(dir string) {} func (f *FakeCmd) SetStdin(in io.Reader) {} -func (f *FakeCmd) SetStdout(out io.Writer) {} +func (f *FakeCmd) SetStdout(out io.Writer) { + f.writer = out +} -func (f *FakeCmd) SetStderr(out io.Writer) {} +func (f *FakeCmd) SetStderr(out io.Writer) { + f.writer = out +} func (f *FakeCmd) SetEnv(env []string) {} func (f *FakeCmd) Stop() {} -func (f *FakeCmd) Start() error { return nil } +func (f *FakeCmd) Start() error { + if f.writer != nil { + f.writer.Write(f.out) + return f.err + } + return f.err +} func (f *FakeCmd) Wait() error { return nil } @@ -90,20 +102,26 @@ func (f *fakeExitError) ExitStatus() int { func TestExec(t *testing.T) { prober := New() + tenKilobyte := strings.Repeat("logs-123", 128*10) // 8*128*10=10240 = 10KB of text. + elevenKilobyte := strings.Repeat("logs-123", 8*128*11) // 8*128*11=11264 = 11KB of text. 
+ tests := []struct { expectedStatus probe.Result expectError bool + input string output string err error }{ // Ok - {probe.Success, false, "OK", nil}, + {probe.Success, false, "OK", "OK", nil}, // Ok - {probe.Success, false, "OK", &fakeExitError{true, 0}}, + {probe.Success, false, "OK", "OK", &fakeExitError{true, 0}}, + // Ok - truncated output + {probe.Success, false, elevenKilobyte, tenKilobyte, nil}, // Run returns error - {probe.Unknown, true, "", fmt.Errorf("test error")}, + {probe.Unknown, true, "", "", fmt.Errorf("test error")}, // Unhealthy - {probe.Failure, false, "Fail", &fakeExitError{true, 1}}, + {probe.Failure, false, "Fail", "", &fakeExitError{true, 1}}, } for i, test := range tests { fake := FakeCmd{ diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go index f720320a7103..7d95065e7328 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go +++ b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity.go @@ -51,17 +51,16 @@ func NewInterPodAffinityPriority( } type podAffinityPriorityMap struct { - // nodes contain all nodes that should be considered + // nodes contain all nodes that should be considered. nodes []*v1.Node - // counts store the mapping from node name to so-far computed score of - // the node. - counts map[string]*int64 + // counts store the so-far computed score for each node. 
+ counts []int64 } func newPodAffinityPriorityMap(nodes []*v1.Node) *podAffinityPriorityMap { return &podAffinityPriorityMap{ nodes: nodes, - counts: make(map[string]*int64, len(nodes)), + counts: make([]int64, len(nodes)), } } @@ -73,9 +72,9 @@ func (p *podAffinityPriorityMap) processTerm(term *v1.PodAffinityTerm, podDefini } match := priorityutil.PodMatchesTermsNamespaceAndSelector(podToCheck, namespaces, selector) if match { - for _, node := range p.nodes { + for i, node := range p.nodes { if priorityutil.NodesHaveSameTopologyKey(node, fixedNode, term.TopologyKey) { - atomic.AddInt64(p.counts[node.Name], weight) + atomic.AddInt64(&p.counts[i], weight) } } } @@ -102,17 +101,11 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node hasAffinityConstraints := affinity != nil && affinity.PodAffinity != nil hasAntiAffinityConstraints := affinity != nil && affinity.PodAntiAffinity != nil - // priorityMap stores the mapping from node name to so-far computed score of - // the node. + // pm stores (1) all nodes that should be considered and (2) the so-far computed score for each node. 
pm := newPodAffinityPriorityMap(nodes) allNodeNames := make([]string, 0, len(nodeNameToInfo)) - lazyInit := hasAffinityConstraints || hasAntiAffinityConstraints for name := range nodeNameToInfo { allNodeNames = append(allNodeNames, name) - // if pod has affinity defined, or target node has affinityPods - if lazyInit || len(nodeNameToInfo[name].PodsWithAffinity()) != 0 { - pm.counts[name] = new(int64) - } } // convert the topology key based weights to the node name based weights @@ -216,25 +209,22 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node return nil, err } - for _, node := range nodes { - if pm.counts[node.Name] == nil { - continue - } - if *pm.counts[node.Name] > maxCount { - maxCount = *pm.counts[node.Name] + for i := range nodes { + if pm.counts[i] > maxCount { + maxCount = pm.counts[i] } - if *pm.counts[node.Name] < minCount { - minCount = *pm.counts[node.Name] + if pm.counts[i] < minCount { + minCount = pm.counts[i] } } // calculate final priority score for each node result := make(schedulerapi.HostPriorityList, 0, len(nodes)) maxMinDiff := maxCount - minCount - for _, node := range nodes { + for i, node := range nodes { fScore := float64(0) - if maxMinDiff > 0 && pm.counts[node.Name] != nil { - fScore = float64(schedulerapi.MaxPriority) * (float64(*pm.counts[node.Name]-minCount) / float64(maxCount-minCount)) + if maxMinDiff > 0 { + fScore = float64(schedulerapi.MaxPriority) * (float64(pm.counts[i]-minCount) / float64(maxCount-minCount)) } result = append(result, schedulerapi.HostPriority{Host: node.Name, Score: int(fScore)}) if klog.V(10) { diff --git a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go index efbebf37f75b..fa47805f6190 100644 --- a/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go +++ 
b/vendor/k8s.io/kubernetes/pkg/scheduler/algorithm/priorities/interpod_affinity_test.go @@ -506,6 +506,22 @@ func TestInterPodAffinityPriority(t *testing.T) { expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: 0}, {Host: "machine3", Score: schedulerapi.MaxPriority}, {Host: "machine4", Score: 0}}, name: "Affinity and Anti Affinity and symmetry: considered only preferredDuringSchedulingIgnoredDuringExecution in both pod affinity & anti affinity & symmetry", }, + // Cover https://github.com/kubernetes/kubernetes/issues/82796 which panics upon: + // 1. Some nodes in a topology don't have pods with affinity, but other nodes in the same topology have. + // 2. The incoming pod doesn't have affinity. + { + pod: &v1.Pod{Spec: v1.PodSpec{NodeName: ""}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + pods: []*v1.Pod{ + {Spec: v1.PodSpec{NodeName: "machine1"}, ObjectMeta: metav1.ObjectMeta{Labels: podLabelSecurityS1}}, + {Spec: v1.PodSpec{NodeName: "machine2", Affinity: stayWithS1InRegionAwayFromS2InAz}}, + }, + nodes: []*v1.Node{ + {ObjectMeta: metav1.ObjectMeta{Name: "machine1", Labels: labelRgChina}}, + {ObjectMeta: metav1.ObjectMeta{Name: "machine2", Labels: labelRgChina}}, + }, + expectedList: []schedulerapi.HostPriority{{Host: "machine1", Score: schedulerapi.MaxPriority}, {Host: "machine2", Score: schedulerapi.MaxPriority}}, + name: "Avoid panic when partial nodes in a topology don't have pods with affinity", + }, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/api/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/api/go.sum index 2a074c5e7f54..2fca62dca130 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/api/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/api/go.sum @@ -90,8 +90,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 
v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.mod b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.mod index c270ee4a5fe5..a8422f2a197f 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.mod +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.mod @@ -21,7 +21,7 @@ require ( github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 google.golang.org/grpc v1.23.0 - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 k8s.io/apiserver v0.0.0 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.sum index 1590c100f8c2..7ef23be6c028 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/go.sum @@ -349,6 +349,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 
h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD index 32d6d981f49e..1ee6b4c5436d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/BUILD @@ -48,6 +48,7 @@ go_test( srcs = [ "convert_test.go", "goopenapi_test.go", + "unfold_test.go", "validation_test.go", ], embed = [":go_default_library"], diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go index 23d09eb9ef78..10fcb9cf36a3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/structural.go @@ -30,7 +30,7 @@ type Structural struct { Generic Extensions - *ValueValidation + ValueValidation *ValueValidation } // +k8s:deepcopy-gen=true diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go index d135757ee7b2..5c69a4f300e2 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold.go @@ -35,13 +35,16 @@ func (s *Structural) Unfold() *Structural { return false } - if s.AnyOf == nil { - s.AnyOf = []NestedValueValidation{ + if s.ValueValidation == nil { + s.ValueValidation = &ValueValidation{} + } + if s.ValueValidation.AnyOf == nil { + s.ValueValidation.AnyOf = []NestedValueValidation{ {ForbiddenGenerics: Generic{Type: "integer"}}, {ForbiddenGenerics: Generic{Type: "string"}}, } } else { - s.AllOf = append([]NestedValueValidation{ + s.ValueValidation.AllOf = append([]NestedValueValidation{ { ValueValidation: ValueValidation{ AnyOf: []NestedValueValidation{ @@ -50,7 +53,7 @@ func (s *Structural) Unfold() *Structural { }, }, }, - }, s.AllOf...) + }, s.ValueValidation.AllOf...) } return true diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold_test.go new file mode 100644 index 000000000000..e4086df47551 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver/schema/unfold_test.go @@ -0,0 +1,32 @@ +/* +Copyright 2019 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package schema + +import ( + "testing" +) + +// TestStructuralUnfoldNilValueValidation tests that Unfold() does not crash +// on a IntOrString pattern with nil ValueValidation. +func TestStructuralUnfoldIntOrString(t *testing.T) { + schema := Structural{ + Extensions: Extensions{ + XIntOrString: true, + }, + } + schema.Unfold() +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.mod b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.mod index cf76e6ee1666..730a85b4d4cf 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.mod +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.mod @@ -30,7 +30,7 @@ require ( golang.org/x/text v0.3.2 // indirect gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 // indirect gopkg.in/inf.v0 v0.9.0 - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 k8s.io/klog v0.4.0 k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf sigs.k8s.io/yaml v1.1.0 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.sum index e21152cae48c..e0987db9e140 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apimachinery/go.sum @@ -106,8 +106,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 
v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.mod b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.mod index 322f7b7cb404..a99ebce8257a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.mod +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.mod @@ -47,7 +47,7 @@ require ( google.golang.org/grpc v1.23.0 gopkg.in/natefinch/lumberjack.v2 v2.0.0 gopkg.in/square/go-jose.v2 v2.2.2 - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible // indirect k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.sum index 59c940bb9b46..1b66306b6e07 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/go.sum @@ -268,6 +268,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go index 19a6700e1b23..17b8b4a1af55 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config.go @@ -384,6 +384,17 @@ type CompletedConfig struct { *completedConfig } +// AddHealthChecks adds a health check to our config to be exposed by the health endpoints +// of our configured apiserver. We should prefer this to adding healthChecks directly to +// the config unless we explicitly want to add a healthcheck only to a specific health endpoint. +func (c *Config) AddHealthChecks(healthChecks ...healthz.HealthChecker) { + for _, check := range healthChecks { + c.HealthzChecks = append(c.HealthzChecks, check) + c.LivezChecks = append(c.LivezChecks, check) + c.ReadyzChecks = append(c.ReadyzChecks, check) + } +} + // Complete fills in any fields not set that are required to have valid data and can be derived // from other fields. If you're going to `ApplyOptions`, do that first. It's mutating the receiver. func (c *Config) Complete(informers informers.SharedInformerFactory) CompletedConfig { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config_selfclient.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config_selfclient.go index da1b1e7335b2..2af852ee4d87 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config_selfclient.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/config_selfclient.go @@ -36,12 +36,9 @@ func (s *SecureServingInfo) NewClientConfig(caCert []byte) (*restclient.Config, } return &restclient.Config{ - // Increase QPS limits. The client is currently passed to all admission plugins, - // and those can be throttled in case of higher load on apiserver - see #22340 and #22422 - // for more details. 
Once #22422 is fixed, we may want to remove it. - QPS: 50, - Burst: 100, - Host: "https://" + net.JoinHostPort(host, port), + // Do not limit loopback client QPS. + QPS: -1, + Host: "https://" + net.JoinHostPort(host, port), // override the ServerName to select our loopback certificate via SNI. This name is also // used by the client to compare the returns server certificate against. TLSClientConfig: restclient.TLSClientConfig{ diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go index 559aca7e2b71..23ac0034d8d3 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/filters/timeout.go @@ -28,6 +28,7 @@ import ( "time" apierrors "k8s.io/apimachinery/pkg/api/errors" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" "k8s.io/apiserver/pkg/endpoints/metrics" apirequest "k8s.io/apiserver/pkg/endpoints/request" ) @@ -119,6 +120,23 @@ func (t *timeoutHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) { } return case <-after: + defer func() { + // resultCh needs to have a reader, since the function doing + // the work needs to send to it. This is defer'd to ensure it runs + // ever if the post timeout work itself panics. 
+ go func() { + res := <-resultCh + if res != nil { + switch t := res.(type) { + case error: + utilruntime.HandleError(t) + default: + utilruntime.HandleError(fmt.Errorf("%v", res)) + } + } + }() + }() + postTimeoutFn() tw.timeout(err) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go index c65acac1fd7c..d530d809875d 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/apiserver/pkg/server/options/etcd.go @@ -202,7 +202,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { if err != nil { return err } - c.HealthzChecks = append(c.HealthzChecks, healthz.NamedCheck("etcd", func(r *http.Request) error { + c.AddHealthChecks(healthz.NamedCheck("etcd", func(r *http.Request) error { return healthCheck() })) @@ -211,8 +211,7 @@ func (s *EtcdOptions) addEtcdHealthEndpoint(c *server.Config) error { if err != nil { return err } - - c.HealthzChecks = append(c.HealthzChecks, kmsPluginHealthzChecks...) + c.AddHealthChecks(kmsPluginHealthzChecks...) 
} return nil diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cli-runtime/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cli-runtime/go.sum index c2e75b0b0ab2..fefebe59b29a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cli-runtime/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cli-runtime/go.sum @@ -207,6 +207,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/go.sum index 4962f8964061..66b94277f68c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/go.sum @@ -175,6 +175,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/shared_informer.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/shared_informer.go index c37423b66521..f59a0852fe0a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/shared_informer.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/client-go/tools/cache/shared_informer.go @@ -209,7 +209,7 @@ func WaitForNamedCacheSync(controllerName string, stopCh <-chan struct{}, cacheS // if the controller should shutdown // callers should prefer WaitForNamedCacheSync() func WaitForCacheSync(stopCh <-chan struct{}, cacheSyncs ...InformerSynced) bool { - err := wait.PollUntil(syncedPollPeriod, + err := wait.PollImmediateUntil(syncedPollPeriod, func() (bool, error) { for _, syncFunc := range cacheSyncs { if !syncFunc() { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/go.sum index 118aef9c91e4..69875548b509 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cloud-provider/go.sum @@ -158,6 +158,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/go.sum index f90bab5da838..072ca2d348d9 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/cluster-bootstrap/go.sum @@ -91,8 +91,8 @@ gopkg.in/square/go-jose.v2 v2.2.2/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/doc.go new file mode 100644 index 000000000000..4104119d3b11 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/doc.go @@ -0,0 +1,21 @@ +/* +Copyright 2015 The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +k8s:deepcopy-gen=package +// +groupName=example.dots.apiserver.code-generator.k8s.io +// +groupGoName=ThirdExample + +package example3 // import "k8s.io/code-generator/_examples/apiserver/apis/example3.io" diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/install/install.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/install/install.go new file mode 100644 index 000000000000..e7a4cc3fc5e3 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/install/install.go @@ -0,0 +1,33 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package install installs the experimental API group, making it available as +// an option to all of the API encoding/decoding machinery. 
+package install + +import ( + "k8s.io/apimachinery/pkg/runtime" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + example3 "k8s.io/code-generator/_examples/apiserver/apis/example3.io" + "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" +) + +// Install registers the API group and adds types to a scheme +func Install(scheme *runtime.Scheme) { + utilruntime.Must(example3.AddToScheme(scheme)) + utilruntime.Must(v1.AddToScheme(scheme)) + utilruntime.Must(scheme.SetVersionPriority(v1.SchemeGroupVersion)) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/register.go new file mode 100644 index 000000000000..b77b2b1807c6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/register.go @@ -0,0 +1,45 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package example3 + +import ( + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var SchemeGroupVersion = schema.GroupVersion{Group: "example.dots.apiserver.code-generator.k8s.io", Version: runtime.APIVersionInternal} + +var ( + SchemeBuilder = runtime.NewSchemeBuilder(addKnownTypes) + AddToScheme = SchemeBuilder.AddToScheme +) + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to the given scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TestType{}, + &TestTypeList{}, + ) + + scheme.AddKnownTypes(SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/types.go new file mode 100644 index 000000000000..0c49b45f3672 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/types.go @@ -0,0 +1,44 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package example3 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestType is a top-level type. A client is created for it. +type TestType struct { + metav1.TypeMeta + metav1.ObjectMeta + Status TestTypeStatus +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestTypeList is a top-level list type. The client methods for lists are automatically created. +// You are not supposed to create a separated client for this one. +type TestTypeList struct { + metav1.TypeMeta + metav1.ListMeta + + Items []TestType +} + +type TestTypeStatus struct { + Blah string +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/doc.go new file mode 100644 index 000000000000..24f3f82c7d20 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/doc.go @@ -0,0 +1,24 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +// +k8s:openapi-gen=true +// +k8s:deepcopy-gen=package +// +k8s:defaulter-gen=TypeMeta +// +groupName=example.dots.apiserver.code-generator.k8s.io +// +k8s:conversion-gen=k8s.io/code-generator/_examples/apiserver/apis/example3.io +// +groupGoName=ThirdExample + +package v1 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/register.go new file mode 100644 index 000000000000..3e0a393f7cc0 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/register.go @@ -0,0 +1,59 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" +) + +var SchemeGroupVersion = schema.GroupVersion{Group: "example.dots.apiserver.code-generator.k8s.io", Version: "v1"} + +var ( + // TODO: move SchemeBuilder with zz_generated.deepcopy.go to k8s.io/api. + // localSchemeBuilder and AddToScheme will stay in k8s.io/kubernetes. + SchemeBuilder runtime.SchemeBuilder + localSchemeBuilder = &SchemeBuilder + AddToScheme = localSchemeBuilder.AddToScheme +) + +func init() { + // We only register manually written functions here. 
The registration of the + // generated functions takes place in the generated files. The separation + // makes the code compile even when the generated files are missing. + localSchemeBuilder.Register(addKnownTypes) +} + +// Resource takes an unqualified resource and returns a Group qualified GroupResource +func Resource(resource string) schema.GroupResource { + return SchemeGroupVersion.WithResource(resource).GroupResource() +} + +// Adds the list of known types to api.Scheme. +func addKnownTypes(scheme *runtime.Scheme) error { + scheme.AddKnownTypes(SchemeGroupVersion, + &TestType{}, + &TestTypeList{}, + ) + + scheme.AddKnownTypes(SchemeGroupVersion, + &metav1.Status{}, + ) + metav1.AddToGroupVersion(scheme, SchemeGroupVersion) + return nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/types.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/types.go new file mode 100644 index 000000000000..5c2ebc4d6f46 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/types.go @@ -0,0 +1,47 @@ +/* +Copyright 2015 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package v1 + +import metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + +// +genclient +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestType is a top-level type. A client is created for it. 
+type TestType struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ObjectMeta `json:"metadata,omitempty"` + // +optional + Status TestTypeStatus `json:"status,omitempty"` +} + +// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object + +// TestTypeList is a top-level list type. The client methods for lists are automatically created. +// You are not supposed to create a separated client for this one. +type TestTypeList struct { + metav1.TypeMeta `json:",inline"` + // +optional + metav1.ListMeta `json:"metadata,omitempty"` + + Items []TestType `json:"items"` +} + +type TestTypeStatus struct { + Blah string +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.conversion.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.conversion.go new file mode 100644 index 000000000000..5c65ac566875 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.conversion.go @@ -0,0 +1,137 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by conversion-gen. DO NOT EDIT. 
+ +package v1 + +import ( + unsafe "unsafe" + + conversion "k8s.io/apimachinery/pkg/conversion" + runtime "k8s.io/apimachinery/pkg/runtime" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" +) + +func init() { + localSchemeBuilder.Register(RegisterConversions) +} + +// RegisterConversions adds conversion functions to the given scheme. +// Public to allow building arbitrary schemes. +func RegisterConversions(s *runtime.Scheme) error { + if err := s.AddGeneratedConversionFunc((*TestType)(nil), (*example3io.TestType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TestType_To_example3io_TestType(a.(*TestType), b.(*example3io.TestType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*example3io.TestType)(nil), (*TestType)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_example3io_TestType_To_v1_TestType(a.(*example3io.TestType), b.(*TestType), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TestTypeList)(nil), (*example3io.TestTypeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TestTypeList_To_example3io_TestTypeList(a.(*TestTypeList), b.(*example3io.TestTypeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*example3io.TestTypeList)(nil), (*TestTypeList)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_example3io_TestTypeList_To_v1_TestTypeList(a.(*example3io.TestTypeList), b.(*TestTypeList), scope) + }); err != nil { + return err + } + if err := s.AddGeneratedConversionFunc((*TestTypeStatus)(nil), (*example3io.TestTypeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_v1_TestTypeStatus_To_example3io_TestTypeStatus(a.(*TestTypeStatus), b.(*example3io.TestTypeStatus), scope) + }); err != nil { + return err + } + if err := 
s.AddGeneratedConversionFunc((*example3io.TestTypeStatus)(nil), (*TestTypeStatus)(nil), func(a, b interface{}, scope conversion.Scope) error { + return Convert_example3io_TestTypeStatus_To_v1_TestTypeStatus(a.(*example3io.TestTypeStatus), b.(*TestTypeStatus), scope) + }); err != nil { + return err + } + return nil +} + +func autoConvert_v1_TestType_To_example3io_TestType(in *TestType, out *example3io.TestType, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_v1_TestTypeStatus_To_example3io_TestTypeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_v1_TestType_To_example3io_TestType is an autogenerated conversion function. +func Convert_v1_TestType_To_example3io_TestType(in *TestType, out *example3io.TestType, s conversion.Scope) error { + return autoConvert_v1_TestType_To_example3io_TestType(in, out, s) +} + +func autoConvert_example3io_TestType_To_v1_TestType(in *example3io.TestType, out *TestType, s conversion.Scope) error { + out.ObjectMeta = in.ObjectMeta + if err := Convert_example3io_TestTypeStatus_To_v1_TestTypeStatus(&in.Status, &out.Status, s); err != nil { + return err + } + return nil +} + +// Convert_example3io_TestType_To_v1_TestType is an autogenerated conversion function. +func Convert_example3io_TestType_To_v1_TestType(in *example3io.TestType, out *TestType, s conversion.Scope) error { + return autoConvert_example3io_TestType_To_v1_TestType(in, out, s) +} + +func autoConvert_v1_TestTypeList_To_example3io_TestTypeList(in *TestTypeList, out *example3io.TestTypeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]example3io.TestType)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_v1_TestTypeList_To_example3io_TestTypeList is an autogenerated conversion function. 
+func Convert_v1_TestTypeList_To_example3io_TestTypeList(in *TestTypeList, out *example3io.TestTypeList, s conversion.Scope) error { + return autoConvert_v1_TestTypeList_To_example3io_TestTypeList(in, out, s) +} + +func autoConvert_example3io_TestTypeList_To_v1_TestTypeList(in *example3io.TestTypeList, out *TestTypeList, s conversion.Scope) error { + out.ListMeta = in.ListMeta + out.Items = *(*[]TestType)(unsafe.Pointer(&in.Items)) + return nil +} + +// Convert_example3io_TestTypeList_To_v1_TestTypeList is an autogenerated conversion function. +func Convert_example3io_TestTypeList_To_v1_TestTypeList(in *example3io.TestTypeList, out *TestTypeList, s conversion.Scope) error { + return autoConvert_example3io_TestTypeList_To_v1_TestTypeList(in, out, s) +} + +func autoConvert_v1_TestTypeStatus_To_example3io_TestTypeStatus(in *TestTypeStatus, out *example3io.TestTypeStatus, s conversion.Scope) error { + out.Blah = in.Blah + return nil +} + +// Convert_v1_TestTypeStatus_To_example3io_TestTypeStatus is an autogenerated conversion function. +func Convert_v1_TestTypeStatus_To_example3io_TestTypeStatus(in *TestTypeStatus, out *example3io.TestTypeStatus, s conversion.Scope) error { + return autoConvert_v1_TestTypeStatus_To_example3io_TestTypeStatus(in, out, s) +} + +func autoConvert_example3io_TestTypeStatus_To_v1_TestTypeStatus(in *example3io.TestTypeStatus, out *TestTypeStatus, s conversion.Scope) error { + out.Blah = in.Blah + return nil +} + +// Convert_example3io_TestTypeStatus_To_v1_TestTypeStatus is an autogenerated conversion function. 
+func Convert_example3io_TestTypeStatus_To_v1_TestTypeStatus(in *example3io.TestTypeStatus, out *TestTypeStatus, s conversion.Scope) error { + return autoConvert_example3io_TestTypeStatus_To_v1_TestTypeStatus(in, out, s) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.deepcopy.go new file mode 100644 index 000000000000..ec5a6e97461c --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.deepcopy.go @@ -0,0 +1,101 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestType) DeepCopyInto(out *TestType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestType. 
+func (in *TestType) DeepCopy() *TestType { + if in == nil { + return nil + } + out := new(TestType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TestType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeList. +func (in *TestTypeList) DeepCopy() *TestTypeList { + if in == nil { + return nil + } + out := new(TestTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeStatus) DeepCopyInto(out *TestTypeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeStatus. 
+func (in *TestTypeStatus) DeepCopy() *TestTypeStatus { + if in == nil { + return nil + } + out := new(TestTypeStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.defaults.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.defaults.go new file mode 100644 index 000000000000..cce2e603a69a --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1/zz_generated.defaults.go @@ -0,0 +1,32 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by defaulter-gen. DO NOT EDIT. + +package v1 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// RegisterDefaults adds defaulters functions to the given scheme. +// Public to allow building arbitrary schemes. +// All generated defaulters are covering - they call all nested defaulters. 
+func RegisterDefaults(scheme *runtime.Scheme) error { + return nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/zz_generated.deepcopy.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/zz_generated.deepcopy.go new file mode 100644 index 000000000000..17f1fea4a7bc --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/apis/example3.io/zz_generated.deepcopy.go @@ -0,0 +1,101 @@ +// +build !ignore_autogenerated + +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by deepcopy-gen. DO NOT EDIT. + +package example3 + +import ( + runtime "k8s.io/apimachinery/pkg/runtime" +) + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestType) DeepCopyInto(out *TestType) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Status = in.Status + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestType. +func (in *TestType) DeepCopy() *TestType { + if in == nil { + return nil + } + out := new(TestType) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. 
+func (in *TestType) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeList) DeepCopyInto(out *TestTypeList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]TestType, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeList. +func (in *TestTypeList) DeepCopy() *TestTypeList { + if in == nil { + return nil + } + out := new(TestTypeList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *TestTypeList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *TestTypeStatus) DeepCopyInto(out *TestTypeStatus) { + *out = *in + return +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new TestTypeStatus. 
+func (in *TestTypeStatus) DeepCopy() *TestTypeStatus { + if in == nil { + return nil + } + out := new(TestTypeStatus) + in.DeepCopyInto(out) + return out +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go index a9dc68d887bf..2c8a86f671cd 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/clientset.go @@ -26,12 +26,14 @@ import ( flowcontrol "k8s.io/client-go/util/flowcontrol" exampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion" secondexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion" + thirdexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion" ) type Interface interface { Discovery() discovery.DiscoveryInterface Example() exampleinternalversion.ExampleInterface SecondExample() secondexampleinternalversion.SecondExampleInterface + ThirdExample() thirdexampleinternalversion.ThirdExampleInterface } // Clientset contains the clients for groups. 
Each group has exactly one @@ -40,6 +42,7 @@ type Clientset struct { *discovery.DiscoveryClient example *exampleinternalversion.ExampleClient secondExample *secondexampleinternalversion.SecondExampleClient + thirdExample *thirdexampleinternalversion.ThirdExampleClient } // Example retrieves the ExampleClient @@ -52,6 +55,11 @@ func (c *Clientset) SecondExample() secondexampleinternalversion.SecondExampleIn return c.secondExample } +// ThirdExample retrieves the ThirdExampleClient +func (c *Clientset) ThirdExample() thirdexampleinternalversion.ThirdExampleInterface { + return c.thirdExample +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -81,6 +89,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.thirdExample, err = thirdexampleinternalversion.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -95,6 +107,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.example = exampleinternalversion.NewForConfigOrDie(c) cs.secondExample = secondexampleinternalversion.NewForConfigOrDie(c) + cs.thirdExample = thirdexampleinternalversion.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -105,6 +118,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.example = exampleinternalversion.New(c) cs.secondExample = secondexampleinternalversion.New(c) + cs.thirdExample = thirdexampleinternalversion.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go index 
bd17e82b7f5b..1c98e7c04333 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/clientset_generated.go @@ -29,6 +29,8 @@ import ( fakeexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example/internalversion/fake" secondexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion" fakesecondexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example2/internalversion/fake" + thirdexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion" + fakethirdexampleinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. 
@@ -87,3 +89,8 @@ func (c *Clientset) Example() exampleinternalversion.ExampleInterface { func (c *Clientset) SecondExample() secondexampleinternalversion.SecondExampleInterface { return &fakesecondexampleinternalversion.FakeSecondExample{Fake: &c.Fake} } + +// ThirdExample retrieves the ThirdExampleClient +func (c *Clientset) ThirdExample() thirdexampleinternalversion.ThirdExampleInterface { + return &fakethirdexampleinternalversion.FakeThirdExample{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go index 5713015ce528..e29e4e0aa673 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/fake/register.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" exampleinternalversion "k8s.io/code-generator/_examples/apiserver/apis/example" secondexampleinternalversion "k8s.io/code-generator/_examples/apiserver/apis/example2" + thirdexampleinternalversion "k8s.io/code-generator/_examples/apiserver/apis/example3.io" ) var scheme = runtime.NewScheme() @@ -34,6 +35,7 @@ var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ exampleinternalversion.AddToScheme, secondexampleinternalversion.AddToScheme, + thirdexampleinternalversion.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go index 97b03068e329..933d5ed4dfc8 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme/register.go @@ -25,6 +25,7 @@ import ( serializer "k8s.io/apimachinery/pkg/runtime/serializer" example "k8s.io/code-generator/_examples/apiserver/apis/example/install" secondexample "k8s.io/code-generator/_examples/apiserver/apis/example2/install" + thirdexample "k8s.io/code-generator/_examples/apiserver/apis/example3.io/install" ) var Scheme = runtime.NewScheme() @@ -40,4 +41,5 @@ func init() { func Install(scheme *runtime.Scheme) { example.Install(scheme) secondexample.Install(scheme) + thirdexample.Install(scheme) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/doc.go new file mode 100644 index 000000000000..86602442babd --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package internalversion diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/example3.io_client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/example3.io_client.go new file mode 100644 index 000000000000..dab480b07c98 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/example3.io_client.go @@ -0,0 +1,96 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package internalversion + +import ( + rest "k8s.io/client-go/rest" + "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme" +) + +type ThirdExampleInterface interface { + RESTClient() rest.Interface + TestTypesGetter +} + +// ThirdExampleClient is used to interact with features provided by the example.dots.apiserver.code-generator.k8s.io group. +type ThirdExampleClient struct { + restClient rest.Interface +} + +func (c *ThirdExampleClient) TestTypes(namespace string) TestTypeInterface { + return newTestTypes(c, namespace) +} + +// NewForConfig creates a new ThirdExampleClient for the given config. +func NewForConfig(c *rest.Config) (*ThirdExampleClient, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ThirdExampleClient{client}, nil +} + +// NewForConfigOrDie creates a new ThirdExampleClient for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ThirdExampleClient { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ThirdExampleClient for the given RESTClient. 
+func New(c rest.Interface) *ThirdExampleClient { + return &ThirdExampleClient{c} +} + +func setConfigDefaults(config *rest.Config) error { + config.APIPath = "/apis" + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + if config.GroupVersion == nil || config.GroupVersion.Group != scheme.Scheme.PrioritizedVersionsForGroup("example.dots.apiserver.code-generator.k8s.io")[0].Group { + gv := scheme.Scheme.PrioritizedVersionsForGroup("example.dots.apiserver.code-generator.k8s.io")[0] + config.GroupVersion = &gv + } + config.NegotiatedSerializer = scheme.Codecs + + if config.QPS == 0 { + config.QPS = 5 + } + if config.Burst == 0 { + config.Burst = 10 + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *ThirdExampleClient) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_example3.io_client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_example3.io_client.go new file mode 100644 index 000000000000..790fdb903371 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_example3.io_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion" +) + +type FakeThirdExample struct { + *testing.Fake +} + +func (c *FakeThirdExample) TestTypes(namespace string) internalversion.TestTypeInterface { + return &FakeTestTypes{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *FakeThirdExample) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go new file mode 100644 index 000000000000..5baf7b89f911 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/fake/fake_testtype.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" +) + +// FakeTestTypes implements TestTypeInterface +type FakeTestTypes struct { + Fake *FakeThirdExample + ns string +} + +var testtypesResource = schema.GroupVersionResource{Group: "example.dots.apiserver.code-generator.k8s.io", Version: "", Resource: "testtypes"} + +var testtypesKind = schema.GroupVersionKind{Group: "example.dots.apiserver.code-generator.k8s.io", Version: "", Kind: "TestType"} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *FakeTestTypes) Get(name string, options v1.GetOptions) (result *example3io.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(testtypesResource, c.ns, name), &example3io.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3io.TestType), err +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *FakeTestTypes) List(opts v1.ListOptions) (result *example3io.TestTypeList, err error) { + obj, err := c.Fake. + Invokes(testing.NewListAction(testtypesResource, testtypesKind, c.ns, opts), &example3io.TestTypeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &example3io.TestTypeList{ListMeta: obj.(*example3io.TestTypeList).ListMeta} + for _, item := range obj.(*example3io.TestTypeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested testTypes. 
+func (c *FakeTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(testtypesResource, c.ns, opts)) + +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Create(testType *example3io.TestType) (result *example3io.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example3io.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3io.TestType), err +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Update(testType *example3io.TestType) (result *example3io.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example3io.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3io.TestType), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). +func (c *FakeTestTypes) UpdateStatus(testType *example3io.TestType) (*example3io.TestType, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example3io.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3io.TestType), err +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *FakeTestTypes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(testtypesResource, c.ns, name), &example3io.TestType{}) + + return err +} + +// DeleteCollection deletes a collection of objects. 
+func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(testtypesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &example3io.TestTypeList{}) + return err +} + +// Patch applies the patch and returns the patched testType. +func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example3io.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3io.TestType), err +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/generated_expansion.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/generated_expansion.go new file mode 100644 index 000000000000..50bdbd254a4a --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package internalversion + +type TestTypeExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go new file mode 100644 index 000000000000..5ec5b472702e --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/internalversion/typed/example3.io/internalversion/testtype.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package internalversion + +import ( + "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" + scheme "k8s.io/code-generator/_examples/apiserver/clientset/internalversion/scheme" +) + +// TestTypesGetter has a method to return a TestTypeInterface. +// A group's client should implement this interface. +type TestTypesGetter interface { + TestTypes(namespace string) TestTypeInterface +} + +// TestTypeInterface has methods to work with TestType resources. 
+type TestTypeInterface interface { + Create(*example3io.TestType) (*example3io.TestType, error) + Update(*example3io.TestType) (*example3io.TestType, error) + UpdateStatus(*example3io.TestType) (*example3io.TestType, error) + Delete(name string, options *v1.DeleteOptions) error + DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error + Get(name string, options v1.GetOptions) (*example3io.TestType, error) + List(opts v1.ListOptions) (*example3io.TestTypeList, error) + Watch(opts v1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) + TestTypeExpansion +} + +// testTypes implements TestTypeInterface +type testTypes struct { + client rest.Interface + ns string +} + +// newTestTypes returns a TestTypes +func newTestTypes(c *ThirdExampleClient, namespace string) *testTypes { + return &testTypes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *testTypes) Get(name string, options v1.GetOptions) (result *example3io.TestType, err error) { + result = &example3io.TestType{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). + Into(result) + return +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *testTypes) List(opts v1.ListOptions) (result *example3io.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &example3io.TestTypeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). 
+ Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested testTypes. +func (c *testTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *testTypes) Create(testType *example3io.TestType) (result *example3io.TestType, err error) { + result = &example3io.TestType{} + err = c.client.Post(). + Namespace(c.ns). + Resource("testtypes"). + Body(testType). + Do(). + Into(result) + return +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *testTypes) Update(testType *example3io.TestType) (result *example3io.TestType, err error) { + result = &example3io.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + Body(testType). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). + +func (c *testTypes) UpdateStatus(testType *example3io.TestType) (result *example3io.TestType, err error) { + result = &example3io.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + SubResource("status"). + Body(testType). + Do(). + Into(result) + return +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *testTypes) Delete(name string, options *v1.DeleteOptions) error { + return c.client.Delete(). 
+ Namespace(c.ns). + Resource("testtypes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *testTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched testType. +func (c *testTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example3io.TestType, err error) { + result = &example3io.TestType{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("testtypes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). + Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go index 7136f17a1d10..312ad73ccafe 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/clientset.go @@ -26,12 +26,14 @@ import ( flowcontrol "k8s.io/client-go/util/flowcontrol" examplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1" secondexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1" + thirdexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1" ) type Interface interface { Discovery() discovery.DiscoveryInterface ExampleV1() examplev1.ExampleV1Interface SecondExampleV1() 
secondexamplev1.SecondExampleV1Interface + ThirdExampleV1() thirdexamplev1.ThirdExampleV1Interface } // Clientset contains the clients for groups. Each group has exactly one @@ -40,6 +42,7 @@ type Clientset struct { *discovery.DiscoveryClient exampleV1 *examplev1.ExampleV1Client secondExampleV1 *secondexamplev1.SecondExampleV1Client + thirdExampleV1 *thirdexamplev1.ThirdExampleV1Client } // ExampleV1 retrieves the ExampleV1Client @@ -52,6 +55,11 @@ func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return c.secondExampleV1 } +// ThirdExampleV1 retrieves the ThirdExampleV1Client +func (c *Clientset) ThirdExampleV1() thirdexamplev1.ThirdExampleV1Interface { + return c.thirdExampleV1 +} + // Discovery retrieves the DiscoveryClient func (c *Clientset) Discovery() discovery.DiscoveryInterface { if c == nil { @@ -81,6 +89,10 @@ func NewForConfig(c *rest.Config) (*Clientset, error) { if err != nil { return nil, err } + cs.thirdExampleV1, err = thirdexamplev1.NewForConfig(&configShallowCopy) + if err != nil { + return nil, err + } cs.DiscoveryClient, err = discovery.NewDiscoveryClientForConfig(&configShallowCopy) if err != nil { @@ -95,6 +107,7 @@ func NewForConfigOrDie(c *rest.Config) *Clientset { var cs Clientset cs.exampleV1 = examplev1.NewForConfigOrDie(c) cs.secondExampleV1 = secondexamplev1.NewForConfigOrDie(c) + cs.thirdExampleV1 = thirdexamplev1.NewForConfigOrDie(c) cs.DiscoveryClient = discovery.NewDiscoveryClientForConfigOrDie(c) return &cs @@ -105,6 +118,7 @@ func New(c rest.Interface) *Clientset { var cs Clientset cs.exampleV1 = examplev1.New(c) cs.secondExampleV1 = secondexamplev1.New(c) + cs.thirdExampleV1 = thirdexamplev1.New(c) cs.DiscoveryClient = discovery.NewDiscoveryClient(c) return &cs diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go index 42690b1ecba3..760b901c32ce 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/clientset_generated.go @@ -29,6 +29,8 @@ import ( fakeexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example/v1/fake" secondexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1" fakesecondexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example2/v1/fake" + thirdexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1" + fakethirdexamplev1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake" ) // NewSimpleClientset returns a clientset that will respond with the provided objects. 
@@ -87,3 +89,8 @@ func (c *Clientset) ExampleV1() examplev1.ExampleV1Interface { func (c *Clientset) SecondExampleV1() secondexamplev1.SecondExampleV1Interface { return &fakesecondexamplev1.FakeSecondExampleV1{Fake: &c.Fake} } + +// ThirdExampleV1 retrieves the ThirdExampleV1Client +func (c *Clientset) ThirdExampleV1() thirdexamplev1.ThirdExampleV1Interface { + return &fakethirdexamplev1.FakeThirdExampleV1{Fake: &c.Fake} +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go index 75282b82f05f..470946254cc4 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/fake/register.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" examplev1 "k8s.io/code-generator/_examples/apiserver/apis/example/v1" secondexamplev1 "k8s.io/code-generator/_examples/apiserver/apis/example2/v1" + thirdexamplev1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" ) var scheme = runtime.NewScheme() @@ -34,6 +35,7 @@ var parameterCodec = runtime.NewParameterCodec(scheme) var localSchemeBuilder = runtime.SchemeBuilder{ examplev1.AddToScheme, secondexamplev1.AddToScheme, + thirdexamplev1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. 
This allows composition diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go index e4c8dc1020b2..9919a8f5a729 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme/register.go @@ -26,6 +26,7 @@ import ( utilruntime "k8s.io/apimachinery/pkg/util/runtime" examplev1 "k8s.io/code-generator/_examples/apiserver/apis/example/v1" secondexamplev1 "k8s.io/code-generator/_examples/apiserver/apis/example2/v1" + thirdexamplev1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" ) var Scheme = runtime.NewScheme() @@ -34,6 +35,7 @@ var ParameterCodec = runtime.NewParameterCodec(Scheme) var localSchemeBuilder = runtime.SchemeBuilder{ examplev1.AddToScheme, secondexamplev1.AddToScheme, + thirdexamplev1.AddToScheme, } // AddToScheme adds all types of this clientset into the given scheme. This allows composition diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/doc.go new file mode 100644 index 000000000000..3af5d054f102 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// This package has the automatically generated typed clients. +package v1 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/example3.io_client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/example3.io_client.go new file mode 100644 index 000000000000..53c51b35c7cd --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/example3.io_client.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + rest "k8s.io/client-go/rest" + v1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" + "k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme" +) + +type ThirdExampleV1Interface interface { + RESTClient() rest.Interface + TestTypesGetter +} + +// ThirdExampleV1Client is used to interact with features provided by the example.dots.apiserver.code-generator.k8s.io group. +type ThirdExampleV1Client struct { + restClient rest.Interface +} + +func (c *ThirdExampleV1Client) TestTypes(namespace string) TestTypeInterface { + return newTestTypes(c, namespace) +} + +// NewForConfig creates a new ThirdExampleV1Client for the given config. +func NewForConfig(c *rest.Config) (*ThirdExampleV1Client, error) { + config := *c + if err := setConfigDefaults(&config); err != nil { + return nil, err + } + client, err := rest.RESTClientFor(&config) + if err != nil { + return nil, err + } + return &ThirdExampleV1Client{client}, nil +} + +// NewForConfigOrDie creates a new ThirdExampleV1Client for the given config and +// panics if there is an error in the config. +func NewForConfigOrDie(c *rest.Config) *ThirdExampleV1Client { + client, err := NewForConfig(c) + if err != nil { + panic(err) + } + return client +} + +// New creates a new ThirdExampleV1Client for the given RESTClient. +func New(c rest.Interface) *ThirdExampleV1Client { + return &ThirdExampleV1Client{c} +} + +func setConfigDefaults(config *rest.Config) error { + gv := v1.SchemeGroupVersion + config.GroupVersion = &gv + config.APIPath = "/apis" + config.NegotiatedSerializer = scheme.Codecs.WithoutConversion() + + if config.UserAgent == "" { + config.UserAgent = rest.DefaultKubernetesUserAgent() + } + + return nil +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. 
+func (c *ThirdExampleV1Client) RESTClient() rest.Interface { + if c == nil { + return nil + } + return c.restClient +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/doc.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/doc.go new file mode 100644 index 000000000000..16f44399065e --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/doc.go @@ -0,0 +1,20 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +// Package fake has the automatically generated clients. +package fake diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_example3.io_client.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_example3.io_client.go new file mode 100644 index 000000000000..bbaf71a3b091 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_example3.io_client.go @@ -0,0 +1,40 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + rest "k8s.io/client-go/rest" + testing "k8s.io/client-go/testing" + v1 "k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1" +) + +type FakeThirdExampleV1 struct { + *testing.Fake +} + +func (c *FakeThirdExampleV1) TestTypes(namespace string) v1.TestTypeInterface { + return &FakeTestTypes{c, namespace} +} + +// RESTClient returns a RESTClient that is used to communicate +// with API server by this client implementation. +func (c *FakeThirdExampleV1) RESTClient() rest.Interface { + var ret *rest.RESTClient + return ret +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go new file mode 100644 index 000000000000..6a823ab3490f --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/fake/fake_testtype.go @@ -0,0 +1,140 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package fake + +import ( + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + labels "k8s.io/apimachinery/pkg/labels" + schema "k8s.io/apimachinery/pkg/runtime/schema" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + testing "k8s.io/client-go/testing" + example3iov1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" +) + +// FakeTestTypes implements TestTypeInterface +type FakeTestTypes struct { + Fake *FakeThirdExampleV1 + ns string +} + +var testtypesResource = schema.GroupVersionResource{Group: "example.dots.apiserver.code-generator.k8s.io", Version: "v1", Resource: "testtypes"} + +var testtypesKind = schema.GroupVersionKind{Group: "example.dots.apiserver.code-generator.k8s.io", Version: "v1", Kind: "TestType"} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *FakeTestTypes) Get(name string, options v1.GetOptions) (result *example3iov1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewGetAction(testtypesResource, c.ns, name), &example3iov1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3iov1.TestType), err +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *FakeTestTypes) List(opts v1.ListOptions) (result *example3iov1.TestTypeList, err error) { + obj, err := c.Fake. 
+ Invokes(testing.NewListAction(testtypesResource, testtypesKind, c.ns, opts), &example3iov1.TestTypeList{}) + + if obj == nil { + return nil, err + } + + label, _, _ := testing.ExtractFromListOptions(opts) + if label == nil { + label = labels.Everything() + } + list := &example3iov1.TestTypeList{ListMeta: obj.(*example3iov1.TestTypeList).ListMeta} + for _, item := range obj.(*example3iov1.TestTypeList).Items { + if label.Matches(labels.Set(item.Labels)) { + list.Items = append(list.Items, item) + } + } + return list, err +} + +// Watch returns a watch.Interface that watches the requested testTypes. +func (c *FakeTestTypes) Watch(opts v1.ListOptions) (watch.Interface, error) { + return c.Fake. + InvokesWatch(testing.NewWatchAction(testtypesResource, c.ns, opts)) + +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Create(testType *example3iov1.TestType) (result *example3iov1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewCreateAction(testtypesResource, c.ns, testType), &example3iov1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3iov1.TestType), err +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *FakeTestTypes) Update(testType *example3iov1.TestType) (result *example3iov1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateAction(testtypesResource, c.ns, testType), &example3iov1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3iov1.TestType), err +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+func (c *FakeTestTypes) UpdateStatus(testType *example3iov1.TestType) (*example3iov1.TestType, error) { + obj, err := c.Fake. + Invokes(testing.NewUpdateSubresourceAction(testtypesResource, "status", c.ns, testType), &example3iov1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3iov1.TestType), err +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *FakeTestTypes) Delete(name string, options *v1.DeleteOptions) error { + _, err := c.Fake. + Invokes(testing.NewDeleteAction(testtypesResource, c.ns, name), &example3iov1.TestType{}) + + return err +} + +// DeleteCollection deletes a collection of objects. +func (c *FakeTestTypes) DeleteCollection(options *v1.DeleteOptions, listOptions v1.ListOptions) error { + action := testing.NewDeleteCollectionAction(testtypesResource, c.ns, listOptions) + + _, err := c.Fake.Invokes(action, &example3iov1.TestTypeList{}) + return err +} + +// Patch applies the patch and returns the patched testType. +func (c *FakeTestTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *example3iov1.TestType, err error) { + obj, err := c.Fake. + Invokes(testing.NewPatchSubresourceAction(testtypesResource, c.ns, name, pt, data, subresources...), &example3iov1.TestType{}) + + if obj == nil { + return nil, err + } + return obj.(*example3iov1.TestType), err +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/generated_expansion.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/generated_expansion.go new file mode 100644 index 000000000000..d513810d0d79 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/generated_expansion.go @@ -0,0 +1,21 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. + +package v1 + +type TestTypeExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go new file mode 100644 index 000000000000..84a6b9571b69 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/clientset/versioned/typed/example3.io/v1/testtype.go @@ -0,0 +1,191 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by client-gen. DO NOT EDIT. 
+ +package v1 + +import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + types "k8s.io/apimachinery/pkg/types" + watch "k8s.io/apimachinery/pkg/watch" + rest "k8s.io/client-go/rest" + v1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" + scheme "k8s.io/code-generator/_examples/apiserver/clientset/versioned/scheme" +) + +// TestTypesGetter has a method to return a TestTypeInterface. +// A group's client should implement this interface. +type TestTypesGetter interface { + TestTypes(namespace string) TestTypeInterface +} + +// TestTypeInterface has methods to work with TestType resources. +type TestTypeInterface interface { + Create(*v1.TestType) (*v1.TestType, error) + Update(*v1.TestType) (*v1.TestType, error) + UpdateStatus(*v1.TestType) (*v1.TestType, error) + Delete(name string, options *metav1.DeleteOptions) error + DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error + Get(name string, options metav1.GetOptions) (*v1.TestType, error) + List(opts metav1.ListOptions) (*v1.TestTypeList, error) + Watch(opts metav1.ListOptions) (watch.Interface, error) + Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) + TestTypeExpansion +} + +// testTypes implements TestTypeInterface +type testTypes struct { + client rest.Interface + ns string +} + +// newTestTypes returns a TestTypes +func newTestTypes(c *ThirdExampleV1Client, namespace string) *testTypes { + return &testTypes{ + client: c.RESTClient(), + ns: namespace, + } +} + +// Get takes name of the testType, and returns the corresponding testType object, and an error if there is any. +func (c *testTypes) Get(name string, options metav1.GetOptions) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + Name(name). + VersionedParams(&options, scheme.ParameterCodec). + Do(). 
+ Into(result) + return +} + +// List takes label and field selectors, and returns the list of TestTypes that match those selectors. +func (c *testTypes) List(opts metav1.ListOptions) (result *v1.TestTypeList, err error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + result = &v1.TestTypeList{} + err = c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Do(). + Into(result) + return +} + +// Watch returns a watch.Interface that watches the requested testTypes. +func (c *testTypes) Watch(opts metav1.ListOptions) (watch.Interface, error) { + var timeout time.Duration + if opts.TimeoutSeconds != nil { + timeout = time.Duration(*opts.TimeoutSeconds) * time.Second + } + opts.Watch = true + return c.client.Get(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&opts, scheme.ParameterCodec). + Timeout(timeout). + Watch() +} + +// Create takes the representation of a testType and creates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *testTypes) Create(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Post(). + Namespace(c.ns). + Resource("testtypes"). + Body(testType). + Do(). + Into(result) + return +} + +// Update takes the representation of a testType and updates it. Returns the server's representation of the testType, and an error, if there is any. +func (c *testTypes) Update(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + Body(testType). + Do(). + Into(result) + return +} + +// UpdateStatus was generated because the type contains a Status member. +// Add a +genclient:noStatus comment above the type to avoid generating UpdateStatus(). 
+ +func (c *testTypes) UpdateStatus(testType *v1.TestType) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Put(). + Namespace(c.ns). + Resource("testtypes"). + Name(testType.Name). + SubResource("status"). + Body(testType). + Do(). + Into(result) + return +} + +// Delete takes name of the testType and deletes it. Returns an error if one occurs. +func (c *testTypes) Delete(name string, options *metav1.DeleteOptions) error { + return c.client.Delete(). + Namespace(c.ns). + Resource("testtypes"). + Name(name). + Body(options). + Do(). + Error() +} + +// DeleteCollection deletes a collection of objects. +func (c *testTypes) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error { + var timeout time.Duration + if listOptions.TimeoutSeconds != nil { + timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second + } + return c.client.Delete(). + Namespace(c.ns). + Resource("testtypes"). + VersionedParams(&listOptions, scheme.ParameterCodec). + Timeout(timeout). + Body(options). + Do(). + Error() +} + +// Patch applies the patch and returns the patched testType. +func (c *testTypes) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *v1.TestType, err error) { + result = &v1.TestType{} + err = c.client.Patch(pt). + Namespace(c.ns). + Resource("testtypes"). + SubResource(subresources...). + Name(name). + Body(data). + Do(). 
+ Into(result) + return +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/interface.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/interface.go new file mode 100644 index 000000000000..8d80f8f8d6d6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package example3 + +import ( + v1 "k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1" + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // V1 provides access to shared informers for resources in V1. + V1() v1.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. 
+func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// V1 returns a new v1.Interface. +func (g *group) V1() v1.Interface { + return v1.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/interface.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/interface.go new file mode 100644 index 000000000000..1ccc8f3ca05d --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package v1 + +import ( + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // TestTypes returns a TestTypeInformer. 
+ TestTypes() TestTypeInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// TestTypes returns a TestTypeInformer. +func (v *version) TestTypes() TestTypeInformer { + return &testTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go new file mode 100644 index 000000000000..dddf77204cf6 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io/v1/testtype.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package v1 + +import ( + time "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + example3iov1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" + versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" + v1 "k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1" +) + +// TestTypeInformer provides access to a shared informer and lister for +// TestTypes. +type TestTypeInformer interface { + Informer() cache.SharedIndexInformer + Lister() v1.TestTypeLister +} + +type testTypeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewFilteredTestTypeInformer(client versioned.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options metav1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ThirdExampleV1().TestTypes(namespace).List(options) + }, + WatchFunc: func(options metav1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ThirdExampleV1().TestTypes(namespace).Watch(options) + }, + }, + &example3iov1.TestType{}, + resyncPeriod, + indexers, + ) +} + +func (f *testTypeInformer) defaultInformer(client versioned.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *testTypeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&example3iov1.TestType{}, f.defaultInformer) +} + +func (f *testTypeInformer) Lister() v1.TestTypeLister { + return v1.NewTestTypeLister(f.Informer().GetIndexer()) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go index fc4aa0d392ed..76348cb0ed58 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/factory.go @@ -30,6 +30,7 @@ import ( versioned "k8s.io/code-generator/_examples/apiserver/clientset/versioned" example 
"k8s.io/code-generator/_examples/apiserver/informers/externalversions/example" example2 "k8s.io/code-generator/_examples/apiserver/informers/externalversions/example2" + example3io "k8s.io/code-generator/_examples/apiserver/informers/externalversions/example3.io" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/externalversions/internalinterfaces" ) @@ -175,6 +176,7 @@ type SharedInformerFactory interface { Example() example.Interface SecondExample() example2.Interface + ThirdExample() example3io.Interface } func (f *sharedInformerFactory) Example() example.Interface { @@ -184,3 +186,7 @@ func (f *sharedInformerFactory) Example() example.Interface { func (f *sharedInformerFactory) SecondExample() example2.Interface { return example2.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) ThirdExample() example3io.Interface { + return example3io.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go index feb7b2028f8d..5745977ababb 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/externalversions/generic.go @@ -25,6 +25,7 @@ import ( cache "k8s.io/client-go/tools/cache" v1 "k8s.io/code-generator/_examples/apiserver/apis/example/v1" example2v1 "k8s.io/code-generator/_examples/apiserver/apis/example2/v1" + example3iov1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -57,6 +58,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case 
v1.SchemeGroupVersion.WithResource("testtypes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Example().V1().TestTypes().Informer()}, nil + // Group=example.dots.apiserver.code-generator.k8s.io, Version=v1 + case example3iov1.SchemeGroupVersion.WithResource("testtypes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.ThirdExample().V1().TestTypes().Informer()}, nil + // Group=example.test.apiserver.code-generator.k8s.io, Version=v1 case example2v1.SchemeGroupVersion.WithResource("testtypes"): return &genericInformer{resource: resource.GroupResource(), informer: f.SecondExample().V1().TestTypes().Informer()}, nil diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/interface.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/interface.go new file mode 100644 index 000000000000..3e469b23a5dc --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/interface.go @@ -0,0 +1,46 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. 
+ +package example3 + +import ( + internalversion "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion" + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" +) + +// Interface provides access to each of this group's versions. +type Interface interface { + // InternalVersion provides access to shared informers for resources in InternalVersion. + InternalVersion() internalversion.Interface +} + +type group struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &group{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// InternalVersion returns a new internalversion.Interface. +func (g *group) InternalVersion() internalversion.Interface { + return internalversion.New(g.factory, g.namespace, g.tweakListOptions) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/interface.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/interface.go new file mode 100644 index 000000000000..0fd5f8c3b96f --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/interface.go @@ -0,0 +1,45 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalversion + +import ( + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" +) + +// Interface provides access to all the informers in this group version. +type Interface interface { + // TestTypes returns a TestTypeInformer. + TestTypes() TestTypeInformer +} + +type version struct { + factory internalinterfaces.SharedInformerFactory + namespace string + tweakListOptions internalinterfaces.TweakListOptionsFunc +} + +// New returns a new Interface. +func New(f internalinterfaces.SharedInformerFactory, namespace string, tweakListOptions internalinterfaces.TweakListOptionsFunc) Interface { + return &version{factory: f, namespace: namespace, tweakListOptions: tweakListOptions} +} + +// TestTypes returns a TestTypeInformer. +func (v *version) TestTypes() TestTypeInformer { + return &testTypeInformer{factory: v.factory, namespace: v.namespace, tweakListOptions: v.tweakListOptions} +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go new file mode 100644 index 000000000000..b99a9be2e7f4 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io/internalversion/testtype.go @@ -0,0 +1,89 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by informer-gen. DO NOT EDIT. + +package internalversion + +import ( + time "time" + + v1 "k8s.io/apimachinery/pkg/apis/meta/v1" + runtime "k8s.io/apimachinery/pkg/runtime" + watch "k8s.io/apimachinery/pkg/watch" + cache "k8s.io/client-go/tools/cache" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" + clientsetinternalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" + internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" + internalversion "k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion" +) + +// TestTypeInformer provides access to a shared informer and lister for +// TestTypes. +type TestTypeInformer interface { + Informer() cache.SharedIndexInformer + Lister() internalversion.TestTypeLister +} + +type testTypeInformer struct { + factory internalinterfaces.SharedInformerFactory + tweakListOptions internalinterfaces.TweakListOptionsFunc + namespace string +} + +// NewTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. 
+func NewTestTypeInformer(client clientsetinternalversion.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, namespace, resyncPeriod, indexers, nil) +} + +// NewFilteredTestTypeInformer constructs a new informer for TestType type. +// Always prefer using an informer factory to get a shared informer instead of getting an independent +// one. This reduces memory footprint and number of connections to the server. +func NewFilteredTestTypeInformer(client clientsetinternalversion.Interface, namespace string, resyncPeriod time.Duration, indexers cache.Indexers, tweakListOptions internalinterfaces.TweakListOptionsFunc) cache.SharedIndexInformer { + return cache.NewSharedIndexInformer( + &cache.ListWatch{ + ListFunc: func(options v1.ListOptions) (runtime.Object, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ThirdExample().TestTypes(namespace).List(options) + }, + WatchFunc: func(options v1.ListOptions) (watch.Interface, error) { + if tweakListOptions != nil { + tweakListOptions(&options) + } + return client.ThirdExample().TestTypes(namespace).Watch(options) + }, + }, + &example3io.TestType{}, + resyncPeriod, + indexers, + ) +} + +func (f *testTypeInformer) defaultInformer(client clientsetinternalversion.Interface, resyncPeriod time.Duration) cache.SharedIndexInformer { + return NewFilteredTestTypeInformer(client, f.namespace, resyncPeriod, cache.Indexers{cache.NamespaceIndex: cache.MetaNamespaceIndexFunc}, f.tweakListOptions) +} + +func (f *testTypeInformer) Informer() cache.SharedIndexInformer { + return f.factory.InformerFor(&example3io.TestType{}, f.defaultInformer) +} + +func (f *testTypeInformer) Lister() internalversion.TestTypeLister { + return internalversion.NewTestTypeLister(f.Informer().GetIndexer()) +} diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go index ca6d4defe94e..08fc96a5df72 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/factory.go @@ -30,6 +30,7 @@ import ( internalversion "k8s.io/code-generator/_examples/apiserver/clientset/internalversion" example "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example" example2 "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example2" + example3io "k8s.io/code-generator/_examples/apiserver/informers/internalversion/example3.io" internalinterfaces "k8s.io/code-generator/_examples/apiserver/informers/internalversion/internalinterfaces" ) @@ -175,6 +176,7 @@ type SharedInformerFactory interface { Example() example.Interface SecondExample() example2.Interface + ThirdExample() example3io.Interface } func (f *sharedInformerFactory) Example() example.Interface { @@ -184,3 +186,7 @@ func (f *sharedInformerFactory) Example() example.Interface { func (f *sharedInformerFactory) SecondExample() example2.Interface { return example2.New(f, f.namespace, f.tweakListOptions) } + +func (f *sharedInformerFactory) ThirdExample() example3io.Interface { + return example3io.New(f, f.namespace, f.tweakListOptions) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go index 046ccac959fc..9e9eddb94467 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go +++ 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/informers/internalversion/generic.go @@ -25,6 +25,7 @@ import ( cache "k8s.io/client-go/tools/cache" example "k8s.io/code-generator/_examples/apiserver/apis/example" example2 "k8s.io/code-generator/_examples/apiserver/apis/example2" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" ) // GenericInformer is type of SharedIndexInformer which will locate and delegate to other @@ -57,6 +58,10 @@ func (f *sharedInformerFactory) ForResource(resource schema.GroupVersionResource case example.SchemeGroupVersion.WithResource("testtypes"): return &genericInformer{resource: resource.GroupResource(), informer: f.Example().InternalVersion().TestTypes().Informer()}, nil + // Group=example.dots.apiserver.code-generator.k8s.io, Version=internalVersion + case example3io.SchemeGroupVersion.WithResource("testtypes"): + return &genericInformer{resource: resource.GroupResource(), informer: f.ThirdExample().InternalVersion().TestTypes().Informer()}, nil + // Group=example.test.apiserver.code-generator.k8s.io, Version=internalVersion case example2.SchemeGroupVersion.WithResource("testtypes"): return &genericInformer{resource: resource.GroupResource(), informer: f.SecondExample().InternalVersion().TestTypes().Informer()}, nil diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/expansion_generated.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/expansion_generated.go new file mode 100644 index 000000000000..4bb87150c082 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. 
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package internalversion + +// TestTypeListerExpansion allows custom methods to be added to +// TestTypeLister. +type TestTypeListerExpansion interface{} + +// TestTypeNamespaceListerExpansion allows custom methods to be added to +// TestTypeNamespaceLister. +type TestTypeNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/testtype.go new file mode 100644 index 000000000000..f9a452150e29 --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/internalversion/testtype.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. 
DO NOT EDIT. + +package internalversion + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + example3io "k8s.io/code-generator/_examples/apiserver/apis/example3.io" +) + +// TestTypeLister helps list TestTypes. +type TestTypeLister interface { + // List lists all TestTypes in the indexer. + List(selector labels.Selector) (ret []*example3io.TestType, err error) + // TestTypes returns an object that can list and get TestTypes. + TestTypes(namespace string) TestTypeNamespaceLister + TestTypeListerExpansion +} + +// testTypeLister implements the TestTypeLister interface. +type testTypeLister struct { + indexer cache.Indexer +} + +// NewTestTypeLister returns a new TestTypeLister. +func NewTestTypeLister(indexer cache.Indexer) TestTypeLister { + return &testTypeLister{indexer: indexer} +} + +// List lists all TestTypes in the indexer. +func (s *testTypeLister) List(selector labels.Selector) (ret []*example3io.TestType, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*example3io.TestType)) + }) + return ret, err +} + +// TestTypes returns an object that can list and get TestTypes. +func (s *testTypeLister) TestTypes(namespace string) TestTypeNamespaceLister { + return testTypeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TestTypeNamespaceLister helps list and get TestTypes. +type TestTypeNamespaceLister interface { + // List lists all TestTypes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*example3io.TestType, err error) + // Get retrieves the TestType from the indexer for a given namespace and name. + Get(name string) (*example3io.TestType, error) + TestTypeNamespaceListerExpansion +} + +// testTypeNamespaceLister implements the TestTypeNamespaceLister +// interface. 
+type testTypeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TestTypes in the indexer for a given namespace. +func (s testTypeNamespaceLister) List(selector labels.Selector) (ret []*example3io.TestType, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*example3io.TestType)) + }) + return ret, err +} + +// Get retrieves the TestType from the indexer for a given namespace and name. +func (s testTypeNamespaceLister) Get(name string) (*example3io.TestType, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(example3io.Resource("testtype"), name) + } + return obj.(*example3io.TestType), nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/expansion_generated.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/expansion_generated.go new file mode 100644 index 000000000000..0192e05f0d1e --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/expansion_generated.go @@ -0,0 +1,27 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. 
+ +package v1 + +// TestTypeListerExpansion allows custom methods to be added to +// TestTypeLister. +type TestTypeListerExpansion interface{} + +// TestTypeNamespaceListerExpansion allows custom methods to be added to +// TestTypeNamespaceLister. +type TestTypeNamespaceListerExpansion interface{} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/testtype.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/testtype.go new file mode 100644 index 000000000000..8dc5c857194b --- /dev/null +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/listers/example3.io/v1/testtype.go @@ -0,0 +1,94 @@ +/* +Copyright The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Code generated by lister-gen. DO NOT EDIT. + +package v1 + +import ( + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/labels" + "k8s.io/client-go/tools/cache" + v1 "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1" +) + +// TestTypeLister helps list TestTypes. +type TestTypeLister interface { + // List lists all TestTypes in the indexer. + List(selector labels.Selector) (ret []*v1.TestType, err error) + // TestTypes returns an object that can list and get TestTypes. + TestTypes(namespace string) TestTypeNamespaceLister + TestTypeListerExpansion +} + +// testTypeLister implements the TestTypeLister interface. 
+type testTypeLister struct { + indexer cache.Indexer +} + +// NewTestTypeLister returns a new TestTypeLister. +func NewTestTypeLister(indexer cache.Indexer) TestTypeLister { + return &testTypeLister{indexer: indexer} +} + +// List lists all TestTypes in the indexer. +func (s *testTypeLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { + err = cache.ListAll(s.indexer, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TestType)) + }) + return ret, err +} + +// TestTypes returns an object that can list and get TestTypes. +func (s *testTypeLister) TestTypes(namespace string) TestTypeNamespaceLister { + return testTypeNamespaceLister{indexer: s.indexer, namespace: namespace} +} + +// TestTypeNamespaceLister helps list and get TestTypes. +type TestTypeNamespaceLister interface { + // List lists all TestTypes in the indexer for a given namespace. + List(selector labels.Selector) (ret []*v1.TestType, err error) + // Get retrieves the TestType from the indexer for a given namespace and name. + Get(name string) (*v1.TestType, error) + TestTypeNamespaceListerExpansion +} + +// testTypeNamespaceLister implements the TestTypeNamespaceLister +// interface. +type testTypeNamespaceLister struct { + indexer cache.Indexer + namespace string +} + +// List lists all TestTypes in the indexer for a given namespace. +func (s testTypeNamespaceLister) List(selector labels.Selector) (ret []*v1.TestType, err error) { + err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) { + ret = append(ret, m.(*v1.TestType)) + }) + return ret, err +} + +// Get retrieves the TestType from the indexer for a given namespace and name. 
+func (s testTypeNamespaceLister) Get(name string) (*v1.TestType, error) { + obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name) + if err != nil { + return nil, err + } + if !exists { + return nil, errors.NewNotFound(v1.Resource("testtype"), name) + } + return obj.(*v1.TestType), nil +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/openapi/zz_generated.openapi.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/openapi/zz_generated.openapi.go index 04de3f916375..ffcee0b073e5 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/openapi/zz_generated.openapi.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/_examples/apiserver/openapi/zz_generated.openapi.go @@ -30,63 +30,66 @@ import ( func GetOpenAPIDefinitions(ref common.ReferenceCallback) map[string]common.OpenAPIDefinition { return map[string]common.OpenAPIDefinition{ - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ExportOptions": schema_pkg_apis_meta_v1_ExportOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": 
schema_pkg_apis_meta_v1_GetOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), - 
"k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": schema_pkg_apis_meta_v1_TableRowCondition(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), - "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), - "k8s.io/apimachinery/pkg/runtime.RawExtension": schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref), - "k8s.io/apimachinery/pkg/runtime.TypeMeta": schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref), - "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), - "k8s.io/apimachinery/pkg/version.Info": 
schema_k8sio_apimachinery_pkg_version_Info(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestType": schema_apiserver_apis_example_v1_TestType(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestTypeList": schema_apiserver_apis_example_v1_TestTypeList(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestTypeStatus": schema_apiserver_apis_example_v1_TestTypeStatus(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestType": schema_apiserver_apis_example2_v1_TestType(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestTypeList": schema_apiserver_apis_example2_v1_TestTypeList(ref), - "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestTypeStatus": schema_apiserver_apis_example2_v1_TestTypeStatus(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroup": schema_pkg_apis_meta_v1_APIGroup(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIGroupList": schema_pkg_apis_meta_v1_APIGroupList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResource": schema_pkg_apis_meta_v1_APIResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIResourceList": schema_pkg_apis_meta_v1_APIResourceList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.APIVersions": schema_pkg_apis_meta_v1_APIVersions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.CreateOptions": schema_pkg_apis_meta_v1_CreateOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.DeleteOptions": schema_pkg_apis_meta_v1_DeleteOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Duration": schema_pkg_apis_meta_v1_Duration(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ExportOptions": schema_pkg_apis_meta_v1_ExportOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.FieldsV1": schema_pkg_apis_meta_v1_FieldsV1(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GetOptions": schema_pkg_apis_meta_v1_GetOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupKind": schema_pkg_apis_meta_v1_GroupKind(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.GroupResource": schema_pkg_apis_meta_v1_GroupResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersion": schema_pkg_apis_meta_v1_GroupVersion(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionForDiscovery": schema_pkg_apis_meta_v1_GroupVersionForDiscovery(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionKind": schema_pkg_apis_meta_v1_GroupVersionKind(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.GroupVersionResource": schema_pkg_apis_meta_v1_GroupVersionResource(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.InternalEvent": schema_pkg_apis_meta_v1_InternalEvent(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelector": schema_pkg_apis_meta_v1_LabelSelector(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.LabelSelectorRequirement": schema_pkg_apis_meta_v1_LabelSelectorRequirement(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.List": schema_pkg_apis_meta_v1_List(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta": schema_pkg_apis_meta_v1_ListMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ListOptions": schema_pkg_apis_meta_v1_ListOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ManagedFieldsEntry": schema_pkg_apis_meta_v1_ManagedFieldsEntry(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.MicroTime": schema_pkg_apis_meta_v1_MicroTime(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta": schema_pkg_apis_meta_v1_ObjectMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.OwnerReference": schema_pkg_apis_meta_v1_OwnerReference(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadata": schema_pkg_apis_meta_v1_PartialObjectMetadata(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PartialObjectMetadataList": schema_pkg_apis_meta_v1_PartialObjectMetadataList(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Patch": schema_pkg_apis_meta_v1_Patch(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.PatchOptions": schema_pkg_apis_meta_v1_PatchOptions(ref), + 
"k8s.io/apimachinery/pkg/apis/meta/v1.Preconditions": schema_pkg_apis_meta_v1_Preconditions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.RootPaths": schema_pkg_apis_meta_v1_RootPaths(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.ServerAddressByClientCIDR": schema_pkg_apis_meta_v1_ServerAddressByClientCIDR(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Status": schema_pkg_apis_meta_v1_Status(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusCause": schema_pkg_apis_meta_v1_StatusCause(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.StatusDetails": schema_pkg_apis_meta_v1_StatusDetails(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Table": schema_pkg_apis_meta_v1_Table(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableColumnDefinition": schema_pkg_apis_meta_v1_TableColumnDefinition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableOptions": schema_pkg_apis_meta_v1_TableOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRow": schema_pkg_apis_meta_v1_TableRow(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TableRowCondition": schema_pkg_apis_meta_v1_TableRowCondition(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Time": schema_pkg_apis_meta_v1_Time(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.Timestamp": schema_pkg_apis_meta_v1_Timestamp(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.TypeMeta": schema_pkg_apis_meta_v1_TypeMeta(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.UpdateOptions": schema_pkg_apis_meta_v1_UpdateOptions(ref), + "k8s.io/apimachinery/pkg/apis/meta/v1.WatchEvent": schema_pkg_apis_meta_v1_WatchEvent(ref), + "k8s.io/apimachinery/pkg/runtime.RawExtension": schema_k8sio_apimachinery_pkg_runtime_RawExtension(ref), + "k8s.io/apimachinery/pkg/runtime.TypeMeta": schema_k8sio_apimachinery_pkg_runtime_TypeMeta(ref), + "k8s.io/apimachinery/pkg/runtime.Unknown": schema_k8sio_apimachinery_pkg_runtime_Unknown(ref), + "k8s.io/apimachinery/pkg/version.Info": schema_k8sio_apimachinery_pkg_version_Info(ref), + 
"k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestType": schema_apiserver_apis_example_v1_TestType(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestTypeList": schema_apiserver_apis_example_v1_TestTypeList(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example/v1.TestTypeStatus": schema_apiserver_apis_example_v1_TestTypeStatus(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestType": schema_apiserver_apis_example2_v1_TestType(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestTypeList": schema_apiserver_apis_example2_v1_TestTypeList(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example2/v1.TestTypeStatus": schema_apiserver_apis_example2_v1_TestTypeStatus(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestType": schema_apiserver_apis_example3io_v1_TestType(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestTypeList": schema_apiserver_apis_example3io_v1_TestTypeList(ref), + "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestTypeStatus": schema_apiserver_apis_example3io_v1_TestTypeStatus(ref), } } @@ -2525,3 +2528,108 @@ func schema_apiserver_apis_example2_v1_TestTypeStatus(ref common.ReferenceCallba }, } } + +func schema_apiserver_apis_example3io_v1_TestType(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TestType is a top-level type. A client is created for it.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta"), + }, + }, + "status": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestTypeStatus"), + }, + }, + }, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ObjectMeta", "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestTypeStatus"}, + } +} + +func schema_apiserver_apis_example3io_v1_TestTypeList(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Description: "TestTypeList is a top-level list type. The client methods for lists are automatically created. You are not supposed to create a separated client for this one.", + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "kind": { + SchemaProps: spec.SchemaProps{ + Description: "Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds", + Type: []string{"string"}, + Format: "", + }, + }, + "apiVersion": { + SchemaProps: spec.SchemaProps{ + Description: "APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources", + Type: []string{"string"}, + Format: "", + }, + }, + "metadata": { + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta"), + }, + }, + "items": { + SchemaProps: spec.SchemaProps{ + Type: []string{"array"}, + Items: &spec.SchemaOrArray{ + Schema: &spec.Schema{ + SchemaProps: spec.SchemaProps{ + Ref: ref("k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestType"), + }, + }, + }, + }, + }, + }, + Required: []string{"items"}, + }, + }, + Dependencies: []string{ + "k8s.io/apimachinery/pkg/apis/meta/v1.ListMeta", "k8s.io/code-generator/_examples/apiserver/apis/example3.io/v1.TestType"}, + } +} + +func schema_apiserver_apis_example3io_v1_TestTypeStatus(ref common.ReferenceCallback) common.OpenAPIDefinition { + return common.OpenAPIDefinition{ + Schema: spec.Schema{ + SchemaProps: spec.SchemaProps{ + Type: []string{"object"}, + Properties: map[string]spec.Schema{ + "Blah": { + SchemaProps: spec.SchemaProps{ + Type: []string{"string"}, + Format: "", + }, + }, + }, + Required: []string{"Blah"}, + }, + }, + } +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go index 534a46424343..3966d5387d24 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go +++ 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/cmd/informer-gen/generators/packages.go @@ -288,9 +288,10 @@ func factoryInterfacePackage(basePackage string, boilerplate []byte, clientSetPa func groupPackage(basePackage string, groupVersions clientgentypes.GroupVersions, boilerplate []byte) generator.Package { packagePath := filepath.Join(basePackage, groupVersions.PackageName) + groupPkgName := strings.Split(string(groupVersions.PackageName), ".")[0] return &generator.DefaultPackage{ - PackageName: groupVersions.PackageName, + PackageName: groupPkgName, PackagePath: packagePath, HeaderText: boilerplate, GeneratorFunc: func(c *generator.Context) (generators []generator.Generator) { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.mod b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.mod index 1491331d1035..9c7d28dc69df 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.mod +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.mod @@ -15,6 +15,7 @@ require ( golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac // indirect gonum.org/v1/gonum v0.0.0-20190331200053-3d26580ed485 gonum.org/v1/netlib v0.0.0-20190331212654-76723241ea4e // indirect + gopkg.in/yaml.v2 v2.2.4 // indirect k8s.io/gengo v0.0.0-20190822140433-26a664648505 k8s.io/klog v0.4.0 k8s.io/kube-openapi v0.0.0-20190816220812-743ec37842bf diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.sum index 3a65cee6cf83..c12e1ff03458 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/go.sum @@ -103,6 +103,8 @@ gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= 
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/gengo v0.0.0-20190822140433-26a664648505 h1:ZY6yclUKVbZ+SdWnkfY+Je5vrMpKOxmGeKRbsXVmqYM= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/hack/update-codegen.sh b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/hack/update-codegen.sh index 11ed17ad6da5..baac3d90c6a4 100755 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/hack/update-codegen.sh +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/code-generator/hack/update-codegen.sh @@ -26,7 +26,7 @@ SCRIPT_ROOT=$(dirname "${BASH_SOURCE[0]}")/.. # instead of the $GOPATH directly. For normal projects this can be dropped. "$(dirname "${BASH_SOURCE[0]}")"/../generate-internal-groups.sh all \ k8s.io/code-generator/_examples/apiserver k8s.io/code-generator/_examples/apiserver/apis k8s.io/code-generator/_examples/apiserver/apis \ - "example:v1 example2:v1" \ + "example:v1 example2:v1 example3.io:v1" \ --output-base "$(dirname "${BASH_SOURCE[0]}")/../../.." 
\ --go-header-file "${SCRIPT_ROOT}/hack/boilerplate.go.txt" "$(dirname "${BASH_SOURCE[0]}")"/../generate-groups.sh all \ diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/component-base/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/component-base/go.sum index a73abab7691b..30cae39f1c18 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/component-base/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/component-base/go.sum @@ -171,6 +171,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib/go.sum index 6bb3ab974af9..d092f95f6d2f 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/csi-translation-lib/go.sum @@ -139,6 +139,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= 
+gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/go.sum index 362e2237c2cf..20835ef12bd9 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-aggregator/go.sum @@ -303,6 +303,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager/go.sum index 205ee54a225a..8db464750ebc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-controller-manager/go.sum @@ -147,6 +147,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 
v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-proxy/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-proxy/go.sum index 205ee54a225a..8db464750ebc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-proxy/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-proxy/go.sum @@ -147,6 +147,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler/go.sum index 205ee54a225a..8db464750ebc 100644 --- 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kube-scheduler/go.sum @@ -147,6 +147,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.mod b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.mod index bee8e06bb9bf..5dc8f682b57c 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.mod +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.mod @@ -33,7 +33,7 @@ require ( github.com/spf13/pflag v1.0.3 github.com/stretchr/testify v1.3.0 golang.org/x/sys v0.0.0-20190616124812-15dcb6c0061f - gopkg.in/yaml.v2 v2.2.2 + gopkg.in/yaml.v2 v2.2.4 gotest.tools v2.2.0+incompatible // indirect k8s.io/api v0.0.0 k8s.io/apimachinery v0.0.0 diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.sum index d4d8e419cf0c..2574f7687acc 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubectl/go.sum @@ -268,6 +268,8 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWD gopkg.in/yaml.v2 v2.2.1/go.mod 
h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubelet/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubelet/go.sum index 2b8d5c31c99a..bbd79922d4b4 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubelet/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/kubelet/go.sum @@ -87,8 +87,8 @@ gopkg.in/inf.v0 v0.9.0/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= -gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= k8s.io/klog v0.0.0-20181102134211-b9b56d5dfc92/go.mod h1:Gq+BEi5rUBO/HRz0bTSXDUcqjScdoY3a9IHpCEIOOfk= k8s.io/klog v0.4.0 h1:lCJCxf/LIowc2IGS9TPjWDyXY4nOmdGdfcwwDQCOURQ= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go index 10518b05e2a4..39e7073fffa4 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common.go @@ -150,17 +150,23 @@ func (c *controllerCommon) AttachDisk(isManagedDisk bool, diskName, diskURI stri // DetachDisk detaches a disk from host. The vhd can be identified by diskName or diskURI. func (c *controllerCommon) DetachDisk(diskName, diskURI string, nodeName types.NodeName) error { - vmset, err := c.getNodeVMSet(nodeName) - if err != nil { - return err - } - instanceid, err := c.cloud.InstanceID(context.TODO(), nodeName) if err != nil { + if err == cloudprovider.InstanceNotFound { + // if host doesn't exist, no need to detach + klog.Warningf("azureDisk - failed to get azure instance id(%q), DetachDisk(%s) will assume disk is already detached", + nodeName, diskURI) + return nil + } klog.Warningf("failed to get azure instance id (%v)", err) return fmt.Errorf("failed to get azure instance id for node %q (%v)", nodeName, err) } + vmset, err := c.getNodeVMSet(nodeName) + if err != nil { + return err + } + klog.V(2).Infof("detach %v from node %q", diskURI, nodeName) // make the lock here as small as possible diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go index 32e6da6bd2ca..477c35b1cf5b 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_common_test.go @@ -88,9 +88,9 @@ func TestCommonDetachDisk(t *testing.T) { expectedErr bool }{ { - desc: "an error shall be returned if there's no 
such instance corresponding to given nodeName", + desc: "error should not be returned if there's no such instance corresponding to given nodeName", nodeName: "vm1", - expectedErr: true, + expectedErr: false, }, { desc: "no error shall be returned if there's no matching disk according to given diskName", diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go index 5115d8043ec3..0d8468dfd7d5 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_controller_vmss.go @@ -86,8 +86,9 @@ func (ss *scaleSet) AttachDisk(isManagedDisk bool, diskName, diskURI string, nod defer cancel() // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + if err = ss.deleteCacheForNode(vmName); err != nil { + return err + } klog.V(2).Infof("azureDisk - update(%s): vm(%s) - attach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) _, err = ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "attach_disk") @@ -157,8 +158,9 @@ func (ss *scaleSet) DetachDisk(diskName, diskURI string, nodeName types.NodeName defer cancel() // Invalidate the cache right after updating - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + if err = ss.deleteCacheForNode(vmName); err != nil { + return nil, err + } klog.V(2).Infof("azureDisk - update(%s): vm(%s) - detach disk(%s, %s)", nodeResourceGroup, nodeName, diskName, diskURI) return ss.VirtualMachineScaleSetVMsClient.Update(ctx, nodeResourceGroup, ssName, instanceID, newVM, "detach_disk") diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go index 8dfeb4f3a80d..05aa5adfa4d2 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer.go @@ -104,18 +104,38 @@ const ( clusterNameKey = "kubernetes-cluster-name" ) -// GetLoadBalancer returns whether the specified load balancer exists, and +// GetLoadBalancer returns whether the specified load balancer and its components exist, and // if so, what its status is. func (az *Cloud) GetLoadBalancer(ctx context.Context, clusterName string, service *v1.Service) (status *v1.LoadBalancerStatus, exists bool, err error) { - _, status, exists, err = az.getServiceLoadBalancer(service, clusterName, nil, false) + // Since public IP is not a part of the load balancer on Azure, + // there is a chance that we could orphan public IP resources while we delete the load blanacer (kubernetes/kubernetes#80571). + // We need to make sure the existence of the load balancer depends on the load balancer resource and public IP resource on Azure. 
+ existsPip := func() bool { + pipName, _, err := az.determinePublicIPName(clusterName, service) + if err != nil { + return false + } + pipResourceGroup := az.getPublicIPAddressResourceGroup(service) + _, existsPip, err := az.getPublicIPAddress(pipResourceGroup, pipName) + if err != nil { + return false + } + return existsPip + }() + + _, status, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false) if err != nil { - return nil, false, err + return nil, existsPip, err } - if !exists { + + // Return exists = false only if the load balancer and the public IP are not found on Azure + if !existsLb && !existsPip { serviceName := getServiceName(service) klog.V(5).Infof("getloadbalancer (cluster:%s) (service:%s) - doesn't exist", clusterName, serviceName) return nil, false, nil } + + // Return exists = true if either the load balancer or the public IP (or both) exists return status, true, nil } @@ -169,6 +189,10 @@ func (az *Cloud) EnsureLoadBalancer(ctx context.Context, clusterName string, ser // UpdateLoadBalancer updates hosts under the specified load balancer. 
func (az *Cloud) UpdateLoadBalancer(ctx context.Context, clusterName string, service *v1.Service, nodes []*v1.Node) error { + if !az.shouldUpdateLoadBalancer(clusterName, service) { + klog.V(2).Infof("UpdateLoadBalancer: skipping service %s because it is either being deleted or does not exist anymore", service.Name) + return nil + } _, err := az.EnsureLoadBalancer(ctx, clusterName, service, nodes) return err } @@ -475,7 +499,7 @@ func (az *Cloud) findServiceIPAddress(ctx context.Context, clusterName string, s return service.Spec.LoadBalancerIP, nil } - lbStatus, existsLb, err := az.GetLoadBalancer(ctx, clusterName, service) + _, lbStatus, existsLb, err := az.getServiceLoadBalancer(service, clusterName, nil, false) if err != nil { return "", err } @@ -546,8 +570,12 @@ func (az *Cloud) ensurePublicIPExists(service *v1.Service, pipName string, domai if ipv6 { pip.PublicIPAddressVersion = network.IPv6 klog.V(2).Infof("service(%s): pip(%s) - creating as ipv6 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP) - // static allocation on IPv6 on Azure is not allowed + pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Dynamic + if az.useStandardLoadBalancer() { + // standard sku must have static allocation method for ipv6 + pip.PublicIPAddressPropertiesFormat.PublicIPAllocationMethod = network.Static + } } else { pip.PublicIPAddressVersion = network.IPv4 klog.V(2).Infof("service(%s): pip(%s) - creating as ipv4 for clusterIP:%v", serviceName, *pip.Name, service.Spec.ClusterIP) @@ -1279,6 +1307,11 @@ func (az *Cloud) reconcileSecurityGroup(clusterName string, service *v1.Service, return &sg, nil } +func (az *Cloud) shouldUpdateLoadBalancer(clusterName string, service *v1.Service) bool { + _, _, existsLb, _ := az.getServiceLoadBalancer(service, clusterName, nil, false) + return existsLb && service.ObjectMeta.DeletionTimestamp == nil +} + func logSafe(s *string) string { if s == nil { return "(nil)" diff --git 
a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go index 22284569ba0d..808b35790108 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_loadbalancer_test.go @@ -25,6 +25,7 @@ import ( "strconv" "strings" "testing" + "time" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" "github.com/Azure/go-autorest/autorest/to" @@ -1895,3 +1896,66 @@ func TestEnsurePublicIPExists(t *testing.T) { assert.Equal(t, test.expectedError, err != nil, "TestCase[%d]: %s", i, test.desc) } } + +func TestShouldUpdateLoadBalancer(t *testing.T) { + testCases := []struct { + desc string + lbHasDeletionTimestamp bool + existsLb bool + expectedOutput bool + }{ + { + desc: "should update a load balancer that does not have a deletion timestamp and exists in Azure", + lbHasDeletionTimestamp: false, + existsLb: true, + expectedOutput: true, + }, + { + desc: "should not update a load balancer that is being deleted / already deleted in K8s", + lbHasDeletionTimestamp: true, + existsLb: true, + expectedOutput: false, + }, + { + desc: "should not update a load balancer that does not exist in Azure", + lbHasDeletionTimestamp: false, + existsLb: false, + expectedOutput: false, + }, + { + desc: "should not update a load balancer that has a deletion timestamp and does not exist in Azure", + lbHasDeletionTimestamp: true, + existsLb: false, + expectedOutput: false, + }, + } + + for i, test := range testCases { + az := getTestCloud() + service := getTestService("test1", v1.ProtocolTCP, nil, 80) + if test.lbHasDeletionTimestamp { + service.ObjectMeta.DeletionTimestamp = &metav1.Time{time.Now()} + } + if test.existsLb { + lb := network.LoadBalancer{ + Name: to.StringPtr("lb1"), + 
LoadBalancerPropertiesFormat: &network.LoadBalancerPropertiesFormat{ + FrontendIPConfigurations: &[]network.FrontendIPConfiguration{ + { + Name: to.StringPtr("atest1"), + FrontendIPConfigurationPropertiesFormat: &network.FrontendIPConfigurationPropertiesFormat{ + PublicIPAddress: &network.PublicIPAddress{ID: to.StringPtr("id1")}, + }, + }, + }, + }, + } + _, err := az.LoadBalancerClient.CreateOrUpdate(context.TODO(), "rg", *lb.Name, lb, "") + if err != nil { + t.Fatalf("TestCase[%d] meets unexpected error: %v", i, err) + } + } + shouldUpdateLoadBalancer := az.shouldUpdateLoadBalancer(testClusterName, &service) + assert.Equal(t, test.expectedOutput, shouldUpdateLoadBalancer, "TestCase[%d]: %s", i, test.desc) + } +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go index 040de1d88b08..8feac08103be 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss.go @@ -25,6 +25,7 @@ import ( "sort" "strconv" "strings" + "sync" "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" "github.com/Azure/azure-sdk-for-go/services/network/mgmt/2019-06-01/network" @@ -60,10 +61,8 @@ type scaleSet struct { // (e.g. master nodes) may not belong to any scale sets. availabilitySet VMSet - vmssCache *timedCache - vmssVMCache *timedCache - nodeNameToScaleSetMappingCache *timedCache - availabilitySetNodesCache *timedCache + vmssVMCache *timedCache + availabilitySetNodesCache *timedCache } // newScaleSet creates a new scaleSet. 
@@ -74,22 +73,12 @@ func newScaleSet(az *Cloud) (VMSet, error) { availabilitySet: newAvailabilitySet(az), } - ss.nodeNameToScaleSetMappingCache, err = ss.newNodeNameToScaleSetMappingCache() - if err != nil { - return nil, err - } - ss.availabilitySetNodesCache, err = ss.newAvailabilitySetNodesCache() if err != nil { return nil, err } - ss.vmssCache, err = ss.newVmssCache() - if err != nil { - return nil, err - } - - ss.vmssVMCache, err = ss.newVmssVMCache() + ss.vmssVMCache, err = ss.newVMSSVirtualMachinesCache() if err != nil { return nil, err } @@ -99,39 +88,46 @@ func newScaleSet(az *Cloud) (VMSet, error) { // getVmssVM gets virtualMachineScaleSetVM by nodeName from cache. // It returns cloudprovider.InstanceNotFound if node does not belong to any scale sets. -func (ss *scaleSet) getVmssVM(nodeName string) (ssName, instanceID string, vm compute.VirtualMachineScaleSetVM, err error) { - instanceID, err = getScaleSetVMInstanceID(nodeName) - if err != nil { - return ssName, instanceID, vm, err - } +func (ss *scaleSet) getVmssVM(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { + getter := func(nodeName string) (string, string, *compute.VirtualMachineScaleSetVM, error) { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) + if err != nil { + return "", "", nil, err + } - ssName, err = ss.getScaleSetNameByNodeName(nodeName) - if err != nil { - return ssName, instanceID, vm, err - } + virtualMachines := cached.(*sync.Map) + if vm, ok := virtualMachines.Load(nodeName); ok { + result := vm.(*vmssVirtualMachinesEntry) + return result.vmssName, result.instanceID, result.virtualMachine, nil + } - if ssName == "" { - return "", "", vm, cloudprovider.InstanceNotFound + return "", "", nil, nil } - resourceGroup, err := ss.GetNodeResourceGroup(nodeName) + _, err := getScaleSetVMInstanceID(nodeName) if err != nil { - return "", "", vm, err + return "", "", nil, err } - klog.V(4).Infof("getVmssVM gets scaleSetName (%q) and instanceID (%q) for 
node %q", ssName, instanceID, nodeName) - key := buildVmssCacheKey(resourceGroup, ss.makeVmssVMName(ssName, instanceID)) - cachedVM, err := ss.vmssVMCache.Get(key) + vmssName, instanceID, vm, err := getter(nodeName) if err != nil { - return ssName, instanceID, vm, err + return "", "", nil, err + } + if vm != nil { + return vmssName, instanceID, vm, nil } - if cachedVM == nil { - klog.Errorf("Can't find node (%q) in any scale sets", nodeName) - return ssName, instanceID, vm, cloudprovider.InstanceNotFound + klog.V(3).Infof("Couldn't find VMSS VM with nodeName %s, refreshing the cache", nodeName) + ss.vmssVMCache.Delete(vmssVirtualMachinesKey) + vmssName, instanceID, vm, err = getter(nodeName) + if err != nil { + return "", "", nil, err } - return ssName, instanceID, *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil + if vm == nil { + return "", "", nil, cloudprovider.InstanceNotFound + } + return vmssName, instanceID, vm, nil } // GetPowerStatusByNodeName returns the power state of the specified node. @@ -158,20 +154,49 @@ func (ss *scaleSet) GetPowerStatusByNodeName(name string) (powerState string, er // getCachedVirtualMachineByInstanceID gets scaleSetVMInfo from cache. // The node must belong to one of scale sets. 
-func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (vm compute.VirtualMachineScaleSetVM, err error) { - vmName := ss.makeVmssVMName(scaleSetName, instanceID) - key := buildVmssCacheKey(resourceGroup, vmName) - cachedVM, err := ss.vmssVMCache.Get(key) +func (ss *scaleSet) getVmssVMByInstanceID(resourceGroup, scaleSetName, instanceID string) (*compute.VirtualMachineScaleSetVM, error) { + getter := func() (vm *compute.VirtualMachineScaleSetVM, found bool, err error) { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) + if err != nil { + return nil, false, err + } + + virtualMachines := cached.(*sync.Map) + virtualMachines.Range(func(key, value interface{}) bool { + vmEntry := value.(*vmssVirtualMachinesEntry) + if strings.EqualFold(vmEntry.resourceGroup, resourceGroup) && + strings.EqualFold(vmEntry.vmssName, scaleSetName) && + strings.EqualFold(vmEntry.instanceID, instanceID) { + vm = vmEntry.virtualMachine + found = true + return false + } + + return true + }) + + return vm, found, nil + } + + vm, found, err := getter() if err != nil { - return vm, err + return nil, err + } + if found { + return vm, nil } - if cachedVM == nil { - klog.Errorf("couldn't find vmss virtual machine by scaleSetName (%s) and instanceID (%s)", scaleSetName, instanceID) - return vm, cloudprovider.InstanceNotFound + klog.V(3).Infof("Couldn't find VMSS VM with scaleSetName %q and instanceID %q, refreshing the cache", scaleSetName, instanceID) + ss.vmssVMCache.Delete(vmssVirtualMachinesKey) + vm, found, err = getter() + if err != nil { + return nil, err + } + if !found { + return nil, cloudprovider.InstanceNotFound } - return *(cachedVM.(*compute.VirtualMachineScaleSetVM)), nil + return vm, nil } // GetInstanceIDByNodeName gets the cloud provider ID by node name. 
@@ -463,9 +488,15 @@ func (ss *scaleSet) listScaleSets(resourceGroup string) ([]string, error) { return nil, err } - ssNames := make([]string, len(allScaleSets)) - for i := range allScaleSets { - ssNames[i] = *(allScaleSets[i].Name) + ssNames := make([]string, 0) + for _, vmss := range allScaleSets { + name := *vmss.Name + if vmss.Sku != nil && to.Int64(vmss.Sku.Capacity) == 0 { + klog.V(3).Infof("Capacity of VMSS %q is 0, skipping", name) + continue + } + + ssNames = append(ssNames, name) } return ssNames, nil @@ -500,7 +531,7 @@ func (ss *scaleSet) getAgentPoolScaleSets(nodes []*v1.Node) (*[]string, error) { } nodeName := nodes[nx].Name - ssName, err := ss.getScaleSetNameByNodeName(nodeName) + ssName, _, _, err := ss.getVmssVM(nodeName) if err != nil { return nil, err } @@ -599,7 +630,7 @@ func (ss *scaleSet) GetPrimaryInterface(nodeName string) (network.Interface, err return network.Interface{}, err } - primaryInterfaceID, err := ss.getPrimaryInterfaceID(vm) + primaryInterfaceID, err := ss.getPrimaryInterfaceID(*vm) if err != nil { klog.Errorf("error: ss.GetPrimaryInterface(%s), ss.getPrimaryInterfaceID(), err=%v", nodeName, err) return network.Interface{}, err @@ -816,8 +847,9 @@ func (ss *scaleSet) EnsureHostInPool(service *v1.Service, nodeName types.NodeNam } // Invalidate the cache since we would update it. - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + if err = ss.deleteCacheForNode(vmName); err != nil { + return err + } // Update vmssVM with backoff. ctx, cancel := getContextWithCancel() @@ -1094,8 +1126,9 @@ func (ss *scaleSet) ensureBackendPoolDeletedFromNode(service *v1.Service, nodeNa } // Invalidate the cache since we would update it. - key := buildVmssCacheKey(nodeResourceGroup, ss.makeVmssVMName(ssName, instanceID)) - defer ss.vmssVMCache.Delete(key) + if err = ss.deleteCacheForNode(nodeName); err != nil { + return err + } // Update vmssVM with backoff. 
ctx, cancel := getContextWithCancel() diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go index a7ef5a5ff193..398dfed0ec5b 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache.go @@ -21,8 +21,12 @@ package azure import ( "fmt" "strings" + "sync" "time" + "github.com/Azure/azure-sdk-for-go/services/compute/mgmt/2019-07-01/compute" + "github.com/Azure/go-autorest/autorest/to" + "k8s.io/apimachinery/pkg/util/sets" "k8s.io/klog" ) @@ -31,18 +35,19 @@ var ( vmssNameSeparator = "_" vmssCacheSeparator = "#" - nodeNameToScaleSetMappingKey = "k8sNodeNameToScaleSetMappingKey" - availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" + vmssVirtualMachinesKey = "k8svmssVirtualMachinesKey" + availabilitySetNodesKey = "k8sAvailabilitySetNodesKey" - vmssCacheTTL = time.Minute - vmssVMCacheTTL = time.Minute - availabilitySetNodesCacheTTL = 5 * time.Minute - nodeNameToScaleSetMappingCacheTTL = 5 * time.Minute + availabilitySetNodesCacheTTL = 15 * time.Minute + vmssVirtualMachinesTTL = 10 * time.Minute ) -// nodeNameToScaleSetMapping maps nodeName to scaleSet name. -// The map is required because vmss nodeName is not equal to its vmName. 
-type nodeNameToScaleSetMapping map[string]string +type vmssVirtualMachinesEntry struct { + resourceGroup string + vmssName string + instanceID string + virtualMachine *compute.VirtualMachineScaleSetVM +} func (ss *scaleSet) makeVmssVMName(scaleSetName, instanceID string) string { return fmt.Sprintf("%s%s%s", scaleSetName, vmssNameSeparator, instanceID) @@ -62,32 +67,9 @@ func extractVmssVMName(name string) (string, string, error) { return ssName, instanceID, nil } -// vmssCache only holds vmss from ss.ResourceGroup because nodes from other resourceGroups -// will be excluded from LB backends. -func (ss *scaleSet) newVmssCache() (*timedCache, error) { +func (ss *scaleSet) newVMSSVirtualMachinesCache() (*timedCache, error) { getter := func(key string) (interface{}, error) { - ctx, cancel := getContextWithCancel() - defer cancel() - result, err := ss.VirtualMachineScaleSetsClient.Get(ctx, ss.ResourceGroup, key) - exists, message, realErr := checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - - if !exists { - klog.V(2).Infof("Virtual machine scale set %q not found with message: %q", key, message) - return nil, nil - } - - return &result, nil - } - - return newTimedcache(vmssCacheTTL, getter) -} - -func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { - getter := func(key string) (interface{}, error) { - localCache := make(nodeNameToScaleSetMapping) + localCache := &sync.Map{} // [nodeName]*vmssVirtualMachinesEntry allResourceGroups, err := ss.GetResourceGroups() if err != nil { @@ -106,14 +88,20 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { return nil, err } - for _, vm := range vms { + for i := range vms { + vm := vms[i] if vm.OsProfile == nil || vm.OsProfile.ComputerName == nil { klog.Warningf("failed to get computerName for vmssVM (%q)", ssName) continue } computerName := strings.ToLower(*vm.OsProfile.ComputerName) - localCache[computerName] = ssName + 
localCache.Store(computerName, &vmssVirtualMachinesEntry{ + resourceGroup: resourceGroup, + vmssName: ssName, + instanceID: to.String(vm.InstanceID), + virtualMachine: &vm, + }) } } } @@ -121,7 +109,18 @@ func (ss *scaleSet) newNodeNameToScaleSetMappingCache() (*timedCache, error) { return localCache, nil } - return newTimedcache(nodeNameToScaleSetMappingCacheTTL, getter) + return newTimedcache(vmssVirtualMachinesTTL, getter) +} + +func (ss *scaleSet) deleteCacheForNode(nodeName string) error { + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) + if err != nil { + return err + } + + virtualMachines := cached.(*sync.Map) + virtualMachines.Delete(nodeName) + return nil } func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) { @@ -151,109 +150,6 @@ func (ss *scaleSet) newAvailabilitySetNodesCache() (*timedCache, error) { return newTimedcache(availabilitySetNodesCacheTTL, getter) } -func buildVmssCacheKey(resourceGroup, name string) string { - // key is composed of # - return fmt.Sprintf("%s%s%s", strings.ToLower(resourceGroup), vmssCacheSeparator, name) -} - -func extractVmssCacheKey(key string) (string, string, error) { - // key is composed of # - keyItems := strings.Split(key, vmssCacheSeparator) - if len(keyItems) != 2 { - return "", "", fmt.Errorf("key %q is not in format '#'", key) - } - - resourceGroup := keyItems[0] - vmName := keyItems[1] - return resourceGroup, vmName, nil -} - -func (ss *scaleSet) newVmssVMCache() (*timedCache, error) { - getter := func(key string) (interface{}, error) { - // key is composed of # - resourceGroup, vmName, err := extractVmssCacheKey(key) - if err != nil { - return nil, err - } - - // vmName's format is 'scaleSetName_instanceID' - ssName, instanceID, err := extractVmssVMName(vmName) - if err != nil { - return nil, err - } - - // Not found, the VM doesn't belong to any known scale sets. 
- if ssName == "" { - return nil, nil - } - - ctx, cancel := getContextWithCancel() - defer cancel() - result, err := ss.VirtualMachineScaleSetVMsClient.Get(ctx, resourceGroup, ssName, instanceID) - exists, message, realErr := checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - - if !exists { - klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) - return nil, nil - } - - // Get instanceView for vmssVM. - if result.InstanceView == nil { - viewCtx, viewCancel := getContextWithCancel() - defer viewCancel() - view, err := ss.VirtualMachineScaleSetVMsClient.GetInstanceView(viewCtx, resourceGroup, ssName, instanceID) - // It is possible that the vmssVM gets removed just before this call. So check whether the VM exist again. - exists, message, realErr = checkResourceExistsFromError(err) - if realErr != nil { - return nil, realErr - } - if !exists { - klog.V(2).Infof("Virtual machine scale set VM %q not found with message: %q", key, message) - return nil, nil - } - - result.InstanceView = &view - } - - return &result, nil - } - - return newTimedcache(vmssVMCacheTTL, getter) -} - -func (ss *scaleSet) getScaleSetNameByNodeName(nodeName string) (string, error) { - getScaleSetName := func(nodeName string) (string, error) { - nodeNameMapping, err := ss.nodeNameToScaleSetMappingCache.Get(nodeNameToScaleSetMappingKey) - if err != nil { - return "", err - } - - realMapping := nodeNameMapping.(nodeNameToScaleSetMapping) - if ssName, ok := realMapping[nodeName]; ok { - return ssName, nil - } - - return "", nil - } - - ssName, err := getScaleSetName(nodeName) - if err != nil { - return "", err - } - - if ssName != "" { - return ssName, nil - } - - // ssName is still not found, it is likely that new Nodes are created. - // Force refresh the cache and try again. 
- ss.nodeNameToScaleSetMappingCache.Delete(nodeNameToScaleSetMappingKey) - return getScaleSetName(nodeName) -} - func (ss *scaleSet) isNodeManagedByAvailabilitySet(nodeName string) (bool, error) { cached, err := ss.availabilitySetNodesCache.Get(availabilitySetNodesKey) if err != nil { diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go index bf70b4ddd4e8..c6596d7bc648 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/azure/azure_vmss_cache_test.go @@ -19,8 +19,11 @@ limitations under the License. package azure import ( + "context" + "sync" "testing" + "github.com/Azure/go-autorest/autorest/to" "github.com/stretchr/testify/assert" ) @@ -67,3 +70,45 @@ func TestExtractVmssVMName(t *testing.T) { assert.Equal(t, c.expectedInstanceID, instanceID, c.description) } } + +func TestVMSSVMCache(t *testing.T) { + vmssName := "vmss" + vmList := []string{"vmssee6c2000000", "vmssee6c2000001", "vmssee6c2000002"} + ss, err := newTestScaleSet(vmssName, "", 0, vmList) + assert.NoError(t, err) + + // validate getting VMSS VM via cache. + virtualMachines, err := ss.VirtualMachineScaleSetVMsClient.List( + context.Background(), "rg", "vmss", "", "", "") + assert.NoError(t, err) + assert.Equal(t, 3, len(virtualMachines)) + for i := range virtualMachines { + vm := virtualMachines[i] + vmName := to.String(vm.OsProfile.ComputerName) + ssName, instanceID, realVM, err := ss.getVmssVM(vmName) + assert.NoError(t, err) + assert.Equal(t, "vmss", ssName) + assert.Equal(t, to.String(vm.InstanceID), instanceID) + assert.Equal(t, &vm, realVM) + } + + // validate deleteCacheForNode(). 
+ vm := virtualMachines[0] + vmName := to.String(vm.OsProfile.ComputerName) + err = ss.deleteCacheForNode(vmName) + assert.NoError(t, err) + + // the VM should be removed from cache after deleteCacheForNode(). + cached, err := ss.vmssVMCache.Get(vmssVirtualMachinesKey) + assert.NoError(t, err) + cachedVirtualMachines := cached.(*sync.Map) + _, ok := cachedVirtualMachines.Load(vmName) + assert.Equal(t, false, ok) + + // the VM should be get back after another cache refresh. + ssName, instanceID, realVM, err := ss.getVmssVM(vmName) + assert.NoError(t, err) + assert.Equal(t, "vmss", ssName) + assert.Equal(t, to.String(vm.InstanceID), instanceID) + assert.Equal(t, &vm, realVM) +} diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/go.sum index 57cf3f1d12b5..1bca91cbbcf6 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/go.sum @@ -271,6 +271,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go index 5dda471c6637..23a66539b560 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/legacy-cloud-providers/openstack/util/mount/mount_linux.go @@ -1132,7 +1132,7 @@ func doSafeMakeDir(pathname string, base string, perm os.FileMode) error { return fmt.Errorf("cannot create directory %s: %s", currentPath, err) } // Dive into the created directory - childFD, err := syscall.Openat(parentFD, dir, nofollowFlags, 0) + childFD, err = syscall.Openat(parentFD, dir, nofollowFlags, 0) if err != nil { return fmt.Errorf("cannot open %s: %s", currentPath, err) } diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/metrics/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/metrics/go.sum index b56ede60e77a..5099cf2987e5 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/metrics/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/metrics/go.sum @@ -192,6 +192,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/node-api/go.sum 
b/vendor/k8s.io/kubernetes/staging/src/k8s.io/node-api/go.sum index 13ed7c43c48b..91660c3151d0 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/node-api/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/node-api/go.sum @@ -195,6 +195,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/go.sum index 2292363dde52..ccd96757a3ba 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-apiserver/go.sum @@ -300,6 +300,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod 
h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-cli-plugin/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-cli-plugin/go.sum index c2e75b0b0ab2..fefebe59b29a 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-cli-plugin/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-cli-plugin/go.sum @@ -207,6 +207,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6/go.mod h1:ezvh/TsK7cY6rbqRK0oQQ8IAqLxYwwyPxAX1Pzy0ii0= diff --git a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-controller/go.sum b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-controller/go.sum index 9eae2a133940..5691aa0182b8 100644 --- a/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-controller/go.sum +++ b/vendor/k8s.io/kubernetes/staging/src/k8s.io/sample-controller/go.sum @@ -196,6 +196,8 @@ gopkg.in/yaml.v2 v2.2.1 h1:mUhvW9EsL+naU5Q3cakzfE91YhliOondGd6ZrsDBHQE= gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.4 
h1:/eiJrUcujPVeJ3xlSWaiNi3uSVmDGBK1pDHUHAnao1I= +gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= k8s.io/gengo v0.0.0-20190128074634-0689ccc1d7d6 h1:4s3/R4+OYYYUKptXPhZKjQ04WJ6EhQQVFdjOFvCazDk= diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go index 3f0cd05dc64c..1a354f20caa8 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/deployment.go @@ -42,7 +42,6 @@ import ( e2edeploy "k8s.io/kubernetes/test/e2e/framework/deployment" e2epod "k8s.io/kubernetes/test/e2e/framework/pod" "k8s.io/kubernetes/test/e2e/framework/replicaset" - e2eservice "k8s.io/kubernetes/test/e2e/framework/service" testutil "k8s.io/kubernetes/test/utils" utilpointer "k8s.io/utils/pointer" ) @@ -120,10 +119,6 @@ var _ = SIGDescribe("Deployment", func() { framework.ConformanceIt("deployment should support proportional scaling", func() { testProportionalScalingDeployment(f) }) - ginkgo.It("should not disrupt a cloud load-balancer's connectivity during rollout", func() { - framework.SkipUnlessProviderIs("aws", "azure", "gce", "gke") - testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f) - }) // TODO: add tests that cover deployment.Spec.MinReadySeconds once we solved clock-skew issues // See https://github.com/kubernetes/kubernetes/issues/29229 }) @@ -861,151 +856,3 @@ func orphanDeploymentReplicaSets(c clientset.Interface, d *appsv1.Deployment) er deleteOptions.Preconditions = metav1.NewUIDPreconditions(string(d.UID)) return c.AppsV1().Deployments(d.Namespace).Delete(d.Name, deleteOptions) } - -func testRollingUpdateDeploymentWithLocalTrafficLoadBalancer(f *framework.Framework) { - ns := f.Namespace.Name - c := f.ClientSet 
- - name := "test-rolling-update-with-lb" - framework.Logf("Creating Deployment %q", name) - podLabels := map[string]string{"name": name} - replicas := int32(3) - d := e2edeploy.NewDeployment(name, replicas, podLabels, AgnhostImageName, AgnhostImage, appsv1.RollingUpdateDeploymentStrategyType) - // NewDeployment assigned the same value to both d.Spec.Selector and - // d.Spec.Template.Labels, so mutating the one would mutate the other. - // Thus we need to set d.Spec.Template.Labels to a new value if we want - // to mutate it alone. - d.Spec.Template.Labels = map[string]string{ - "iteration": "0", - "name": name, - } - d.Spec.Template.Spec.Containers[0].Args = []string{"netexec", "--http-port=80", "--udp-port=80"} - // To ensure that a node that had a local endpoint prior to a rolling - // update continues to have a local endpoint throughout the rollout, we - // need an affinity policy that will cause pods to be scheduled on the - // same nodes as old pods, and we need the deployment to scale up a new - // pod before deleting an old pod. This affinity policy will define - // inter-pod affinity for pods of different rollouts and anti-affinity - // for pods of the same rollout, so it will need to be updated when - // performing a rollout. 
- setAffinity(d) - d.Spec.Strategy.RollingUpdate = &appsv1.RollingUpdateDeployment{ - MaxSurge: intOrStrP(1), - MaxUnavailable: intOrStrP(0), - } - deployment, err := c.AppsV1().Deployments(ns).Create(d) - framework.ExpectNoError(err) - err = e2edeploy.WaitForDeploymentComplete(c, deployment) - framework.ExpectNoError(err) - - framework.Logf("Creating a service %s with type=LoadBalancer and externalTrafficPolicy=Local in namespace %s", name, ns) - jig := e2eservice.NewTestJig(c, name) - jig.Labels = podLabels - service := jig.CreateLoadBalancerService(ns, name, e2eservice.LoadBalancerCreateTimeoutDefault, func(svc *v1.Service) { - svc.Spec.ExternalTrafficPolicy = v1.ServiceExternalTrafficPolicyTypeLocal - }) - - lbNameOrAddress := e2eservice.GetIngressPoint(&service.Status.LoadBalancer.Ingress[0]) - svcPort := int(service.Spec.Ports[0].Port) - - framework.Logf("Hitting the replica set's pods through the service's load balancer") - timeout := e2eservice.LoadBalancerLagTimeoutDefault - if framework.ProviderIs("aws") { - timeout = e2eservice.LoadBalancerLagTimeoutAWS - } - jig.TestReachableHTTP(lbNameOrAddress, svcPort, timeout) - - framework.Logf("Starting a goroutine to watch the service's endpoints in the background") - done := make(chan struct{}) - failed := make(chan struct{}) - defer close(done) - go func() { - defer ginkgo.GinkgoRecover() - expectedNodes := jig.GetEndpointNodeNames(service) - // The affinity policy should ensure that before an old pod is - // deleted, a new pod will have been created on the same node. - // Thus the set of nodes with local endpoints for the service - // should remain unchanged. 
- wait.Until(func() { - actualNodes := jig.GetEndpointNodeNames(service) - if !actualNodes.Equal(expectedNodes) { - framework.Logf("The set of nodes with local endpoints changed; started with %v, now have %v", expectedNodes.List(), actualNodes.List()) - failed <- struct{}{} - } - }, framework.Poll, done) - }() - - framework.Logf("Triggering a rolling deployment several times") - for i := 1; i <= 3; i++ { - framework.Logf("Updating label deployment %q pod spec (iteration #%d)", name, i) - deployment, err = e2edeploy.UpdateDeploymentWithRetries(c, ns, d.Name, func(update *appsv1.Deployment) { - update.Spec.Template.Labels["iteration"] = fmt.Sprintf("%d", i) - setAffinity(update) - }) - framework.ExpectNoError(err) - - framework.Logf("Waiting for observed generation %d", deployment.Generation) - err = e2edeploy.WaitForObservedDeployment(c, ns, name, deployment.Generation) - framework.ExpectNoError(err) - - framework.Logf("Make sure deployment %q is complete", name) - err = e2edeploy.WaitForDeploymentCompleteAndCheckRolling(c, deployment) - framework.ExpectNoError(err) - } - - select { - case <-failed: - framework.Failf("Connectivity to the load balancer was interrupted") - case <-time.After(1 * time.Minute): - } -} - -func setAffinity(d *appsv1.Deployment) { - d.Spec.Template.Spec.Affinity = &v1.Affinity{ - PodAffinity: &v1.PodAffinity{ - PreferredDuringSchedulingIgnoredDuringExecution: []v1.WeightedPodAffinityTerm{ - { - Weight: int32(100), - PodAffinityTerm: v1.PodAffinityTerm{ - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{d.Spec.Template.Labels["name"]}, - }, - { - Key: "iteration", - Operator: metav1.LabelSelectorOpNotIn, - Values: []string{d.Spec.Template.Labels["iteration"]}, - }, - }, - }, - }, - }, - }, - }, - PodAntiAffinity: &v1.PodAntiAffinity{ - RequiredDuringSchedulingIgnoredDuringExecution: 
[]v1.PodAffinityTerm{ - { - TopologyKey: "kubernetes.io/hostname", - LabelSelector: &metav1.LabelSelector{ - MatchExpressions: []metav1.LabelSelectorRequirement{ - { - Key: "name", - Operator: metav1.LabelSelectorOpIn, - Values: []string{d.Spec.Template.Labels["name"]}, - }, - { - Key: "iteration", - Operator: metav1.LabelSelectorOpIn, - Values: []string{d.Spec.Template.Labels["iteration"]}, - }, - }, - }, - }, - }, - }, - } -} diff --git a/vendor/k8s.io/kubernetes/test/e2e/apps/types.go b/vendor/k8s.io/kubernetes/test/e2e/apps/types.go index 976b40259565..ffe2d785742f 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/apps/types.go +++ b/vendor/k8s.io/kubernetes/test/e2e/apps/types.go @@ -25,7 +25,6 @@ import ( const ( WebserverImageName = "httpd" RedisImageName = "redis" - AgnhostImageName = "agnhost" ) var ( @@ -49,7 +48,4 @@ var ( // RedisImage is the fully qualified URI to the Redis image RedisImage = imageutils.GetE2EImage(imageutils.Redis) - - // AgnhostImage is the fully qualified URI to the agnhost image. 
- AgnhostImage = imageutils.GetE2EImage(imageutils.Agnhost) ) diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go index b6dfb967541c..8cee066b82f7 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/framework.go @@ -302,7 +302,11 @@ func (f *Framework) AfterEach() { if TestContext.DeleteNamespace && (TestContext.DeleteNamespaceOnFailure || !ginkgo.CurrentGinkgoTestDescription().Failed) { for _, ns := range f.namespacesToDelete { ginkgo.By(fmt.Sprintf("Destroying namespace %q for this suite.", ns.Name)) - if err := f.ClientSet.CoreV1().Namespaces().Delete(ns.Name, nil); err != nil { + timeout := DefaultNamespaceDeletionTimeout + if f.NamespaceDeletionTimeout != 0 { + timeout = f.NamespaceDeletionTimeout + } + if err := deleteNS(f.ClientSet, f.DynamicClient, ns.Name, timeout); err != nil { if !apierrors.IsNotFound(err) { nsDeletionErrors[ns.Name] = err } else { diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go b/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go index f74b693ccab6..3de36de06dcd 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/service/jig.go @@ -271,19 +271,6 @@ func (j *TestJig) CreateLoadBalancerService(namespace, serviceName string, timeo // endpoints of the given Service are running. func (j *TestJig) GetEndpointNodes(svc *v1.Service) map[string][]string { nodes := j.GetNodes(MaxNodesForEndpointsTests) - epNodes := j.GetEndpointNodeNames(svc) - nodeMap := map[string][]string{} - for _, n := range nodes.Items { - if epNodes.Has(n.Name) { - nodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP) - } - } - return nodeMap -} - -// GetEndpointNodeNames returns a string set of node names on which the -// endpoints of the given Service are running. 
-func (j *TestJig) GetEndpointNodeNames(svc *v1.Service) sets.String { endpoints, err := j.Client.CoreV1().Endpoints(svc.Namespace).Get(svc.Name, metav1.GetOptions{}) if err != nil { framework.Failf("Get endpoints for service %s/%s failed (%s)", svc.Namespace, svc.Name, err) @@ -299,7 +286,13 @@ func (j *TestJig) GetEndpointNodeNames(svc *v1.Service) sets.String { } } } - return epNodes + nodeMap := map[string][]string{} + for _, n := range nodes.Items { + if epNodes.Has(n.Name) { + nodeMap[n.Name] = e2enode.GetAddresses(&n, v1.NodeExternalIP) + } + } + return nodeMap } // GetNodes returns the first maxNodesForTest nodes. Useful in large clusters diff --git a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go index 651adedd7b44..889617a3f7f1 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/framework/util.go +++ b/vendor/k8s.io/kubernetes/test/e2e/framework/util.go @@ -40,6 +40,7 @@ import ( "syscall" "time" + "github.com/davecgh/go-spew/spew" "golang.org/x/net/websocket" "k8s.io/klog" @@ -869,6 +870,193 @@ func CheckTestingNSDeletedExcept(c clientset.Interface, skip string) error { return fmt.Errorf("Waiting for terminating namespaces to be deleted timed out") } +// deleteNS deletes the provided namespace, waits for it to be completely deleted, and then checks +// whether there are any pods remaining in a non-terminating state. +func deleteNS(c clientset.Interface, dynamicClient dynamic.Interface, namespace string, timeout time.Duration) error { + startTime := time.Now() + if err := c.CoreV1().Namespaces().Delete(namespace, nil); err != nil { + return err + } + + // wait for namespace to delete or timeout. 
+ var lastNamespace *v1.Namespace + err := wait.PollImmediate(2*time.Second, timeout, func() (bool, error) { + var err error + lastNamespace, err = c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + return true, nil + } + e2elog.Logf("Error while waiting for namespace to be terminated: %v", err) + return false, nil + } + return false, nil + }) + + // verify there is no more remaining content in the namespace + remainingContent, cerr := hasRemainingContent(c, dynamicClient, namespace) + if cerr != nil { + return cerr + } + + // if content remains, let's dump information about the namespace, and system for flake debugging. + remainingPods := 0 + missingTimestamp := 0 + if remainingContent { + // log information about namespace, and set of namespaces in api server to help flake detection + logNamespace(c, namespace) + logNamespaces(c, namespace) + + // if we can, check if there were pods remaining with no timestamp. + remainingPods, missingTimestamp, _ = e2epod.CountRemainingPods(c, namespace) + } + + // a timeout waiting for namespace deletion happened! 
+ if err != nil { + // namespaces now have conditions that are useful for debugging generic resources and finalizers + Logf("namespace did not cleanup: %s", spew.Sdump(lastNamespace)) + + // some content remains in the namespace + if remainingContent { + // pods remain + if remainingPods > 0 { + if missingTimestamp != 0 { + // pods remained, but were not undergoing deletion (namespace controller is probably culprit) + return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v, pods missing deletion timestamp: %v", namespace, err, remainingPods, missingTimestamp) + } + // but they were all undergoing deletion (kubelet is probably culprit, check NodeLost) + return fmt.Errorf("namespace %v was not deleted with limit: %v, pods remaining: %v", namespace, err, remainingPods) + } + // other content remains (namespace controller is probably screwed up) + return fmt.Errorf("namespace %v was not deleted with limit: %v, namespaced content other than pods remain", namespace, err) + } + // no remaining content, but namespace was not deleted (namespace controller is probably wedged) + return fmt.Errorf("namespace %v was not deleted with limit: %v, namespace is empty but is not yet removed", namespace, err) + } + e2elog.Logf("namespace %v deletion completed in %s", namespace, time.Since(startTime)) + return nil +} + +// logNamespaces logs the number of namespaces by phase +// namespace is the namespace the test was operating against that failed to delete so it can be grepped in logs +func logNamespaces(c clientset.Interface, namespace string) { + namespaceList, err := c.CoreV1().Namespaces().List(metav1.ListOptions{}) + if err != nil { + e2elog.Logf("namespace: %v, unable to list namespaces: %v", namespace, err) + return + } + + numActive := 0 + numTerminating := 0 + for _, namespace := range namespaceList.Items { + if namespace.Status.Phase == v1.NamespaceActive { + numActive++ + } else { + numTerminating++ + } + } + e2elog.Logf("namespace: %v, total 
namespaces: %v, active: %v, terminating: %v", namespace, len(namespaceList.Items), numActive, numTerminating) +} + +// logNamespace logs detail about a namespace +func logNamespace(c clientset.Interface, namespace string) { + ns, err := c.CoreV1().Namespaces().Get(namespace, metav1.GetOptions{}) + if err != nil { + if apierrs.IsNotFound(err) { + e2elog.Logf("namespace: %v no longer exists", namespace) + return + } + e2elog.Logf("namespace: %v, unable to get namespace due to error: %v", namespace, err) + return + } + e2elog.Logf("namespace: %v, DeletionTimetamp: %v, Finalizers: %v, Phase: %v", ns.Name, ns.DeletionTimestamp, ns.Spec.Finalizers, ns.Status.Phase) +} + +// isDynamicDiscoveryError returns true if the error is a group discovery error +// only for groups expected to be created/deleted dynamically during e2e tests +func isDynamicDiscoveryError(err error) bool { + if !discovery.IsGroupDiscoveryFailedError(err) { + return false + } + discoveryErr := err.(*discovery.ErrGroupDiscoveryFailed) + for gv := range discoveryErr.Groups { + switch gv.Group { + case "mygroup.example.com": + // custom_resource_definition + // garbage_collector + case "wardle.k8s.io": + // aggregator + case "metrics.k8s.io": + // aggregated metrics server add-on, no persisted resources + default: + e2elog.Logf("discovery error for unexpected group: %#v", gv) + return false + } + } + return true +} + +// hasRemainingContent checks if there is remaining content in the namespace via API discovery +func hasRemainingContent(c clientset.Interface, dynamicClient dynamic.Interface, namespace string) (bool, error) { + // some tests generate their own framework.Client rather than the default + // TODO: ensure every test call has a configured dynamicClient + if dynamicClient == nil { + return false, nil + } + + // find out what content is supported on the server + // Since extension apiserver is not always available, e.g. metrics server sometimes goes down, + // add retry here. 
+ resources, err := waitForServerPreferredNamespacedResources(c.Discovery(), 30*time.Second) + if err != nil { + return false, err + } + resources = discovery.FilteredBy(discovery.SupportsAllVerbs{Verbs: []string{"list", "delete"}}, resources) + groupVersionResources, err := discovery.GroupVersionResources(resources) + if err != nil { + return false, err + } + + // TODO: temporary hack for https://github.com/kubernetes/kubernetes/issues/31798 + ignoredResources := sets.NewString("bindings") + + contentRemaining := false + + // dump how many of resource type is on the server in a log. + for gvr := range groupVersionResources { + // get a client for this group version... + dynamicClient := dynamicClient.Resource(gvr).Namespace(namespace) + if err != nil { + // not all resource types support list, so some errors here are normal depending on the resource type. + e2elog.Logf("namespace: %s, unable to get client - gvr: %v, error: %v", namespace, gvr, err) + continue + } + // get the api resource + apiResource := metav1.APIResource{Name: gvr.Resource, Namespaced: true} + if ignoredResources.Has(gvr.Resource) { + e2elog.Logf("namespace: %s, resource: %s, ignored listing per whitelist", namespace, apiResource.Name) + continue + } + unstructuredList, err := dynamicClient.List(metav1.ListOptions{}) + if err != nil { + // not all resources support list, so we ignore those + if apierrs.IsMethodNotSupported(err) || apierrs.IsNotFound(err) || apierrs.IsForbidden(err) { + continue + } + // skip unavailable servers + if apierrs.IsServiceUnavailable(err) { + continue + } + return false, err + } + if len(unstructuredList.Items) > 0 { + e2elog.Logf("namespace: %s, resource: %s, items remaining: %v", namespace, apiResource.Name, len(unstructuredList.Items)) + contentRemaining = true + } + } + return contentRemaining, nil +} + // ContainerInitInvariant checks for an init containers are initialized and invariant on both older and newer. 
func ContainerInitInvariant(older, newer runtime.Object) error { oldPod := older.(*v1.Pod) @@ -3162,6 +3350,28 @@ func DsFromManifest(url string) (*appsv1.DaemonSet, error) { return &ds, nil } +// waitForServerPreferredNamespacedResources waits until server preferred namespaced resources could be successfully discovered. +// TODO: Fix https://github.com/kubernetes/kubernetes/issues/55768 and remove the following retry. +func waitForServerPreferredNamespacedResources(d discovery.DiscoveryInterface, timeout time.Duration) ([]*metav1.APIResourceList, error) { + e2elog.Logf("Waiting up to %v for server preferred namespaced resources to be successfully discovered", timeout) + var resources []*metav1.APIResourceList + if err := wait.PollImmediate(Poll, timeout, func() (bool, error) { + var err error + resources, err = d.ServerPreferredNamespacedResources() + if err == nil || isDynamicDiscoveryError(err) { + return true, nil + } + if !discovery.IsGroupDiscoveryFailedError(err) { + return false, err + } + e2elog.Logf("Error discoverying server preferred namespaced resources: %v, retrying in %v.", err, Poll) + return false, nil + }); err != nil { + return nil, err + } + return resources, nil +} + // WaitForPersistentVolumeClaimDeleted waits for a PersistentVolumeClaim to be removed from the system until timeout occurs, whichever comes first. 
func WaitForPersistentVolumeClaimDeleted(c clientset.Interface, ns string, pvcName string, Poll, timeout time.Duration) error { e2elog.Logf("Waiting up to %v for PersistentVolumeClaim %s to be removed", timeout, pvcName) diff --git a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go index 200a266b927a..87a84558d246 100644 --- a/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go +++ b/vendor/k8s.io/kubernetes/test/e2e/generated/bindata.go @@ -2576,7 +2576,7 @@ func testE2eTestingManifestsStorageCsiMockCsiStorageclassYaml() (*asset, error) return a, nil } -var _testImagesMakefile = []byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x53\x51\x6f\xe3\x36\x13\x7c\xfe\xf8\x2b\x06\xf1\x3d\x24\x80\x4d\x25\x7e\x39\x9c\x3f\x04\x85\x2e\x71\x13\xe1\x12\xab\xb5\x9c\x0b\xf2\x54\xd0\xd2\x4a\x5a\x84\x22\x55\x92\x8a\xed\x7f\x5f\x50\x4e\x7a\xf5\x9d\xde\xc8\x9d\x99\x1d\xee\xac\x26\xb8\xb1\xfd\xc1\x71\xd3\x06\xcc\x2f\xaf\x3e\x63\xd3\x12\xbe\x0d\x5b\x72\x86\x02\x79\xa4\x43\x68\xad\xf3\x52\x4c\xc4\x04\x0f\x5c\x92\xf1\x54\x61\x30\x15\x39\x84\x96\x90\xf6\xaa\x6c\xe9\xa3\x32\xc5\x77\x72\x9e\xad\xc1\x5c\x5e\xe2\x3c\x02\xce\xde\x4b\x67\x17\xff\x17\x13\x1c\xec\x80\x4e\x1d\x60\x6c\xc0\xe0\x09\xa1\x65\x8f\x9a\x35\x81\xf6\x25\xf5\x01\x6c\x50\xda\xae\xd7\xac\x4c\x49\xd8\x71\x68\xc7\x36\xef\x22\x52\x4c\xf0\xf2\x2e\x61\xb7\x41\xb1\x81\x42\x69\xfb\x03\x6c\xfd\x5f\x1c\x54\x18\x0d\xc7\xaf\x0d\xa1\x5f\x24\xc9\x6e\xb7\x93\x6a\x34\x2b\xad\x6b\x12\x7d\x04\xfa\xe4\x21\xbb\x59\xae\x8a\xe5\x6c\x2e\x2f\x47\xca\x93\xd1\xe4\x3d\x1c\xfd\x3d\xb0\xa3\x0a\xdb\x03\x54\xdf\x6b\x2e\xd5\x56\x13\xb4\xda\xc1\x3a\xa8\xc6\x11\x55\x08\x36\xfa\xdd\x39\x0e\x6c\x9a\x29\xbc\xad\xc3\x4e\x39\x12\x13\x54\xec\x83\xe3\xed\x10\x4e\x86\xf5\xe1\x8e\xfd\x09\xc0\x1a\x28\x83\xb3\xb4\x40\x56\x9c\xe1\x6b\x5a\x64\xc5\x54\x4c\xf0\x9c\x6d\xee\xf3\xa7\x0d\x9e\xd3\xf5\x3a\x5d\x6d\xb2\x65\x81\x7c\x8d\x9b\x7c\x75\x9b\x6d\xb2\x7c\x55\x20\xff\x1d\xe9\xea\x05\xdf\xb2\xd5\xed\x14\xc4\xa
1\x25\x07\xda\xf7\x2e\xfa\xb7\x0e\x1c\xc7\x48\x55\x9c\x59\x41\x74\x62\xa0\xb6\x47\x43\xbe\xa7\x92\x6b\x2e\xa1\x95\x69\x06\xd5\x10\x1a\xfb\x46\xce\xb0\x69\xd0\x93\xeb\xd8\xc7\x30\x3d\x94\xa9\xc4\x04\x9a\x3b\x0e\x2a\x8c\x37\xbf\x3c\x4a\x0a\xc1\xa6\xd4\x43\x45\x80\x94\x89\x94\x49\xab\xca\xd7\xa4\x53\xaf\x34\x73\x83\x26\x9f\x3c\xaa\x57\x8a\x51\xcb\x4e\x19\xae\xc9\x07\x21\xd6\xcb\xbb\xac\xd8\xac\x5f\xf0\xdb\x35\x9a\xd2\x49\xb6\xc9\xeb\xbf\xbb\x37\xa3\x39\xcd\x02\xf9\x30\xe3\x4e\x35\xe4\xc5\x5d\x9e\xae\x1f\x23\xf4\xb3\xf8\x73\xf9\xf8\xf4\x7d\xb9\x2e\xb2\x7c\x75\xfd\x36\x97\x5f\xe4\x95\xb8\xcb\x1f\xd2\xd5\xdd\x5f\x1f\xb7\x57\xf2\x6a\x2e\xbf\x08\xda\xf7\xd6\x05\x21\xb8\x36\x15\xd5\x78\xbe\x4f\x37\xe2\xd3\x39\x39\x67\xdd\x78\x88\x59\xa8\x1f\x69\xbf\x29\xc7\x31\xe8\x29\x68\xbf\x40\x34\x0f\xa5\xf5\x88\xbc\x36\x14\x2e\x04\x99\x8a\x6b\x21\x26\xf8\x3a\xb0\xae\x50\xda\x8a\x8e\xff\x46\xea\x1a\xbf\x18\x57\x2e\x82\x17\xb8\x65\x47\x65\xb0\xee\x00\xa3\x3a\xf2\x71\x57\xb6\x91\x72\x44\x2f\xf7\xaa\xeb\x35\x1d\x09\xa7\x6d\x4a\x3d\xf8\x40\x4e\xf5\x3c\x3e\x9e\x9c\x50\x5a\x2f\x62\x7d\x56\x5a\x13\x77\x9e\x9c\x10\x27\xc7\x85\xf8\x9f\x4c\xc6\x21\xcd\x86\xc0\x5a\xfa\xf6\xd8\x0b\x9f\xce\xa3\xe4\xc5\x11\xde\x0f\xbe\xfd\x59\xe7\x17\x5e\x04\xfd\xa0\xc9\x3f\xee\xf3\xd5\xcb\x48\xc2\x87\xc4\x4f\x0a\xff\x04\x00\x00\xff\xff\xa7\x76\x3e\x39\x41\x04\x00\x00") +var _testImagesMakefile = 
[]byte("\x1f\x8b\x08\x00\x00\x00\x00\x00\x00\xff\x64\x53\xd1\x6e\xe3\x36\x10\x7c\x2e\xbf\x62\x10\xdf\x43\x02\xc4\x54\xec\x97\x43\x5d\x04\x85\x2e\x71\x13\xe1\x12\xab\xb5\x9c\x0b\xf2\x54\xd0\xd2\x4a\x5a\x84\x22\x55\x92\x8a\xed\xbf\x2f\x28\x27\xbd\xfa\x4e\x6f\xe4\xce\xcc\x0e\x77\x56\x13\xdc\xd8\xfe\xe0\xb8\x69\x03\xe6\x57\xb3\xcf\xd8\xb4\x84\xaf\xc3\x96\x9c\xa1\x40\x1e\xe9\x10\x5a\xeb\xbc\x14\x13\x31\xc1\x03\x97\x64\x3c\x55\x18\x4c\x45\x0e\xa1\x25\xa4\xbd\x2a\x5b\xfa\xa8\x5c\xe2\x1b\x39\xcf\xd6\x60\x2e\xaf\x70\x1e\x01\x67\xef\xa5\xb3\x8b\xdf\xc4\x04\x07\x3b\xa0\x53\x07\x18\x1b\x30\x78\x42\x68\xd9\xa3\x66\x4d\xa0\x7d\x49\x7d\x00\x1b\x94\xb6\xeb\x35\x2b\x53\x12\x76\x1c\xda\xb1\xcd\xbb\x88\x14\x13\xbc\xbc\x4b\xd8\x6d\x50\x6c\xa0\x50\xda\xfe\x00\x5b\xff\x1f\x07\x15\x46\xc3\xf1\x6b\x43\xe8\x17\x49\xb2\xdb\xed\xa4\x1a\xcd\x4a\xeb\x9a\x44\x1f\x81\x3e\x79\xc8\x6e\x96\xab\x62\x39\x9d\xcb\xab\x91\xf2\x64\x34\x79\x0f\x47\xff\x0c\xec\xa8\xc2\xf6\x00\xd5\xf7\x9a\x4b\xb5\xd5\x04\xad\x76\xb0\x0e\xaa\x71\x44\x15\x82\x8d\x7e\x77\x8e\x03\x9b\xe6\x12\xde\xd6\x61\xa7\x1c\x89\x09\x2a\xf6\xc1\xf1\x76\x08\x27\xc3\xfa\x70\xc7\xfe\x04\x60\x0d\x94\xc1\x59\x5a\x20\x2b\xce\xf0\x25\x2d\xb2\xe2\x52\x4c\xf0\x9c\x6d\xee\xf3\xa7\x0d\x9e\xd3\xf5\x3a\x5d\x6d\xb2\x65\x81\x7c\x8d\x9b\x7c\x75\x9b\x6d\xb2\x7c\x55\x20\xff\x03\xe9\xea\x05\x5f\xb3\xd5\xed\x25\x88\x43\x4b\x0e\xb4\xef\x5d\xf4\x6f\x1d\x38\x8e\x91\xaa\x38\xb3\x82\xe8\xc4\x40\x6d\x8f\x86\x7c\x4f\x25\xd7\x5c\x42\x2b\xd3\x0c\xaa\x21\x34\xf6\x8d\x9c\x61\xd3\xa0\x27\xd7\xb1\x8f\x61\x7a\x28\x53\x89\x09\x34\x77\x1c\x54\x18\x6f\x7e\x7a\x94\x14\x82\x4d\xa9\x87\x8a\x00\x29\x13\x29\x93\x56\x95\xaf\x49\xa7\x5e\x69\xea\x06\x4d\x3e\x79\x54\xaf\x14\xa3\x96\x9d\x32\x5c\x93\x0f\x42\xac\x97\x77\x59\xb1\x59\xbf\xe0\xf7\x6b\x34\xa5\x93\x6c\x93\xd7\xff\x76\x6f\x4a\x73\x9a\x06\xf2\x61\xca\x9d\x6a\xc8\x8b\xbb\x3c\x5d\x3f\x46\xe8\x67\xf1\xd7\xf2\xf1\xe9\xdb\x72\x5d\x64\xf9\xea\xfa\x6d\x2e\x7f\x95\x33\x71\x97\x3f\xa4\xab\xbb\xbf\x3f\x6e\x67\x72\x36\x97\xb3\x2b\x41\xfb\xde
\xba\x20\x04\xd7\xa6\xa2\x1a\xcf\xf7\xe9\x46\x7c\x3a\x27\xe7\xac\x1b\x0f\x31\x0c\xf5\x3d\xee\x37\xe5\x38\x26\x7d\x09\xda\x2f\x10\xdd\x43\x69\x3d\x22\xaf\x0d\x85\x0b\x41\xa6\xe2\x5a\x88\x09\xbe\x0c\xac\x2b\x94\xb6\xa2\xe3\xcf\x91\xba\xc6\x2f\xc6\x9d\x8b\xe0\x05\x6e\xd9\x51\x19\xac\x3b\xc0\xa8\x8e\x7c\x5c\x96\x6d\xa4\x1c\xd1\xcb\xbd\xea\x7a\x4d\x47\xc2\x69\x9b\x52\x0f\x3e\x90\x53\x3d\x8f\xaf\x27\x27\x94\xd6\x8b\x58\x9f\x96\xd6\xc4\xa5\x27\x27\xc4\xc9\x71\x21\x7e\x91\xc9\x38\xa5\xe9\x10\x58\x4b\xdf\x1e\x7b\xe1\xd3\x79\x94\xbc\x38\xc2\xfb\xc1\xb7\x3f\xea\xfc\xc4\x8b\xa0\xef\x34\xf9\xe7\x7d\xbe\x7a\x19\x49\xf8\x90\xf8\x41\xe1\xdf\x00\x00\x00\xff\xff\xfd\x4e\xdb\xf5\x42\x04\x00\x00") func testImagesMakefileBytes() ([]byte, error) { return bindataRead( diff --git a/vendor/k8s.io/kubernetes/test/images/Makefile b/vendor/k8s.io/kubernetes/test/images/Makefile index 7b71a1866905..ead09b114346 100644 --- a/vendor/k8s.io/kubernetes/test/images/Makefile +++ b/vendor/k8s.io/kubernetes/test/images/Makefile @@ -17,7 +17,7 @@ include ../../hack/make-rules/Makefile.manifest REGISTRY ?= gcr.io/kubernetes-e2e-test-images GOARM ?= 7 QEMUVERSION=v2.9.1 -GOLANG_VERSION=1.12.9 +GOLANG_VERSION=1.12.10 export ifndef WHAT diff --git a/vendor/k8s.io/kubernetes/test/integration/kubelet/watch_manager_test.go b/vendor/k8s.io/kubernetes/test/integration/kubelet/watch_manager_test.go index add0a2a16882..3e37e850cfc6 100644 --- a/vendor/k8s.io/kubernetes/test/integration/kubelet/watch_manager_test.go +++ b/vendor/k8s.io/kubernetes/test/integration/kubelet/watch_manager_test.go @@ -40,6 +40,7 @@ func TestWatchBasedManager(t *testing.T) { defer server.TearDownFn() server.ClientConfig.QPS = 10000 + server.ClientConfig.Burst = 10000 client, err := kubernetes.NewForConfig(server.ClientConfig) if err != nil { t.Fatalf("unexpected error: %v", err)