From 953dbf21fdbf418d12540af4d7b08f0dba1e3e1d Mon Sep 17 00:00:00 2001
From: Jhon Honce
Date: Fri, 23 Jan 2015 13:31:39 -0700
Subject: [PATCH 1/5]
bump(github.com/GoogleCloudPlatform/kubernetes):e0acd75629ec29bde764bcde29367146ae8b389b
---
Godeps/Godeps.json | 263 +-
.../compute/serviceaccount/serviceaccount.go | 172 -
.../compute/v1/v1/compute-api.json | 9526 +++++++++
.../compute/v1/v1/compute-gen.go | 16952 ++++++++++++++++
.../v1beta1/v1beta1/container-api.json | 579 +
.../v1beta1/v1beta1/container-gen.go | 1007 +
.../kubernetes/pkg/api/conversion.go | 12 +
.../kubernetes/pkg/api/errors/errors.go | 2 +-
.../kubernetes/pkg/api/helpers.go | 24 -
.../kubernetes/pkg/api/helpers_test.go | 2 -
.../kubernetes/pkg/api/latest/latest_test.go | 4 +
.../kubernetes/pkg/api/meta.go | 2 +-
.../kubernetes/pkg/api/meta/interfaces.go | 12 +-
.../kubernetes/pkg/api/meta/meta.go | 14 +-
.../kubernetes/pkg/api/meta/meta_test.go | 28 +-
.../pkg/api/meta/restmapper_test.go | 2 +-
.../kubernetes/pkg/api/register.go | 6 +-
.../kubernetes/pkg/api/resource/quantity.go | 9 +-
.../pkg/api/resource/quantity_test.go | 8 +
.../kubernetes/pkg/api/serialization_test.go | 11 +-
.../kubernetes/pkg/api/types.go | 63 +-
.../kubernetes/pkg/api/unversioned.go | 6 +
.../kubernetes/pkg/api/v1beta1/conversion.go | 95 +-
.../pkg/api/v1beta1/conversion_test.go | 46 +
.../kubernetes/pkg/api/v1beta1/register.go | 4 +-
.../kubernetes/pkg/api/v1beta1/types.go | 33 +-
.../kubernetes/pkg/api/v1beta2/conversion.go | 90 +-
.../pkg/api/v1beta2/conversion_test.go | 46 +
.../kubernetes/pkg/api/v1beta2/register.go | 4 +-
.../kubernetes/pkg/api/v1beta2/types.go | 33 +-
.../kubernetes/pkg/api/v1beta3/register.go | 4 +-
.../kubernetes/pkg/api/v1beta3/types.go | 66 +-
.../pkg/api/validation/schema_test.go | 4 +
.../pkg/api/validation/validation.go | 59 +-
.../pkg/api/validation/validation_test.go | 90 +-
.../kubernetes/pkg/apiserver/apiserver.go | 45 +-
.../kubernetes/pkg/apiserver/index.go | 21 +-
.../kubernetes/pkg/apiserver/mux_helper.go | 37 +
.../kubernetes/pkg/apiserver/proxy.go | 15 +-
.../kubernetes/pkg/apiserver/proxy_test.go | 124 +-
.../kubernetes/pkg/client/cache/listers.go | 32 +-
.../kubernetes/pkg/client/cache/listwatch.go | 2 +-
.../pkg/client/clientcmd/api/types.go | 2 +
.../pkg/client/clientcmd/api/v1/types.go | 2 +
.../pkg/client/clientcmd/client_config.go | 52 +-
.../client/clientcmd/client_config_test.go | 18 +
.../client/clientcmd/merged_client_builder.go | 10 +
.../pkg/client/clientcmd/overrides.go | 76 +-
.../pkg/client/clientcmd/validation.go | 2 +-
.../kubernetes/pkg/client/events.go | 2 +-
.../kubernetes/pkg/client/events_test.go | 3 -
.../kubernetes/pkg/client/helper_test.go | 146 +-
.../kubernetes/pkg/client/kubelet.go | 28 +-
.../kubernetes/pkg/client/kubelet_test.go | 15 +-
.../kubernetes/pkg/client/record/event.go | 101 +-
.../pkg/client/record/event_test.go | 98 +-
.../kubernetes/pkg/client/restclient_test.go | 2 +-
.../kubernetes/pkg/cloudprovider/gce/gce.go | 8 +-
.../pkg/controller/replication_controller.go | 6 +
.../kubernetes/pkg/conversion/converter.go | 37 +-
.../pkg/conversion/converter_test.go | 40 +
.../kubernetes/pkg/conversion/scheme_test.go | 2 +-
.../pkg/credentialprovider/config.go | 1 -
.../kubernetes/pkg/health/exec.go | 7 +-
.../kubernetes/pkg/health/exec_test.go | 3 +-
.../kubernetes/pkg/health/health.go | 7 +-
.../kubernetes/pkg/health/http.go | 3 +-
.../kubernetes/pkg/health/tcp.go | 3 +-
.../pkg/kubecfg/resource_printer.go | 5 +-
.../pkg/kubecfg/resource_printer_test.go | 4 +-
.../kubernetes/pkg/kubectl/cmd/cmd.go | 78 +-
.../kubernetes/pkg/kubectl/cmd/cmd_test.go | 29 +-
.../pkg/kubectl/cmd/config/config.go | 2 +-
.../kubernetes/pkg/kubectl/cmd/create.go | 5 +-
.../kubernetes/pkg/kubectl/cmd/create_test.go | 6 +-
.../kubernetes/pkg/kubectl/cmd/delete.go | 11 +-
.../kubernetes/pkg/kubectl/cmd/delete_test.go | 44 +-
.../kubernetes/pkg/kubectl/cmd/describe.go | 5 +-
.../pkg/kubectl/cmd/describe_test.go | 5 +-
.../pkg/kubectl/cmd/factory_test.go | 41 +
.../kubernetes/pkg/kubectl/cmd/get.go | 15 +-
.../kubernetes/pkg/kubectl/cmd/get_test.go | 26 +-
.../kubernetes/pkg/kubectl/cmd/helpers.go | 38 +
.../pkg/kubectl/cmd/helpers_test.go | 108 +
.../kubernetes/pkg/kubectl/cmd/log.go | 3 +-
.../kubernetes/pkg/kubectl/cmd/printing.go | 37 +-
.../pkg/kubectl/cmd/printing_test.go | 67 +
.../kubernetes/pkg/kubectl/cmd/resize.go | 72 +
.../kubernetes/pkg/kubectl/cmd/resource.go | 40 +-
.../pkg/kubectl/cmd/rollingupdate.go | 20 +-
.../kubernetes/pkg/kubectl/cmd/run.go | 25 +-
.../kubernetes/pkg/kubectl/cmd/stop.go | 56 +
.../kubernetes/pkg/kubectl/cmd/update.go | 81 +-
.../kubernetes/pkg/kubectl/describe.go | 3 +-
.../kubernetes/pkg/kubectl/generate.go | 4 +-
.../kubernetes/pkg/kubectl/generate_test.go | 114 +
.../kubernetes/pkg/kubectl/resize.go | 93 +
.../kubernetes/pkg/kubectl/resize_test.go | 182 +
.../pkg/kubectl/resource_printer.go | 34 +-
.../pkg/kubectl/resource_printer_test.go | 76 +
.../pkg/kubectl/rolling_updater_test.go | 6 +-
.../kubernetes/pkg/kubectl/run_test.go | 104 +
.../kubernetes/pkg/kubectl/stop.go | 110 +
.../kubernetes/pkg/kubectl/stop_test.go | 168 +
.../kubernetes/pkg/kubectl/version.go | 2 +-
.../kubernetes/pkg/kubelet/cadvisor.go | 5 +-
.../pkg/kubelet/config/apiserver_test.go | 59 +-
.../kubernetes/pkg/kubelet/config/config.go | 2 +-
.../pkg/kubelet/config/config_test.go | 3 +-
.../kubernetes/pkg/kubelet/config/etcd.go | 3 +-
.../pkg/kubelet/config/etcd_test.go | 52 +
.../kubernetes/pkg/kubelet/config/file.go | 31 +-
.../pkg/kubelet/config/file_test.go | 127 +-
.../kubernetes/pkg/kubelet/config/http.go | 114 +-
.../pkg/kubelet/config/http_test.go | 71 +-
.../pkg/kubelet/dockertools/docker.go | 45 +-
.../pkg/kubelet/dockertools/docker_test.go | 9 +-
.../kubernetes/pkg/kubelet/handlers.go | 15 +-
.../kubernetes/pkg/kubelet/kubelet.go | 441 +-
.../kubernetes/pkg/kubelet/kubelet_test.go | 555 +-
.../kubernetes/pkg/kubelet/server.go | 40 +-
.../kubernetes/pkg/kubelet/server_test.go | 51 +-
.../kubernetes/pkg/kubelet/util.go | 3 +-
.../pkg/{ => kubelet}/volume/doc.go | 0
.../pkg/kubelet/volume/empty_dir/empty_dir.go | 125 +
.../volume/empty_dir/empty_dir_test.go | 152 +
.../pkg/kubelet/volume/gce_pd/gce_pd.go | 237 +
.../pkg/kubelet/volume/gce_pd/gce_pd_test.go | 173 +
.../volume/gce_pd}/gce_util.go | 29 +-
.../volume/gce_pd}/gce_util_test.go | 2 +-
.../pkg/kubelet/volume/gce_pd/mount_util.go | 53 +
.../volume/gce_pd/mount_util_linux.go} | 2 +-
.../volume/gce_pd/mount_util_unsupported.go} | 4 +-
.../pkg/kubelet/volume/git_repo/git_repo.go | 214 +
.../kubelet/volume/git_repo/git_repo_test.go | 186 +
.../pkg/kubelet/volume/host_path/host_path.go | 81 +
.../volume/host_path/host_path_test.go | 86 +
.../kubernetes/pkg/kubelet/volume/plugins.go | 174 +
.../kubernetes/pkg/kubelet/volume/testing.go | 92 +
.../kubernetes/pkg/kubelet/volume/volume.go | 59 +
.../kubernetes/pkg/kubelet/volumes.go | 154 +
.../kubernetes/pkg/master/master.go | 86 +-
.../kubernetes/pkg/master/pod_cache.go | 4 +-
.../kubernetes/pkg/master/pod_cache_test.go | 63 +-
.../kubernetes/pkg/proxy/config/config.go | 8 +-
.../pkg/registry/controller/rest.go | 2 +-
.../kubernetes/pkg/registry/etcd/etcd_test.go | 12 +-
.../kubernetes/pkg/registry/event/rest.go | 4 +-
.../pkg/registry/event/rest_test.go | 24 +-
.../pkg/registry/minion/healthy_registry.go | 12 +-
.../pkg/registry/pod/bound_pod_factory.go | 37 +-
.../registry/pod/bound_pod_factory_test.go | 264 +-
.../kubernetes/pkg/registry/pod/rest.go | 2 +-
.../pkg/registry/service/rest_test.go | 10 +-
.../kubernetes/pkg/runtime/scheme.go | 3 +
.../kubernetes/pkg/scheduler/listers.go | 39 +
.../kubernetes/pkg/scheduler/predicates.go | 133 +-
.../pkg/scheduler/predicates_test.go | 188 +-
.../kubernetes/pkg/scheduler/priorities.go | 44 +
.../pkg/scheduler/priorities_test.go | 100 +
.../kubernetes/pkg/scheduler/spreading.go | 118 +-
.../pkg/scheduler/spreading_test.go | 222 +-
.../pkg/service/endpoints_controller.go | 10 +-
.../kubernetes/pkg/tools/etcd_tools_watch.go | 31 +-
.../pkg/tools/etcd_tools_watch_test.go | 53 +-
.../kubernetes/pkg/types/doc.go | 18 +
.../kubernetes/pkg/types/uid.go | 22 +
.../kubernetes/pkg/util/errors/errors.go | 2 +-
.../mounter_unsupported.go => util/flags.go} | 25 +-
.../kubernetes/pkg/util/logs.go | 3 +-
.../kubernetes/pkg/util/net_test.go | 3 +-
.../util/{plog_import.go => pflag_import.go} | 34 +-
.../kubernetes/pkg/util/set.go | 8 +-
.../kubernetes/pkg/util/set_test.go | 23 +
.../kubernetes/pkg/util/uuid.go | 9 +-
.../kubernetes/pkg/version/base.go | 4 +-
.../kubernetes/pkg/version/verflag/verflag.go | 7 +-
.../kubernetes/pkg/volume/mounter_linux.go | 89 -
.../kubernetes/pkg/volume/volume.go | 438 -
.../kubernetes/pkg/volume/volume_test.go | 295 -
.../kubernetes/pkg/watch/mux.go | 60 +-
.../kubernetes/pkg/watch/mux_test.go | 55 +-
.../algorithmprovider/affinity/affinity.go | 55 +
.../algorithmprovider/defaults/defaults.go | 10 +-
.../scheduler/algorithmprovider/plugins.go | 1 +
.../algorithmprovider/plugins_test.go | 10 +-
.../plugin/pkg/scheduler/factory/factory.go | 30 +-
.../plugin/pkg/scheduler/factory/plugins.go | 60 +-
.../pkg/scheduler/factory/plugins_test.go | 41 +
.../plugin/pkg/scheduler/scheduler.go | 6 +-
.../src/github.com/coreos/etcd/mod/mod.go | 8 +-
.../github.com/davecgh/go-spew/spew/dump.go | 6 +
.../davecgh/go-spew/spew/dump_test.go | 8 +
.../github.com/davecgh/go-spew/spew/format.go | 6 +
.../davecgh/go-spew/spew/format_test.go | 5 +
.../docker/docker/pkg/archive/archive.go | 48 +-
.../docker/docker/pkg/archive/archive_test.go | 8 +-
.../docker/docker/pkg/archive/changes_test.go | 2 +-
.../docker/docker/pkg/archive/diff.go | 40 +-
.../docker/docker/pkg/archive/utils_test.go | 3 +-
.../docker/docker/pkg/units/size.go | 2 +-
.../docker/docker/pkg/units/size_test.go | 6 +-
.../fsouza/go-dockerclient/.travis.yml | 1 +
.../github.com/fsouza/go-dockerclient/AUTHORS | 5 +
.../fsouza/go-dockerclient/README.markdown | 2 +-
.../fsouza/go-dockerclient/client.go | 18 +-
.../fsouza/go-dockerclient/container.go | 1 +
.../github.com/fsouza/go-dockerclient/exec.go | 48 +
.../fsouza/go-dockerclient/exec_test.go | 131 +
.../github.com/fsouza/go-dockerclient/tar.go | 6 +-
.../go-dockerclient/testing/bin/fmtpolice | 2 +-
.../fsouza/go-dockerclient/testing/server.go | 53 +-
.../go-dockerclient/testing/server_test.go | 80 +
.../github.com/fsouza/go-dockerclient/tls.go | 100 +
.../github.com/golang/protobuf/proto/Makefile | 43 +
.../golang/protobuf/proto/all_test.go | 2059 ++
.../github.com/golang/protobuf/proto/clone.go | 197 +
.../golang/protobuf/proto/clone_test.go | 227 +
.../golang/protobuf/proto/decode.go | 823 +
.../golang/protobuf/proto/encode.go | 1283 ++
.../github.com/golang/protobuf/proto/equal.go | 256 +
.../golang/protobuf/proto/equal_test.go | 191 +
.../golang/protobuf/proto/extensions.go | 353 +
.../golang/protobuf/proto/extensions_test.go | 137 +
.../github.com/golang/protobuf/proto/lib.go | 751 +
.../golang/protobuf/proto/message_set.go | 287 +
.../golang/protobuf/proto/message_set_test.go | 66 +
.../golang/protobuf/proto/pointer_reflect.go | 479 +
.../golang/protobuf/proto/pointer_unsafe.go | 266 +
.../golang/protobuf/proto/properties.go | 724 +
.../protobuf/proto/proto3_proto/Makefile | 44 +
.../protobuf/proto/proto3_proto/proto3.proto | 58 +
.../golang/protobuf/proto/proto3_test.go | 93 +
.../golang/protobuf/proto/size2_test.go | 63 +
.../golang/protobuf/proto/size_test.go | 135 +
.../golang/protobuf/proto/testdata/Makefile | 50 +
.../protobuf/proto/testdata/golden_test.go | 86 +
.../golang/protobuf/proto/testdata/test.pb.go | 2389 +++
.../golang/protobuf/proto/testdata/test.proto | 434 +
.../github.com/golang/protobuf/proto/text.go | 789 +
.../golang/protobuf/proto/text_parser.go | 757 +
.../golang/protobuf/proto/text_parser_test.go | 509 +
.../golang/protobuf/proto/text_test.go | 436 +
.../src/github.com/spf13/pflag/bool.go | 9 +
.../src/github.com/spf13/pflag/bool_test.go | 163 +
.../github.com/spf13/pflag/example_test.go | 4 +
.../src/github.com/spf13/pflag/flag.go | 4 +-
.../src/github.com/spf13/pflag/flag_test.go | 4 +
.../src/golang.org/x/oauth2/.travis.yml | 14 +
.../src/golang.org/x/oauth2/AUTHORS | 3 +
.../src/golang.org/x/oauth2/CONTRIBUTING.md | 25 +
.../src/golang.org/x/oauth2/CONTRIBUTORS | 3 +
.../src/golang.org/x/oauth2/LICENSE | 27 +
.../src/golang.org/x/oauth2/README.md | 18 +
.../golang.org/x/oauth2/client_appengine.go | 39 +
.../src/golang.org/x/oauth2/example_test.go | 50 +
.../src/golang.org/x/oauth2/github/github.go | 16 +
.../golang.org/x/oauth2/google/appengine.go | 37 +
.../golang.org/x/oauth2/google/appenginevm.go | 36 +
.../x/oauth2/google/example_test.go | 133 +
.../src/golang.org/x/oauth2/google/google.go | 103 +
.../x/oauth2/google/source_appengine.go | 71 +
.../golang.org/x/oauth2/internal/oauth2.go | 37 +
.../src/golang.org/x/oauth2/jws/jws.go | 160 +
.../golang.org/x/oauth2/jwt/example_test.go | 31 +
.../src/golang.org/x/oauth2/jwt/jwt.go | 146 +
.../src/golang.org/x/oauth2/jwt/jwt_test.go | 134 +
.../src/golang.org/x/oauth2/oauth2.go | 462 +
.../src/golang.org/x/oauth2/oauth2_test.go | 260 +
.../src/golang.org/x/oauth2/token.go | 99 +
.../src/golang.org/x/oauth2/token_test.go | 30 +
.../src/golang.org/x/oauth2/transport.go | 138 +
.../src/golang.org/x/oauth2/transport_test.go | 53 +
.../google.golang.org/appengine/.travis.yml | 14 +
.../src/google.golang.org/appengine/LICENSE | 202 +
.../src/google.golang.org/appengine/README.md | 65 +
.../google.golang.org/appengine/appengine.go | 78 +
.../appengine/appengine_test.go | 45 +
.../appengine/channel/channel.go | 81 +
.../appengine/channel/channel_test.go | 17 +
.../appengine/datastore/datastore.go | 405 +
.../appengine/datastore/datastore_test.go | 1499 ++
.../appengine/datastore/doc.go | 316 +
.../appengine/datastore/key.go | 309 +
.../appengine/datastore/key_test.go | 214 +
.../appengine/datastore/load.go | 334 +
.../appengine/datastore/prop.go | 294 +
.../appengine/datastore/prop_test.go | 559 +
.../appengine/datastore/query.go | 712 +
.../appengine/datastore/query_test.go | 580 +
.../appengine/datastore/save.go | 300 +
.../appengine/datastore/time_test.go | 65 +
.../appengine/datastore/transaction.go | 138 +
.../appengine/delay/delay.go | 275 +
.../appengine/delay/delay_test.go | 307 +
.../appengine/demos/guestbook/app.yaml | 19 +
.../appengine/demos/guestbook/favicon.ico | Bin 0 -> 1150 bytes
.../appengine/demos/guestbook/guestbook.go | 102 +
.../appengine/demos/guestbook/index.yaml | 7 +
.../demos/guestbook/templates/guestbook.html | 26 +
.../appengine/demos/helloworld/app.yaml | 15 +
.../appengine/demos/helloworld/favicon.ico | Bin 0 -> 1150 bytes
.../appengine/demos/helloworld/helloworld.go | 45 +
.../src/google.golang.org/appengine/errors.go | 46 +
.../google.golang.org/appengine/file/file.go | 26 +
.../google.golang.org/appengine/identity.go | 141 +
.../appengine/image/image.go | 65 +
.../appengine/internal/aetesting/fake.go | 88 +
.../appengine/internal/api.go | 589 +
.../appengine/internal/api_race_test.go | 5 +
.../appengine/internal/api_test.go | 412 +
.../appengine/internal/app_id.go | 28 +
.../appengine/internal/app_id_test.go | 34 +
.../app_identity/app_identity_service.pb.go | 295 +
.../app_identity/app_identity_service.proto | 64 +
.../appengine/internal/base/api_base.pb.go | 134 +
.../appengine/internal/base/api_base.proto | 33 +
.../internal/channel/channel_service.pb.go | 153 +
.../internal/channel/channel_service.proto | 30 +
.../internal/datastore/datastore_v3.pb.go | 2787 +++
.../internal/datastore/datastore_v3.proto | 541 +
.../appengine/internal/identity.go | 12 +
.../appengine/internal/identity_vm.go | 85 +
.../internal/image/images_service.pb.go | 848 +
.../internal/image/images_service.proto | 162 +
.../appengine/internal/internal.go | 165 +
.../appengine/internal/internal_test.go | 54 +
.../appengine/internal/log/log_service.pb.go | 898 +
.../appengine/internal/log/log_service.proto | 150 +
.../internal/mail/mail_service.pb.go | 228 +
.../internal/mail/mail_service.proto | 45 +
.../internal/memcache/memcache_service.pb.go | 942 +
.../internal/memcache/memcache_service.proto | 165 +
.../appengine/internal/metadata.go | 61 +
.../internal/modules/modules_service.pb.go | 374 +
.../internal/modules/modules_service.proto | 80 +
.../appengine/internal/net.go | 63 +
.../appengine/internal/net_test.go | 55 +
.../appengine/internal/regen.sh | 36 +
.../internal/remote_api/remote_api.pb.go | 230 +
.../internal/remote_api/remote_api.proto | 44 +
.../appengine/internal/search/search.pb.go | 2072 ++
.../appengine/internal/search/search.proto | 376 +
.../taskqueue/taskqueue_service.pb.go | 1890 ++
.../taskqueue/taskqueue_service.proto | 342 +
.../appengine/internal/transaction.go | 30 +
.../internal/urlfetch/urlfetch_service.pb.go | 355 +
.../internal/urlfetch/urlfetch_service.proto | 64 +
.../internal/user/user_service.pb.go | 288 +
.../internal/user/user_service.proto | 58 +
.../internal/xmpp/xmpp_service.pb.go | 428 +
.../internal/xmpp/xmpp_service.proto | 83 +
.../google.golang.org/appengine/log/log.go | 322 +
.../appengine/log/log_test.go | 108 +
.../google.golang.org/appengine/mail/mail.go | 123 +
.../appengine/mail/mail_test.go | 65 +
.../appengine/memcache/memcache.go | 525 +
.../appengine/memcache/memcache_test.go | 255 +
.../appengine/module/module.go | 112 +
.../appengine/module/module_test.go | 124 +
.../google.golang.org/appengine/namespace.go | 48 +
.../appengine/namespace_test.go | 33 +
.../appengine/remote_api/client.go | 173 +
.../appengine/remote_api/client_test.go | 20 +
.../appengine/remote_api/remote_api.go | 142 +
.../appengine/search/field.go | 144 +
.../appengine/search/search.go | 853 +
.../appengine/search/search_test.go | 650 +
.../appengine/taskqueue/taskqueue.go | 493 +
.../appengine/taskqueue/taskqueue_test.go | 103 +
.../google.golang.org/appengine/timeout.go | 49 +
.../appengine/timeout_test.go | 59 +
.../appengine/urlfetch/urlfetch.go | 211 +
.../google.golang.org/appengine/user/oauth.go | 46 +
.../google.golang.org/appengine/user/user.go | 103 +
.../appengine/user/user_test.go | 97 +
.../google.golang.org/appengine/xmpp/xmpp.go | 251 +
.../appengine/xmpp/xmpp_test.go | 173 +
.../cloud/compute/metadata/go13.go | 37 +
.../cloud/compute/metadata/metadata.go | 267 +
.../google.golang.org/cloud/internal/cloud.go | 128 +
.../internal/datastore/datastore_v1.pb.go | 1633 ++
.../internal/datastore/datastore_v1.proto | 594 +
.../cloud/internal/testutil/context.go | 57 +
384 files changed, 83742 insertions(+), 2489 deletions(-)
delete mode 100644 Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
create mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-api.json
create mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-gen.go
create mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-api.json
create mode 100644 Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-gen.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/mux_helper.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/factory_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resize.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/stop.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/run_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop_test.go
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{ => kubelet}/volume/doc.go (100%)
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd_test.go
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{volume => kubelet/volume/gce_pd}/gce_util.go (78%)
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{volume => kubelet/volume/gce_pd}/gce_util_test.go (98%)
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util.go
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{volume/mount_utils.go => kubelet/volume/gce_pd/mount_util_linux.go} (98%)
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{volume/mount_utils_windows.go => kubelet/volume/gce_pd/mount_util_unsupported.go} (95%)
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/plugins.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/testing.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/volume.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volumes.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/doc.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/uid.go
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/{volume/mounter_unsupported.go => util/flags.go} (58%)
rename Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/{plog_import.go => pflag_import.go} (74%)
delete mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_linux.go
delete mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume.go
delete mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume_test.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity/affinity.go
create mode 100644 Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins_test.go
create mode 100644 Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
create mode 100644 Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
create mode 100644 Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/README.md
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/token.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/transport.go
create mode 100644 Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/.travis.yml
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/LICENSE
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/README.md
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/appengine.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/appengine_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/channel/channel.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/channel/channel_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/doc.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/key.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/key_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/load.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/prop.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/prop_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/query.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/query_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/save.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/time_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/datastore/transaction.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/delay/delay.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/delay/delay_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/app.yaml
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/favicon.ico
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/guestbook.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/index.yaml
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/templates/guestbook.html
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/app.yaml
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/favicon.ico
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/helloworld.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/errors.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/file/file.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/identity.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/image/image.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/aetesting/fake.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/api.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/api_race_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/api_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/app_id.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/app_id_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/identity.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/identity_vm.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/internal.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/internal_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/metadata.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/net.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/net_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/regen.sh
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/transaction.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/log/log.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/log/log_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/mail/mail.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/mail/mail_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/module/module.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/module/module_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/namespace.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/namespace_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/remote_api/client.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/remote_api/client_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/remote_api/remote_api.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/search/field.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/search/search.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/search/search_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/timeout.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/timeout_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/urlfetch/urlfetch.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/user/oauth.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/user/user.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/user/user_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp.go
create mode 100644 Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp_test.go
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
create mode 100644 Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index f6a126e1d2de..090fbc291a1b 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -10,11 +10,6 @@
"Comment": "null-12",
"Rev": "7dda39b2e7d5e265014674c5af696ba4186679e9"
},
- {
- "ImportPath": "code.google.com/p/goauth2/compute/serviceaccount",
- "Comment": "weekly-50",
- "Rev": "7fc9d958c83464bd7650240569bf93a102266e6a"
- },
{
"ImportPath": "code.google.com/p/goauth2/oauth",
"Comment": "weekly-50",
@@ -22,243 +17,243 @@
},
{
"ImportPath": "code.google.com/p/google-api-go-client/compute/v1",
- "Comment": "release-105",
- "Rev": "98c78185197025f935947caac56a7b6d022f89d2"
+ "Comment": "release-107",
+ "Rev": "6ddfebb10ece847f1ae09c701834f1b15abbd8b2"
},
{
"ImportPath": "code.google.com/p/google-api-go-client/container/v1beta1",
- "Comment": "release-105",
- "Rev": "98c78185197025f935947caac56a7b6d022f89d2"
+ "Comment": "release-107",
+ "Rev": "6ddfebb10ece847f1ae09c701834f1b15abbd8b2"
},
{
"ImportPath": "code.google.com/p/google-api-go-client/googleapi",
- "Comment": "release-105",
- "Rev": "98c78185197025f935947caac56a7b6d022f89d2"
+ "Comment": "release-107",
+ "Rev": "6ddfebb10ece847f1ae09c701834f1b15abbd8b2"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/admission",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/api",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authenticator",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/authorizer",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/handlers",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/auth/user",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/client",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/constraint",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/controller",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/conversion",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/health",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/healthz",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/httplog",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/labels",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/master",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/proxy",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/binding",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/endpoint",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/generic",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/service",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/tools",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
+ },
+ {
+ "ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/types",
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/ui",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/util",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/version",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
- },
- {
- "ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/volume",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/pkg/watch",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/admission/admit",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/auth/authenticator/token/tokenfile",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler",
- "Comment": "v0.8.0-276-gdbe1bd9",
- "Rev": "dbe1bd9b158ee047f4307ab59ee38652b2cd7c76"
+ "Comment": "v0.8.0-607-ge0acd75",
+ "Rev": "e0acd75629ec29bde764bcde29367146ae8b389b"
},
{
"ImportPath": "github.com/RangelReale/osin",
@@ -400,7 +395,7 @@
},
{
"ImportPath": "github.com/davecgh/go-spew/spew",
- "Rev": "83f84dc933714d51504ceed59f43ead21d096fe7"
+ "Rev": "1aaf839fb07e099361e445273993ccd9adc21b07"
},
{
"ImportPath": "github.com/docker/docker/dockerversion",
@@ -409,13 +404,13 @@
},
{
"ImportPath": "github.com/docker/docker/pkg/archive",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/fileutils",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/httputils",
@@ -424,23 +419,23 @@
},
{
"ImportPath": "github.com/docker/docker/pkg/ioutils",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/pools",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/promise",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/system",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/pkg/tarsum",
@@ -459,8 +454,8 @@
},
{
"ImportPath": "github.com/docker/docker/pkg/units",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/docker/docker/utils",
@@ -469,8 +464,8 @@
},
{
"ImportPath": "github.com/docker/docker/vendor/src/code.google.com/p/go/src/pkg/archive/tar",
- "Comment": "v1.4.1-108-g364720b",
- "Rev": "364720b5e7e725cdc466171de873eefdb8609a33"
+ "Comment": "v1.4.1-656-g2115131",
+ "Rev": "211513156dc1ace48e630b4bf4ea0fcfdc8d9abf"
},
{
"ImportPath": "github.com/elazarl/go-bindata-assetfs",
@@ -483,8 +478,8 @@
},
{
"ImportPath": "github.com/fsouza/go-dockerclient",
- "Comment": "0.2.1-334-g9c377ff",
- "Rev": "9c377ffd9aed48a012adf1c3fd517fe98394120b"
+ "Comment": "0.2.1-357-gd197177",
+ "Rev": "d19717788084716e4adff0515be6289aa04bec46"
},
{
"ImportPath": "github.com/getsentry/raven-go",
@@ -498,6 +493,10 @@
"ImportPath": "github.com/golang/glog",
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
},
+ {
+ "ImportPath": "github.com/golang/protobuf/proto",
+ "Rev": "7f07925444bb51fa4cf9dfe6f7661876f8852275"
+ },
{
"ImportPath": "github.com/google/cadvisor/client",
"Comment": "0.6.2",
@@ -557,7 +556,7 @@
},
{
"ImportPath": "github.com/spf13/pflag",
- "Rev": "463bdc838f2b35e9307e91d480878bda5fff7232"
+ "Rev": "f82776d6cc998e3c026baef7b24409ff49fe5c8d"
},
{
"ImportPath": "golang.org/x/net/context",
@@ -571,6 +570,22 @@
"ImportPath": "golang.org/x/net/websocket",
"Rev": "cbcac7bb8415db9b6cb4d1ebab1dc9afbd688b97"
},
+ {
+ "ImportPath": "golang.org/x/oauth2",
+ "Rev": "2e66694fea36dc820636630792a55cdc6987e05b"
+ },
+ {
+ "ImportPath": "google.golang.org/appengine",
+ "Rev": "6aa67407028217c352e215f5af320a429d0bcf5f"
+ },
+ {
+ "ImportPath": "google.golang.org/cloud/compute/metadata",
+ "Rev": "2e43671e4ad874a7bca65746ff3edb38e6e93762"
+ },
+ {
+ "ImportPath": "google.golang.org/cloud/internal",
+ "Rev": "2e43671e4ad874a7bca65746ff3edb38e6e93762"
+ },
{
"ImportPath": "gopkg.in/yaml.v2",
"Rev": "d466437aa4adc35830964cffc5b5f262c63ddcb4"
diff --git a/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go b/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
deleted file mode 100644
index ed3e10cf5f9c..000000000000
--- a/Godeps/_workspace/src/code.google.com/p/goauth2/compute/serviceaccount/serviceaccount.go
+++ /dev/null
@@ -1,172 +0,0 @@
-// Copyright 2013 The goauth2 Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package serviceaccount provides support for making OAuth2-authorized
-// HTTP requests from Google Compute Engine instances using service accounts.
-//
-// See: https://developers.google.com/compute/docs/authentication
-//
-// Example usage:
-//
-// client, err := serviceaccount.NewClient(&serviceaccount.Options{})
-// if err != nil {
-// c.Errorf("failed to create service account client: %q", err)
-// return err
-// }
-// client.Post("https://www.googleapis.com/compute/...", ...)
-// client.Post("https://www.googleapis.com/bigquery/...", ...)
-//
-package serviceaccount
-
-import (
- "encoding/json"
- "net/http"
- "net/url"
- "path"
- "sync"
- "time"
-
- "code.google.com/p/goauth2/oauth"
-)
-
-const (
- metadataServer = "metadata"
- serviceAccountPath = "/computeMetadata/v1/instance/service-accounts"
-)
-
-// Options configures a service account Client.
-type Options struct {
- // Underlying transport of service account Client.
- // If nil, http.DefaultTransport is used.
- Transport http.RoundTripper
-
- // Service account name.
- // If empty, "default" is used.
- Account string
-}
-
-// NewClient returns an *http.Client authorized with the service account
-// configured in the Google Compute Engine instance.
-func NewClient(opt *Options) (*http.Client, error) {
- tr := http.DefaultTransport
- account := "default"
- if opt != nil {
- if opt.Transport != nil {
- tr = opt.Transport
- }
- if opt.Account != "" {
- account = opt.Account
- }
- }
- t := &transport{
- Transport: tr,
- Account: account,
- }
- // Get the initial access token.
- if _, err := fetchToken(t); err != nil {
- return nil, err
- }
- return &http.Client{
- Transport: t,
- }, nil
-}
-
-type tokenData struct {
- AccessToken string `json:"access_token"`
- ExpiresIn float64 `json:"expires_in"`
- TokenType string `json:"token_type"`
-}
-
-// transport is an oauth.Transport with a custom Refresh and RoundTrip implementation.
-type transport struct {
- Transport http.RoundTripper
- Account string
-
- mu sync.Mutex
- *oauth.Token
-}
-
-// Refresh renews the transport's AccessToken.
-// t.mu sould be held when this is called.
-func (t *transport) refresh() error {
- // https://developers.google.com/compute/docs/metadata#transitioning
- // v1 requires "Metadata-Flavor: Google" header.
- tokenURL := &url.URL{
- Scheme: "http",
- Host: metadataServer,
- Path: path.Join(serviceAccountPath, t.Account, "token"),
- }
- req, err := http.NewRequest("GET", tokenURL.String(), nil)
- if err != nil {
- return err
- }
- req.Header.Add("Metadata-Flavor", "Google")
- resp, err := http.DefaultClient.Do(req)
- if err != nil {
- return err
- }
- defer resp.Body.Close()
- d := json.NewDecoder(resp.Body)
- var token tokenData
- err = d.Decode(&token)
- if err != nil {
- return err
- }
- t.Token = &oauth.Token{
- AccessToken: token.AccessToken,
- Expiry: time.Now().Add(time.Duration(token.ExpiresIn) * time.Second),
- }
- return nil
-}
-
-// Refresh renews the transport's AccessToken.
-func (t *transport) Refresh() error {
- t.mu.Lock()
- defer t.mu.Unlock()
- return t.refresh()
-}
-
-// Fetch token from cache or generate a new one if cache miss or expired.
-func fetchToken(t *transport) (*oauth.Token, error) {
- // Get a new token using Refresh in case of a cache miss of if it has expired.
- t.mu.Lock()
- defer t.mu.Unlock()
- if t.Token == nil || t.Expired() {
- if err := t.refresh(); err != nil {
- return nil, err
- }
- }
- return t.Token, nil
-}
-
-// cloneRequest returns a clone of the provided *http.Request.
-// The clone is a shallow copy of the struct and its Header map.
-func cloneRequest(r *http.Request) *http.Request {
- // shallow copy of the struct
- r2 := new(http.Request)
- *r2 = *r
- // deep copy of the Header
- r2.Header = make(http.Header)
- for k, s := range r.Header {
- r2.Header[k] = s
- }
- return r2
-}
-
-// RoundTrip issues an authorized HTTP request and returns its response.
-func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
- token, err := fetchToken(t)
- if err != nil {
- return nil, err
- }
-
- // To set the Authorization header, we must make a copy of the Request
- // so that we don't modify the Request we were given.
- // This is required by the specification of http.RoundTripper.
- newReq := cloneRequest(req)
- newReq.Header.Set("Authorization", "Bearer "+token.AccessToken)
-
- // Make the HTTP request.
- return t.Transport.RoundTrip(newReq)
-}
diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-api.json b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-api.json
new file mode 100644
index 000000000000..726a0ac363d3
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-api.json
@@ -0,0 +1,9526 @@
+{
+ "kind": "discovery#restDescription",
+ "etag": "\"l66ggWbucbkBw9Lpos72oziyefE/qp3DHGvWPpREzEdWk7WwxnpgC9w\"",
+ "discoveryVersion": "v1",
+ "id": "compute:v1",
+ "name": "compute",
+ "version": "v1",
+ "revision": "20141014",
+ "title": "Compute Engine API",
+ "description": "API for the Google Compute Engine service.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "https://www.google.com/images/icons/product/compute_engine-16.png",
+ "x32": "https://www.google.com/images/icons/product/compute_engine-32.png"
+ },
+ "documentationLink": "https://developers.google.com/compute/docs/reference/latest/",
+ "protocol": "rest",
+ "baseUrl": "https://www.googleapis.com/compute/v1/projects/",
+ "basePath": "/compute/v1/projects/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "compute/v1/projects/",
+ "batchPath": "batch",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/compute": {
+ "description": "View and manage your Google Compute Engine resources"
+ },
+ "https://www.googleapis.com/auth/compute.readonly": {
+ "description": "View your Google Compute Engine resources"
+ },
+ "https://www.googleapis.com/auth/devstorage.full_control": {
+ "description": "Manage your data and permissions in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_only": {
+ "description": "View your data in Google Cloud Storage"
+ },
+ "https://www.googleapis.com/auth/devstorage.read_write": {
+ "description": "Manage your data in Google Cloud Storage"
+ }
+ }
+ }
+ },
+ "schemas": {
+ "AccessConfig": {
+ "id": "AccessConfig",
+ "type": "object",
+ "description": "An access configuration attached to an instance's network interface.",
+ "properties": {
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#accessConfig"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of this access configuration."
+ },
+ "natIP": {
+ "type": "string",
+ "description": "An external IP address associated with this instance. Specify an unused static IP address available to the project. If not specified, the external IP will be drawn from a shared ephemeral pool."
+ },
+ "type": {
+ "type": "string",
+ "description": "Type of configuration. Must be set to \"ONE_TO_ONE_NAT\". This configures port-for-port NAT to the internet.",
+ "default": "ONE_TO_ONE_NAT",
+ "enum": [
+ "ONE_TO_ONE_NAT"
+ ],
+ "enumDescriptions": [
+ ""
+ ]
+ }
+ }
+ },
+ "Address": {
+ "id": "Address",
+ "type": "object",
+ "description": "A reserved address resource.",
+ "properties": {
+ "address": {
+ "type": "string",
+ "description": "The IP address represented by this resource."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#address"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.addresses.insert"
+ ]
+ }
+ },
+ "region": {
+ "type": "string",
+ "description": "URL of the region where the regional address resides (output only). This field is not applicable to global addresses."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of the address (output only).",
+ "enum": [
+ "IN_USE",
+ "RESERVED"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "users": {
+ "type": "array",
+ "description": "The resources that are using this address resource.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "AddressAggregatedList": {
+ "id": "AddressAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped address lists.",
+ "additionalProperties": {
+ "$ref": "AddressesScopedList",
+ "description": "Name of the scope containing this set of addresses."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#addressAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "AddressList": {
+ "id": "AddressList",
+ "type": "object",
+ "description": "Contains a list of address resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The address resources.",
+ "items": {
+ "$ref": "Address"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#addressList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ }
+ }
+ },
+ "AddressesScopedList": {
+ "id": "AddressesScopedList",
+ "type": "object",
+ "properties": {
+ "addresses": {
+ "type": "array",
+ "description": "List of addresses contained in this scope.",
+ "items": {
+ "$ref": "Address"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of addresses when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "AttachedDisk": {
+ "id": "AttachedDisk",
+ "type": "object",
+ "description": "An instance-attached disk resource.",
+ "properties": {
+ "autoDelete": {
+ "type": "boolean",
+ "description": "Whether the disk will be auto-deleted when the instance is deleted (but not when the disk is detached from the instance)."
+ },
+ "boot": {
+ "type": "boolean",
+ "description": "Indicates that this is a boot disk. VM will use the first partition of the disk for its root filesystem."
+ },
+ "deviceName": {
+ "type": "string",
+ "description": "Persistent disk only; must be unique within the instance when specified. This represents a unique device name that is reflected into the /dev/ tree of a Linux operating system running within the instance. If not specified, a default will be chosen by the system."
+ },
+ "index": {
+ "type": "integer",
+ "description": "A zero-based index to assign to this disk, where 0 is reserved for the boot disk. If not specified, the server will choose an appropriate value (output only).",
+ "format": "int32"
+ },
+ "initializeParams": {
+ "$ref": "AttachedDiskInitializeParams",
+ "description": "Initialization parameters."
+ },
+ "interface": {
+ "type": "string",
+ "enum": [
+ "NVME",
+ "SCSI"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#attachedDisk"
+ },
+ "licenses": {
+ "type": "array",
+ "description": "Public visible licenses.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "mode": {
+ "type": "string",
+ "description": "The mode in which to attach this disk, either \"READ_WRITE\" or \"READ_ONLY\".",
+ "enum": [
+ "READ_ONLY",
+ "READ_WRITE"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "source": {
+ "type": "string",
+ "description": "Persistent disk only; the URL of the persistent disk resource."
+ },
+ "type": {
+ "type": "string",
+ "description": "Type of the disk, either \"SCRATCH\" or \"PERSISTENT\". Note that persistent disks must be created before you can specify them here.",
+ "enum": [
+ "PERSISTENT",
+ "SCRATCH"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ],
+ "annotations": {
+ "required": [
+ "compute.instances.insert"
+ ]
+ }
+ }
+ }
+ },
+ "AttachedDiskInitializeParams": {
+ "id": "AttachedDiskInitializeParams",
+ "type": "object",
+ "description": "Initialization parameters for the new disk (input-only). Can only be specified on the boot disk or local SSDs. Mutually exclusive with 'source'.",
+ "properties": {
+ "diskName": {
+ "type": "string",
+ "description": "Name of the disk (when not provided defaults to the name of the instance)."
+ },
+ "diskSizeGb": {
+ "type": "string",
+ "description": "Size of the disk in base-2 GB.",
+ "format": "int64"
+ },
+ "diskType": {
+ "type": "string",
+ "description": "URL of the disk type resource describing which disk type to use to create the disk; provided by the client when the disk is created."
+ },
+ "sourceImage": {
+ "type": "string",
+ "description": "The source image used to create this disk."
+ }
+ }
+ },
+ "Backend": {
+ "id": "Backend",
+ "type": "object",
+ "description": "Message containing information of one individual backend.",
+ "properties": {
+ "balancingMode": {
+ "type": "string",
+ "description": "The balancing mode of this backend, default is UTILIZATION.",
+ "enum": [
+ "RATE",
+ "UTILIZATION"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "capacityScaler": {
+ "type": "number",
+ "description": "The multiplier (a value between 0 and 1e6) of the max capacity (CPU or RPS, depending on 'balancingMode') the group should serve up to. 0 means the group is totally drained. Default value is 1. Valid range is [0, 1e6].",
+ "format": "float"
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource, which is provided by the client when the resource is created."
+ },
+ "group": {
+ "type": "string",
+ "description": "URL of a zonal Cloud Resource View resource. This resource view defines the list of instances that serve traffic. Member virtual machine instances from each resource view must live in the same zone as the resource view itself. No two backends in a backend service are allowed to use same Resource View resource."
+ },
+ "maxRate": {
+ "type": "integer",
+ "description": "The max RPS of the group. Can be used with either balancing mode, but required if RATE mode. For RATE mode, either maxRate or maxRatePerInstance must be set.",
+ "format": "int32"
+ },
+ "maxRatePerInstance": {
+ "type": "number",
+ "description": "The max RPS that a single backed instance can handle. This is used to calculate the capacity of the group. Can be used in either balancing mode. For RATE mode, either maxRate or maxRatePerInstance must be set.",
+ "format": "float"
+ },
+ "maxUtilization": {
+ "type": "number",
+ "description": "Used when 'balancingMode' is UTILIZATION. This ratio defines the CPU utilization target for the group. The default is 0.8. Valid range is [0, 1].",
+ "format": "float"
+ }
+ }
+ },
+ "BackendService": {
+ "id": "BackendService",
+ "type": "object",
+ "description": "A BackendService resource. This resource defines a group of backend VMs together with their serving capacity.",
+ "properties": {
+ "backends": {
+ "type": "array",
+ "description": "The list of backends that serve this BackendService.",
+ "items": {
+ "$ref": "Backend"
+ }
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "fingerprint": {
+ "type": "string",
+ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a BackendService. An up-to-date fingerprint must be provided in order to update the BackendService.",
+ "format": "byte"
+ },
+ "healthChecks": {
+ "type": "array",
+ "description": "The list of URLs to the HttpHealthCheck resource for health checking this BackendService. Currently at most one health check can be specified, and a health check is required.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#backendService"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "port": {
+ "type": "integer",
+ "description": "Deprecated in favor of port_name. The TCP port to connect on the backend. The default value is 80.",
+ "format": "int32"
+ },
+ "portName": {
+ "type": "string",
+ "description": "Name of backend port. The same name should appear in the resource views referenced by this service. Required."
+ },
+ "protocol": {
+ "type": "string",
+ "enum": [
+ "HTTP"
+ ],
+ "enumDescriptions": [
+ ""
+ ]
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "timeoutSec": {
+ "type": "integer",
+ "description": "How many seconds to wait for the backend before considering it a failed request. Default is 30 seconds.",
+ "format": "int32"
+ }
+ }
+ },
+ "BackendServiceGroupHealth": {
+ "id": "BackendServiceGroupHealth",
+ "type": "object",
+ "properties": {
+ "healthStatus": {
+ "type": "array",
+ "items": {
+ "$ref": "HealthStatus"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#backendServiceGroupHealth"
+ }
+ }
+ },
+ "BackendServiceList": {
+ "id": "BackendServiceList",
+ "type": "object",
+ "description": "Contains a list of BackendService resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The BackendService resources.",
+ "items": {
+ "$ref": "BackendService"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#backendServiceList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "DeprecationStatus": {
+ "id": "DeprecationStatus",
+ "type": "object",
+ "description": "Deprecation status for a public resource.",
+ "properties": {
+ "deleted": {
+ "type": "string",
+ "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DELETED."
+ },
+ "deprecated": {
+ "type": "string",
+ "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to DEPRECATED."
+ },
+ "obsolete": {
+ "type": "string",
+ "description": "An optional RFC3339 timestamp on or after which the deprecation state of this resource will be changed to OBSOLETE."
+ },
+ "replacement": {
+ "type": "string",
+ "description": "A URL of the suggested replacement for the deprecated resource. The deprecated resource and its replacement must be resources of the same kind."
+ },
+ "state": {
+ "type": "string",
+ "description": "The deprecation state. Can be \"DEPRECATED\", \"OBSOLETE\", or \"DELETED\". Operations which create a new resource using a \"DEPRECATED\" resource will return successfully, but with a warning indicating the deprecated resource and recommending its replacement. New uses of \"OBSOLETE\" or \"DELETED\" resources will result in an error.",
+ "enum": [
+ "DELETED",
+ "DEPRECATED",
+ "OBSOLETE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "Disk": {
+ "id": "Disk",
+ "type": "object",
+ "description": "A persistent disk resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#disk"
+ },
+ "licenses": {
+ "type": "array",
+ "description": "Public visible licenses.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.disks.insert"
+ ]
+ }
+ },
+ "options": {
+ "type": "string",
+ "description": "Internal use only."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "sizeGb": {
+ "type": "string",
+ "description": "Size of the persistent disk, specified in GB. This parameter is optional when creating a disk from a disk image or a snapshot, otherwise it is required.",
+ "format": "int64"
+ },
+ "sourceImage": {
+ "type": "string",
+ "description": "The source image used to create this disk."
+ },
+ "sourceImageId": {
+ "type": "string",
+ "description": "The 'id' value of the image used to create this disk. This value may be used to determine whether the disk was created from the current or a previous instance of a given image."
+ },
+ "sourceSnapshot": {
+ "type": "string",
+ "description": "The source snapshot used to create this disk."
+ },
+ "sourceSnapshotId": {
+ "type": "string",
+ "description": "The 'id' value of the snapshot used to create this disk. This value may be used to determine whether the disk was created from the current or a previous instance of a given disk snapshot."
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of disk creation (output only).",
+ "enum": [
+ "CREATING",
+ "FAILED",
+ "READY",
+ "RESTORING"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "type": {
+ "type": "string",
+ "description": "URL of the disk type resource describing which disk type to use to create the disk; provided by the client when the disk is created."
+ },
+ "zone": {
+ "type": "string",
+ "description": "URL of the zone where the disk resides (output only)."
+ }
+ }
+ },
+ "DiskAggregatedList": {
+ "id": "DiskAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped disk lists.",
+ "additionalProperties": {
+ "$ref": "DisksScopedList",
+ "description": "Name of the scope containing this set of disks."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#diskAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "DiskList": {
+ "id": "DiskList",
+ "type": "object",
+ "description": "Contains a list of persistent disk resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The persistent disk resources.",
+ "items": {
+ "$ref": "Disk"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#diskList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "DiskType": {
+ "id": "DiskType",
+ "type": "object",
+ "description": "A disk type resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "defaultDiskSizeGb": {
+ "type": "string",
+ "description": "Server defined default disk size in gb (output only).",
+ "format": "int64"
+ },
+ "deprecated": {
+ "$ref": "DeprecationStatus",
+ "description": "The deprecation status associated with this disk type."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#diskType"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "validDiskSize": {
+ "type": "string",
+ "description": "An optional textual descroption of the valid disk size, e.g., \"10GB-10TB\"."
+ },
+ "zone": {
+ "type": "string",
+ "description": "Url of the zone where the disk type resides (output only)."
+ }
+ }
+ },
+ "DiskTypeAggregatedList": {
+ "id": "DiskTypeAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped disk type lists.",
+ "additionalProperties": {
+ "$ref": "DiskTypesScopedList",
+ "description": "Name of the scope containing this set of disk types."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#diskTypeAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "DiskTypeList": {
+ "id": "DiskTypeList",
+ "type": "object",
+ "description": "Contains a list of disk type resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The disk type resources.",
+ "items": {
+ "$ref": "DiskType"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#diskTypeList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "DiskTypesScopedList": {
+ "id": "DiskTypesScopedList",
+ "type": "object",
+ "properties": {
+ "diskTypes": {
+ "type": "array",
+ "description": "List of disk types contained in this scope.",
+ "items": {
+ "$ref": "DiskType"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of disk types when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "DisksScopedList": {
+ "id": "DisksScopedList",
+ "type": "object",
+ "properties": {
+ "disks": {
+ "type": "array",
+ "description": "List of disks contained in this scope.",
+ "items": {
+ "$ref": "Disk"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of disks when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "Firewall": {
+ "id": "Firewall",
+ "type": "object",
+ "description": "A firewall resource.",
+ "properties": {
+ "allowed": {
+ "type": "array",
+ "description": "The list of rules specified by this firewall. Each rule specifies a protocol and port-range tuple that describes a permitted connection.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "IPProtocol": {
+ "type": "string",
+ "description": "Required; this is the IP protocol that is allowed for this rule. This can either be one of the following well known protocol strings [\"tcp\", \"udp\", \"icmp\", \"esp\", \"ah\", \"sctp\"], or the IP protocol number."
+ },
+ "ports": {
+ "type": "array",
+ "description": "An optional list of ports which are allowed. It is an error to specify this for any protocol that isn't UDP or TCP. Each entry must be either an integer or a range. If not specified, connections through any port are allowed.\n\nExample inputs include: [\"22\"], [\"80\",\"443\"] and [\"12345-12349\"].",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ }
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#firewall"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.firewalls.insert",
+ "compute.firewalls.patch"
+ ]
+ }
+ },
+ "network": {
+ "type": "string",
+ "description": "URL of the network to which this firewall is applied; provided by the client when the firewall is created."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "sourceRanges": {
+ "type": "array",
+ "description": "A list of IP address blocks expressed in CIDR format which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "sourceTags": {
+ "type": "array",
+ "description": "A list of instance tags which this rule applies to. One or both of sourceRanges and sourceTags may be set; an inbound connection is allowed if either the range or the tag of the source matches.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "targetTags": {
+ "type": "array",
+ "description": "A list of instance tags indicating sets of instances located on network which may make network connections as specified in allowed. If no targetTags are specified, the firewall rule applies to all instances on the specified network.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "FirewallList": {
+ "id": "FirewallList",
+ "type": "object",
+ "description": "Contains a list of firewall resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The firewall resources.",
+ "items": {
+ "$ref": "Firewall"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#firewallList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "ForwardingRule": {
+ "id": "ForwardingRule",
+ "type": "object",
+ "description": "A ForwardingRule resource. A ForwardingRule resource specifies which pool of target VMs to forward a packet to if it matches the given [IPAddress, IPProtocol, portRange] tuple.",
+ "properties": {
+ "IPAddress": {
+ "type": "string",
+ "description": "Value of the reserved IP address that this forwarding rule is serving on behalf of. For global forwarding rules, the address must be a global IP; for regional forwarding rules, the address must live in the same region as the forwarding rule. If left empty (default value), an ephemeral IP from the same scope (global or regional) will be assigned."
+ },
+ "IPProtocol": {
+ "type": "string",
+ "description": "The IP protocol to which this rule applies, valid options are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.",
+ "enum": [
+ "AH",
+ "ESP",
+ "SCTP",
+ "TCP",
+ "UDP"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#forwardingRule"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "portRange": {
+ "type": "string",
+ "description": "Applicable only when 'IPProtocol' is 'TCP', 'UDP' or 'SCTP', only packets addressed to ports in the specified range will be forwarded to 'target'. If 'portRange' is left empty (default value), all ports are forwarded. Forwarding rules with the same [IPAddress, IPProtocol] pair must have disjoint port ranges."
+ },
+ "region": {
+ "type": "string",
+ "description": "URL of the region where the regional forwarding rule resides (output only). This field is not applicable to global forwarding rules."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "target": {
+ "type": "string",
+ "description": "The URL of the target resource to receive the matched traffic. For regional forwarding rules, this target must live in the same region as the forwarding rule. For global forwarding rules, this target must be a global TargetHttpProxy resource."
+ }
+ }
+ },
+ "ForwardingRuleAggregatedList": {
+ "id": "ForwardingRuleAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped forwarding rule lists.",
+ "additionalProperties": {
+ "$ref": "ForwardingRulesScopedList",
+ "description": "Name of the scope containing this set of forwarding rules."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#forwardingRuleAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "ForwardingRuleList": {
+ "id": "ForwardingRuleList",
+ "type": "object",
+ "description": "Contains a list of ForwardingRule resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The ForwardingRule resources.",
+ "items": {
+ "$ref": "ForwardingRule"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#forwardingRuleList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "ForwardingRulesScopedList": {
+ "id": "ForwardingRulesScopedList",
+ "type": "object",
+ "properties": {
+ "forwardingRules": {
+ "type": "array",
+ "description": "List of forwarding rules contained in this scope.",
+ "items": {
+ "$ref": "ForwardingRule"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of forwarding rules when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "HealthCheckReference": {
+ "id": "HealthCheckReference",
+ "type": "object",
+ "properties": {
+ "healthCheck": {
+ "type": "string"
+ }
+ }
+ },
+ "HealthStatus": {
+ "id": "HealthStatus",
+ "type": "object",
+ "properties": {
+ "healthState": {
+ "type": "string",
+ "description": "Health state of the instance.",
+ "enum": [
+ "HEALTHY",
+ "UNHEALTHY"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "instance": {
+ "type": "string",
+ "description": "URL of the instance resource."
+ },
+ "ipAddress": {
+ "type": "string",
+ "description": "The IP address represented by this resource."
+ },
+ "port": {
+ "type": "integer",
+ "description": "The port on the instance.",
+ "format": "int32"
+ }
+ }
+ },
+ "HostRule": {
+ "id": "HostRule",
+ "type": "object",
+ "description": "A host-matching rule for a URL. If matched, will use the named PathMatcher to select the BackendService.",
+ "properties": {
+ "description": {
+ "type": "string"
+ },
+ "hosts": {
+ "type": "array",
+ "description": "The list of host patterns to match. They must be valid hostnames except that they may start with *. or *-. The * acts like a glob and will match any string of atoms (separated by .s and -s) to the left.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "pathMatcher": {
+ "type": "string",
+ "description": "The name of the PathMatcher to match the path portion of the URL, if this HostRule matches the URL's host portion."
+ }
+ }
+ },
+ "HttpHealthCheck": {
+ "id": "HttpHealthCheck",
+ "type": "object",
+ "description": "An HttpHealthCheck resource. This resource defines a template for how individual VMs should be checked for health, via HTTP.",
+ "properties": {
+ "checkIntervalSec": {
+ "type": "integer",
+ "description": "How often (in seconds) to send a health check. The default value is 5 seconds.",
+ "format": "int32"
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "healthyThreshold": {
+ "type": "integer",
+ "description": "A so-far unhealthy VM will be marked healthy after this many consecutive successes. The default value is 2.",
+ "format": "int32"
+ },
+ "host": {
+ "type": "string",
+ "description": "The value of the host header in the HTTP health check request. If left empty (default value), the public IP on behalf of which this health check is performed will be used."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#httpHealthCheck"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "port": {
+ "type": "integer",
+ "description": "The TCP port number for the HTTP health check request. The default value is 80.",
+ "format": "int32"
+ },
+ "requestPath": {
+ "type": "string",
+ "description": "The request path of the HTTP health check request. The default value is \"/\"."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "timeoutSec": {
+ "type": "integer",
+ "description": "How long (in seconds) to wait before claiming failure. The default value is 5 seconds.",
+ "format": "int32"
+ },
+ "unhealthyThreshold": {
+ "type": "integer",
+ "description": "A so-far healthy VM will be marked unhealthy after this many consecutive failures. The default value is 2.",
+ "format": "int32"
+ }
+ }
+ },
+ "HttpHealthCheckList": {
+ "id": "HttpHealthCheckList",
+ "type": "object",
+ "description": "Contains a list of HttpHealthCheck resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The HttpHealthCheck resources.",
+ "items": {
+ "$ref": "HttpHealthCheck"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#httpHealthCheckList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "Image": {
+ "id": "Image",
+ "type": "object",
+ "description": "A disk image resource.",
+ "properties": {
+ "archiveSizeBytes": {
+ "type": "string",
+ "description": "Size of the image tar.gz archive stored in Google Cloud Storage (in bytes).",
+ "format": "int64"
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "deprecated": {
+ "$ref": "DeprecationStatus",
+ "description": "The deprecation status associated with this image."
+ },
+ "description": {
+ "type": "string",
+ "description": "Textual description of the resource; provided by the client when the resource is created."
+ },
+ "diskSizeGb": {
+ "type": "string",
+ "description": "Size of the image when restored onto a disk (in GiB).",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#image"
+ },
+ "licenses": {
+ "type": "array",
+ "description": "Public visible licenses.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.images.insert"
+ ]
+ }
+ },
+ "rawDisk": {
+ "type": "object",
+ "description": "The raw disk image parameters.",
+ "properties": {
+ "containerType": {
+ "type": "string",
+ "description": "The format used to encode and transmit the block device. Should be TAR. This is just a container and transmission format and not a runtime format. Provided by the client when the disk image is created.",
+ "enum": [
+ "TAR"
+ ],
+ "enumDescriptions": [
+ ""
+ ]
+ },
+ "sha1Checksum": {
+ "type": "string",
+ "description": "An optional SHA1 checksum of the disk image before unpackaging; provided by the client when the disk image is created.",
+ "pattern": "[a-f0-9]{40}"
+ },
+ "source": {
+ "type": "string",
+ "description": "The full Google Cloud Storage URL where the disk image is stored; provided by the client when the disk image is created.",
+ "annotations": {
+ "required": [
+ "compute.images.insert"
+ ]
+ }
+ }
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "sourceDisk": {
+ "type": "string",
+ "description": "The source disk used to create this image."
+ },
+ "sourceDiskId": {
+ "type": "string",
+ "description": "The 'id' value of the disk used to create this image. This value may be used to determine whether the image was taken from the current or a previous instance of a given disk name."
+ },
+ "sourceType": {
+ "type": "string",
+ "description": "Must be \"RAW\"; provided by the client when the disk image is created.",
+ "default": "RAW",
+ "enum": [
+ "RAW"
+ ],
+ "enumDescriptions": [
+ ""
+ ]
+ },
+ "status": {
+ "type": "string",
+ "description": "Status of the image (output only). It will be one of the following READY - after image has been successfully created and is ready for use FAILED - if creating the image fails for some reason PENDING - the image creation is in progress An image can be used to create other resources such as instances only after the image has been successfully created and the status is set to READY.",
+ "enum": [
+ "FAILED",
+ "PENDING",
+ "READY"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "ImageList": {
+ "id": "ImageList",
+ "type": "object",
+ "description": "Contains a list of disk image resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The disk image resources.",
+ "items": {
+ "$ref": "Image"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#imageList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "Instance": {
+ "id": "Instance",
+ "type": "object",
+ "description": "An instance resource.",
+ "properties": {
+ "canIpForward": {
+ "type": "boolean",
+ "description": "Allows this instance to send packets with source IP addresses other than its own and receive packets with destination IP addresses other than its own. If this instance will be used as an IP gateway or it will be set as the next-hop in a Route resource, say true. If unsure, leave this set to false."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "disks": {
+ "type": "array",
+ "description": "Array of disks associated with this instance. Persistent disks must be created before you can assign them.",
+ "items": {
+ "$ref": "AttachedDisk"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#instance"
+ },
+ "machineType": {
+ "type": "string",
+ "description": "URL of the machine type resource describing which machine type to use to host the instance; provided by the client when the instance is created.",
+ "annotations": {
+ "required": [
+ "compute.instances.insert"
+ ]
+ }
+ },
+ "metadata": {
+ "$ref": "Metadata",
+ "description": "Metadata key/value pairs assigned to this instance. Consists of custom metadata or predefined keys; see Instance documentation for more information."
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.instances.insert"
+ ]
+ }
+ },
+ "networkInterfaces": {
+ "type": "array",
+ "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.",
+ "items": {
+ "$ref": "NetworkInterface"
+ }
+ },
+ "scheduling": {
+ "$ref": "Scheduling",
+ "description": "Scheduling options for this instance."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ },
+ "serviceAccounts": {
+ "type": "array",
+ "description": "A list of service accounts each with specified scopes, for which access tokens are to be made available to the instance through metadata queries.",
+ "items": {
+ "$ref": "ServiceAccount"
+ }
+ },
+ "status": {
+ "type": "string",
+ "description": "Instance status. One of the following values: \"PROVISIONING\", \"STAGING\", \"RUNNING\", \"STOPPING\", \"STOPPED\", \"TERMINATED\" (output only).",
+ "enum": [
+ "PROVISIONING",
+ "RUNNING",
+ "STAGING",
+ "STOPPED",
+ "STOPPING",
+ "TERMINATED"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "statusMessage": {
+ "type": "string",
+ "description": "An optional, human-readable explanation of the status (output only)."
+ },
+ "tags": {
+ "$ref": "Tags",
+ "description": "A list of tags to be applied to this instance. Used to identify valid sources or targets for network firewalls. Provided by the client on instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035."
+ },
+ "zone": {
+ "type": "string",
+ "description": "URL of the zone where the instance resides (output only)."
+ }
+ }
+ },
+ "InstanceAggregatedList": {
+ "id": "InstanceAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped instance lists.",
+ "additionalProperties": {
+ "$ref": "InstancesScopedList",
+ "description": "Name of the scope containing this set of instances."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#instanceAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "InstanceList": {
+ "id": "InstanceList",
+ "type": "object",
+ "description": "Contains a list of instance resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "A list of instance resources.",
+ "items": {
+ "$ref": "Instance"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#instanceList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "InstanceProperties": {
+ "id": "InstanceProperties",
+ "type": "object",
+ "description": "",
+ "properties": {
+ "canIpForward": {
+ "type": "boolean",
+ "description": "Allows instances created based on this template to send packets with source IP addresses other than their own and receive packets with destination IP addresses other than their own. If these instances will be used as an IP gateway or it will be set as the next-hop in a Route resource, say true. If unsure, leave this set to false."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description for the instances created based on the instance template resource; provided by the client when the template is created."
+ },
+ "disks": {
+ "type": "array",
+ "description": "Array of disks associated with instance created based on this template.",
+ "items": {
+ "$ref": "AttachedDisk"
+ }
+ },
+ "machineType": {
+ "type": "string",
+ "description": "Name of the machine type resource describing which machine type to use to host the instances created based on this template; provided by the client when the instance template is created.",
+ "annotations": {
+ "required": [
+ "compute.instanceTemplates.insert"
+ ]
+ }
+ },
+ "metadata": {
+ "$ref": "Metadata",
+ "description": "Metadata key/value pairs assigned to instances created based on this template. Consists of custom metadata or predefined keys; see Instance documentation for more information."
+ },
+ "networkInterfaces": {
+ "type": "array",
+ "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then the instances created based on this template will have no external internet access.",
+ "items": {
+ "$ref": "NetworkInterface"
+ }
+ },
+ "scheduling": {
+ "$ref": "Scheduling",
+ "description": "Scheduling options for the instances created based on this template."
+ },
+ "serviceAccounts": {
+ "type": "array",
+ "description": "A list of service accounts each with specified scopes, for which access tokens are to be made available to the instances created based on this template, through metadata queries.",
+ "items": {
+ "$ref": "ServiceAccount"
+ }
+ },
+ "tags": {
+ "$ref": "Tags",
+ "description": "A list of tags to be applied to the instances created based on this template used to identify valid sources or targets for network firewalls. Provided by the client on instance creation. The tags can be later modified by the setTags method. Each tag within the list must comply with RFC1035."
+ }
+ }
+ },
+ "InstanceReference": {
+ "id": "InstanceReference",
+ "type": "object",
+ "properties": {
+ "instance": {
+ "type": "string"
+ }
+ }
+ },
+ "InstanceTemplate": {
+ "id": "InstanceTemplate",
+ "type": "object",
+ "description": "An Instance Template resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the instance template resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#instanceTemplate"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the instance template resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.instanceTemplates.insert"
+ ]
+ }
+ },
+ "properties": {
+ "$ref": "InstanceProperties",
+ "description": "The instance properties portion of this instance template resource."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ }
+ }
+ },
+ "InstanceTemplateList": {
+ "id": "InstanceTemplateList",
+ "type": "object",
+ "description": "Contains a list of instance template resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "A list of instance template resources.",
+ "items": {
+ "$ref": "InstanceTemplate"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#instanceTemplateList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "InstancesScopedList": {
+ "id": "InstancesScopedList",
+ "type": "object",
+ "properties": {
+ "instances": {
+ "type": "array",
+ "description": "List of instances contained in this scope.",
+ "items": {
+ "$ref": "Instance"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of instances when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "License": {
+ "id": "License",
+ "type": "object",
+ "description": "A license resource.",
+ "properties": {
+ "chargesUseFee": {
+ "type": "boolean",
+ "description": "If true, the customer will be charged license fee for running software that contains this license on an instance."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#license"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.images.insert"
+ ]
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ }
+ }
+ },
+ "MachineType": {
+ "id": "MachineType",
+ "type": "object",
+ "description": "A machine type resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "deprecated": {
+ "$ref": "DeprecationStatus",
+ "description": "The deprecation status associated with this machine type."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource."
+ },
+ "guestCpus": {
+ "type": "integer",
+ "description": "Count of CPUs exposed to the instance.",
+ "format": "int32"
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "imageSpaceGb": {
+ "type": "integer",
+ "description": "Space allotted for the image, defined in GB.",
+ "format": "int32"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#machineType"
+ },
+ "maximumPersistentDisks": {
+ "type": "integer",
+ "description": "Maximum persistent disks allowed.",
+ "format": "int32"
+ },
+ "maximumPersistentDisksSizeGb": {
+ "type": "string",
+ "description": "Maximum total persistent disks size (GB) allowed.",
+ "format": "int64"
+ },
+ "memoryMb": {
+ "type": "integer",
+ "description": "Physical memory assigned to the instance, defined in MB.",
+ "format": "int32"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "scratchDisks": {
+ "type": "array",
+ "description": "List of extended scratch disks assigned to the instance.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "diskGb": {
+ "type": "integer",
+ "description": "Size of the scratch disk, defined in GB.",
+ "format": "int32"
+ }
+ }
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "zone": {
+ "type": "string",
+ "description": "Url of the zone where the machine type resides (output only)."
+ }
+ }
+ },
+ "MachineTypeAggregatedList": {
+ "id": "MachineTypeAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped machine type lists.",
+ "additionalProperties": {
+ "$ref": "MachineTypesScopedList",
+ "description": "Name of the scope containing this set of machine types."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#machineTypeAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "MachineTypeList": {
+ "id": "MachineTypeList",
+ "type": "object",
+ "description": "Contains a list of machine type resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The machine type resources.",
+ "items": {
+ "$ref": "MachineType"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#machineTypeList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "MachineTypesScopedList": {
+ "id": "MachineTypesScopedList",
+ "type": "object",
+ "properties": {
+ "machineTypes": {
+ "type": "array",
+ "description": "List of machine types contained in this scope.",
+ "items": {
+ "$ref": "MachineType"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of machine types when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "Metadata": {
+ "id": "Metadata",
+ "type": "object",
+ "description": "A metadata key/value entry.",
+ "properties": {
+ "fingerprint": {
+ "type": "string",
+ "description": "Fingerprint of this resource. A hash of the metadata's contents. This field is used for optimistic locking. An up-to-date metadata fingerprint must be provided in order to modify metadata.",
+ "format": "byte"
+ },
+ "items": {
+ "type": "array",
+ "description": "Array of key/value pairs. The total size of all keys and values must be less than 512 KB.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "Key for the metadata entry. Keys must conform to the following regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is reflected as part of a URL in the metadata server. Additionally, to avoid ambiguity, keys must not conflict with any other metadata keys for the project.",
+ "pattern": "[a-zA-Z0-9-_]{1,128}",
+ "annotations": {
+ "required": [
+ "compute.instances.insert",
+ "compute.projects.setCommonInstanceMetadata"
+ ]
+ }
+ },
+ "value": {
+ "type": "string",
+ "description": "Value for the metadata entry. These are free-form strings, and only have meaning as interpreted by the image running in the instance. The only restriction placed on values is that their size must be less than or equal to 32768 bytes.",
+ "annotations": {
+ "required": [
+ "compute.instances.insert",
+ "compute.projects.setCommonInstanceMetadata"
+ ]
+ }
+ }
+ }
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#metadata"
+ }
+ }
+ },
+ "Network": {
+ "id": "Network",
+ "type": "object",
+ "description": "A network resource.",
+ "properties": {
+ "IPv4Range": {
+ "type": "string",
+ "description": "Required; The range of internal addresses that are legal on this network. This range is a CIDR specification, for example: 192.168.0.0/16. Provided by the client when the network is created.",
+ "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}/[0-9]{1,2}",
+ "annotations": {
+ "required": [
+ "compute.networks.insert"
+ ]
+ }
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "gatewayIPv4": {
+ "type": "string",
+ "description": "An optional address that is used for default routing to other networks. This must be within the range specified by IPv4Range, and is typically the first usable address in that range. If not specified, the default value is the first usable address in IPv4Range.",
+ "pattern": "[0-9]{1,3}(?:\\.[0-9]{1,3}){3}"
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#network"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.networks.insert"
+ ]
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ }
+ }
+ },
+ "NetworkInterface": {
+ "id": "NetworkInterface",
+ "type": "object",
+ "description": "A network interface resource attached to an instance.",
+ "properties": {
+ "accessConfigs": {
+ "type": "array",
+ "description": "Array of configurations for this interface. This specifies how this interface is configured to interact with other network services, such as connecting to the internet. Currently, ONE_TO_ONE_NAT is the only access config supported. If there are no accessConfigs specified, then this instance will have no external internet access.",
+ "items": {
+ "$ref": "AccessConfig"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the network interface, determined by the server; for network devices, these are e.g. eth0, eth1, etc. (output only)."
+ },
+ "network": {
+ "type": "string",
+ "description": "URL of the network resource attached to this interface.",
+ "annotations": {
+ "required": [
+ "compute.instances.insert"
+ ]
+ }
+ },
+ "networkIP": {
+ "type": "string",
+ "description": "An optional IPV4 internal network address assigned to the instance for this network interface (output only)."
+ }
+ }
+ },
+ "NetworkList": {
+ "id": "NetworkList",
+ "type": "object",
+ "description": "Contains a list of network resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The network resources.",
+ "items": {
+ "$ref": "Network"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#networkList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "Operation": {
+ "id": "Operation",
+ "type": "object",
+ "description": "An operation resource, used to manage asynchronous API requests.",
+ "properties": {
+ "clientOperationId": {
+ "type": "string",
+ "description": "An optional identifier specified by the client when the mutation was initiated. Must be unique for all operation resources in the project (output only)."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "endTime": {
+ "type": "string",
+ "description": "The time that this operation was completed. This is in RFC 3339 format (output only)."
+ },
+ "error": {
+ "type": "object",
+ "description": "If errors occurred during processing of this operation, this field will be populated (output only).",
+ "properties": {
+ "errors": {
+ "type": "array",
+ "description": "The array of errors encountered while processing this operation.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The error type identifier for this error."
+ },
+ "location": {
+ "type": "string",
+ "description": "Indicates the field in the request which caused the error. This property is optional."
+ },
+ "message": {
+ "type": "string",
+ "description": "An optional, human-readable error message."
+ }
+ }
+ }
+ }
+ }
+ },
+ "httpErrorMessage": {
+ "type": "string",
+ "description": "If operation fails, the HTTP error message returned, e.g. NOT FOUND. (output only)."
+ },
+ "httpErrorStatusCode": {
+ "type": "integer",
+ "description": "If operation fails, the HTTP error status code returned, e.g. 404. (output only).",
+ "format": "int32"
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "insertTime": {
+ "type": "string",
+ "description": "The time that this operation was requested. This is in RFC 3339 format (output only)."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#operation"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource (output only)."
+ },
+ "operationType": {
+ "type": "string",
+ "description": "Type of the operation. Examples include \"insert\", \"update\", and \"delete\" (output only)."
+ },
+ "progress": {
+ "type": "integer",
+ "description": "An optional progress indicator that ranges from 0 to 100. There is no requirement that this be linear or support any granularity of operations. This should not be used to guess at when the operation will be complete. This number should be monotonically increasing as the operation progresses (output only).",
+ "format": "int32"
+ },
+ "region": {
+ "type": "string",
+ "description": "URL of the region where the operation resides (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "startTime": {
+ "type": "string",
+ "description": "The time that this operation was started by the server. This is in RFC 3339 format (output only)."
+ },
+ "status": {
+ "type": "string",
+ "description": "Status of the operation. Can be one of the following: \"PENDING\", \"RUNNING\", or \"DONE\" (output only).",
+ "enum": [
+ "DONE",
+ "PENDING",
+ "RUNNING"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ ""
+ ]
+ },
+ "statusMessage": {
+ "type": "string",
+ "description": "An optional textual description of the current status of the operation (output only)."
+ },
+ "targetId": {
+ "type": "string",
+ "description": "Unique target id which identifies a particular incarnation of the target (output only).",
+ "format": "uint64"
+ },
+ "targetLink": {
+ "type": "string",
+ "description": "URL of the resource the operation is mutating (output only)."
+ },
+ "user": {
+ "type": "string",
+ "description": "User who requested the operation, for example \"user@example.com\" (output only)."
+ },
+ "warnings": {
+ "type": "array",
+ "description": "If warning messages generated during processing of this operation, this field will be populated (output only).",
+ "items": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ },
+ "zone": {
+ "type": "string",
+ "description": "URL of the zone where the operation resides (output only)."
+ }
+ }
+ },
+ "OperationAggregatedList": {
+ "id": "OperationAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped operation lists.",
+ "additionalProperties": {
+ "$ref": "OperationsScopedList",
+ "description": "Name of the scope containing this set of operations."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#operationAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "OperationList": {
+ "id": "OperationList",
+ "type": "object",
+ "description": "Contains a list of operation resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The operation resources.",
+ "items": {
+ "$ref": "Operation"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#operationList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "OperationsScopedList": {
+ "id": "OperationsScopedList",
+ "type": "object",
+ "properties": {
+ "operations": {
+ "type": "array",
+ "description": "List of operations contained in this scope.",
+ "items": {
+ "$ref": "Operation"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of operations when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "PathMatcher": {
+ "id": "PathMatcher",
+ "type": "object",
+ "description": "A matcher for the path portion of the URL. The BackendService from the longest-matched rule will serve the URL. If no rule was matched, the default_service will be used.",
+ "properties": {
+ "defaultService": {
+ "type": "string",
+ "description": "The URL to the BackendService resource. This will be used if none of the 'pathRules' defined by this PathMatcher is met by the URL's path portion."
+ },
+ "description": {
+ "type": "string"
+ },
+ "name": {
+ "type": "string",
+ "description": "The name to which this PathMatcher is referred by the HostRule."
+ },
+ "pathRules": {
+ "type": "array",
+ "description": "The list of path rules.",
+ "items": {
+ "$ref": "PathRule"
+ }
+ }
+ }
+ },
+ "PathRule": {
+ "id": "PathRule",
+ "type": "object",
+ "description": "A path-matching rule for a URL. If matched, will use the specified BackendService to handle the traffic arriving at this URL.",
+ "properties": {
+ "paths": {
+ "type": "array",
+ "description": "The list of path patterns to match. Each must start with / and the only place a * is allowed is at the end following a /. The string fed to the path matcher does not include any text after the first ? or #, and those chars are not allowed here.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "service": {
+ "type": "string",
+ "description": "The URL of the BackendService resource if this rule is matched."
+ }
+ }
+ },
+ "Project": {
+ "id": "Project",
+ "type": "object",
+ "description": "A project resource. Projects can be created only in the APIs Console. Unless marked otherwise, values can only be modified in the console.",
+ "properties": {
+ "commonInstanceMetadata": {
+ "$ref": "Metadata",
+ "description": "Metadata key/value pairs available to all instances contained in this project."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#project"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource."
+ },
+ "quotas": {
+ "type": "array",
+ "description": "Quotas assigned to this project.",
+ "items": {
+ "$ref": "Quota"
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "usageExportLocation": {
+ "$ref": "UsageExportLocation",
+ "description": "The location in Cloud Storage and naming method of the daily usage report."
+ }
+ }
+ },
+ "Quota": {
+ "id": "Quota",
+ "type": "object",
+ "description": "A quotas entry.",
+ "properties": {
+ "limit": {
+ "type": "number",
+ "description": "Quota limit for this metric.",
+ "format": "double"
+ },
+ "metric": {
+ "type": "string",
+ "description": "Name of the quota metric.",
+ "enum": [
+ "BACKEND_SERVICES",
+ "CPUS",
+ "DISKS",
+ "DISKS_TOTAL_GB",
+ "EPHEMERAL_ADDRESSES",
+ "FIREWALLS",
+ "FORWARDING_RULES",
+ "HEALTH_CHECKS",
+ "IMAGES",
+ "IMAGES_TOTAL_GB",
+ "INSTANCES",
+ "IN_USE_ADDRESSES",
+ "KERNELS",
+ "KERNELS_TOTAL_GB",
+ "LOCAL_SSD_TOTAL_GB",
+ "NETWORKS",
+ "OPERATIONS",
+ "ROUTES",
+ "SNAPSHOTS",
+ "SSD_TOTAL_GB",
+ "STATIC_ADDRESSES",
+ "TARGET_HTTP_PROXIES",
+ "TARGET_INSTANCES",
+ "TARGET_POOLS",
+ "URL_MAPS"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "usage": {
+ "type": "number",
+ "description": "Current usage of this metric.",
+ "format": "double"
+ }
+ }
+ },
+ "Region": {
+ "id": "Region",
+ "type": "object",
+ "description": "Region resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "deprecated": {
+ "$ref": "DeprecationStatus",
+ "description": "The deprecation status associated with this region."
+ },
+ "description": {
+ "type": "string",
+ "description": "Textual description of the resource."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#region"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource."
+ },
+ "quotas": {
+ "type": "array",
+ "description": "Quotas assigned to this region.",
+ "items": {
+ "$ref": "Quota"
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "status": {
+ "type": "string",
+ "description": "Status of the region, \"UP\" or \"DOWN\".",
+ "enum": [
+ "DOWN",
+ "UP"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "zones": {
+ "type": "array",
+ "description": "A list of zones homed in this region, in the form of resource URLs.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "RegionList": {
+ "id": "RegionList",
+ "type": "object",
+ "description": "Contains a list of region resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The region resources.",
+ "items": {
+ "$ref": "Region"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#regionList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "ResourceGroupReference": {
+ "id": "ResourceGroupReference",
+ "type": "object",
+ "properties": {
+ "group": {
+ "type": "string",
+ "description": "A URI referencing one of the resource views listed in the backend service."
+ }
+ }
+ },
+ "Route": {
+ "id": "Route",
+ "type": "object",
+ "description": "The route resource. A Route is a rule that specifies how certain packets should be handled by the virtual network. Routes are associated with VMs by tag and the set of Routes for a particular VM is called its routing table. For each packet leaving a VM, the system searches that VM's routing table for a single best matching Route. Routes match packets by destination IP address, preferring smaller or more specific ranges over larger ones. If there is a tie, the system selects the Route with the smallest priority value. If there is still a tie, it uses the layer three and four packet headers to select just one of the remaining matching Routes. The packet is then forwarded as specified by the next_hop field of the winning Route -- either to another VM destination, a VM gateway or a GCE operated gateway. Packets that do not match any Route in the sending VM's routing table will be dropped.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "destRange": {
+ "type": "string",
+ "description": "Which packets does this route apply to?",
+ "annotations": {
+ "required": [
+ "compute.routes.insert"
+ ]
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#route"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "annotations": {
+ "required": [
+ "compute.routes.insert"
+ ]
+ }
+ },
+ "network": {
+ "type": "string",
+ "description": "URL of the network to which this route is applied; provided by the client when the route is created.",
+ "annotations": {
+ "required": [
+ "compute.routes.insert"
+ ]
+ }
+ },
+ "nextHopGateway": {
+ "type": "string",
+ "description": "The URL to a gateway that should handle matching packets."
+ },
+ "nextHopInstance": {
+ "type": "string",
+ "description": "The URL to an instance that should handle matching packets."
+ },
+ "nextHopIp": {
+ "type": "string",
+ "description": "The network IP address of an instance that should handle matching packets."
+ },
+ "nextHopNetwork": {
+ "type": "string",
+ "description": "The URL of the local network if it should handle matching packets."
+ },
+ "priority": {
+ "type": "integer",
+ "description": "Breaks ties between Routes of equal specificity. Routes with smaller values win when tied with routes with larger values.",
+ "format": "uint32",
+ "annotations": {
+ "required": [
+ "compute.routes.insert"
+ ]
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "tags": {
+ "type": "array",
+ "description": "A list of instance tags to which this route applies.",
+ "items": {
+ "type": "string"
+ },
+ "annotations": {
+ "required": [
+ "compute.routes.insert"
+ ]
+ }
+ },
+ "warnings": {
+ "type": "array",
+ "description": "If potential misconfigurations are detected for this route, this field will be populated with warning messages.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ }
+ },
+ "RouteList": {
+ "id": "RouteList",
+ "type": "object",
+ "description": "Contains a list of route resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The route resources.",
+ "items": {
+ "$ref": "Route"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#routeList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "Scheduling": {
+ "id": "Scheduling",
+ "type": "object",
+ "description": "Scheduling options for an Instance.",
+ "properties": {
+ "automaticRestart": {
+ "type": "boolean",
+ "description": "Whether the Instance should be automatically restarted whenever it is terminated by Compute Engine (not terminated by user)."
+ },
+ "onHostMaintenance": {
+ "type": "string",
+ "description": "How the instance should behave when the host machine undergoes maintenance that may temporarily impact instance performance.",
+ "enum": [
+ "MIGRATE",
+ "TERMINATE"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "SerialPortOutput": {
+ "id": "SerialPortOutput",
+ "type": "object",
+ "description": "An instance serial console output.",
+ "properties": {
+ "contents": {
+ "type": "string",
+ "description": "The contents of the console output."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#serialPortOutput"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ }
+ }
+ },
+ "ServiceAccount": {
+ "id": "ServiceAccount",
+ "type": "object",
+ "description": "A service account.",
+ "properties": {
+ "email": {
+ "type": "string",
+ "description": "Email address of the service account."
+ },
+ "scopes": {
+ "type": "array",
+ "description": "The list of scopes to be made available for this service account.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "Snapshot": {
+ "id": "Snapshot",
+ "type": "object",
+ "description": "A persistent disk snapshot resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "diskSizeGb": {
+ "type": "string",
+ "description": "Size of the persistent disk snapshot, specified in GB (output only).",
+ "format": "int64"
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#snapshot"
+ },
+ "licenses": {
+ "type": "array",
+ "description": "Public visible licenses.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "sourceDisk": {
+ "type": "string",
+ "description": "The source disk used to create this snapshot."
+ },
+ "sourceDiskId": {
+ "type": "string",
+ "description": "The 'id' value of the disk used to create this snapshot. This value may be used to determine whether the snapshot was taken from the current or a previous instance of a given disk name."
+ },
+ "status": {
+ "type": "string",
+ "description": "The status of the persistent disk snapshot (output only).",
+ "enum": [
+ "CREATING",
+ "DELETING",
+ "FAILED",
+ "READY",
+ "UPLOADING"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "storageBytes": {
+ "type": "string",
+ "description": "A size of the the storage used by the snapshot. As snapshots share storage this number is expected to change with snapshot creation/deletion.",
+ "format": "int64"
+ },
+ "storageBytesStatus": {
+ "type": "string",
+ "description": "An indicator whether storageBytes is in a stable state, or it is being adjusted as a result of shared storage reallocation.",
+ "enum": [
+ "UPDATING",
+ "UP_TO_DATE"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "SnapshotList": {
+ "id": "SnapshotList",
+ "type": "object",
+ "description": "Contains a list of persistent disk snapshot resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The persistent snapshot resources.",
+ "items": {
+ "$ref": "Snapshot"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#snapshotList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "Tags": {
+ "id": "Tags",
+ "type": "object",
+ "description": "A set of instance tags.",
+ "properties": {
+ "fingerprint": {
+ "type": "string",
+ "description": "Fingerprint of this resource. A hash of the tags stored in this object. This field is used optimistic locking. An up-to-date tags fingerprint must be provided in order to modify tags.",
+ "format": "byte"
+ },
+ "items": {
+ "type": "array",
+ "description": "An array of tags. Each tag must be 1-63 characters long, and comply with RFC1035.",
+ "items": {
+ "type": "string"
+ }
+ }
+ }
+ },
+ "TargetHttpProxy": {
+ "id": "TargetHttpProxy",
+ "type": "object",
+ "description": "A TargetHttpProxy resource. This resource defines an HTTP proxy.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#targetHttpProxy"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "URL to the UrlMap resource that defines the mapping from URL to the BackendService."
+ }
+ }
+ },
+ "TargetHttpProxyList": {
+ "id": "TargetHttpProxyList",
+ "type": "object",
+ "description": "Contains a list of TargetHttpProxy resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The TargetHttpProxy resources.",
+ "items": {
+ "$ref": "TargetHttpProxy"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetHttpProxyList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "TargetInstance": {
+ "id": "TargetInstance",
+ "type": "object",
+ "description": "A TargetInstance resource. This resource defines an endpoint VM that terminates traffic of certain protocols.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "instance": {
+ "type": "string",
+ "description": "The URL to the instance that terminates the relevant traffic."
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#targetInstance"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "natPolicy": {
+ "type": "string",
+ "description": "NAT option controlling how IPs are NAT'ed to the VM. Currently only NO_NAT (default value) is supported.",
+ "enum": [
+ "NO_NAT"
+ ],
+ "enumDescriptions": [
+ ""
+ ]
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "zone": {
+ "type": "string",
+ "description": "URL of the zone where the target instance resides (output only)."
+ }
+ }
+ },
+ "TargetInstanceAggregatedList": {
+ "id": "TargetInstanceAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped target instance lists.",
+ "additionalProperties": {
+ "$ref": "TargetInstancesScopedList",
+ "description": "Name of the scope containing this set of target instances."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetInstanceAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "TargetInstanceList": {
+ "id": "TargetInstanceList",
+ "type": "object",
+ "description": "Contains a list of TargetInstance resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The TargetInstance resources.",
+ "items": {
+ "$ref": "TargetInstance"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetInstanceList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "TargetInstancesScopedList": {
+ "id": "TargetInstancesScopedList",
+ "type": "object",
+ "properties": {
+ "targetInstances": {
+ "type": "array",
+ "description": "List of target instances contained in this scope.",
+ "items": {
+ "$ref": "TargetInstance"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of addresses when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "TargetPool": {
+ "id": "TargetPool",
+ "type": "object",
+ "description": "A TargetPool resource. This resource defines a pool of VMs, associated HttpHealthCheck resources, and the fallback TargetPool.",
+ "properties": {
+ "backupPool": {
+ "type": "string",
+ "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool, and its 'failoverRatio' field is properly set to a value between [0, 1].\n\n'backupPool' and 'failoverRatio' together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below 'failoverRatio', traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' and 'backupPool' are not set, or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "failoverRatio": {
+ "type": "number",
+ "description": "This field is applicable only when the containing target pool is serving a forwarding rule as the primary pool (i.e., not as a backup pool to some other target pool). The value of the field must be in [0, 1].\n\nIf set, 'backupPool' must also be set. They together define the fallback behavior of the primary target pool: if the ratio of the healthy VMs in the primary pool is at or below this number, traffic arriving at the load-balanced IP will be directed to the backup pool.\n\nIn case where 'failoverRatio' is not set or all the VMs in the backup pool are unhealthy, the traffic will be directed back to the primary pool in the \"force\" mode, where traffic will be spread to the healthy VMs with the best effort, or to all VMs when no VM is healthy.",
+ "format": "float"
+ },
+ "healthChecks": {
+ "type": "array",
+ "description": "A list of URLs to the HttpHealthCheck resource. A member VM in this pool is considered healthy if and only if all specified health checks pass. An empty list means all member VMs will be considered healthy at all times.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "instances": {
+ "type": "array",
+ "description": "A list of resource URLs to the member VMs serving this pool. They must live in zones contained in the same region as this pool.",
+ "items": {
+ "type": "string"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#targetPool"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "region": {
+ "type": "string",
+ "description": "URL of the region where the target pool resides (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "sessionAffinity": {
+ "type": "string",
+ "description": "Session affinity option, must be one of the following values: 'NONE': Connections from the same client IP may go to any VM in the pool; 'CLIENT_IP': Connections from the same client IP will go to the same VM in the pool while that VM remains healthy. 'CLIENT_IP_PROTO': Connections from the same client IP with the same IP protocol will go to the same VM in the pool while that VM remains healthy.",
+ "enum": [
+ "CLIENT_IP",
+ "CLIENT_IP_PROTO",
+ "NONE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "TargetPoolAggregatedList": {
+ "id": "TargetPoolAggregatedList",
+ "type": "object",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "object",
+ "description": "A map of scoped target pool lists.",
+ "additionalProperties": {
+ "$ref": "TargetPoolsScopedList",
+ "description": "Name of the scope containing this set of target pools."
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetPoolAggregatedList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "TargetPoolInstanceHealth": {
+ "id": "TargetPoolInstanceHealth",
+ "type": "object",
+ "properties": {
+ "healthStatus": {
+ "type": "array",
+ "items": {
+ "$ref": "HealthStatus"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetPoolInstanceHealth"
+ }
+ }
+ },
+ "TargetPoolList": {
+ "id": "TargetPoolList",
+ "type": "object",
+ "description": "Contains a list of TargetPool resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The TargetPool resources.",
+ "items": {
+ "$ref": "TargetPool"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#targetPoolList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "TargetPoolsAddHealthCheckRequest": {
+ "id": "TargetPoolsAddHealthCheckRequest",
+ "type": "object",
+ "properties": {
+ "healthChecks": {
+ "type": "array",
+ "description": "Health check URLs to be added to targetPool.",
+ "items": {
+ "$ref": "HealthCheckReference"
+ }
+ }
+ }
+ },
+ "TargetPoolsAddInstanceRequest": {
+ "id": "TargetPoolsAddInstanceRequest",
+ "type": "object",
+ "properties": {
+ "instances": {
+ "type": "array",
+ "description": "URLs of the instances to be added to targetPool.",
+ "items": {
+ "$ref": "InstanceReference"
+ }
+ }
+ }
+ },
+ "TargetPoolsRemoveHealthCheckRequest": {
+ "id": "TargetPoolsRemoveHealthCheckRequest",
+ "type": "object",
+ "properties": {
+ "healthChecks": {
+ "type": "array",
+ "description": "Health check URLs to be removed from targetPool.",
+ "items": {
+ "$ref": "HealthCheckReference"
+ }
+ }
+ }
+ },
+ "TargetPoolsRemoveInstanceRequest": {
+ "id": "TargetPoolsRemoveInstanceRequest",
+ "type": "object",
+ "properties": {
+ "instances": {
+ "type": "array",
+ "description": "URLs of the instances to be removed from targetPool.",
+ "items": {
+ "$ref": "InstanceReference"
+ }
+ }
+ }
+ },
+ "TargetPoolsScopedList": {
+ "id": "TargetPoolsScopedList",
+ "type": "object",
+ "properties": {
+ "targetPools": {
+ "type": "array",
+ "description": "List of target pools contained in this scope.",
+ "items": {
+ "$ref": "TargetPool"
+ }
+ },
+ "warning": {
+ "type": "object",
+ "description": "Informational warning which replaces the list of addresses when the list is empty.",
+ "properties": {
+ "code": {
+ "type": "string",
+ "description": "The warning type identifier for this warning.",
+ "enum": [
+ "DEPRECATED_RESOURCE_USED",
+ "DISK_SIZE_LARGER_THAN_IMAGE_SIZE",
+ "INJECTED_KERNELS_DEPRECATED",
+ "NEXT_HOP_ADDRESS_NOT_ASSIGNED",
+ "NEXT_HOP_CANNOT_IP_FORWARD",
+ "NEXT_HOP_INSTANCE_NOT_FOUND",
+ "NEXT_HOP_INSTANCE_NOT_ON_NETWORK",
+ "NEXT_HOP_NOT_RUNNING",
+ "NO_RESULTS_ON_PAGE",
+ "REQUIRED_TOS_AGREEMENT",
+ "RESOURCE_NOT_DELETED",
+ "UNREACHABLE"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "data": {
+ "type": "array",
+ "description": "Metadata for this warning in 'key: value' format.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "key": {
+ "type": "string",
+ "description": "A key for the warning data."
+ },
+ "value": {
+ "type": "string",
+ "description": "A warning data value corresponding to the key."
+ }
+ }
+ }
+ },
+ "message": {
+ "type": "string",
+ "description": "Optional human-readable details for this warning."
+ }
+ }
+ }
+ }
+ },
+ "TargetReference": {
+ "id": "TargetReference",
+ "type": "object",
+ "properties": {
+ "target": {
+ "type": "string"
+ }
+ }
+ },
+ "TestFailure": {
+ "id": "TestFailure",
+ "type": "object",
+ "properties": {
+ "actualService": {
+ "type": "string"
+ },
+ "expectedService": {
+ "type": "string"
+ },
+ "host": {
+ "type": "string"
+ },
+ "path": {
+ "type": "string"
+ }
+ }
+ },
+ "UrlMap": {
+ "id": "UrlMap",
+ "type": "object",
+ "description": "A UrlMap resource. This resource defines the mapping from URL to the BackendService resource, based on the \"longest-match\" of the URL's host and path.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "defaultService": {
+ "type": "string",
+ "description": "The URL of the BackendService resource if none of the hostRules match."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional textual description of the resource; provided by the client when the resource is created."
+ },
+ "fingerprint": {
+ "type": "string",
+ "description": "Fingerprint of this resource. A hash of the contents stored in this object. This field is used in optimistic locking. This field will be ignored when inserting a UrlMap. An up-to-date fingerprint must be provided in order to update the UrlMap.",
+ "format": "byte"
+ },
+ "hostRules": {
+ "type": "array",
+ "description": "The list of HostRules to use against the URL.",
+ "items": {
+ "$ref": "HostRule"
+ }
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#urlMap"
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource; provided by the client when the resource is created. The name must be 1-63 characters long, and comply with RFC1035.",
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?"
+ },
+ "pathMatchers": {
+ "type": "array",
+ "description": "The list of named PathMatchers to use against the URL.",
+ "items": {
+ "$ref": "PathMatcher"
+ }
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "tests": {
+ "type": "array",
+ "description": "The list of expected URL mappings. Request to update this UrlMap will succeed only if all of the test cases pass.",
+ "items": {
+ "$ref": "UrlMapTest"
+ }
+ }
+ }
+ },
+ "UrlMapList": {
+ "id": "UrlMapList",
+ "type": "object",
+ "description": "Contains a list of UrlMap resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The UrlMap resources.",
+ "items": {
+ "$ref": "UrlMap"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#urlMapList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ },
+ "UrlMapReference": {
+ "id": "UrlMapReference",
+ "type": "object",
+ "properties": {
+ "urlMap": {
+ "type": "string"
+ }
+ }
+ },
+ "UrlMapTest": {
+ "id": "UrlMapTest",
+ "type": "object",
+ "description": "Message for the expected URL mappings.",
+ "properties": {
+ "description": {
+ "type": "string",
+ "description": "Description of this test case."
+ },
+ "host": {
+ "type": "string",
+ "description": "Host portion of the URL."
+ },
+ "path": {
+ "type": "string",
+ "description": "Path portion of the URL."
+ },
+ "service": {
+ "type": "string",
+ "description": "Expected BackendService resource the given URL should be mapped to."
+ }
+ }
+ },
+ "UrlMapValidationResult": {
+ "id": "UrlMapValidationResult",
+ "type": "object",
+ "description": "Message representing the validation result for a UrlMap.",
+ "properties": {
+ "loadErrors": {
+ "type": "array",
+ "items": {
+ "type": "string"
+ }
+ },
+ "loadSucceeded": {
+ "type": "boolean",
+ "description": "Whether the given UrlMap can be successfully loaded. If false, 'loadErrors' indicates the reasons."
+ },
+ "testFailures": {
+ "type": "array",
+ "items": {
+ "$ref": "TestFailure"
+ }
+ },
+ "testPassed": {
+ "type": "boolean",
+ "description": "If successfully loaded, this field indicates whether the test passed. If false, 'testFailures' indicates the reasons for failure."
+ }
+ }
+ },
+ "UrlMapsValidateRequest": {
+ "id": "UrlMapsValidateRequest",
+ "type": "object",
+ "properties": {
+ "resource": {
+ "$ref": "UrlMap",
+ "description": "Content of the UrlMap to be validated."
+ }
+ }
+ },
+ "UrlMapsValidateResponse": {
+ "id": "UrlMapsValidateResponse",
+ "type": "object",
+ "properties": {
+ "result": {
+ "$ref": "UrlMapValidationResult"
+ }
+ }
+ },
+ "UsageExportLocation": {
+ "id": "UsageExportLocation",
+ "type": "object",
+ "description": "The location in Cloud Storage and naming method of the daily usage report. Contains bucket_name and report_name prefix.",
+ "properties": {
+ "bucketName": {
+ "type": "string",
+ "description": "The name of an existing bucket in Cloud Storage where the usage report object is stored. The Google Service Account is granted write access to this bucket. This is simply the bucket name, with no \"gs://\" or \"https://storage.googleapis.com/\" in front of it."
+ },
+ "reportNamePrefix": {
+ "type": "string",
+ "description": "An optional prefix for the name of the usage report object stored in bucket_name. If not supplied, defaults to \"usage_\". The report is stored as a CSV file named _gce_.csv. where is the day of the usage according to Pacific Time. The prefix should conform to Cloud Storage object naming conventions."
+ }
+ }
+ },
+ "Zone": {
+ "id": "Zone",
+ "type": "object",
+ "description": "A zone resource.",
+ "properties": {
+ "creationTimestamp": {
+ "type": "string",
+ "description": "Creation timestamp in RFC3339 text format (output only)."
+ },
+ "deprecated": {
+ "$ref": "DeprecationStatus",
+ "description": "The deprecation status associated with this zone."
+ },
+ "description": {
+ "type": "string",
+ "description": "Textual description of the resource."
+ },
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only).",
+ "format": "uint64"
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of the resource.",
+ "default": "compute#zone"
+ },
+ "maintenanceWindows": {
+ "type": "array",
+ "description": "Scheduled maintenance windows for the zone. When the zone is in a maintenance window, all resources which reside in the zone will be unavailable.",
+ "items": {
+ "type": "object",
+ "properties": {
+ "beginTime": {
+ "type": "string",
+ "description": "Begin time of the maintenance window, in RFC 3339 format."
+ },
+ "description": {
+ "type": "string",
+ "description": "Textual description of the maintenance window."
+ },
+ "endTime": {
+ "type": "string",
+ "description": "End time of the maintenance window, in RFC 3339 format."
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the maintenance window."
+ }
+ }
+ }
+ },
+ "name": {
+ "type": "string",
+ "description": "Name of the resource."
+ },
+ "region": {
+ "type": "string",
+ "description": "Full URL reference to the region which hosts the zone (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for the resource (output only)."
+ },
+ "status": {
+ "type": "string",
+ "description": "Status of the zone. \"UP\" or \"DOWN\".",
+ "enum": [
+ "DOWN",
+ "UP"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ }
+ }
+ },
+ "ZoneList": {
+ "id": "ZoneList",
+ "type": "object",
+ "description": "Contains a list of zone resources.",
+ "properties": {
+ "id": {
+ "type": "string",
+ "description": "Unique identifier for the resource; defined by the server (output only)."
+ },
+ "items": {
+ "type": "array",
+ "description": "The zone resources.",
+ "items": {
+ "$ref": "Zone"
+ }
+ },
+ "kind": {
+ "type": "string",
+ "description": "Type of resource.",
+ "default": "compute#zoneList"
+ },
+ "nextPageToken": {
+ "type": "string",
+ "description": "A token used to continue a truncated list request (output only)."
+ },
+ "selfLink": {
+ "type": "string",
+ "description": "Server defined URL for this resource (output only)."
+ }
+ }
+ }
+ },
+ "resources": {
+ "addresses": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.addresses.aggregatedList",
+ "path": "{project}/aggregated/addresses",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of addresses grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "AddressAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "delete": {
+ "id": "compute.addresses.delete",
+ "path": "{project}/regions/{region}/addresses/{address}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified address resource.",
+ "parameters": {
+ "address": {
+ "type": "string",
+ "description": "Name of the address resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "address"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.addresses.get",
+ "path": "{project}/regions/{region}/addresses/{address}",
+ "httpMethod": "GET",
+ "description": "Returns the specified address resource.",
+ "parameters": {
+ "address": {
+ "type": "string",
+ "description": "Name of the address resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "address"
+ ],
+ "response": {
+ "$ref": "Address"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.addresses.insert",
+ "path": "{project}/regions/{region}/addresses",
+ "httpMethod": "POST",
+ "description": "Creates an address resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "request": {
+ "$ref": "Address"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.addresses.list",
+ "path": "{project}/regions/{region}/addresses",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of address resources contained within the specified region.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "response": {
+ "$ref": "AddressList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "backendServices": {
+ "methods": {
+ "delete": {
+ "id": "compute.backendServices.delete",
+ "path": "{project}/global/backendServices/{backendService}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified BackendService resource.",
+ "parameters": {
+ "backendService": {
+ "type": "string",
+ "description": "Name of the BackendService resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendService"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.backendServices.get",
+ "path": "{project}/global/backendServices/{backendService}",
+ "httpMethod": "GET",
+ "description": "Returns the specified BackendService resource.",
+ "parameters": {
+ "backendService": {
+ "type": "string",
+ "description": "Name of the BackendService resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendService"
+ ],
+ "response": {
+ "$ref": "BackendService"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "getHealth": {
+ "id": "compute.backendServices.getHealth",
+ "path": "{project}/global/backendServices/{backendService}/getHealth",
+ "httpMethod": "POST",
+ "description": "Gets the most recent health check results for this BackendService.",
+ "parameters": {
+ "backendService": {
+ "type": "string",
+ "description": "Name of the BackendService resource to which the queried instance belongs.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendService"
+ ],
+ "request": {
+ "$ref": "ResourceGroupReference"
+ },
+ "response": {
+ "$ref": "BackendServiceGroupHealth"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.backendServices.insert",
+ "path": "{project}/global/backendServices",
+ "httpMethod": "POST",
+ "description": "Creates a BackendService resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "BackendService"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.backendServices.list",
+ "path": "{project}/global/backendServices",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of BackendService resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "BackendServiceList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "patch": {
+ "id": "compute.backendServices.patch",
+ "path": "{project}/global/backendServices/{backendService}",
+ "httpMethod": "PATCH",
+ "description": "Update the entire content of the BackendService resource. This method supports patch semantics.",
+ "parameters": {
+ "backendService": {
+ "type": "string",
+ "description": "Name of the BackendService resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendService"
+ ],
+ "request": {
+ "$ref": "BackendService"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "update": {
+ "id": "compute.backendServices.update",
+ "path": "{project}/global/backendServices/{backendService}",
+ "httpMethod": "PUT",
+ "description": "Update the entire content of the BackendService resource.",
+ "parameters": {
+ "backendService": {
+ "type": "string",
+ "description": "Name of the BackendService resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "backendService"
+ ],
+ "request": {
+ "$ref": "BackendService"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "diskTypes": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.diskTypes.aggregatedList",
+ "path": "{project}/aggregated/diskTypes",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of disk type resources grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "DiskTypeAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "get": {
+ "id": "compute.diskTypes.get",
+ "path": "{project}/zones/{zone}/diskTypes/{diskType}",
+ "httpMethod": "GET",
+ "description": "Returns the specified disk type resource.",
+ "parameters": {
+ "diskType": {
+ "type": "string",
+ "description": "Name of the disk type resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "diskType"
+ ],
+ "response": {
+ "$ref": "DiskType"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.diskTypes.list",
+ "path": "{project}/zones/{zone}/diskTypes",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of disk type resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "DiskTypeList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "disks": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.disks.aggregatedList",
+ "path": "{project}/aggregated/disks",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of disks grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "DiskAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "createSnapshot": {
+ "id": "compute.disks.createSnapshot",
+ "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot",
+ "httpMethod": "POST",
+ "parameters": {
+ "disk": {
+ "type": "string",
+ "description": "Name of the persistent disk resource to snapshot.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "disk"
+ ],
+ "request": {
+ "$ref": "Snapshot"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "delete": {
+ "id": "compute.disks.delete",
+ "path": "{project}/zones/{zone}/disks/{disk}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified persistent disk resource.",
+ "parameters": {
+ "disk": {
+ "type": "string",
+ "description": "Name of the persistent disk resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "disk"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.disks.get",
+ "path": "{project}/zones/{zone}/disks/{disk}",
+ "httpMethod": "GET",
+ "description": "Returns the specified persistent disk resource.",
+ "parameters": {
+ "disk": {
+ "type": "string",
+ "description": "Name of the persistent disk resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "disk"
+ ],
+ "response": {
+ "$ref": "Disk"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.disks.insert",
+ "path": "{project}/zones/{zone}/disks",
+ "httpMethod": "POST",
+ "description": "Creates a persistent disk resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "sourceImage": {
+ "type": "string",
+ "description": "Optional. Source image to restore onto a disk.",
+ "location": "query"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "request": {
+ "$ref": "Disk"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.disks.list",
+ "path": "{project}/zones/{zone}/disks",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of persistent disk resources contained within the specified zone.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "DiskList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "firewalls": {
+ "methods": {
+ "delete": {
+ "id": "compute.firewalls.delete",
+ "path": "{project}/global/firewalls/{firewall}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified firewall resource.",
+ "parameters": {
+ "firewall": {
+ "type": "string",
+ "description": "Name of the firewall resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "firewall"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.firewalls.get",
+ "path": "{project}/global/firewalls/{firewall}",
+ "httpMethod": "GET",
+ "description": "Returns the specified firewall resource.",
+ "parameters": {
+ "firewall": {
+ "type": "string",
+ "description": "Name of the firewall resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "firewall"
+ ],
+ "response": {
+ "$ref": "Firewall"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.firewalls.insert",
+ "path": "{project}/global/firewalls",
+ "httpMethod": "POST",
+ "description": "Creates a firewall resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Firewall"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.firewalls.list",
+ "path": "{project}/global/firewalls",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of firewall resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "FirewallList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "patch": {
+ "id": "compute.firewalls.patch",
+ "path": "{project}/global/firewalls/{firewall}",
+ "httpMethod": "PATCH",
+ "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.",
+ "parameters": {
+ "firewall": {
+ "type": "string",
+ "description": "Name of the firewall resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "firewall"
+ ],
+ "request": {
+ "$ref": "Firewall"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "update": {
+ "id": "compute.firewalls.update",
+ "path": "{project}/global/firewalls/{firewall}",
+ "httpMethod": "PUT",
+ "description": "Updates the specified firewall resource with the data included in the request.",
+ "parameters": {
+ "firewall": {
+ "type": "string",
+ "description": "Name of the firewall resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "firewall"
+ ],
+ "request": {
+ "$ref": "Firewall"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "forwardingRules": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.forwardingRules.aggregatedList",
+ "path": "{project}/aggregated/forwardingRules",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of forwarding rules grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "ForwardingRuleAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "delete": {
+ "id": "compute.forwardingRules.delete",
+ "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified ForwardingRule resource.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "forwardingRule"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.forwardingRules.get",
+ "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
+ "httpMethod": "GET",
+ "description": "Returns the specified ForwardingRule resource.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "forwardingRule"
+ ],
+ "response": {
+ "$ref": "ForwardingRule"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.forwardingRules.insert",
+ "path": "{project}/regions/{region}/forwardingRules",
+ "httpMethod": "POST",
+ "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "request": {
+ "$ref": "ForwardingRule"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.forwardingRules.list",
+ "path": "{project}/regions/{region}/forwardingRules",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "response": {
+ "$ref": "ForwardingRuleList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "setTarget": {
+ "id": "compute.forwardingRules.setTarget",
+ "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget",
+ "httpMethod": "POST",
+ "description": "Changes target url for forwarding rule.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource in which target is to be set.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "forwardingRule"
+ ],
+ "request": {
+ "$ref": "TargetReference"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "globalAddresses": {
+ "methods": {
+ "delete": {
+ "id": "compute.globalAddresses.delete",
+ "path": "{project}/global/addresses/{address}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified address resource.",
+ "parameters": {
+ "address": {
+ "type": "string",
+ "description": "Name of the address resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "address"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.globalAddresses.get",
+ "path": "{project}/global/addresses/{address}",
+ "httpMethod": "GET",
+ "description": "Returns the specified address resource.",
+ "parameters": {
+ "address": {
+ "type": "string",
+ "description": "Name of the address resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "address"
+ ],
+ "response": {
+ "$ref": "Address"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.globalAddresses.insert",
+ "path": "{project}/global/addresses",
+ "httpMethod": "POST",
+ "description": "Creates an address resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Address"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.globalAddresses.list",
+ "path": "{project}/global/addresses",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of global address resources.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "AddressList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "globalForwardingRules": {
+ "methods": {
+ "delete": {
+ "id": "compute.globalForwardingRules.delete",
+ "path": "{project}/global/forwardingRules/{forwardingRule}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified ForwardingRule resource.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "forwardingRule"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.globalForwardingRules.get",
+ "path": "{project}/global/forwardingRules/{forwardingRule}",
+ "httpMethod": "GET",
+ "description": "Returns the specified ForwardingRule resource.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "forwardingRule"
+ ],
+ "response": {
+ "$ref": "ForwardingRule"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.globalForwardingRules.insert",
+ "path": "{project}/global/forwardingRules",
+ "httpMethod": "POST",
+ "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "ForwardingRule"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.globalForwardingRules.list",
+ "path": "{project}/global/forwardingRules",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of ForwardingRule resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "ForwardingRuleList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "setTarget": {
+ "id": "compute.globalForwardingRules.setTarget",
+ "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget",
+ "httpMethod": "POST",
+ "description": "Changes target url for forwarding rule.",
+ "parameters": {
+ "forwardingRule": {
+ "type": "string",
+ "description": "Name of the ForwardingRule resource in which target is to be set.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "forwardingRule"
+ ],
+ "request": {
+ "$ref": "TargetReference"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "globalOperations": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.globalOperations.aggregatedList",
+ "path": "{project}/aggregated/operations",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of all operations grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "OperationAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "delete": {
+ "id": "compute.globalOperations.delete",
+ "path": "{project}/global/operations/{operation}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "operation"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.globalOperations.get",
+ "path": "{project}/global/operations/{operation}",
+ "httpMethod": "GET",
+ "description": "Retrieves the specified operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "operation"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.globalOperations.list",
+ "path": "{project}/global/operations",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of operation resources contained within the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "OperationList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "httpHealthChecks": {
+ "methods": {
+ "delete": {
+ "id": "compute.httpHealthChecks.delete",
+ "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified HttpHealthCheck resource.",
+ "parameters": {
+ "httpHealthCheck": {
+ "type": "string",
+ "description": "Name of the HttpHealthCheck resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "httpHealthCheck"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.httpHealthChecks.get",
+ "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ "httpMethod": "GET",
+ "description": "Returns the specified HttpHealthCheck resource.",
+ "parameters": {
+ "httpHealthCheck": {
+ "type": "string",
+ "description": "Name of the HttpHealthCheck resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "httpHealthCheck"
+ ],
+ "response": {
+ "$ref": "HttpHealthCheck"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.httpHealthChecks.insert",
+ "path": "{project}/global/httpHealthChecks",
+ "httpMethod": "POST",
+ "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "HttpHealthCheck"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.httpHealthChecks.list",
+ "path": "{project}/global/httpHealthChecks",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "HttpHealthCheckList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "patch": {
+ "id": "compute.httpHealthChecks.patch",
+ "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ "httpMethod": "PATCH",
+ "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.",
+ "parameters": {
+ "httpHealthCheck": {
+ "type": "string",
+ "description": "Name of the HttpHealthCheck resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "httpHealthCheck"
+ ],
+ "request": {
+ "$ref": "HttpHealthCheck"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "update": {
+ "id": "compute.httpHealthChecks.update",
+ "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ "httpMethod": "PUT",
+ "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.",
+ "parameters": {
+ "httpHealthCheck": {
+ "type": "string",
+ "description": "Name of the HttpHealthCheck resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "httpHealthCheck"
+ ],
+ "request": {
+ "$ref": "HttpHealthCheck"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "images": {
+ "methods": {
+ "delete": {
+ "id": "compute.images.delete",
+ "path": "{project}/global/images/{image}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified image resource.",
+ "parameters": {
+ "image": {
+ "type": "string",
+ "description": "Name of the image resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "image"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "deprecate": {
+ "id": "compute.images.deprecate",
+ "path": "{project}/global/images/{image}/deprecate",
+ "httpMethod": "POST",
+ "description": "Sets the deprecation status of an image. If no message body is given, clears the deprecation status instead.",
+ "parameters": {
+ "image": {
+ "type": "string",
+ "description": "Image name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "image"
+ ],
+ "request": {
+ "$ref": "DeprecationStatus"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.images.get",
+ "path": "{project}/global/images/{image}",
+ "httpMethod": "GET",
+ "description": "Returns the specified image resource.",
+ "parameters": {
+ "image": {
+ "type": "string",
+ "description": "Name of the image resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "image"
+ ],
+ "response": {
+ "$ref": "Image"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.images.insert",
+ "path": "{project}/global/images",
+ "httpMethod": "POST",
+ "description": "Creates an image resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Image"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ },
+ "list": {
+ "id": "compute.images.list",
+ "path": "{project}/global/images",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of image resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "ImageList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "instanceTemplates": {
+ "methods": {
+ "delete": {
+ "id": "compute.instanceTemplates.delete",
+ "path": "{project}/global/instanceTemplates/{instanceTemplate}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified instance template resource.",
+ "parameters": {
+ "instanceTemplate": {
+ "type": "string",
+ "description": "Name of the instance template resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "instanceTemplate"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.instanceTemplates.get",
+ "path": "{project}/global/instanceTemplates/{instanceTemplate}",
+ "httpMethod": "GET",
+ "description": "Returns the specified instance template resource.",
+ "parameters": {
+ "instanceTemplate": {
+ "type": "string",
+ "description": "Name of the instance template resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "instanceTemplate"
+ ],
+ "response": {
+ "$ref": "InstanceTemplate"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.instanceTemplates.insert",
+ "path": "{project}/global/instanceTemplates",
+ "httpMethod": "POST",
+ "description": "Creates an instance template resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "InstanceTemplate"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.instanceTemplates.list",
+ "path": "{project}/global/instanceTemplates",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of instance template resources contained within the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "InstanceTemplateList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "instances": {
+ "methods": {
+ "addAccessConfig": {
+ "id": "compute.instances.addAccessConfig",
+ "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig",
+ "httpMethod": "POST",
+ "description": "Adds an access config to an instance's network interface.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "networkInterface": {
+ "type": "string",
+ "description": "Network interface name.",
+ "required": true,
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance",
+ "networkInterface"
+ ],
+ "request": {
+ "$ref": "AccessConfig"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "aggregatedList": {
+ "id": "compute.instances.aggregatedList",
+ "path": "{project}/aggregated/instances",
+ "httpMethod": "GET",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "InstanceAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "attachDisk": {
+ "id": "compute.instances.attachDisk",
+ "path": "{project}/zones/{zone}/instances/{instance}/attachDisk",
+ "httpMethod": "POST",
+ "description": "Attaches a disk resource to an instance.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "request": {
+ "$ref": "AttachedDisk"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "delete": {
+ "id": "compute.instances.delete",
+ "path": "{project}/zones/{zone}/instances/{instance}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified instance resource.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "deleteAccessConfig": {
+ "id": "compute.instances.deleteAccessConfig",
+ "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig",
+ "httpMethod": "POST",
+ "description": "Deletes an access config from an instance's network interface.",
+ "parameters": {
+ "accessConfig": {
+ "type": "string",
+ "description": "Access config name.",
+ "required": true,
+ "location": "query"
+ },
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "networkInterface": {
+ "type": "string",
+ "description": "Network interface name.",
+ "required": true,
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance",
+ "accessConfig",
+ "networkInterface"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "detachDisk": {
+ "id": "compute.instances.detachDisk",
+ "path": "{project}/zones/{zone}/instances/{instance}/detachDisk",
+ "httpMethod": "POST",
+ "description": "Detaches a disk from an instance.",
+ "parameters": {
+ "deviceName": {
+ "type": "string",
+ "description": "Disk device name to detach.",
+ "required": true,
+ "pattern": "\\w[\\w.-]{0,254}",
+ "location": "query"
+ },
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance",
+ "deviceName"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.instances.get",
+ "path": "{project}/zones/{zone}/instances/{instance}",
+ "httpMethod": "GET",
+ "description": "Returns the specified instance resource.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "response": {
+ "$ref": "Instance"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "getSerialPortOutput": {
+ "id": "compute.instances.getSerialPortOutput",
+ "path": "{project}/zones/{zone}/instances/{instance}/serialPort",
+ "httpMethod": "GET",
+ "description": "Returns the specified instance's serial port output.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "response": {
+ "$ref": "SerialPortOutput"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.instances.insert",
+ "path": "{project}/zones/{zone}/instances",
+ "httpMethod": "POST",
+ "description": "Creates an instance resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "request": {
+ "$ref": "Instance"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.instances.list",
+ "path": "{project}/zones/{zone}/instances",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of instance resources contained within the specified zone.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "InstanceList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "reset": {
+ "id": "compute.instances.reset",
+ "path": "{project}/zones/{zone}/instances/{instance}/reset",
+ "httpMethod": "POST",
+ "description": "Performs a hard reset on the instance.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setDiskAutoDelete": {
+ "id": "compute.instances.setDiskAutoDelete",
+ "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete",
+ "httpMethod": "POST",
+ "description": "Sets the auto-delete flag for a disk attached to an instance.",
+ "parameters": {
+ "autoDelete": {
+ "type": "boolean",
+ "description": "Whether to auto-delete the disk when the instance is deleted.",
+ "required": true,
+ "location": "query"
+ },
+ "deviceName": {
+ "type": "string",
+ "description": "Disk device name to modify.",
+ "required": true,
+ "pattern": "\\w[\\w.-]{0,254}",
+ "location": "query"
+ },
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance",
+ "autoDelete",
+ "deviceName"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setMetadata": {
+ "id": "compute.instances.setMetadata",
+ "path": "{project}/zones/{zone}/instances/{instance}/setMetadata",
+ "httpMethod": "POST",
+ "description": "Sets metadata for the specified instance to the data included in the request.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "request": {
+ "$ref": "Metadata"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setScheduling": {
+ "id": "compute.instances.setScheduling",
+ "path": "{project}/zones/{zone}/instances/{instance}/setScheduling",
+ "httpMethod": "POST",
+ "description": "Sets an instance's scheduling options.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Instance name.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Project name.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "request": {
+ "$ref": "Scheduling"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setTags": {
+ "id": "compute.instances.setTags",
+ "path": "{project}/zones/{zone}/instances/{instance}/setTags",
+ "httpMethod": "POST",
+ "description": "Sets tags for the specified instance to the data included in the request.",
+ "parameters": {
+ "instance": {
+ "type": "string",
+ "description": "Name of the instance scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "instance"
+ ],
+ "request": {
+ "$ref": "Tags"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "licenses": {
+ "methods": {
+ "get": {
+ "id": "compute.licenses.get",
+ "path": "{project}/global/licenses/{license}",
+ "httpMethod": "GET",
+ "description": "Returns the specified license resource.",
+ "parameters": {
+ "license": {
+ "type": "string",
+ "description": "Name of the license resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "license"
+ ],
+ "response": {
+ "$ref": "License"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "machineTypes": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.machineTypes.aggregatedList",
+ "path": "{project}/aggregated/machineTypes",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of machine type resources grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "MachineTypeAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "get": {
+ "id": "compute.machineTypes.get",
+ "path": "{project}/zones/{zone}/machineTypes/{machineType}",
+ "httpMethod": "GET",
+ "description": "Returns the specified machine type resource.",
+ "parameters": {
+ "machineType": {
+ "type": "string",
+ "description": "Name of the machine type resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "machineType"
+ ],
+ "response": {
+ "$ref": "MachineType"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.machineTypes.list",
+ "path": "{project}/zones/{zone}/machineTypes",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of machine type resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "MachineTypeList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "networks": {
+ "methods": {
+ "delete": {
+ "id": "compute.networks.delete",
+ "path": "{project}/global/networks/{network}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified network resource.",
+ "parameters": {
+ "network": {
+ "type": "string",
+ "description": "Name of the network resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "network"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.networks.get",
+ "path": "{project}/global/networks/{network}",
+ "httpMethod": "GET",
+ "description": "Returns the specified network resource.",
+ "parameters": {
+ "network": {
+ "type": "string",
+ "description": "Name of the network resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "network"
+ ],
+ "response": {
+ "$ref": "Network"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.networks.insert",
+ "path": "{project}/global/networks",
+ "httpMethod": "POST",
+ "description": "Creates a network resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Network"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.networks.list",
+ "path": "{project}/global/networks",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of network resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "NetworkList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "projects": {
+ "methods": {
+ "get": {
+ "id": "compute.projects.get",
+ "path": "{project}",
+ "httpMethod": "GET",
+ "description": "Returns the specified project resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project resource to retrieve.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "Project"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "setCommonInstanceMetadata": {
+ "id": "compute.projects.setCommonInstanceMetadata",
+ "path": "{project}/setCommonInstanceMetadata",
+ "httpMethod": "POST",
+ "description": "Sets metadata common to all instances within the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Metadata"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setUsageExportBucket": {
+ "id": "compute.projects.setUsageExportBucket",
+ "path": "{project}/setUsageExportBucket",
+ "httpMethod": "POST",
+ "description": "Sets the usage export location.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "UsageExportLocation"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/devstorage.full_control",
+ "https://www.googleapis.com/auth/devstorage.read_only",
+ "https://www.googleapis.com/auth/devstorage.read_write"
+ ]
+ }
+ }
+ },
+ "regionOperations": {
+ "methods": {
+ "delete": {
+ "id": "compute.regionOperations.delete",
+ "path": "{project}/regions/{region}/operations/{operation}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified region-specific operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "operation"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.regionOperations.get",
+ "path": "{project}/regions/{region}/operations/{operation}",
+ "httpMethod": "GET",
+ "description": "Retrieves the specified region-specific operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "operation"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.regionOperations.list",
+ "path": "{project}/regions/{region}/operations",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of operation resources contained within the specified region.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "response": {
+ "$ref": "OperationList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "regions": {
+ "methods": {
+ "get": {
+ "id": "compute.regions.get",
+ "path": "{project}/regions/{region}",
+ "httpMethod": "GET",
+ "description": "Returns the specified region resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "response": {
+ "$ref": "Region"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.regions.list",
+ "path": "{project}/regions",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of region resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "RegionList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "routes": {
+ "methods": {
+ "delete": {
+ "id": "compute.routes.delete",
+ "path": "{project}/global/routes/{route}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified route resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "route": {
+ "type": "string",
+ "description": "Name of the route resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "route"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.routes.get",
+ "path": "{project}/global/routes/{route}",
+ "httpMethod": "GET",
+ "description": "Returns the specified route resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "route": {
+ "type": "string",
+ "description": "Name of the route resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "route"
+ ],
+ "response": {
+ "$ref": "Route"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.routes.insert",
+ "path": "{project}/global/routes",
+ "httpMethod": "POST",
+ "description": "Creates a route resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "Route"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.routes.list",
+ "path": "{project}/global/routes",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of route resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "RouteList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "snapshots": {
+ "methods": {
+ "delete": {
+ "id": "compute.snapshots.delete",
+ "path": "{project}/global/snapshots/{snapshot}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified persistent disk snapshot resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "snapshot": {
+ "type": "string",
+ "description": "Name of the persistent disk snapshot resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "snapshot"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.snapshots.get",
+ "path": "{project}/global/snapshots/{snapshot}",
+ "httpMethod": "GET",
+ "description": "Returns the specified persistent disk snapshot resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "snapshot": {
+ "type": "string",
+ "description": "Name of the persistent disk snapshot resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "snapshot"
+ ],
+ "response": {
+ "$ref": "Snapshot"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.snapshots.list",
+ "path": "{project}/global/snapshots",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of persistent disk snapshot resources contained within the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "SnapshotList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "targetHttpProxies": {
+ "methods": {
+ "delete": {
+ "id": "compute.targetHttpProxies.delete",
+ "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified TargetHttpProxy resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "targetHttpProxy": {
+ "type": "string",
+ "description": "Name of the TargetHttpProxy resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "targetHttpProxy"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.targetHttpProxies.get",
+ "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
+ "httpMethod": "GET",
+ "description": "Returns the specified TargetHttpProxy resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "targetHttpProxy": {
+ "type": "string",
+ "description": "Name of the TargetHttpProxy resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "targetHttpProxy"
+ ],
+ "response": {
+ "$ref": "TargetHttpProxy"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.targetHttpProxies.insert",
+ "path": "{project}/global/targetHttpProxies",
+ "httpMethod": "POST",
+ "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "TargetHttpProxy"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.targetHttpProxies.list",
+ "path": "{project}/global/targetHttpProxies",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "TargetHttpProxyList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "setUrlMap": {
+ "id": "compute.targetHttpProxies.setUrlMap",
+ "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap",
+ "httpMethod": "POST",
+ "description": "Changes the URL map for TargetHttpProxy.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "targetHttpProxy": {
+ "type": "string",
+ "description": "Name of the TargetHttpProxy resource whose URL map is to be set.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "targetHttpProxy"
+ ],
+ "request": {
+ "$ref": "UrlMapReference"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "targetInstances": {
+ "methods": {
+ "aggregatedList": {
+ "id": "compute.targetInstances.aggregatedList",
+ "path": "{project}/aggregated/targetInstances",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of target instances grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "TargetInstanceAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "delete": {
+ "id": "compute.targetInstances.delete",
+ "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified TargetInstance resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "targetInstance": {
+ "type": "string",
+ "description": "Name of the TargetInstance resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "targetInstance"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.targetInstances.get",
+ "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
+ "httpMethod": "GET",
+ "description": "Returns the specified TargetInstance resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "targetInstance": {
+ "type": "string",
+ "description": "Name of the TargetInstance resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "targetInstance"
+ ],
+ "response": {
+ "$ref": "TargetInstance"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.targetInstances.insert",
+ "path": "{project}/zones/{zone}/targetInstances",
+ "httpMethod": "POST",
+ "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "request": {
+ "$ref": "TargetInstance"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.targetInstances.list",
+ "path": "{project}/zones/{zone}/targetInstances",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "TargetInstanceList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "targetPools": {
+ "methods": {
+ "addHealthCheck": {
+ "id": "compute.targetPools.addHealthCheck",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck",
+ "httpMethod": "POST",
+ "description": "Adds health check URL to targetPool.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to which health_check_url is to be added.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "TargetPoolsAddHealthCheckRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "addInstance": {
+ "id": "compute.targetPools.addInstance",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance",
+ "httpMethod": "POST",
+ "description": "Adds instance url to targetPool.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to which instance_url is to be added.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "TargetPoolsAddInstanceRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "aggregatedList": {
+ "id": "compute.targetPools.aggregatedList",
+ "path": "{project}/aggregated/targetPools",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of target pools grouped by scope.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "TargetPoolAggregatedList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "delete": {
+ "id": "compute.targetPools.delete",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified TargetPool resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.targetPools.get",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}",
+ "httpMethod": "GET",
+ "description": "Returns the specified TargetPool resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "response": {
+ "$ref": "TargetPool"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "getHealth": {
+ "id": "compute.targetPools.getHealth",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth",
+ "httpMethod": "POST",
+ "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to which the queried instance belongs.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "InstanceReference"
+ },
+ "response": {
+ "$ref": "TargetPoolInstanceHealth"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.targetPools.insert",
+ "path": "{project}/regions/{region}/targetPools",
+ "httpMethod": "POST",
+ "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "request": {
+ "$ref": "TargetPool"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.targetPools.list",
+ "path": "{project}/regions/{region}/targetPools",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of TargetPool resources available to the specified project and region.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region"
+ ],
+ "response": {
+ "$ref": "TargetPoolList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "removeHealthCheck": {
+ "id": "compute.targetPools.removeHealthCheck",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck",
+ "httpMethod": "POST",
+ "description": "Removes health check URL from targetPool.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to which health_check_url is to be removed.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "TargetPoolsRemoveHealthCheckRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "removeInstance": {
+ "id": "compute.targetPools.removeInstance",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance",
+ "httpMethod": "POST",
+ "description": "Removes instance URL from targetPool.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource to which instance_url is to be removed.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "TargetPoolsRemoveInstanceRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "setBackup": {
+ "id": "compute.targetPools.setBackup",
+ "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup",
+ "httpMethod": "POST",
+ "description": "Changes backup pool configurations.",
+ "parameters": {
+ "failoverRatio": {
+ "type": "number",
+ "description": "New failoverRatio value for the containing target pool.",
+ "format": "float",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "region": {
+ "type": "string",
+ "description": "Name of the region scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "targetPool": {
+ "type": "string",
+ "description": "Name of the TargetPool resource for which the backup is to be set.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "region",
+ "targetPool"
+ ],
+ "request": {
+ "$ref": "TargetReference"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "urlMaps": {
+ "methods": {
+ "delete": {
+ "id": "compute.urlMaps.delete",
+ "path": "{project}/global/urlMaps/{urlMap}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified UrlMap resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "Name of the UrlMap resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "urlMap"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.urlMaps.get",
+ "path": "{project}/global/urlMaps/{urlMap}",
+ "httpMethod": "GET",
+ "description": "Returns the specified UrlMap resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "Name of the UrlMap resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "urlMap"
+ ],
+ "response": {
+ "$ref": "UrlMap"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "insert": {
+ "id": "compute.urlMaps.insert",
+ "path": "{project}/global/urlMaps",
+ "httpMethod": "POST",
+ "description": "Creates a UrlMap resource in the specified project using the data included in the request.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "request": {
+ "$ref": "UrlMap"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "list": {
+ "id": "compute.urlMaps.list",
+ "path": "{project}/global/urlMaps",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of UrlMap resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "UrlMapList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "patch": {
+ "id": "compute.urlMaps.patch",
+ "path": "{project}/global/urlMaps/{urlMap}",
+ "httpMethod": "PATCH",
+ "description": "Update the entire content of the UrlMap resource. This method supports patch semantics.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "Name of the UrlMap resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "urlMap"
+ ],
+ "request": {
+ "$ref": "UrlMap"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "update": {
+ "id": "compute.urlMaps.update",
+ "path": "{project}/global/urlMaps/{urlMap}",
+ "httpMethod": "PUT",
+ "description": "Update the entire content of the UrlMap resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "Name of the UrlMap resource to update.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "urlMap"
+ ],
+ "request": {
+ "$ref": "UrlMap"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "validate": {
+ "id": "compute.urlMaps.validate",
+ "path": "{project}/global/urlMaps/{urlMap}/validate",
+ "httpMethod": "POST",
+ "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "urlMap": {
+ "type": "string",
+ "description": "Name of the UrlMap resource to be validated as.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "urlMap"
+ ],
+ "request": {
+ "$ref": "UrlMapsValidateRequest"
+ },
+ "response": {
+ "$ref": "UrlMapsValidateResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ }
+ }
+ },
+ "zoneOperations": {
+ "methods": {
+ "delete": {
+ "id": "compute.zoneOperations.delete",
+ "path": "{project}/zones/{zone}/operations/{operation}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the specified zone-specific operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to delete.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "operation"
+ ],
+ "scopes": [
+ "https://www.googleapis.com/auth/compute"
+ ]
+ },
+ "get": {
+ "id": "compute.zoneOperations.get",
+ "path": "{project}/zones/{zone}/operations/{operation}",
+ "httpMethod": "GET",
+ "description": "Retrieves the specified zone-specific operation resource.",
+ "parameters": {
+ "operation": {
+ "type": "string",
+ "description": "Name of the operation resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone",
+ "operation"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.zoneOperations.list",
+ "path": "{project}/zones/{zone}/operations",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of operation resources contained within the specified zone.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone scoping this request.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "OperationList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ },
+ "zones": {
+ "methods": {
+ "get": {
+ "id": "compute.zones.get",
+ "path": "{project}/zones/{zone}",
+ "httpMethod": "GET",
+ "description": "Returns the specified zone resource.",
+ "parameters": {
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ },
+ "zone": {
+ "type": "string",
+ "description": "Name of the zone resource to return.",
+ "required": true,
+ "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project",
+ "zone"
+ ],
+ "response": {
+ "$ref": "Zone"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ },
+ "list": {
+ "id": "compute.zones.list",
+ "path": "{project}/zones",
+ "httpMethod": "GET",
+ "description": "Retrieves the list of zone resources available to the specified project.",
+ "parameters": {
+ "filter": {
+ "type": "string",
+ "description": "Optional. Filter expression for filtering listed resources.",
+ "location": "query"
+ },
+ "maxResults": {
+ "type": "integer",
+ "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ "default": "500",
+ "format": "uint32",
+ "minimum": "0",
+ "maximum": "500",
+ "location": "query"
+ },
+ "pageToken": {
+ "type": "string",
+ "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ "location": "query"
+ },
+ "project": {
+ "type": "string",
+ "description": "Name of the project scoping this request.",
+ "required": true,
+ "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "project"
+ ],
+ "response": {
+ "$ref": "ZoneList"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/compute",
+ "https://www.googleapis.com/auth/compute.readonly"
+ ]
+ }
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-gen.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-gen.go
new file mode 100644
index 000000000000..7d193b56b844
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/compute/v1/v1/compute-gen.go
@@ -0,0 +1,16952 @@
+// Package compute provides access to the Compute Engine API.
+//
+// See https://developers.google.com/compute/docs/reference/latest/
+//
+// Usage example:
+//
+// import "code.google.com/p/google-api-go-client/compute/v1"
+// ...
+// computeService, err := compute.New(oauthHttpClient)
+package compute
+
+import (
+ "bytes"
+ "code.google.com/p/google-api-go-client/googleapi"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Always reference these packages, just in case the auto-generated code
+// below doesn't.
+var _ = bytes.NewBuffer
+var _ = strconv.Itoa
+var _ = fmt.Sprintf
+var _ = json.NewDecoder
+var _ = io.Copy
+var _ = url.Parse
+var _ = googleapi.Version
+var _ = errors.New
+var _ = strings.Replace
+
+const apiId = "compute:v1"
+const apiName = "compute"
+const apiVersion = "v1"
+const basePath = "https://www.googleapis.com/compute/v1/projects/"
+
+// OAuth2 scopes used by this API.
+const (
+ // View and manage your Google Compute Engine resources
+ ComputeScope = "https://www.googleapis.com/auth/compute"
+
+ // View your Google Compute Engine resources
+ ComputeReadonlyScope = "https://www.googleapis.com/auth/compute.readonly"
+
+ // Manage your data and permissions in Google Cloud Storage
+ DevstorageFull_controlScope = "https://www.googleapis.com/auth/devstorage.full_control"
+
+ // View your data in Google Cloud Storage
+ DevstorageRead_onlyScope = "https://www.googleapis.com/auth/devstorage.read_only"
+
+ // Manage your data in Google Cloud Storage
+ DevstorageRead_writeScope = "https://www.googleapis.com/auth/devstorage.read_write"
+)
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.Addresses = NewAddressesService(s)
+ s.BackendServices = NewBackendServicesService(s)
+ s.DiskTypes = NewDiskTypesService(s)
+ s.Disks = NewDisksService(s)
+ s.Firewalls = NewFirewallsService(s)
+ s.ForwardingRules = NewForwardingRulesService(s)
+ s.GlobalAddresses = NewGlobalAddressesService(s)
+ s.GlobalForwardingRules = NewGlobalForwardingRulesService(s)
+ s.GlobalOperations = NewGlobalOperationsService(s)
+ s.HttpHealthChecks = NewHttpHealthChecksService(s)
+ s.Images = NewImagesService(s)
+ s.InstanceTemplates = NewInstanceTemplatesService(s)
+ s.Instances = NewInstancesService(s)
+ s.Licenses = NewLicensesService(s)
+ s.MachineTypes = NewMachineTypesService(s)
+ s.Networks = NewNetworksService(s)
+ s.Projects = NewProjectsService(s)
+ s.RegionOperations = NewRegionOperationsService(s)
+ s.Regions = NewRegionsService(s)
+ s.Routes = NewRoutesService(s)
+ s.Snapshots = NewSnapshotsService(s)
+ s.TargetHttpProxies = NewTargetHttpProxiesService(s)
+ s.TargetInstances = NewTargetInstancesService(s)
+ s.TargetPools = NewTargetPoolsService(s)
+ s.UrlMaps = NewUrlMapsService(s)
+ s.ZoneOperations = NewZoneOperationsService(s)
+ s.Zones = NewZonesService(s)
+ return s, nil
+}
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+
+ Addresses *AddressesService
+
+ BackendServices *BackendServicesService
+
+ DiskTypes *DiskTypesService
+
+ Disks *DisksService
+
+ Firewalls *FirewallsService
+
+ ForwardingRules *ForwardingRulesService
+
+ GlobalAddresses *GlobalAddressesService
+
+ GlobalForwardingRules *GlobalForwardingRulesService
+
+ GlobalOperations *GlobalOperationsService
+
+ HttpHealthChecks *HttpHealthChecksService
+
+ Images *ImagesService
+
+ InstanceTemplates *InstanceTemplatesService
+
+ Instances *InstancesService
+
+ Licenses *LicensesService
+
+ MachineTypes *MachineTypesService
+
+ Networks *NetworksService
+
+ Projects *ProjectsService
+
+ RegionOperations *RegionOperationsService
+
+ Regions *RegionsService
+
+ Routes *RoutesService
+
+ Snapshots *SnapshotsService
+
+ TargetHttpProxies *TargetHttpProxiesService
+
+ TargetInstances *TargetInstancesService
+
+ TargetPools *TargetPoolsService
+
+ UrlMaps *UrlMapsService
+
+ ZoneOperations *ZoneOperationsService
+
+ Zones *ZonesService
+}
+
+func NewAddressesService(s *Service) *AddressesService {
+ rs := &AddressesService{s: s}
+ return rs
+}
+
+type AddressesService struct {
+ s *Service
+}
+
+func NewBackendServicesService(s *Service) *BackendServicesService {
+ rs := &BackendServicesService{s: s}
+ return rs
+}
+
+type BackendServicesService struct {
+ s *Service
+}
+
+func NewDiskTypesService(s *Service) *DiskTypesService {
+ rs := &DiskTypesService{s: s}
+ return rs
+}
+
+type DiskTypesService struct {
+ s *Service
+}
+
+func NewDisksService(s *Service) *DisksService {
+ rs := &DisksService{s: s}
+ return rs
+}
+
+type DisksService struct {
+ s *Service
+}
+
+func NewFirewallsService(s *Service) *FirewallsService {
+ rs := &FirewallsService{s: s}
+ return rs
+}
+
+type FirewallsService struct {
+ s *Service
+}
+
+func NewForwardingRulesService(s *Service) *ForwardingRulesService {
+ rs := &ForwardingRulesService{s: s}
+ return rs
+}
+
+type ForwardingRulesService struct {
+ s *Service
+}
+
+func NewGlobalAddressesService(s *Service) *GlobalAddressesService {
+ rs := &GlobalAddressesService{s: s}
+ return rs
+}
+
+type GlobalAddressesService struct {
+ s *Service
+}
+
+func NewGlobalForwardingRulesService(s *Service) *GlobalForwardingRulesService {
+ rs := &GlobalForwardingRulesService{s: s}
+ return rs
+}
+
+type GlobalForwardingRulesService struct {
+ s *Service
+}
+
+func NewGlobalOperationsService(s *Service) *GlobalOperationsService {
+ rs := &GlobalOperationsService{s: s}
+ return rs
+}
+
+type GlobalOperationsService struct {
+ s *Service
+}
+
+func NewHttpHealthChecksService(s *Service) *HttpHealthChecksService {
+ rs := &HttpHealthChecksService{s: s}
+ return rs
+}
+
+type HttpHealthChecksService struct {
+ s *Service
+}
+
+func NewImagesService(s *Service) *ImagesService {
+ rs := &ImagesService{s: s}
+ return rs
+}
+
+type ImagesService struct {
+ s *Service
+}
+
+func NewInstanceTemplatesService(s *Service) *InstanceTemplatesService {
+ rs := &InstanceTemplatesService{s: s}
+ return rs
+}
+
+type InstanceTemplatesService struct {
+ s *Service
+}
+
+func NewInstancesService(s *Service) *InstancesService {
+ rs := &InstancesService{s: s}
+ return rs
+}
+
+type InstancesService struct {
+ s *Service
+}
+
+func NewLicensesService(s *Service) *LicensesService {
+ rs := &LicensesService{s: s}
+ return rs
+}
+
+type LicensesService struct {
+ s *Service
+}
+
+func NewMachineTypesService(s *Service) *MachineTypesService {
+ rs := &MachineTypesService{s: s}
+ return rs
+}
+
+type MachineTypesService struct {
+ s *Service
+}
+
+func NewNetworksService(s *Service) *NetworksService {
+ rs := &NetworksService{s: s}
+ return rs
+}
+
+type NetworksService struct {
+ s *Service
+}
+
+func NewProjectsService(s *Service) *ProjectsService {
+ rs := &ProjectsService{s: s}
+ return rs
+}
+
+type ProjectsService struct {
+ s *Service
+}
+
+func NewRegionOperationsService(s *Service) *RegionOperationsService {
+ rs := &RegionOperationsService{s: s}
+ return rs
+}
+
+type RegionOperationsService struct {
+ s *Service
+}
+
+func NewRegionsService(s *Service) *RegionsService {
+ rs := &RegionsService{s: s}
+ return rs
+}
+
+type RegionsService struct {
+ s *Service
+}
+
+func NewRoutesService(s *Service) *RoutesService {
+ rs := &RoutesService{s: s}
+ return rs
+}
+
+type RoutesService struct {
+ s *Service
+}
+
+func NewSnapshotsService(s *Service) *SnapshotsService {
+ rs := &SnapshotsService{s: s}
+ return rs
+}
+
+type SnapshotsService struct {
+ s *Service
+}
+
+func NewTargetHttpProxiesService(s *Service) *TargetHttpProxiesService {
+ rs := &TargetHttpProxiesService{s: s}
+ return rs
+}
+
+type TargetHttpProxiesService struct {
+ s *Service
+}
+
+func NewTargetInstancesService(s *Service) *TargetInstancesService {
+ rs := &TargetInstancesService{s: s}
+ return rs
+}
+
+type TargetInstancesService struct {
+ s *Service
+}
+
+func NewTargetPoolsService(s *Service) *TargetPoolsService {
+ rs := &TargetPoolsService{s: s}
+ return rs
+}
+
+type TargetPoolsService struct {
+ s *Service
+}
+
+func NewUrlMapsService(s *Service) *UrlMapsService {
+ rs := &UrlMapsService{s: s}
+ return rs
+}
+
+type UrlMapsService struct {
+ s *Service
+}
+
+func NewZoneOperationsService(s *Service) *ZoneOperationsService {
+ rs := &ZoneOperationsService{s: s}
+ return rs
+}
+
+type ZoneOperationsService struct {
+ s *Service
+}
+
+func NewZonesService(s *Service) *ZonesService {
+ rs := &ZonesService{s: s}
+ return rs
+}
+
+type ZonesService struct {
+ s *Service
+}
+
+type AccessConfig struct {
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of this access configuration.
+ Name string `json:"name,omitempty"`
+
+ // NatIP: An external IP address associated with this instance. Specify
+ // an unused static IP address available to the project. If not
+ // specified, the external IP will be drawn from a shared ephemeral
+ // pool.
+ NatIP string `json:"natIP,omitempty"`
+
+ // Type: Type of configuration. Must be set to "ONE_TO_ONE_NAT". This
+ // configures port-for-port NAT to the internet.
+ Type string `json:"type,omitempty"`
+}
+
+type Address struct {
+ // Address: The IP address represented by this resource.
+ Address string `json:"address,omitempty"`
+
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // Region: URL of the region where the regional address resides (output
+ // only). This field is not applicable to global addresses.
+ Region string `json:"region,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Status: The status of the address (output only).
+ Status string `json:"status,omitempty"`
+
+ // Users: The resources that are using this address resource.
+ Users []string `json:"users,omitempty"`
+}
+
+type AddressAggregatedList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: A map of scoped address lists.
+ Items map[string]AddressesScopedList `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+type AddressList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The address resources.
+ Items []*Address `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+type AddressesScopedList struct {
+ // Addresses: List of addresses contained in this scope.
+ Addresses []*Address `json:"addresses,omitempty"`
+
+ // Warning: Informational warning which replaces the list of addresses
+ // when the list is empty.
+ Warning *AddressesScopedListWarning `json:"warning,omitempty"`
+}
+
+type AddressesScopedListWarning struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*AddressesScopedListWarningData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+type AddressesScopedListWarningData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
+type AttachedDisk struct {
+ // AutoDelete: Whether the disk will be auto-deleted when the instance
+ // is deleted (but not when the disk is detached from the instance).
+ AutoDelete bool `json:"autoDelete,omitempty"`
+
+ // Boot: Indicates that this is a boot disk. VM will use the first
+ // partition of the disk for its root filesystem.
+ Boot bool `json:"boot,omitempty"`
+
+ // DeviceName: Persistent disk only; must be unique within the instance
+ // when specified. This represents a unique device name that is
+ // reflected into the /dev/ tree of a Linux operating system running
+ // within the instance. If not specified, a default will be chosen by
+ // the system.
+ DeviceName string `json:"deviceName,omitempty"`
+
+ // Index: A zero-based index to assign to this disk, where 0 is reserved
+ // for the boot disk. If not specified, the server will choose an
+ // appropriate value (output only).
+ Index int64 `json:"index,omitempty"`
+
+ // InitializeParams: Initialization parameters.
+ InitializeParams *AttachedDiskInitializeParams `json:"initializeParams,omitempty"`
+
+ Interface string `json:"interface,omitempty"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Public visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Mode: The mode in which to attach this disk, either "READ_WRITE" or
+ // "READ_ONLY".
+ Mode string `json:"mode,omitempty"`
+
+ // Source: Persistent disk only; the URL of the persistent disk
+ // resource.
+ Source string `json:"source,omitempty"`
+
+ // Type: Type of the disk, either "SCRATCH" or "PERSISTENT". Note that
+ // persistent disks must be created before you can specify them here.
+ Type string `json:"type,omitempty"`
+}
+
+type AttachedDiskInitializeParams struct {
+ // DiskName: Name of the disk (when not provided defaults to the name of
+ // the instance).
+ DiskName string `json:"diskName,omitempty"`
+
+ // DiskSizeGb: Size of the disk in base-2 GB.
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+ // DiskType: URL of the disk type resource describing which disk type to
+ // use to create the disk; provided by the client when the disk is
+ // created.
+ DiskType string `json:"diskType,omitempty"`
+
+ // SourceImage: The source image used to create this disk.
+ SourceImage string `json:"sourceImage,omitempty"`
+}
+
+type Backend struct {
+ // BalancingMode: The balancing mode of this backend, default is
+ // UTILIZATION.
+ BalancingMode string `json:"balancingMode,omitempty"`
+
+ // CapacityScaler: The multiplier (a value between 0 and 1e6) of the max
+ // capacity (CPU or RPS, depending on 'balancingMode') the group should
+ // serve up to. 0 means the group is totally drained. Default value is
+ // 1. Valid range is [0, 1e6].
+ CapacityScaler float64 `json:"capacityScaler,omitempty"`
+
+ // Description: An optional textual description of the resource, which
+ // is provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Group: URL of a zonal Cloud Resource View resource. This resource
+ // view defines the list of instances that serve traffic. Member virtual
+ // machine instances from each resource view must live in the same zone
+ // as the resource view itself. No two backends in a backend service are
+ // allowed to use same Resource View resource.
+ Group string `json:"group,omitempty"`
+
+ // MaxRate: The max RPS of the group. Can be used with either balancing
+ // mode, but required if RATE mode. For RATE mode, either maxRate or
+ // maxRatePerInstance must be set.
+ MaxRate int64 `json:"maxRate,omitempty"`
+
+ // MaxRatePerInstance: The max RPS that a single backed instance can
+ // handle. This is used to calculate the capacity of the group. Can be
+ // used in either balancing mode. For RATE mode, either maxRate or
+ // maxRatePerInstance must be set.
+ MaxRatePerInstance float64 `json:"maxRatePerInstance,omitempty"`
+
+ // MaxUtilization: Used when 'balancingMode' is UTILIZATION. This ratio
+ // defines the CPU utilization target for the group. The default is 0.8.
+ // Valid range is [0, 1].
+ MaxUtilization float64 `json:"maxUtilization,omitempty"`
+}
+
+type BackendService struct {
+ // Backends: The list of backends that serve this BackendService.
+ Backends []*Backend `json:"backends,omitempty"`
+
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Fingerprint: Fingerprint of this resource. A hash of the contents
+ // stored in this object. This field is used in optimistic locking. This
+ // field will be ignored when inserting a BackendService. An up-to-date
+ // fingerprint must be provided in order to update the BackendService.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // HealthChecks: The list of URLs to the HttpHealthCheck resource for
+ // health checking this BackendService. Currently at most one health
+ // check can be specified, and a health check is required.
+ HealthChecks []string `json:"healthChecks,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // Port: Deprecated in favor of port_name. The TCP port to connect on
+ // the backend. The default value is 80.
+ Port int64 `json:"port,omitempty"`
+
+ // PortName: Name of backend port. The same name should appear in the
+ // resource views referenced by this service. Required.
+ PortName string `json:"portName,omitempty"`
+
+ Protocol string `json:"protocol,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // TimeoutSec: How many seconds to wait for the backend before
+ // considering it a failed request. Default is 30 seconds.
+ TimeoutSec int64 `json:"timeoutSec,omitempty"`
+}
+
+type BackendServiceGroupHealth struct {
+ HealthStatus []*HealthStatus `json:"healthStatus,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+}
+
+type BackendServiceList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The BackendService resources.
+ Items []*BackendService `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DeprecationStatus struct {
+ // Deleted: An optional RFC3339 timestamp on or after which the
+ // deprecation state of this resource will be changed to DELETED.
+ Deleted string `json:"deleted,omitempty"`
+
+ // Deprecated: An optional RFC3339 timestamp on or after which the
+ // deprecation state of this resource will be changed to DEPRECATED.
+ Deprecated string `json:"deprecated,omitempty"`
+
+ // Obsolete: An optional RFC3339 timestamp on or after which the
+ // deprecation state of this resource will be changed to OBSOLETE.
+ Obsolete string `json:"obsolete,omitempty"`
+
+ // Replacement: A URL of the suggested replacement for the deprecated
+ // resource. The deprecated resource and its replacement must be
+ // resources of the same kind.
+ Replacement string `json:"replacement,omitempty"`
+
+ // State: The deprecation state. Can be "DEPRECATED", "OBSOLETE", or
+ // "DELETED". Operations which create a new resource using a
+ // "DEPRECATED" resource will return successfully, but with a warning
+ // indicating the deprecated resource and recommending its replacement.
+ // New uses of "OBSOLETE" or "DELETED" resources will result in an
+ // error.
+ State string `json:"state,omitempty"`
+}
+
+type Disk struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Public visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // Options: Internal use only.
+ Options string `json:"options,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SizeGb: Size of the persistent disk, specified in GB. This parameter
+ // is optional when creating a disk from a disk image or a snapshot,
+ // otherwise it is required.
+ SizeGb int64 `json:"sizeGb,omitempty,string"`
+
+ // SourceImage: The source image used to create this disk.
+ SourceImage string `json:"sourceImage,omitempty"`
+
+ // SourceImageId: The 'id' value of the image used to create this disk.
+ // This value may be used to determine whether the disk was created from
+ // the current or a previous instance of a given image.
+ SourceImageId string `json:"sourceImageId,omitempty"`
+
+ // SourceSnapshot: The source snapshot used to create this disk.
+ SourceSnapshot string `json:"sourceSnapshot,omitempty"`
+
+ // SourceSnapshotId: The 'id' value of the snapshot used to create this
+ // disk. This value may be used to determine whether the disk was
+ // created from the current or a previous instance of a given disk
+ // snapshot.
+ SourceSnapshotId string `json:"sourceSnapshotId,omitempty"`
+
+ // Status: The status of disk creation (output only).
+ Status string `json:"status,omitempty"`
+
+ // Type: URL of the disk type resource describing which disk type to use
+ // to create the disk; provided by the client when the disk is created.
+ Type string `json:"type,omitempty"`
+
+ // Zone: URL of the zone where the disk resides (output only).
+ Zone string `json:"zone,omitempty"`
+}
+
+type DiskAggregatedList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A map of scoped disk lists.
+	Items map[string]DisksScopedList `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DiskList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The persistent disk resources.
+	Items []*Disk `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DiskType struct {
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// DefaultDiskSizeGb: Server defined default disk size in gb (output
+	// only).
+	DefaultDiskSizeGb int64 `json:"defaultDiskSizeGb,omitempty,string"`
+
+	// Deprecated: The deprecation status associated with this disk type.
+	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
+
+	// Description: An optional textual description of the resource.
+	Description string `json:"description,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the resource.
+	Name string `json:"name,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// ValidDiskSize: An optional textual description of the valid disk
+	// size, e.g., "10GB-10TB".
+	ValidDiskSize string `json:"validDiskSize,omitempty"`
+
+	// Zone: URL of the zone where the disk type resides (output only).
+	Zone string `json:"zone,omitempty"`
+}
+
+type DiskTypeAggregatedList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A map of scoped disk type lists.
+	Items map[string]DiskTypesScopedList `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DiskTypeList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The disk type resources.
+	Items []*DiskType `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type DiskTypesScopedList struct {
+	// DiskTypes: List of disk types contained in this scope.
+	DiskTypes []*DiskType `json:"diskTypes,omitempty"`
+
+	// Warning: Informational warning which replaces the list of disk types
+	// when the list is empty.
+	Warning *DiskTypesScopedListWarning `json:"warning,omitempty"`
+}
+
+type DiskTypesScopedListWarning struct {
+	// Code: The warning type identifier for this warning.
+	Code string `json:"code,omitempty"`
+
+	// Data: Metadata for this warning in 'key: value' format.
+	Data []*DiskTypesScopedListWarningData `json:"data,omitempty"`
+
+	// Message: Optional human-readable details for this warning.
+	Message string `json:"message,omitempty"`
+}
+
+type DiskTypesScopedListWarningData struct {
+	// Key: A key for the warning data.
+	Key string `json:"key,omitempty"`
+
+	// Value: A warning data value corresponding to the key.
+	Value string `json:"value,omitempty"`
+}
+
+type DisksScopedList struct {
+	// Disks: List of disks contained in this scope.
+	Disks []*Disk `json:"disks,omitempty"`
+
+	// Warning: Informational warning which replaces the list of disks when
+	// the list is empty.
+	Warning *DisksScopedListWarning `json:"warning,omitempty"`
+}
+
+type DisksScopedListWarning struct {
+	// Code: The warning type identifier for this warning.
+	Code string `json:"code,omitempty"`
+
+	// Data: Metadata for this warning in 'key: value' format.
+	Data []*DisksScopedListWarningData `json:"data,omitempty"`
+
+	// Message: Optional human-readable details for this warning.
+	Message string `json:"message,omitempty"`
+}
+
+type DisksScopedListWarningData struct {
+	// Key: A key for the warning data.
+	Key string `json:"key,omitempty"`
+
+	// Value: A warning data value corresponding to the key.
+	Value string `json:"value,omitempty"`
+}
+
+type Firewall struct {
+	// Allowed: The list of rules specified by this firewall. Each rule
+	// specifies a protocol and port-range tuple that describes a permitted
+	// connection.
+	Allowed []*FirewallAllowed `json:"allowed,omitempty"`
+
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Description: An optional textual description of the resource;
+	// provided by the client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// Network: URL of the network to which this firewall is applied;
+	// provided by the client when the firewall is created.
+	Network string `json:"network,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// SourceRanges: A list of IP address blocks expressed in CIDR format
+	// which this rule applies to. One or both of sourceRanges and
+	// sourceTags may be set; an inbound connection is allowed if either the
+	// range or the tag of the source matches.
+	SourceRanges []string `json:"sourceRanges,omitempty"`
+
+	// SourceTags: A list of instance tags which this rule applies to. One
+	// or both of sourceRanges and sourceTags may be set; an inbound
+	// connection is allowed if either the range or the tag of the source
+	// matches.
+	SourceTags []string `json:"sourceTags,omitempty"`
+
+	// TargetTags: A list of instance tags indicating sets of instances
+	// located on network which may make network connections as specified in
+	// allowed. If no targetTags are specified, the firewall rule applies to
+	// all instances on the specified network.
+	TargetTags []string `json:"targetTags,omitempty"`
+}
+
+type FirewallAllowed struct {
+	// IPProtocol: Required; this is the IP protocol that is allowed for
+	// this rule. This can either be one of the following well known
+	// protocol strings ["tcp", "udp", "icmp", "esp", "ah", "sctp"], or the
+	// IP protocol number.
+	IPProtocol string `json:"IPProtocol,omitempty"`
+
+	// Ports: An optional list of ports which are allowed. It is an error to
+	// specify this for any protocol that isn't UDP or TCP. Each entry must
+	// be either an integer or a range. If not specified, connections
+	// through any port are allowed.
+	//
+	// Example inputs include: ["22"],
+	// ["80","443"] and ["12345-12349"].
+	Ports []string `json:"ports,omitempty"`
+}
+
+type FirewallList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The firewall resources.
+	Items []*Firewall `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type ForwardingRule struct {
+	// IPAddress: Value of the reserved IP address that this forwarding rule
+	// is serving on behalf of. For global forwarding rules, the address
+	// must be a global IP; for regional forwarding rules, the address must
+	// live in the same region as the forwarding rule. If left empty
+	// (default value), an ephemeral IP from the same scope (global or
+	// regional) will be assigned.
+	IPAddress string `json:"IPAddress,omitempty"`
+
+	// IPProtocol: The IP protocol to which this rule applies, valid options
+	// are 'TCP', 'UDP', 'ESP', 'AH' or 'SCTP'.
+	IPProtocol string `json:"IPProtocol,omitempty"`
+
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Description: An optional textual description of the resource;
+	// provided by the client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// PortRange: Applicable only when 'IPProtocol' is 'TCP', 'UDP' or
+	// 'SCTP', only packets addressed to ports in the specified range will
+	// be forwarded to 'target'. If 'portRange' is left empty (default
+	// value), all ports are forwarded. Forwarding rules with the same
+	// [IPAddress, IPProtocol] pair must have disjoint port ranges.
+	PortRange string `json:"portRange,omitempty"`
+
+	// Region: URL of the region where the regional forwarding rule resides
+	// (output only). This field is not applicable to global forwarding
+	// rules.
+	Region string `json:"region,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// Target: The URL of the target resource to receive the matched
+	// traffic. For regional forwarding rules, this target must live in the
+	// same region as the forwarding rule. For global forwarding rules, this
+	// target must be a global TargetHttpProxy resource.
+	Target string `json:"target,omitempty"`
+}
+
+type ForwardingRuleAggregatedList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A map of scoped forwarding rule lists.
+	Items map[string]ForwardingRulesScopedList `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type ForwardingRuleList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The ForwardingRule resources.
+	Items []*ForwardingRule `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type ForwardingRulesScopedList struct {
+	// ForwardingRules: List of forwarding rules contained in this scope.
+	ForwardingRules []*ForwardingRule `json:"forwardingRules,omitempty"`
+
+	// Warning: Informational warning which replaces the list of forwarding
+	// rules when the list is empty.
+	Warning *ForwardingRulesScopedListWarning `json:"warning,omitempty"`
+}
+
+type ForwardingRulesScopedListWarning struct {
+	// Code: The warning type identifier for this warning.
+	Code string `json:"code,omitempty"`
+
+	// Data: Metadata for this warning in 'key: value' format.
+	Data []*ForwardingRulesScopedListWarningData `json:"data,omitempty"`
+
+	// Message: Optional human-readable details for this warning.
+	Message string `json:"message,omitempty"`
+}
+
+type ForwardingRulesScopedListWarningData struct {
+	// Key: A key for the warning data.
+	Key string `json:"key,omitempty"`
+
+	// Value: A warning data value corresponding to the key.
+	Value string `json:"value,omitempty"`
+}
+
+type HealthCheckReference struct {
+	HealthCheck string `json:"healthCheck,omitempty"`
+}
+
+type HealthStatus struct {
+	// HealthState: Health state of the instance.
+	HealthState string `json:"healthState,omitempty"`
+
+	// Instance: URL of the instance resource.
+	Instance string `json:"instance,omitempty"`
+
+	// IpAddress: The IP address represented by this resource.
+	IpAddress string `json:"ipAddress,omitempty"`
+
+	// Port: The port on the instance.
+	Port int64 `json:"port,omitempty"`
+}
+
+type HostRule struct {
+	Description string `json:"description,omitempty"`
+
+	// Hosts: The list of host patterns to match. They must be valid
+	// hostnames except that they may start with *. or *-. The * acts like a
+	// glob and will match any string of atoms (separated by .s and -s) to
+	// the left.
+	Hosts []string `json:"hosts,omitempty"`
+
+	// PathMatcher: The name of the PathMatcher to match the path portion of
+	// the URL, if this HostRule matches the URL's host portion.
+	PathMatcher string `json:"pathMatcher,omitempty"`
+}
+
+type HttpHealthCheck struct {
+	// CheckIntervalSec: How often (in seconds) to send a health check. The
+	// default value is 5 seconds.
+	CheckIntervalSec int64 `json:"checkIntervalSec,omitempty"`
+
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Description: An optional textual description of the resource;
+	// provided by the client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// HealthyThreshold: A so-far unhealthy VM will be marked healthy after
+	// this many consecutive successes. The default value is 2.
+	HealthyThreshold int64 `json:"healthyThreshold,omitempty"`
+
+	// Host: The value of the host header in the HTTP health check request.
+	// If left empty (default value), the public IP on behalf of which this
+	// health check is performed will be used.
+	Host string `json:"host,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// Port: The TCP port number for the HTTP health check request. The
+	// default value is 80.
+	Port int64 `json:"port,omitempty"`
+
+	// RequestPath: The request path of the HTTP health check request. The
+	// default value is "/".
+	RequestPath string `json:"requestPath,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// TimeoutSec: How long (in seconds) to wait before claiming failure.
+	// The default value is 5 seconds.
+	TimeoutSec int64 `json:"timeoutSec,omitempty"`
+
+	// UnhealthyThreshold: A so-far healthy VM will be marked unhealthy
+	// after this many consecutive failures. The default value is 2.
+	UnhealthyThreshold int64 `json:"unhealthyThreshold,omitempty"`
+}
+
+type HttpHealthCheckList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The HttpHealthCheck resources.
+	Items []*HttpHealthCheck `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type Image struct {
+	// ArchiveSizeBytes: Size of the image tar.gz archive stored in Google
+	// Cloud Storage (in bytes).
+	ArchiveSizeBytes int64 `json:"archiveSizeBytes,omitempty,string"`
+
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Deprecated: The deprecation status associated with this image.
+	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
+
+	// Description: Textual description of the resource; provided by the
+	// client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// DiskSizeGb: Size of the image when restored onto a disk (in GiB).
+	DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Licenses: Public visible licenses.
+	Licenses []string `json:"licenses,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// RawDisk: The raw disk image parameters.
+	RawDisk *ImageRawDisk `json:"rawDisk,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// SourceDisk: The source disk used to create this image.
+	SourceDisk string `json:"sourceDisk,omitempty"`
+
+	// SourceDiskId: The 'id' value of the disk used to create this image.
+	// This value may be used to determine whether the image was taken from
+	// the current or a previous instance of a given disk name.
+	SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+	// SourceType: Must be "RAW"; provided by the client when the disk image
+	// is created.
+	SourceType string `json:"sourceType,omitempty"`
+
+	// Status: Status of the image (output only). It will be one of the
+	// following READY - after image has been successfully created and is
+	// ready for use FAILED - if creating the image fails for some reason
+	// PENDING - the image creation is in progress An image can be used to
+	// create other resources such as instances only after the image has
+	// been successfully created and the status is set to READY.
+	Status string `json:"status,omitempty"`
+}
+
+type ImageRawDisk struct {
+	// ContainerType: The format used to encode and transmit the block
+	// device. Should be TAR. This is just a container and transmission
+	// format and not a runtime format. Provided by the client when the disk
+	// image is created.
+	ContainerType string `json:"containerType,omitempty"`
+
+	// Sha1Checksum: An optional SHA1 checksum of the disk image before
+	// unpackaging; provided by the client when the disk image is created.
+	Sha1Checksum string `json:"sha1Checksum,omitempty"`
+
+	// Source: The full Google Cloud Storage URL where the disk image is
+	// stored; provided by the client when the disk image is created.
+	Source string `json:"source,omitempty"`
+}
+
+type ImageList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The disk image resources.
+	Items []*Image `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type Instance struct {
+	// CanIpForward: Allows this instance to send packets with source IP
+	// addresses other than its own and receive packets with destination IP
+	// addresses other than its own. If this instance will be used as an IP
+	// gateway or it will be set as the next-hop in a Route resource, say
+	// true. If unsure, leave this set to false.
+	CanIpForward bool `json:"canIpForward,omitempty"`
+
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Description: An optional textual description of the resource;
+	// provided by the client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// Disks: Array of disks associated with this instance. Persistent disks
+	// must be created before you can assign them.
+	Disks []*AttachedDisk `json:"disks,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// MachineType: URL of the machine type resource describing which
+	// machine type to use to host the instance; provided by the client when
+	// the instance is created.
+	MachineType string `json:"machineType,omitempty"`
+
+	// Metadata: Metadata key/value pairs assigned to this instance.
+	// Consists of custom metadata or predefined keys; see Instance
+	// documentation for more information.
+	Metadata *Metadata `json:"metadata,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// NetworkInterfaces: Array of configurations for this interface. This
+	// specifies how this interface is configured to interact with other
+	// network services, such as connecting to the internet. Currently,
+	// ONE_TO_ONE_NAT is the only access config supported. If there are no
+	// accessConfigs specified, then this instance will have no external
+	// internet access.
+	NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
+
+	// Scheduling: Scheduling options for this instance.
+	Scheduling *Scheduling `json:"scheduling,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// ServiceAccounts: A list of service accounts each with specified
+	// scopes, for which access tokens are to be made available to the
+	// instance through metadata queries.
+	ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
+
+	// Status: Instance status. One of the following values: "PROVISIONING",
+	// "STAGING", "RUNNING", "STOPPING", "STOPPED", "TERMINATED" (output
+	// only).
+	Status string `json:"status,omitempty"`
+
+	// StatusMessage: An optional, human-readable explanation of the status
+	// (output only).
+	StatusMessage string `json:"statusMessage,omitempty"`
+
+	// Tags: A list of tags to be applied to this instance. Used to identify
+	// valid sources or targets for network firewalls. Provided by the
+	// client on instance creation. The tags can be later modified by the
+	// setTags method. Each tag within the list must comply with RFC1035.
+	Tags *Tags `json:"tags,omitempty"`
+
+	// Zone: URL of the zone where the instance resides (output only).
+	Zone string `json:"zone,omitempty"`
+}
+
+type InstanceAggregatedList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A map of scoped instance lists.
+	Items map[string]InstancesScopedList `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type InstanceList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A list of instance resources.
+	Items []*Instance `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type InstanceProperties struct {
+	// CanIpForward: Allows instances created based on this template to send
+	// packets with source IP addresses other than their own and receive
+	// packets with destination IP addresses other than their own. If these
+	// instances will be used as an IP gateway or it will be set as the
+	// next-hop in a Route resource, say true. If unsure, leave this set to
+	// false.
+	CanIpForward bool `json:"canIpForward,omitempty"`
+
+	// Description: An optional textual description for the instances
+	// created based on the instance template resource; provided by the
+	// client when the template is created.
+	Description string `json:"description,omitempty"`
+
+	// Disks: Array of disks associated with instance created based on this
+	// template.
+	Disks []*AttachedDisk `json:"disks,omitempty"`
+
+	// MachineType: Name of the machine type resource describing which
+	// machine type to use to host the instances created based on this
+	// template; provided by the client when the instance template is
+	// created.
+	MachineType string `json:"machineType,omitempty"`
+
+	// Metadata: Metadata key/value pairs assigned to instances created
+	// based on this template. Consists of custom metadata or predefined
+	// keys; see Instance documentation for more information.
+	Metadata *Metadata `json:"metadata,omitempty"`
+
+	// NetworkInterfaces: Array of configurations for this interface. This
+	// specifies how this interface is configured to interact with other
+	// network services, such as connecting to the internet. Currently,
+	// ONE_TO_ONE_NAT is the only access config supported. If there are no
+	// accessConfigs specified, then instances created based on
+	// this template will have no external internet access.
+	NetworkInterfaces []*NetworkInterface `json:"networkInterfaces,omitempty"`
+
+	// Scheduling: Scheduling options for the instances created based on
+	// this template.
+	Scheduling *Scheduling `json:"scheduling,omitempty"`
+
+	// ServiceAccounts: A list of service accounts each with specified
+	// scopes, for which access tokens are to be made available to the
+	// instances created based on this template, through metadata queries.
+	ServiceAccounts []*ServiceAccount `json:"serviceAccounts,omitempty"`
+
+	// Tags: A list of tags to be applied to the instances created based on
+	// this template used to identify valid sources or targets for network
+	// firewalls. Provided by the client on instance creation. The tags can
+	// be later modified by the setTags method. Each tag within the list
+	// must comply with RFC1035.
+	Tags *Tags `json:"tags,omitempty"`
+}
+
+type InstanceReference struct {
+	Instance string `json:"instance,omitempty"`
+}
+
+type InstanceTemplate struct {
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Description: An optional textual description of the instance template
+	// resource; provided by the client when the resource is created.
+	Description string `json:"description,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the instance template resource; provided by the client
+	// when the resource is created. The name must be 1-63 characters long,
+	// and comply with RFC1035
+	Name string `json:"name,omitempty"`
+
+	// Properties: The instance properties portion of this instance template
+	// resource.
+	Properties *InstanceProperties `json:"properties,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type InstanceTemplateList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A list of instance template resources.
+	Items []*InstanceTemplate `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type InstancesScopedList struct {
+	// Instances: List of instances contained in this scope.
+	Instances []*Instance `json:"instances,omitempty"`
+
+	// Warning: Informational warning which replaces the list of instances
+	// when the list is empty.
+	Warning *InstancesScopedListWarning `json:"warning,omitempty"`
+}
+
+type InstancesScopedListWarning struct {
+	// Code: The warning type identifier for this warning.
+	Code string `json:"code,omitempty"`
+
+	// Data: Metadata for this warning in 'key: value' format.
+	Data []*InstancesScopedListWarningData `json:"data,omitempty"`
+
+	// Message: Optional human-readable details for this warning.
+	Message string `json:"message,omitempty"`
+}
+
+type InstancesScopedListWarningData struct {
+	// Key: A key for the warning data.
+	Key string `json:"key,omitempty"`
+
+	// Value: A warning data value corresponding to the key.
+	Value string `json:"value,omitempty"`
+}
+
+type License struct {
+	// ChargesUseFee: If true, the customer will be charged license fee for
+	// running software that contains this license on an instance.
+	ChargesUseFee bool `json:"chargesUseFee,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// Name: Name of the resource; provided by the client when the resource
+	// is created. The name must be 1-63 characters long, and comply with
+	// RFC1035.
+	Name string `json:"name,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type MachineType struct {
+	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
+	// only).
+	CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+	// Deprecated: The deprecation status associated with this machine type.
+	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
+
+	// Description: An optional textual description of the resource.
+	Description string `json:"description,omitempty"`
+
+	// GuestCpus: Count of CPUs exposed to the instance.
+	GuestCpus int64 `json:"guestCpus,omitempty"`
+
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id uint64 `json:"id,omitempty,string"`
+
+	// ImageSpaceGb: Space allotted for the image, defined in GB.
+	ImageSpaceGb int64 `json:"imageSpaceGb,omitempty"`
+
+	// Kind: Type of the resource.
+	Kind string `json:"kind,omitempty"`
+
+	// MaximumPersistentDisks: Maximum persistent disks allowed.
+	MaximumPersistentDisks int64 `json:"maximumPersistentDisks,omitempty"`
+
+	// MaximumPersistentDisksSizeGb: Maximum total persistent disks size
+	// (GB) allowed.
+	MaximumPersistentDisksSizeGb int64 `json:"maximumPersistentDisksSizeGb,omitempty,string"`
+
+	// MemoryMb: Physical memory assigned to the instance, defined in MB.
+	MemoryMb int64 `json:"memoryMb,omitempty"`
+
+	// Name: Name of the resource.
+	Name string `json:"name,omitempty"`
+
+	// ScratchDisks: List of extended scratch disks assigned to the
+	// instance.
+	ScratchDisks []*MachineTypeScratchDisks `json:"scratchDisks,omitempty"`
+
+	// SelfLink: Server defined URL for the resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+
+	// Zone: URL of the zone where the machine type resides (output only).
+	Zone string `json:"zone,omitempty"`
+}
+
+type MachineTypeScratchDisks struct {
+	// DiskGb: Size of the scratch disk, defined in GB.
+	DiskGb int64 `json:"diskGb,omitempty"`
+}
+
+type MachineTypeAggregatedList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: A map of scoped machine type lists.
+	Items map[string]MachineTypesScopedList `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type MachineTypeList struct {
+	// Id: Unique identifier for the resource; defined by the server (output
+	// only).
+	Id string `json:"id,omitempty"`
+
+	// Items: The machine type resources.
+	Items []*MachineType `json:"items,omitempty"`
+
+	// Kind: Type of resource.
+	Kind string `json:"kind,omitempty"`
+
+	// NextPageToken: A token used to continue a truncated list request
+	// (output only).
+	NextPageToken string `json:"nextPageToken,omitempty"`
+
+	// SelfLink: Server defined URL for this resource (output only).
+	SelfLink string `json:"selfLink,omitempty"`
+}
+
+type MachineTypesScopedList struct {
+ // MachineTypes: List of machine types contained in this scope.
+ MachineTypes []*MachineType `json:"machineTypes,omitempty"`
+
+ // Warning: Informational warning which replaces the list of machine
+ // types when the list is empty.
+ Warning *MachineTypesScopedListWarning `json:"warning,omitempty"`
+}
+
+type MachineTypesScopedListWarning struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*MachineTypesScopedListWarningData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+type MachineTypesScopedListWarningData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
+// Metadata is a collection of key/value pairs made available to instances,
+// protected against concurrent modification by a fingerprint.
+type Metadata struct {
+ // Fingerprint: Fingerprint of this resource. A hash of the metadata's
+ // contents. This field is used for optimistic locking. An up-to-date
+ // metadata fingerprint must be provided in order to modify metadata.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // Items: Array of key/value pairs. The total size of all keys and
+ // values must be less than 512 KB.
+ Items []*MetadataItems `json:"items,omitempty"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+}
+
+// MetadataItems is a single key/value metadata entry.
+type MetadataItems struct {
+ // Key: Key for the metadata entry. Keys must conform to the following
+ // regexp: [a-zA-Z0-9-_]+, and be less than 128 bytes in length. This is
+ // reflected as part of a URL in the metadata server. Additionally, to
+ // avoid ambiguity, keys must not conflict with any other metadata keys
+ // for the project.
+ Key string `json:"key,omitempty"`
+
+ // Value: Value for the metadata entry. These are free-form strings, and
+ // only have meaning as interpreted by the image running in the
+ // instance. The only restriction placed on values is that their size
+ // must be less than or equal to 32768 bytes.
+ Value string `json:"value,omitempty"`
+}
+
+// Network represents a Compute Engine network resource with an internal
+// IPv4 address range and optional default gateway.
+type Network struct {
+ // IPv4Range: Required; The range of internal addresses that are legal
+ // on this network. This range is a CIDR specification, for example:
+ // 192.168.0.0/16. Provided by the client when the network is created.
+ IPv4Range string `json:"IPv4Range,omitempty"`
+
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // GatewayIPv4: An optional address that is used for default routing to
+ // other networks. This must be within the range specified by IPv4Range,
+ // and is typically the first usable address in that range. If not
+ // specified, the default value is the first usable address in
+ // IPv4Range.
+ GatewayIPv4 string `json:"gatewayIPv4,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// NetworkInterface describes a single network interface of an instance and
+// its access configurations.
+type NetworkInterface struct {
+ // AccessConfigs: Array of configurations for this interface. This
+ // specifies how this interface is configured to interact with other
+ // network services, such as connecting to the internet. Currently,
+ // ONE_TO_ONE_NAT is the only access config supported. If there are no
+ // accessConfigs specified, then this instance will have no external
+ // internet access.
+ AccessConfigs []*AccessConfig `json:"accessConfigs,omitempty"`
+
+ // Name: Name of the network interface, determined by the server; for
+ // network devices, these are e.g. eth0, eth1, etc. (output only).
+ Name string `json:"name,omitempty"`
+
+ // Network: URL of the network resource attached to this interface.
+ Network string `json:"network,omitempty"`
+
+ // NetworkIP: An optional IPV4 internal network address assigned to the
+ // instance for this network interface (output only).
+ NetworkIP string `json:"networkIP,omitempty"`
+}
+
+// NetworkList is a paginated list of network resources.
+type NetworkList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The network resources.
+ Items []*Network `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// Operation represents a long-running Compute Engine operation (insert,
+// update, delete, ...), including its status, timing, errors and warnings.
+type Operation struct {
+ // ClientOperationId: An optional identifier specified by the client
+ // when the mutation was initiated. Must be unique for all operation
+ // resources in the project (output only).
+ ClientOperationId string `json:"clientOperationId,omitempty"`
+
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // EndTime: The time that this operation was completed. This is in RFC
+ // 3339 format (output only).
+ EndTime string `json:"endTime,omitempty"`
+
+ // Error: If errors occurred during processing of this operation, this
+ // field will be populated (output only).
+ Error *OperationError `json:"error,omitempty"`
+
+ // HttpErrorMessage: If operation fails, the HTTP error message
+ // returned, e.g. NOT FOUND. (output only).
+ HttpErrorMessage string `json:"httpErrorMessage,omitempty"`
+
+ // HttpErrorStatusCode: If operation fails, the HTTP error status code
+ // returned, e.g. 404. (output only).
+ HttpErrorStatusCode int64 `json:"httpErrorStatusCode,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // InsertTime: The time that this operation was requested. This is in
+ // RFC 3339 format (output only).
+ InsertTime string `json:"insertTime,omitempty"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource (output only).
+ Name string `json:"name,omitempty"`
+
+ // OperationType: Type of the operation. Examples include "insert",
+ // "update", and "delete" (output only).
+ OperationType string `json:"operationType,omitempty"`
+
+ // Progress: An optional progress indicator that ranges from 0 to 100.
+ // There is no requirement that this be linear or support any
+ // granularity of operations. This should not be used to guess at when
+ // the operation will be complete. This number should be monotonically
+ // increasing as the operation progresses (output only).
+ Progress int64 `json:"progress,omitempty"`
+
+ // Region: URL of the region where the operation resides (output only).
+ Region string `json:"region,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // StartTime: The time that this operation was started by the server.
+ // This is in RFC 3339 format (output only).
+ StartTime string `json:"startTime,omitempty"`
+
+ // Status: Status of the operation. Can be one of the following:
+ // "PENDING", "RUNNING", or "DONE" (output only).
+ Status string `json:"status,omitempty"`
+
+ // StatusMessage: An optional textual description of the current status
+ // of the operation (output only).
+ StatusMessage string `json:"statusMessage,omitempty"`
+
+ // TargetId: Unique target id which identifies a particular incarnation
+ // of the target (output only).
+ TargetId uint64 `json:"targetId,omitempty,string"`
+
+ // TargetLink: URL of the resource the operation is mutating (output
+ // only).
+ TargetLink string `json:"targetLink,omitempty"`
+
+ // User: User who requested the operation, for example
+ // "user@example.com" (output only).
+ User string `json:"user,omitempty"`
+
+ // Warnings: If warning messages generated during processing of this
+ // operation, this field will be populated (output only).
+ Warnings []*OperationWarnings `json:"warnings,omitempty"`
+
+ // Zone: URL of the zone where the operation resides (output only).
+ Zone string `json:"zone,omitempty"`
+}
+
+// OperationError wraps the errors encountered by a failed operation.
+type OperationError struct {
+ // Errors: The array of errors encountered while processing this
+ // operation.
+ Errors []*OperationErrorErrors `json:"errors,omitempty"`
+}
+
+// OperationErrorErrors is a single error entry of an operation.
+type OperationErrorErrors struct {
+ // Code: The error type identifier for this error.
+ Code string `json:"code,omitempty"`
+
+ // Location: Indicates the field in the request which caused the error.
+ // This property is optional.
+ Location string `json:"location,omitempty"`
+
+ // Message: An optional, human-readable error message.
+ Message string `json:"message,omitempty"`
+}
+
+// OperationWarnings is a single warning entry of an operation.
+type OperationWarnings struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*OperationWarningsData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+// OperationWarningsData is one 'key: value' pair of warning metadata.
+type OperationWarningsData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
+// OperationAggregatedList is the response to an aggregated operation list
+// request, keyed by scope.
+type OperationAggregatedList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: A map of scoped operation lists.
+ Items map[string]OperationsScopedList `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// OperationList is a paginated list of operation resources.
+type OperationList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The operation resources.
+ Items []*Operation `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// OperationsScopedList holds the operations of a single scope within an
+// aggregated list response.
+type OperationsScopedList struct {
+ // Operations: List of operations contained in this scope.
+ Operations []*Operation `json:"operations,omitempty"`
+
+ // Warning: Informational warning which replaces the list of operations
+ // when the list is empty.
+ Warning *OperationsScopedListWarning `json:"warning,omitempty"`
+}
+
+// OperationsScopedListWarning is the warning returned in place of an empty
+// scoped operation list.
+type OperationsScopedListWarning struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*OperationsScopedListWarningData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+// OperationsScopedListWarningData is one 'key: value' pair of warning
+// metadata.
+type OperationsScopedListWarningData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
+// PathMatcher maps URL paths to BackendService resources for a UrlMap,
+// with a default service when no path rule matches.
+type PathMatcher struct {
+ // DefaultService: The URL to the BackendService resource. This will be
+ // used if none of the 'pathRules' defined by this PathMatcher is met by
+ // the URL's path portion.
+ DefaultService string `json:"defaultService,omitempty"`
+
+ // Description: presumably an optional textual description of this
+ // path matcher, as on sibling resources — not documented upstream.
+ Description string `json:"description,omitempty"`
+
+ // Name: The name to which this PathMatcher is referred by the HostRule.
+ Name string `json:"name,omitempty"`
+
+ // PathRules: The list of path rules.
+ PathRules []*PathRule `json:"pathRules,omitempty"`
+}
+
+// PathRule associates a set of URL path patterns with a BackendService.
+type PathRule struct {
+ // Paths: The list of path patterns to match. Each must start with / and
+ // the only place a * is allowed is at the end following a /. The string
+ // fed to the path matcher does not include any text after the first ?
+ // or #, and those chars are not allowed here.
+ Paths []string `json:"paths,omitempty"`
+
+ // Service: The URL of the BackendService resource if this rule is
+ // matched.
+ Service string `json:"service,omitempty"`
+}
+
+// Project represents a Compute Engine project resource, including its
+// shared instance metadata, quotas and usage-export settings.
+type Project struct {
+ // CommonInstanceMetadata: Metadata key/value pairs available to all
+ // instances contained in this project.
+ CommonInstanceMetadata *Metadata `json:"commonInstanceMetadata,omitempty"`
+
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource.
+ Name string `json:"name,omitempty"`
+
+ // Quotas: Quotas assigned to this project.
+ Quotas []*Quota `json:"quotas,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // UsageExportLocation: The location in Cloud Storage and naming method
+ // of the daily usage report.
+ UsageExportLocation *UsageExportLocation `json:"usageExportLocation,omitempty"`
+}
+
+// Quota reports the limit and current usage of a single quota metric.
+type Quota struct {
+ // Limit: Quota limit for this metric.
+ Limit float64 `json:"limit,omitempty"`
+
+ // Metric: Name of the quota metric.
+ Metric string `json:"metric,omitempty"`
+
+ // Usage: Current usage of this metric.
+ Usage float64 `json:"usage,omitempty"`
+}
+
+// Region represents a Compute Engine region resource, its status, quotas
+// and the zones it contains.
+type Region struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Deprecated: The deprecation status associated with this region.
+ Deprecated *DeprecationStatus `json:"deprecated,omitempty"`
+
+ // Description: Textual description of the resource.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource.
+ Name string `json:"name,omitempty"`
+
+ // Quotas: Quotas assigned to this region.
+ Quotas []*Quota `json:"quotas,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Status: Status of the region, "UP" or "DOWN".
+ Status string `json:"status,omitempty"`
+
+ // Zones: A list of zones homed in this region, in the form of resource
+ // URLs.
+ Zones []string `json:"zones,omitempty"`
+}
+
+// RegionList is a paginated list of region resources.
+type RegionList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The region resources.
+ Items []*Region `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// ResourceGroupReference refers to a resource view used by a backend
+// service.
+type ResourceGroupReference struct {
+ // Group: A URI referencing one of the resource views listed in the
+ // backend service.
+ Group string `json:"group,omitempty"`
+}
+
+// Route represents a Compute Engine route resource: which packets it
+// matches (destRange, tags) and which next hop handles them.
+type Route struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // DestRange: Which packets does this route apply to?
+ DestRange string `json:"destRange,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // Network: URL of the network to which this route is applied; provided
+ // by the client when the route is created.
+ Network string `json:"network,omitempty"`
+
+ // NextHopGateway: The URL to a gateway that should handle matching
+ // packets.
+ NextHopGateway string `json:"nextHopGateway,omitempty"`
+
+ // NextHopInstance: The URL to an instance that should handle matching
+ // packets.
+ NextHopInstance string `json:"nextHopInstance,omitempty"`
+
+ // NextHopIp: The network IP address of an instance that should handle
+ // matching packets.
+ NextHopIp string `json:"nextHopIp,omitempty"`
+
+ // NextHopNetwork: The URL of the local network if it should handle
+ // matching packets.
+ NextHopNetwork string `json:"nextHopNetwork,omitempty"`
+
+ // Priority: Breaks ties between Routes of equal specificity. Routes
+ // with smaller values win when tied with routes with larger values.
+ Priority int64 `json:"priority,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Tags: A list of instance tags to which this route applies.
+ Tags []string `json:"tags,omitempty"`
+
+ // Warnings: If potential misconfigurations are detected for this route,
+ // this field will be populated with warning messages.
+ Warnings []*RouteWarnings `json:"warnings,omitempty"`
+}
+
+// RouteWarnings is a single misconfiguration warning attached to a route.
+type RouteWarnings struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*RouteWarningsData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+// RouteWarningsData is one 'key: value' pair of warning metadata.
+type RouteWarningsData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
+// RouteList is a paginated list of route resources.
+type RouteList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The route resources.
+ Items []*Route `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// Scheduling holds instance scheduling options: restart behavior and host
+// maintenance policy.
+type Scheduling struct {
+ // AutomaticRestart: Whether the Instance should be automatically
+ // restarted whenever it is terminated by Compute Engine (not terminated
+ // by user).
+ AutomaticRestart bool `json:"automaticRestart,omitempty"`
+
+ // OnHostMaintenance: How the instance should behave when the host
+ // machine undergoes maintenance that may temporarily impact instance
+ // performance.
+ OnHostMaintenance string `json:"onHostMaintenance,omitempty"`
+}
+
+// SerialPortOutput carries an instance's serial console output.
+type SerialPortOutput struct {
+ // Contents: The contents of the console output.
+ Contents string `json:"contents,omitempty"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// ServiceAccount identifies a service account and the OAuth scopes exposed
+// to the instance for it.
+type ServiceAccount struct {
+ // Email: Email address of the service account.
+ Email string `json:"email,omitempty"`
+
+ // Scopes: The list of scopes to be made available for this service
+ // account.
+ Scopes []string `json:"scopes,omitempty"`
+}
+
+// Snapshot represents a persistent disk snapshot resource, including its
+// source disk and storage accounting.
+type Snapshot struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // DiskSizeGb: Size of the persistent disk snapshot, specified in GB
+ // (output only).
+ DiskSizeGb int64 `json:"diskSizeGb,omitempty,string"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Licenses: Public visible licenses.
+ Licenses []string `json:"licenses,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // SourceDisk: The source disk used to create this snapshot.
+ SourceDisk string `json:"sourceDisk,omitempty"`
+
+ // SourceDiskId: The 'id' value of the disk used to create this
+ // snapshot. This value may be used to determine whether the snapshot
+ // was taken from the current or a previous instance of a given disk
+ // name.
+ SourceDiskId string `json:"sourceDiskId,omitempty"`
+
+ // Status: The status of the persistent disk snapshot (output only).
+ Status string `json:"status,omitempty"`
+
+ // StorageBytes: A size of the storage used by the snapshot. As
+ // snapshots share storage this number is expected to change with
+ // snapshot creation/deletion.
+ StorageBytes int64 `json:"storageBytes,omitempty,string"`
+
+ // StorageBytesStatus: An indicator whether storageBytes is in a stable
+ // state, or it is being adjusted as a result of shared storage
+ // reallocation.
+ StorageBytesStatus string `json:"storageBytesStatus,omitempty"`
+}
+
+// SnapshotList is a paginated list of persistent disk snapshot resources.
+type SnapshotList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The persistent snapshot resources.
+ Items []*Snapshot `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// Tags is a fingerprint-protected set of instance tags.
+type Tags struct {
+ // Fingerprint: Fingerprint of this resource. A hash of the tags stored
+ // in this object. This field is used for optimistic locking. An
+ // up-to-date tags fingerprint must be provided in order to modify tags.
+ Fingerprint string `json:"fingerprint,omitempty"`
+
+ // Items: An array of tags. Each tag must be 1-63 characters long, and
+ // comply with RFC1035.
+ Items []string `json:"items,omitempty"`
+}
+
+// TargetHttpProxy represents a target HTTP proxy resource that routes
+// requests according to its UrlMap.
+type TargetHttpProxy struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // UrlMap: URL to the UrlMap resource that defines the mapping from URL
+ // to the BackendService.
+ UrlMap string `json:"urlMap,omitempty"`
+}
+
+// TargetHttpProxyList is a paginated list of TargetHttpProxy resources.
+type TargetHttpProxyList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The TargetHttpProxy resources.
+ Items []*TargetHttpProxy `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// TargetInstance represents a target instance resource: a single instance
+// that terminates forwarded traffic, with a NAT policy.
+type TargetInstance struct {
+ // CreationTimestamp: Creation timestamp in RFC3339 text format (output
+ // only).
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional textual description of the resource;
+ // provided by the client when the resource is created.
+ Description string `json:"description,omitempty"`
+
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id uint64 `json:"id,omitempty,string"`
+
+ // Instance: The URL to the instance that terminates the relevant
+ // traffic.
+ Instance string `json:"instance,omitempty"`
+
+ // Kind: Type of the resource.
+ Kind string `json:"kind,omitempty"`
+
+ // Name: Name of the resource; provided by the client when the resource
+ // is created. The name must be 1-63 characters long, and comply with
+ // RFC1035.
+ Name string `json:"name,omitempty"`
+
+ // NatPolicy: NAT option controlling how IPs are NAT'ed to the VM.
+ // Currently only NO_NAT (default value) is supported.
+ NatPolicy string `json:"natPolicy,omitempty"`
+
+ // SelfLink: Server defined URL for the resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+
+ // Zone: URL of the zone where the target instance resides (output
+ // only).
+ Zone string `json:"zone,omitempty"`
+}
+
+// TargetInstanceAggregatedList is the response to an aggregated target
+// instance list request, keyed by scope.
+type TargetInstanceAggregatedList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: A map of scoped target instance lists.
+ Items map[string]TargetInstancesScopedList `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// TargetInstanceList is a paginated list of TargetInstance resources.
+type TargetInstanceList struct {
+ // Id: Unique identifier for the resource; defined by the server (output
+ // only).
+ Id string `json:"id,omitempty"`
+
+ // Items: The TargetInstance resources.
+ Items []*TargetInstance `json:"items,omitempty"`
+
+ // Kind: Type of resource.
+ Kind string `json:"kind,omitempty"`
+
+ // NextPageToken: A token used to continue a truncated list request
+ // (output only).
+ NextPageToken string `json:"nextPageToken,omitempty"`
+
+ // SelfLink: Server defined URL for this resource (output only).
+ SelfLink string `json:"selfLink,omitempty"`
+}
+
+// TargetInstancesScopedList holds the target instances of a single scope
+// within an aggregated list response.
+type TargetInstancesScopedList struct {
+ // TargetInstances: List of target instances contained in this scope.
+ TargetInstances []*TargetInstance `json:"targetInstances,omitempty"`
+
+ // Warning: Informational warning which replaces the list of target
+ // instances when the list is empty.
+ Warning *TargetInstancesScopedListWarning `json:"warning,omitempty"`
+}
+
+// TargetInstancesScopedListWarning is the warning returned in place of an
+// empty scoped target instance list.
+type TargetInstancesScopedListWarning struct {
+ // Code: The warning type identifier for this warning.
+ Code string `json:"code,omitempty"`
+
+ // Data: Metadata for this warning in 'key: value' format.
+ Data []*TargetInstancesScopedListWarningData `json:"data,omitempty"`
+
+ // Message: Optional human-readable details for this warning.
+ Message string `json:"message,omitempty"`
+}
+
+// TargetInstancesScopedListWarningData is one 'key: value' pair of warning
+// metadata.
+type TargetInstancesScopedListWarningData struct {
+ // Key: A key for the warning data.
+ Key string `json:"key,omitempty"`
+
+ // Value: A warning data value corresponding to the key.
+ Value string `json:"value,omitempty"`
+}
+
// TargetPool represents a Compute Engine target pool resource: a group of
// member VM instances to which a forwarding rule directs traffic, with
// optional health checking and primary/backup failover behavior.
type TargetPool struct {
	// BackupPool: This field is applicable only when the containing target
	// pool is serving a forwarding rule as the primary pool, and its
	// 'failoverRatio' field is properly set to a value between [0,
	// 1].
	//
	// 'backupPool' and 'failoverRatio' together define the fallback
	// behavior of the primary target pool: if the ratio of the healthy VMs
	// in the primary pool is at or below 'failoverRatio', traffic arriving
	// at the load-balanced IP will be directed to the backup pool.
	//
	// In case
	// where 'failoverRatio' and 'backupPool' are not set, or all the VMs in
	// the backup pool are unhealthy, the traffic will be directed back to
	// the primary pool in the "force" mode, where traffic will be spread to
	// the healthy VMs with the best effort, or to all VMs when no VM is
	// healthy.
	BackupPool string `json:"backupPool,omitempty"`

	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
	// only).
	CreationTimestamp string `json:"creationTimestamp,omitempty"`

	// Description: An optional textual description of the resource;
	// provided by the client when the resource is created.
	Description string `json:"description,omitempty"`

	// FailoverRatio: This field is applicable only when the containing
	// target pool is serving a forwarding rule as the primary pool (i.e.,
	// not as a backup pool to some other target pool). The value of the
	// field must be in [0, 1].
	//
	// If set, 'backupPool' must also be set. They
	// together define the fallback behavior of the primary target pool: if
	// the ratio of the healthy VMs in the primary pool is at or below this
	// number, traffic arriving at the load-balanced IP will be directed to
	// the backup pool.
	//
	// In case where 'failoverRatio' is not set or all the
	// VMs in the backup pool are unhealthy, the traffic will be directed
	// back to the primary pool in the "force" mode, where traffic will be
	// spread to the healthy VMs with the best effort, or to all VMs when no
	// VM is healthy.
	FailoverRatio float64 `json:"failoverRatio,omitempty"`

	// HealthChecks: A list of URLs to the HttpHealthCheck resource. A
	// member VM in this pool is considered healthy if and only if all
	// specified health checks pass. An empty list means all member VMs will
	// be considered healthy at all times.
	HealthChecks []string `json:"healthChecks,omitempty"`

	// Id: Unique identifier for the resource; defined by the server (output
	// only). Encoded as a decimal string on the wire (the ",string" tag).
	Id uint64 `json:"id,omitempty,string"`

	// Instances: A list of resource URLs to the member VMs serving this
	// pool. They must live in zones contained in the same region as this
	// pool.
	Instances []string `json:"instances,omitempty"`

	// Kind: Type of the resource.
	Kind string `json:"kind,omitempty"`

	// Name: Name of the resource; provided by the client when the resource
	// is created. The name must be 1-63 characters long, and comply with
	// RFC1035.
	Name string `json:"name,omitempty"`

	// Region: URL of the region where the target pool resides (output
	// only).
	Region string `json:"region,omitempty"`

	// SelfLink: Server defined URL for the resource (output only).
	SelfLink string `json:"selfLink,omitempty"`

	// SessionAffinity: Session affinity option, must be one of the
	// following values: 'NONE': Connections from the same client IP may go
	// to any VM in the pool; 'CLIENT_IP': Connections from the same client
	// IP will go to the same VM in the pool while that VM remains healthy.
	// 'CLIENT_IP_PROTO': Connections from the same client IP with the same
	// IP protocol will go to the same VM in the pool while that VM remains
	// healthy.
	SessionAffinity string `json:"sessionAffinity,omitempty"`
}
+
// TargetPoolAggregatedList is the response body for an aggregated (all
// regions) target pool listing, keyed by scope.
type TargetPoolAggregatedList struct {
	// Id: Unique identifier for the resource; defined by the server (output
	// only).
	Id string `json:"id,omitempty"`

	// Items: A map of scoped target pool lists.
	Items map[string]TargetPoolsScopedList `json:"items,omitempty"`

	// Kind: Type of resource.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token used to continue a truncated list request
	// (output only).
	NextPageToken string `json:"nextPageToken,omitempty"`

	// SelfLink: Server defined URL for this resource (output only).
	SelfLink string `json:"selfLink,omitempty"`
}
+
// TargetPoolInstanceHealth reports the health status of instances in a
// target pool.
type TargetPoolInstanceHealth struct {
	// HealthStatus: Health state entries, one per checked instance.
	HealthStatus []*HealthStatus `json:"healthStatus,omitempty"`

	// Kind: Type of resource.
	Kind string `json:"kind,omitempty"`
}
+
// TargetPoolList is the response body for a single-region target pool
// listing.
type TargetPoolList struct {
	// Id: Unique identifier for the resource; defined by the server (output
	// only).
	Id string `json:"id,omitempty"`

	// Items: The TargetPool resources.
	Items []*TargetPool `json:"items,omitempty"`

	// Kind: Type of resource.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token used to continue a truncated list request
	// (output only).
	NextPageToken string `json:"nextPageToken,omitempty"`

	// SelfLink: Server defined URL for this resource (output only).
	SelfLink string `json:"selfLink,omitempty"`
}
+
// TargetPoolsAddHealthCheckRequest is the request body for adding health
// checks to a target pool.
type TargetPoolsAddHealthCheckRequest struct {
	// HealthChecks: Health check URLs to be added to targetPool.
	HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"`
}
+
// TargetPoolsAddInstanceRequest is the request body for adding instances to
// a target pool.
type TargetPoolsAddInstanceRequest struct {
	// Instances: URLs of the instances to be added to targetPool.
	Instances []*InstanceReference `json:"instances,omitempty"`
}
+
// TargetPoolsRemoveHealthCheckRequest is the request body for removing
// health checks from a target pool.
type TargetPoolsRemoveHealthCheckRequest struct {
	// HealthChecks: Health check URLs to be removed from targetPool.
	HealthChecks []*HealthCheckReference `json:"healthChecks,omitempty"`
}
+
// TargetPoolsRemoveInstanceRequest is the request body for removing
// instances from a target pool.
type TargetPoolsRemoveInstanceRequest struct {
	// Instances: URLs of the instances to be removed from targetPool.
	Instances []*InstanceReference `json:"instances,omitempty"`
}
+
// TargetPoolsScopedList is one scope's entry in a TargetPoolAggregatedList.
type TargetPoolsScopedList struct {
	// TargetPools: List of target pools contained in this scope.
	TargetPools []*TargetPool `json:"targetPools,omitempty"`

	// Warning: Informational warning which replaces the list of addresses
	// when the list is empty.
	Warning *TargetPoolsScopedListWarning `json:"warning,omitempty"`
}
+
// TargetPoolsScopedListWarning is an informational warning attached to a
// TargetPoolsScopedList when its pool list is empty.
type TargetPoolsScopedListWarning struct {
	// Code: The warning type identifier for this warning.
	Code string `json:"code,omitempty"`

	// Data: Metadata for this warning in 'key: value' format.
	Data []*TargetPoolsScopedListWarningData `json:"data,omitempty"`

	// Message: Optional human-readable details for this warning.
	Message string `json:"message,omitempty"`
}
+
// TargetPoolsScopedListWarningData is a single key/value metadata entry of
// a TargetPoolsScopedListWarning.
type TargetPoolsScopedListWarningData struct {
	// Key: A key for the warning data.
	Key string `json:"key,omitempty"`

	// Value: A warning data value corresponding to the key.
	Value string `json:"value,omitempty"`
}
+
// TargetReference is a request body that carries a single 'target' value
// (presumably the URL of a target resource — confirm against the API
// discovery document).
type TargetReference struct {
	Target string `json:"target,omitempty"`
}
+
// TestFailure describes one failed UrlMap test case: the host/path that was
// tested, the service it was expected to map to, and the service it
// actually mapped to.
type TestFailure struct {
	// ActualService: The service the URL actually mapped to.
	ActualService string `json:"actualService,omitempty"`

	// ExpectedService: The service the test expected.
	ExpectedService string `json:"expectedService,omitempty"`

	// Host: Host portion of the tested URL.
	Host string `json:"host,omitempty"`

	// Path: Path portion of the tested URL.
	Path string `json:"path,omitempty"`
}
+
// UrlMap represents a Compute Engine URL map resource, which routes
// requests to BackendServices based on host rules and path matchers.
type UrlMap struct {
	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
	// only).
	CreationTimestamp string `json:"creationTimestamp,omitempty"`

	// DefaultService: The URL of the BackendService resource if none of the
	// hostRules match.
	DefaultService string `json:"defaultService,omitempty"`

	// Description: An optional textual description of the resource;
	// provided by the client when the resource is created.
	Description string `json:"description,omitempty"`

	// Fingerprint: Fingerprint of this resource. A hash of the contents
	// stored in this object. This field is used in optimistic locking. This
	// field will be ignored when inserting a UrlMap. An up-to-date
	// fingerprint must be provided in order to update the UrlMap.
	Fingerprint string `json:"fingerprint,omitempty"`

	// HostRules: The list of HostRules to use against the URL.
	HostRules []*HostRule `json:"hostRules,omitempty"`

	// Id: Unique identifier for the resource; defined by the server (output
	// only). Encoded as a decimal string on the wire (the ",string" tag).
	Id uint64 `json:"id,omitempty,string"`

	// Kind: Type of the resource.
	Kind string `json:"kind,omitempty"`

	// Name: Name of the resource; provided by the client when the resource
	// is created. The name must be 1-63 characters long, and comply with
	// RFC1035.
	Name string `json:"name,omitempty"`

	// PathMatchers: The list of named PathMatchers to use against the URL.
	PathMatchers []*PathMatcher `json:"pathMatchers,omitempty"`

	// SelfLink: Server defined URL for the resource (output only).
	SelfLink string `json:"selfLink,omitempty"`

	// Tests: The list of expected URL mappings. A request to update this
	// UrlMap will succeed only if all of the test cases pass.
	Tests []*UrlMapTest `json:"tests,omitempty"`
}
+
// UrlMapList is the response body for a UrlMap listing.
type UrlMapList struct {
	// Id: Unique identifier for the resource; defined by the server (output
	// only).
	Id string `json:"id,omitempty"`

	// Items: The UrlMap resources.
	Items []*UrlMap `json:"items,omitempty"`

	// Kind: Type of resource.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token used to continue a truncated list request
	// (output only).
	NextPageToken string `json:"nextPageToken,omitempty"`

	// SelfLink: Server defined URL for this resource (output only).
	SelfLink string `json:"selfLink,omitempty"`
}
+
// UrlMapReference is a request body that carries a single 'urlMap' value
// (presumably the URL of a UrlMap resource — confirm against the API
// discovery document).
type UrlMapReference struct {
	UrlMap string `json:"urlMap,omitempty"`
}
+
// UrlMapTest is a single expected URL mapping used to validate a UrlMap.
type UrlMapTest struct {
	// Description: Description of this test case.
	Description string `json:"description,omitempty"`

	// Host: Host portion of the URL.
	Host string `json:"host,omitempty"`

	// Path: Path portion of the URL.
	Path string `json:"path,omitempty"`

	// Service: Expected BackendService resource the given URL should be
	// mapped to.
	Service string `json:"service,omitempty"`
}
+
// UrlMapValidationResult reports whether a UrlMap loaded successfully and
// whether its test cases passed.
type UrlMapValidationResult struct {
	// LoadErrors: Reasons the UrlMap failed to load, if any.
	LoadErrors []string `json:"loadErrors,omitempty"`

	// LoadSucceeded: Whether the given UrlMap can be successfully loaded.
	// If false, 'loadErrors' indicates the reasons.
	LoadSucceeded bool `json:"loadSucceeded,omitempty"`

	// TestFailures: Details of any failed test cases.
	TestFailures []*TestFailure `json:"testFailures,omitempty"`

	// TestPassed: If successfully loaded, this field indicates whether the
	// tests passed. If false, 'testFailures' indicates the reasons for
	// failure.
	TestPassed bool `json:"testPassed,omitempty"`
}
+
// UrlMapsValidateRequest is the request body for UrlMap validation.
type UrlMapsValidateRequest struct {
	// Resource: Content of the UrlMap to be validated.
	Resource *UrlMap `json:"resource,omitempty"`
}
+
// UrlMapsValidateResponse is the response body for UrlMap validation.
type UrlMapsValidateResponse struct {
	// Result: The outcome of validating the submitted UrlMap.
	Result *UrlMapValidationResult `json:"result,omitempty"`
}
+
// UsageExportLocation describes the Cloud Storage bucket (and object name
// prefix) where usage reports are written.
type UsageExportLocation struct {
	// BucketName: The name of an existing bucket in Cloud Storage where the
	// usage report object is stored. The Google Service Account is granted
	// write access to this bucket. This is simply the bucket name, with no
	// "gs://" or "https://storage.googleapis.com/" in front of it.
	BucketName string `json:"bucketName,omitempty"`

	// ReportNamePrefix: An optional prefix for the name of the usage report
	// object stored in bucket_name. If not supplied, defaults to "usage_".
	// The report is stored as a CSV file named _gce_.csv. where is the day
	// of the usage according to Pacific Time. The prefix should conform to
	// Cloud Storage object naming conventions.
	ReportNamePrefix string `json:"reportNamePrefix,omitempty"`
}
+
// Zone represents a Compute Engine zone resource.
type Zone struct {
	// CreationTimestamp: Creation timestamp in RFC3339 text format (output
	// only).
	CreationTimestamp string `json:"creationTimestamp,omitempty"`

	// Deprecated: The deprecation status associated with this zone.
	Deprecated *DeprecationStatus `json:"deprecated,omitempty"`

	// Description: Textual description of the resource.
	Description string `json:"description,omitempty"`

	// Id: Unique identifier for the resource; defined by the server (output
	// only). Encoded as a decimal string on the wire (the ",string" tag).
	Id uint64 `json:"id,omitempty,string"`

	// Kind: Type of the resource.
	Kind string `json:"kind,omitempty"`

	// MaintenanceWindows: Scheduled maintenance windows for the zone. When
	// the zone is in a maintenance window, all resources which reside in
	// the zone will be unavailable.
	MaintenanceWindows []*ZoneMaintenanceWindows `json:"maintenanceWindows,omitempty"`

	// Name: Name of the resource.
	Name string `json:"name,omitempty"`

	// Region: Full URL reference to the region which hosts the zone (output
	// only).
	Region string `json:"region,omitempty"`

	// SelfLink: Server defined URL for the resource (output only).
	SelfLink string `json:"selfLink,omitempty"`

	// Status: Status of the zone. "UP" or "DOWN".
	Status string `json:"status,omitempty"`
}
+
// ZoneMaintenanceWindows describes one scheduled maintenance window of a
// zone.
type ZoneMaintenanceWindows struct {
	// BeginTime: Begin time of the maintenance window, in RFC 3339 format.
	BeginTime string `json:"beginTime,omitempty"`

	// Description: Textual description of the maintenance window.
	Description string `json:"description,omitempty"`

	// EndTime: End time of the maintenance window, in RFC 3339 format.
	EndTime string `json:"endTime,omitempty"`

	// Name: Name of the maintenance window.
	Name string `json:"name,omitempty"`
}
+
// ZoneList is the response body for a zone listing.
type ZoneList struct {
	// Id: Unique identifier for the resource; defined by the server (output
	// only).
	Id string `json:"id,omitempty"`

	// Items: The zone resources.
	Items []*Zone `json:"items,omitempty"`

	// Kind: Type of resource.
	Kind string `json:"kind,omitempty"`

	// NextPageToken: A token used to continue a truncated list request
	// (output only).
	NextPageToken string `json:"nextPageToken,omitempty"`

	// SelfLink: Server defined URL for this resource (output only).
	SelfLink string `json:"selfLink,omitempty"`
}
+
// method id "compute.addresses.aggregatedList":

// AddressesAggregatedListCall builds a compute.addresses.aggregatedList
// request. Optional query parameters accumulate in opt_ until Do is called.
type AddressesAggregatedListCall struct {
	s       *Service
	project string
	opt_    map[string]interface{}
}

// AggregatedList: Retrieves the list of addresses grouped by scope.
func (r *AddressesService) AggregatedList(project string) *AddressesAggregatedListCall {
	c := &AddressesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	return c
}

// Filter sets the optional parameter "filter": Filter expression for
// filtering listed resources.
func (c *AddressesAggregatedListCall) Filter(filter string) *AddressesAggregatedListCall {
	c.opt_["filter"] = filter
	return c
}

// MaxResults sets the optional parameter "maxResults": Maximum count of
// results to be returned. Maximum value is 500 and default value is
// 500.
func (c *AddressesAggregatedListCall) MaxResults(maxResults int64) *AddressesAggregatedListCall {
	c.opt_["maxResults"] = maxResults
	return c
}

// PageToken sets the optional parameter "pageToken": Tag returned by a
// previous list request truncated by maxResults. Used to continue a
// previous list request.
func (c *AddressesAggregatedListCall) PageToken(pageToken string) *AddressesAggregatedListCall {
	c.opt_["pageToken"] = pageToken
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AddressesAggregatedListCall) Fields(s ...googleapi.Field) *AddressesAggregatedListCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the aggregatedList request: it assembles the query string
// from opt_, expands the URL template, issues a GET, and decodes the JSON
// response into an AddressAggregatedList.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *AddressesAggregatedListCall) Do() (*AddressAggregatedList, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["filter"]; ok {
		params.Set("filter", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["maxResults"]; ok {
		params.Set("maxResults", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["pageToken"]; ok {
		params.Set("pageToken", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/addresses")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project": c.project,
	})
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *AddressAggregatedList
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves the list of addresses grouped by scope.",
	//   "httpMethod": "GET",
	//   "id": "compute.addresses.aggregatedList",
	//   "parameterOrder": [
	//     "project"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "Optional. Filter expression for filtering listed resources.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "maxResults": {
	//       "default": "500",
	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
	//       "format": "uint32",
	//       "location": "query",
	//       "maximum": "500",
	//       "minimum": "0",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/aggregated/addresses",
	//   "response": {
	//     "$ref": "AddressAggregatedList"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute",
	//     "https://www.googleapis.com/auth/compute.readonly"
	//   ]
	// }

}
+
// method id "compute.addresses.delete":

// AddressesDeleteCall builds a compute.addresses.delete request for one
// address in one project/region.
type AddressesDeleteCall struct {
	s       *Service
	project string
	region  string
	address string
	opt_    map[string]interface{}
}

// Delete: Deletes the specified address resource.
func (r *AddressesService) Delete(project string, region string, address string) *AddressesDeleteCall {
	c := &AddressesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	c.region = region
	c.address = address
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AddressesDeleteCall) Fields(s ...googleapi.Field) *AddressesDeleteCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the delete request and decodes the resulting Operation.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *AddressesDeleteCall) Do() (*Operation, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project": c.project,
		"region":  c.region,
		"address": c.address,
	})
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *Operation
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes the specified address resource.",
	//   "httpMethod": "DELETE",
	//   "id": "compute.addresses.delete",
	//   "parameterOrder": [
	//     "project",
	//     "region",
	//     "address"
	//   ],
	//   "parameters": {
	//     "address": {
	//       "description": "Name of the address resource to delete.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "region": {
	//       "description": "Name of the region scoping this request.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/regions/{region}/addresses/{address}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute"
	//   ]
	// }

}
+
// method id "compute.addresses.get":

// AddressesGetCall builds a compute.addresses.get request for one address
// in one project/region.
type AddressesGetCall struct {
	s       *Service
	project string
	region  string
	address string
	opt_    map[string]interface{}
}

// Get: Returns the specified address resource.
func (r *AddressesService) Get(project string, region string, address string) *AddressesGetCall {
	c := &AddressesGetCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	c.region = region
	c.address = address
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AddressesGetCall) Fields(s ...googleapi.Field) *AddressesGetCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the get request and decodes the resulting Address.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *AddressesGetCall) Do() (*Address, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses/{address}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project": c.project,
		"region":  c.region,
		"address": c.address,
	})
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *Address
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Returns the specified address resource.",
	//   "httpMethod": "GET",
	//   "id": "compute.addresses.get",
	//   "parameterOrder": [
	//     "project",
	//     "region",
	//     "address"
	//   ],
	//   "parameters": {
	//     "address": {
	//       "description": "Name of the address resource to return.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "region": {
	//       "description": "Name of the region scoping this request.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/regions/{region}/addresses/{address}",
	//   "response": {
	//     "$ref": "Address"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute",
	//     "https://www.googleapis.com/auth/compute.readonly"
	//   ]
	// }

}
+
// method id "compute.addresses.insert":

// AddressesInsertCall builds a compute.addresses.insert request whose body
// is the Address to create.
type AddressesInsertCall struct {
	s       *Service
	project string
	region  string
	address *Address
	opt_    map[string]interface{}
}

// Insert: Creates an address resource in the specified project using
// the data included in the request.
func (r *AddressesService) Insert(project string, region string, address *Address) *AddressesInsertCall {
	c := &AddressesInsertCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	c.region = region
	c.address = address
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AddressesInsertCall) Fields(s ...googleapi.Field) *AddressesInsertCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do serializes the Address as the JSON request body, POSTs it, and
// decodes the resulting Operation.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *AddressesInsertCall) Do() (*Operation, error) {
	var body io.Reader = nil
	body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
	if err != nil {
		return nil, err
	}
	ctype := "application/json"
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("POST", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project": c.project,
		"region":  c.region,
	})
	req.Header.Set("Content-Type", ctype)
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *Operation
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Creates an address resource in the specified project using the data included in the request.",
	//   "httpMethod": "POST",
	//   "id": "compute.addresses.insert",
	//   "parameterOrder": [
	//     "project",
	//     "region"
	//   ],
	//   "parameters": {
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "region": {
	//       "description": "Name of the region scoping this request.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/regions/{region}/addresses",
	//   "request": {
	//     "$ref": "Address"
	//   },
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute"
	//   ]
	// }

}
+
// method id "compute.addresses.list":

// AddressesListCall builds a compute.addresses.list request for one
// project/region. Optional query parameters accumulate in opt_.
type AddressesListCall struct {
	s       *Service
	project string
	region  string
	opt_    map[string]interface{}
}

// List: Retrieves the list of address resources contained within the
// specified region.
func (r *AddressesService) List(project string, region string) *AddressesListCall {
	c := &AddressesListCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	c.region = region
	return c
}

// Filter sets the optional parameter "filter": Filter expression for
// filtering listed resources.
func (c *AddressesListCall) Filter(filter string) *AddressesListCall {
	c.opt_["filter"] = filter
	return c
}

// MaxResults sets the optional parameter "maxResults": Maximum count of
// results to be returned. Maximum value is 500 and default value is
// 500.
func (c *AddressesListCall) MaxResults(maxResults int64) *AddressesListCall {
	c.opt_["maxResults"] = maxResults
	return c
}

// PageToken sets the optional parameter "pageToken": Tag returned by a
// previous list request truncated by maxResults. Used to continue a
// previous list request.
func (c *AddressesListCall) PageToken(pageToken string) *AddressesListCall {
	c.opt_["pageToken"] = pageToken
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *AddressesListCall) Fields(s ...googleapi.Field) *AddressesListCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the list request and decodes the resulting AddressList.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *AddressesListCall) Do() (*AddressList, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["filter"]; ok {
		params.Set("filter", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["maxResults"]; ok {
		params.Set("maxResults", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["pageToken"]; ok {
		params.Set("pageToken", fmt.Sprintf("%v", v))
	}
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/addresses")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("GET", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project": c.project,
		"region":  c.region,
	})
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *AddressList
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Retrieves the list of address resources contained within the specified region.",
	//   "httpMethod": "GET",
	//   "id": "compute.addresses.list",
	//   "parameterOrder": [
	//     "project",
	//     "region"
	//   ],
	//   "parameters": {
	//     "filter": {
	//       "description": "Optional. Filter expression for filtering listed resources.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "maxResults": {
	//       "default": "500",
	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
	//       "format": "uint32",
	//       "location": "query",
	//       "maximum": "500",
	//       "minimum": "0",
	//       "type": "integer"
	//     },
	//     "pageToken": {
	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
	//       "location": "query",
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "region": {
	//       "description": "Name of the region scoping this request.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/regions/{region}/addresses",
	//   "response": {
	//     "$ref": "AddressList"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute",
	//     "https://www.googleapis.com/auth/compute.readonly"
	//   ]
	// }

}
+
// method id "compute.backendServices.delete":

// BackendServicesDeleteCall builds a compute.backendServices.delete
// request for one global backend service.
type BackendServicesDeleteCall struct {
	s              *Service
	project        string
	backendService string
	opt_           map[string]interface{}
}

// Delete: Deletes the specified BackendService resource.
func (r *BackendServicesService) Delete(project string, backendService string) *BackendServicesDeleteCall {
	c := &BackendServicesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
	c.project = project
	c.backendService = backendService
	return c
}

// Fields allows partial responses to be retrieved.
// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
// for more information.
func (c *BackendServicesDeleteCall) Fields(s ...googleapi.Field) *BackendServicesDeleteCall {
	c.opt_["fields"] = googleapi.CombineFields(s)
	return c
}

// Do executes the delete request and decodes the resulting Operation.
// NOTE(review): the error from http.NewRequest is discarded; presumably
// safe because the URL is built from a generated template — confirm.
func (c *BackendServicesDeleteCall) Do() (*Operation, error) {
	var body io.Reader = nil
	params := make(url.Values)
	params.Set("alt", "json")
	if v, ok := c.opt_["fields"]; ok {
		params.Set("fields", fmt.Sprintf("%v", v))
	}
	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
	urls += "?" + params.Encode()
	req, _ := http.NewRequest("DELETE", urls, body)
	googleapi.Expand(req.URL, map[string]string{
		"project":        c.project,
		"backendService": c.backendService,
	})
	req.Header.Set("User-Agent", "google-api-go-client/0.5")
	res, err := c.s.client.Do(req)
	if err != nil {
		return nil, err
	}
	defer googleapi.CloseBody(res)
	if err := googleapi.CheckResponse(res); err != nil {
		return nil, err
	}
	var ret *Operation
	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
		return nil, err
	}
	return ret, nil
	// {
	//   "description": "Deletes the specified BackendService resource.",
	//   "httpMethod": "DELETE",
	//   "id": "compute.backendServices.delete",
	//   "parameterOrder": [
	//     "project",
	//     "backendService"
	//   ],
	//   "parameters": {
	//     "backendService": {
	//       "description": "Name of the BackendService resource to delete.",
	//       "location": "path",
	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
	//       "required": true,
	//       "type": "string"
	//     },
	//     "project": {
	//       "description": "Name of the project scoping this request.",
	//       "location": "path",
	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
	//       "required": true,
	//       "type": "string"
	//     }
	//   },
	//   "path": "{project}/global/backendServices/{backendService}",
	//   "response": {
	//     "$ref": "Operation"
	//   },
	//   "scopes": [
	//     "https://www.googleapis.com/auth/compute"
	//   ]
	// }

}
+
+// method id "compute.backendServices.get":
+
+// BackendServicesGetCall holds the parameters of a pending
+// compute.backendServices.get request; build it with
+// BackendServicesService.Get and execute it with Do.
+type BackendServicesGetCall struct {
+ s *Service
+ project string
+ backendService string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified BackendService resource.
+func (r *BackendServicesService) Get(project string, backendService string) *BackendServicesGetCall {
+ c := &BackendServicesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.backendService = backendService
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesGetCall) Fields(s ...googleapi.Field) *BackendServicesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the GET request and decodes the JSON response into a
+// *BackendService. It returns an error if the HTTP round trip fails,
+// the server responds with a non-success status, or the body cannot be
+// decoded.
+func (c *BackendServicesGetCall) Do() (*BackendService, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *BackendService
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Returns the specified BackendService resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.backendServices.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "backendService"
+ //   ],
+ //   "parameters": {
+ //     "backendService": {
+ //       "description": "Name of the BackendService resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices/{backendService}",
+ //   "response": {
+ //     "$ref": "BackendService"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.backendServices.getHealth":
+
+// BackendServicesGetHealthCall holds the parameters of a pending
+// compute.backendServices.getHealth request; build it with
+// BackendServicesService.GetHealth and execute it with Do.
+type BackendServicesGetHealthCall struct {
+ s *Service
+ project string
+ backendService string
+ resourcegroupreference *ResourceGroupReference
+ opt_ map[string]interface{}
+}
+
+// GetHealth: Gets the most recent health check results for this
+// BackendService.
+func (r *BackendServicesService) GetHealth(project string, backendService string, resourcegroupreference *ResourceGroupReference) *BackendServicesGetHealthCall {
+ c := &BackendServicesGetHealthCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.backendService = backendService
+ c.resourcegroupreference = resourcegroupreference
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesGetHealthCall) Fields(s ...googleapi.Field) *BackendServicesGetHealthCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the POST request, sending the ResourceGroupReference as the
+// JSON request body, and decodes the response into a
+// *BackendServiceGroupHealth. It returns an error if serialization, the
+// HTTP round trip, the status check, or decoding fails.
+func (c *BackendServicesGetHealthCall) Do() (*BackendServiceGroupHealth, error) {
+ var body io.Reader = nil
+ // Serialize the request body as plain JSON (no "data" wrapper).
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.resourcegroupreference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}/getHealth")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *BackendServiceGroupHealth
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Gets the most recent health check results for this BackendService.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.backendServices.getHealth",
+ //   "parameterOrder": [
+ //     "project",
+ //     "backendService"
+ //   ],
+ //   "parameters": {
+ //     "backendService": {
+ //       "description": "Name of the BackendService resource to which the queried instance belongs.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices/{backendService}/getHealth",
+ //   "request": {
+ //     "$ref": "ResourceGroupReference"
+ //   },
+ //   "response": {
+ //     "$ref": "BackendServiceGroupHealth"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.backendServices.insert":
+
+// BackendServicesInsertCall holds the parameters of a pending
+// compute.backendServices.insert request; build it with
+// BackendServicesService.Insert and execute it with Do.
+type BackendServicesInsertCall struct {
+ s *Service
+ project string
+ backendservice *BackendService
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a BackendService resource in the specified project
+// using the data included in the request.
+func (r *BackendServicesService) Insert(project string, backendservice *BackendService) *BackendServicesInsertCall {
+ c := &BackendServicesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.backendservice = backendservice
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesInsertCall) Fields(s ...googleapi.Field) *BackendServicesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the POST request, sending the BackendService as the JSON
+// request body, and decodes the response into an *Operation. It returns
+// an error if serialization, the HTTP round trip, the status check, or
+// decoding fails.
+func (c *BackendServicesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ // Serialize the request body as plain JSON (no "data" wrapper).
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Creates a BackendService resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.backendServices.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices",
+ //   "request": {
+ //     "$ref": "BackendService"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.backendServices.list":
+
+// BackendServicesListCall holds the parameters of a pending
+// compute.backendServices.list request; build it with
+// BackendServicesService.List, refine it with the optional setters
+// (Filter, MaxResults, PageToken, Fields), and execute it with Do.
+type BackendServicesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of BackendService resources available to the
+// specified project.
+func (r *BackendServicesService) List(project string) *BackendServicesListCall {
+ c := &BackendServicesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *BackendServicesListCall) Filter(filter string) *BackendServicesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *BackendServicesListCall) MaxResults(maxResults int64) *BackendServicesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *BackendServicesListCall) PageToken(pageToken string) *BackendServicesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesListCall) Fields(s ...googleapi.Field) *BackendServicesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the GET request, forwarding any optional parameters set on
+// the call as query parameters, and decodes the JSON response into a
+// *BackendServiceList. It returns an error if the HTTP round trip, the
+// status check, or decoding fails.
+func (c *BackendServicesListCall) Do() (*BackendServiceList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *BackendServiceList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Retrieves the list of BackendService resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.backendServices.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices",
+ //   "response": {
+ //     "$ref": "BackendServiceList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.backendServices.patch":
+
+// BackendServicesPatchCall holds the parameters of a pending
+// compute.backendServices.patch request; build it with
+// BackendServicesService.Patch and execute it with Do.
+type BackendServicesPatchCall struct {
+ s *Service
+ project string
+ backendService string
+ backendservice *BackendService
+ opt_ map[string]interface{}
+}
+
+// Patch: Update the entire content of the BackendService resource. This
+// method supports patch semantics.
+func (r *BackendServicesService) Patch(project string, backendService string, backendservice *BackendService) *BackendServicesPatchCall {
+ c := &BackendServicesPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.backendService = backendService
+ c.backendservice = backendservice
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesPatchCall) Fields(s ...googleapi.Field) *BackendServicesPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the PATCH request, sending the BackendService as the JSON
+// request body, and decodes the response into an *Operation. It returns
+// an error if serialization, the HTTP round trip, the status check, or
+// decoding fails.
+func (c *BackendServicesPatchCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ // Serialize the request body as plain JSON (no "data" wrapper).
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("PATCH", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Update the entire content of the BackendService resource. This method supports patch semantics.",
+ //   "httpMethod": "PATCH",
+ //   "id": "compute.backendServices.patch",
+ //   "parameterOrder": [
+ //     "project",
+ //     "backendService"
+ //   ],
+ //   "parameters": {
+ //     "backendService": {
+ //       "description": "Name of the BackendService resource to update.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices/{backendService}",
+ //   "request": {
+ //     "$ref": "BackendService"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.backendServices.update":
+
+// BackendServicesUpdateCall holds the parameters of a pending
+// compute.backendServices.update request; build it with
+// BackendServicesService.Update and execute it with Do.
+type BackendServicesUpdateCall struct {
+ s *Service
+ project string
+ backendService string
+ backendservice *BackendService
+ opt_ map[string]interface{}
+}
+
+// Update: Update the entire content of the BackendService resource.
+func (r *BackendServicesService) Update(project string, backendService string, backendservice *BackendService) *BackendServicesUpdateCall {
+ c := &BackendServicesUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.backendService = backendService
+ c.backendservice = backendservice
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *BackendServicesUpdateCall) Fields(s ...googleapi.Field) *BackendServicesUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the PUT request, sending the BackendService as the JSON
+// request body, and decodes the response into an *Operation. It returns
+// an error if serialization, the HTTP round trip, the status check, or
+// decoding fails.
+func (c *BackendServicesUpdateCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ // Serialize the request body as plain JSON (no "data" wrapper).
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.backendservice)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/backendServices/{backendService}")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "backendService": c.backendService,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Update the entire content of the BackendService resource.",
+ //   "httpMethod": "PUT",
+ //   "id": "compute.backendServices.update",
+ //   "parameterOrder": [
+ //     "project",
+ //     "backendService"
+ //   ],
+ //   "parameters": {
+ //     "backendService": {
+ //       "description": "Name of the BackendService resource to update.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/backendServices/{backendService}",
+ //   "request": {
+ //     "$ref": "BackendService"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.diskTypes.aggregatedList":
+
+// DiskTypesAggregatedListCall holds the parameters of a pending
+// compute.diskTypes.aggregatedList request; build it with
+// DiskTypesService.AggregatedList, refine it with the optional setters
+// (Filter, MaxResults, PageToken, Fields), and execute it with Do.
+type DiskTypesAggregatedListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// AggregatedList: Retrieves the list of disk type resources grouped by
+// scope.
+func (r *DiskTypesService) AggregatedList(project string) *DiskTypesAggregatedListCall {
+ c := &DiskTypesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *DiskTypesAggregatedListCall) Filter(filter string) *DiskTypesAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *DiskTypesAggregatedListCall) MaxResults(maxResults int64) *DiskTypesAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *DiskTypesAggregatedListCall) PageToken(pageToken string) *DiskTypesAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesAggregatedListCall) Fields(s ...googleapi.Field) *DiskTypesAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the GET request, forwarding any optional parameters set on
+// the call as query parameters, and decodes the JSON response into a
+// *DiskTypeAggregatedList. It returns an error if the HTTP round trip,
+// the status check, or decoding fails.
+func (c *DiskTypesAggregatedListCall) Do() (*DiskTypeAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/diskTypes")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DiskTypeAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Retrieves the list of disk type resources grouped by scope.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.diskTypes.aggregatedList",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/aggregated/diskTypes",
+ //   "response": {
+ //     "$ref": "DiskTypeAggregatedList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.diskTypes.get":
+
+// DiskTypesGetCall holds the parameters of a pending
+// compute.diskTypes.get request; build it with DiskTypesService.Get and
+// execute it with Do.
+type DiskTypesGetCall struct {
+ s *Service
+ project string
+ zone string
+ diskType string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified disk type resource.
+func (r *DiskTypesService) Get(project string, zone string, diskType string) *DiskTypesGetCall {
+ c := &DiskTypesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.diskType = diskType
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesGetCall) Fields(s ...googleapi.Field) *DiskTypesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the GET request and decodes the JSON response into a
+// *DiskType. It returns an error if the HTTP round trip, the status
+// check, or decoding fails.
+func (c *DiskTypesGetCall) Do() (*DiskType, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes/{diskType}")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "diskType": c.diskType,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DiskType
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Returns the specified disk type resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.diskTypes.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "diskType"
+ //   ],
+ //   "parameters": {
+ //     "diskType": {
+ //       "description": "Name of the disk type resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/diskTypes/{diskType}",
+ //   "response": {
+ //     "$ref": "DiskType"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.diskTypes.list":
+
+// DiskTypesListCall holds the parameters of a pending
+// compute.diskTypes.list request; build it with DiskTypesService.List,
+// refine it with the optional setters (Filter, MaxResults, PageToken,
+// Fields), and execute it with Do.
+type DiskTypesListCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of disk type resources available to the
+// specified project.
+func (r *DiskTypesService) List(project string, zone string) *DiskTypesListCall {
+ c := &DiskTypesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *DiskTypesListCall) Filter(filter string) *DiskTypesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *DiskTypesListCall) MaxResults(maxResults int64) *DiskTypesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *DiskTypesListCall) PageToken(pageToken string) *DiskTypesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DiskTypesListCall) Fields(s ...googleapi.Field) *DiskTypesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the GET request, forwarding any optional parameters set on
+// the call as query parameters, and decodes the JSON response into a
+// *DiskTypeList. It returns an error if the HTTP round trip, the status
+// check, or decoding fails.
+func (c *DiskTypesListCall) Do() (*DiskTypeList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/diskTypes")
+ urls += "?" + params.Encode()
+ // NOTE(review): NewRequest error ignored; the generated method/URL are
+ // presumed well-formed — confirm against the generator's conventions.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DiskTypeList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Generated discovery-document metadata for this method:
+ // {
+ //   "description": "Retrieves the list of disk type resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.diskTypes.list",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/diskTypes",
+ //   "response": {
+ //     "$ref": "DiskTypeList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.disks.aggregatedList":
+
+// DisksAggregatedListCall holds the parameters of a pending
+// compute.disks.aggregatedList request; build it with
+// DisksService.AggregatedList, refine it with the optional setters
+// (Filter, MaxResults, PageToken, Fields), and execute it with Do.
+type DisksAggregatedListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// AggregatedList: Retrieves the list of disks grouped by scope.
+func (r *DisksService) AggregatedList(project string) *DisksAggregatedListCall {
+ c := &DisksAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *DisksAggregatedListCall) Filter(filter string) *DisksAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *DisksAggregatedListCall) MaxResults(maxResults int64) *DisksAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *DisksAggregatedListCall) PageToken(pageToken string) *DisksAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksAggregatedListCall) Fields(s ...googleapi.Field) *DisksAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *DisksAggregatedListCall) Do() (*DiskAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/disks")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DiskAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of disks grouped by scope.",
+ // "httpMethod": "GET",
+ // "id": "compute.disks.aggregatedList",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/aggregated/disks",
+ // "response": {
+ // "$ref": "DiskAggregatedList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.disks.createSnapshot":
+
+// DisksCreateSnapshotCall accumulates the parameters of a
+// "compute.disks.createSnapshot" request; construct it with
+// DisksService.CreateSnapshot and execute it with Do.
+type DisksCreateSnapshotCall struct {
+ s *Service
+ project string
+ zone string
+ disk string
+ snapshot *Snapshot
+ opt_ map[string]interface{}
+}
+
+// CreateSnapshot: Creates a snapshot of the specified persistent disk
+// resource. (The discovery document provides no description for this
+// method; this summary is derived from its path and parameters below.)
+func (r *DisksService) CreateSnapshot(project string, zone string, disk string, snapshot *Snapshot) *DisksCreateSnapshotCall {
+ c := &DisksCreateSnapshotCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.disk = disk
+ c.snapshot = snapshot
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksCreateSnapshotCall) Fields(s ...googleapi.Field) *DisksCreateSnapshotCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP POST request (JSON-encoding the Snapshot as the
+// request body) and decodes the JSON response into an *Operation. The
+// trailing comment block is the discovery-document metadata.
+func (c *DisksCreateSnapshotCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.snapshot)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}/createSnapshot")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "disk": c.disk,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "httpMethod": "POST",
+ //   "id": "compute.disks.createSnapshot",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "disk"
+ //   ],
+ //   "parameters": {
+ //     "disk": {
+ //       "description": "Name of the persistent disk resource to snapshot.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/disks/{disk}/createSnapshot",
+ //   "request": {
+ //     "$ref": "Snapshot"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.disks.delete":
+
+// DisksDeleteCall accumulates the parameters of a "compute.disks.delete"
+// request; construct it with DisksService.Delete and execute it with Do.
+type DisksDeleteCall struct {
+ s *Service
+ project string
+ zone string
+ disk string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified persistent disk resource.
+func (r *DisksService) Delete(project string, zone string, disk string) *DisksDeleteCall {
+ c := &DisksDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.disk = disk
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksDeleteCall) Fields(s ...googleapi.Field) *DisksDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP DELETE request and decodes the JSON response into
+// an *Operation. The trailing comment block is the discovery-document
+// metadata for this method.
+func (c *DisksDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "disk": c.disk,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified persistent disk resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.disks.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "disk"
+ //   ],
+ //   "parameters": {
+ //     "disk": {
+ //       "description": "Name of the persistent disk resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/disks/{disk}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.disks.get":
+
+// DisksGetCall accumulates the parameters of a "compute.disks.get"
+// request; construct it with DisksService.Get and execute it with Do.
+type DisksGetCall struct {
+ s *Service
+ project string
+ zone string
+ disk string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified persistent disk resource.
+func (r *DisksService) Get(project string, zone string, disk string) *DisksGetCall {
+ c := &DisksGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.disk = disk
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksGetCall) Fields(s ...googleapi.Field) *DisksGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *Disk. The trailing comment block is the discovery-document metadata
+// for this method.
+func (c *DisksGetCall) Do() (*Disk, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks/{disk}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "disk": c.disk,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Disk
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified persistent disk resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.disks.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "disk"
+ //   ],
+ //   "parameters": {
+ //     "disk": {
+ //       "description": "Name of the persistent disk resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/disks/{disk}",
+ //   "response": {
+ //     "$ref": "Disk"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.disks.insert":
+
+// DisksInsertCall accumulates the parameters of a "compute.disks.insert"
+// request; construct it with DisksService.Insert and execute it with Do.
+type DisksInsertCall struct {
+ s *Service
+ project string
+ zone string
+ disk *Disk
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a persistent disk resource in the specified project
+// using the data included in the request.
+func (r *DisksService) Insert(project string, zone string, disk *Disk) *DisksInsertCall {
+ c := &DisksInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.disk = disk
+ return c
+}
+
+// SourceImage sets the optional parameter "sourceImage": Source image
+// to restore onto a disk.
+func (c *DisksInsertCall) SourceImage(sourceImage string) *DisksInsertCall {
+ c.opt_["sourceImage"] = sourceImage
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksInsertCall) Fields(s ...googleapi.Field) *DisksInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP POST request (JSON-encoding the Disk as the request
+// body) and decodes the JSON response into an *Operation. The trailing
+// comment block is the discovery-document metadata for this method.
+func (c *DisksInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.disk)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["sourceImage"]; ok {
+ params.Set("sourceImage", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a persistent disk resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.disks.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "sourceImage": {
+ //       "description": "Optional. Source image to restore onto a disk.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/disks",
+ //   "request": {
+ //     "$ref": "Disk"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.disks.list":
+
+// DisksListCall accumulates the parameters of a "compute.disks.list"
+// request; construct it with DisksService.List and execute it with Do.
+type DisksListCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of persistent disk resources contained
+// within the specified zone.
+func (r *DisksService) List(project string, zone string) *DisksListCall {
+ c := &DisksListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *DisksListCall) Filter(filter string) *DisksListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *DisksListCall) MaxResults(maxResults int64) *DisksListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *DisksListCall) PageToken(pageToken string) *DisksListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *DisksListCall) Fields(s ...googleapi.Field) *DisksListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *DiskList. The trailing comment block is the discovery-document
+// metadata for this method.
+func (c *DisksListCall) Do() (*DiskList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/disks")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *DiskList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of persistent disk resources contained within the specified zone.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.disks.list",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/disks",
+ //   "response": {
+ //     "$ref": "DiskList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.firewalls.delete":
+
+// FirewallsDeleteCall accumulates the parameters of a
+// "compute.firewalls.delete" request; construct it with
+// FirewallsService.Delete and execute it with Do.
+type FirewallsDeleteCall struct {
+ s *Service
+ project string
+ firewall string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified firewall resource.
+func (r *FirewallsService) Delete(project string, firewall string) *FirewallsDeleteCall {
+ c := &FirewallsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.firewall = firewall
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsDeleteCall) Fields(s ...googleapi.Field) *FirewallsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP DELETE request and decodes the JSON response into
+// an *Operation. The trailing comment block is the discovery-document
+// metadata for this method.
+func (c *FirewallsDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "firewall": c.firewall,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified firewall resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.firewalls.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "firewall"
+ //   ],
+ //   "parameters": {
+ //     "firewall": {
+ //       "description": "Name of the firewall resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/firewalls/{firewall}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.firewalls.get":
+
+// FirewallsGetCall accumulates the parameters of a
+// "compute.firewalls.get" request; construct it with
+// FirewallsService.Get and execute it with Do.
+type FirewallsGetCall struct {
+ s *Service
+ project string
+ firewall string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified firewall resource.
+func (r *FirewallsService) Get(project string, firewall string) *FirewallsGetCall {
+ c := &FirewallsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.firewall = firewall
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsGetCall) Fields(s ...googleapi.Field) *FirewallsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *Firewall. The trailing comment block is the discovery-document
+// metadata for this method.
+func (c *FirewallsGetCall) Do() (*Firewall, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "firewall": c.firewall,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Firewall
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified firewall resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.firewalls.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "firewall"
+ //   ],
+ //   "parameters": {
+ //     "firewall": {
+ //       "description": "Name of the firewall resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/firewalls/{firewall}",
+ //   "response": {
+ //     "$ref": "Firewall"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.firewalls.insert":
+
+// FirewallsInsertCall accumulates the parameters of a
+// "compute.firewalls.insert" request; construct it with
+// FirewallsService.Insert and execute it with Do.
+type FirewallsInsertCall struct {
+ s *Service
+ project string
+ firewall *Firewall
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a firewall resource in the specified project using
+// the data included in the request.
+func (r *FirewallsService) Insert(project string, firewall *Firewall) *FirewallsInsertCall {
+ c := &FirewallsInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.firewall = firewall
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsInsertCall) Fields(s ...googleapi.Field) *FirewallsInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP POST request (JSON-encoding the Firewall as the
+// request body) and decodes the JSON response into an *Operation. The
+// trailing comment block is the discovery-document metadata.
+func (c *FirewallsInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a firewall resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.firewalls.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/firewalls",
+ //   "request": {
+ //     "$ref": "Firewall"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.firewalls.list":
+
+// FirewallsListCall accumulates the parameters of a
+// "compute.firewalls.list" request; construct it with
+// FirewallsService.List and execute it with Do.
+type FirewallsListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of firewall resources available to the
+// specified project.
+func (r *FirewallsService) List(project string) *FirewallsListCall {
+ c := &FirewallsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *FirewallsListCall) Filter(filter string) *FirewallsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *FirewallsListCall) MaxResults(maxResults int64) *FirewallsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *FirewallsListCall) PageToken(pageToken string) *FirewallsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsListCall) Fields(s ...googleapi.Field) *FirewallsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *FirewallList. The trailing comment block is the discovery-document
+// metadata for this method.
+func (c *FirewallsListCall) Do() (*FirewallList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *FirewallList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of firewall resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.firewalls.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/firewalls",
+ //   "response": {
+ //     "$ref": "FirewallList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.firewalls.patch":
+
+// FirewallsPatchCall accumulates the parameters of a
+// "compute.firewalls.patch" request; construct it with
+// FirewallsService.Patch and execute it with Do.
+type FirewallsPatchCall struct {
+ s *Service
+ project string
+ firewall string
+ firewall2 *Firewall
+ opt_ map[string]interface{}
+}
+
+// Patch: Updates the specified firewall resource with the data included
+// in the request. This method supports patch semantics.
+// firewall names the existing resource; firewall2 carries the fields to
+// patch onto it.
+func (r *FirewallsService) Patch(project string, firewall string, firewall2 *Firewall) *FirewallsPatchCall {
+ c := &FirewallsPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.firewall = firewall
+ c.firewall2 = firewall2
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *FirewallsPatchCall) Fields(s ...googleapi.Field) *FirewallsPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *FirewallsPatchCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "firewall": c.firewall,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates the specified firewall resource with the data included in the request. This method supports patch semantics.",
+ // "httpMethod": "PATCH",
+ // "id": "compute.firewalls.patch",
+ // "parameterOrder": [
+ // "project",
+ // "firewall"
+ // ],
+ // "parameters": {
+ // "firewall": {
+ // "description": "Name of the firewall resource to update.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/firewalls/{firewall}",
+ // "request": {
+ // "$ref": "Firewall"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.firewalls.update":
+
+ type FirewallsUpdateCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ firewall string
+ firewall2 *Firewall // request body; "2" suffix avoids clashing with the path param name
+ opt_ map[string]interface{}
+}
+
+ // Update: Updates the specified firewall resource with the data
+ // included in the request.
+ func (r *FirewallsService) Update(project string, firewall string, firewall2 *Firewall) *FirewallsUpdateCall {
+ c := &FirewallsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.firewall = firewall
+ c.firewall2 = firewall2
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *FirewallsUpdateCall) Fields(s ...googleapi.Field) *FirewallsUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *FirewallsUpdateCall) Do() (*Operation, error) { // sends PUT {project}/global/firewalls/{firewall} (full replace, vs. Patch); decodes an Operation
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.firewall2)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/firewalls/{firewall}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PUT", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "firewall": c.firewall,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx becomes a typed error here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Updates the specified firewall resource with the data included in the request.",
+ // "httpMethod": "PUT",
+ // "id": "compute.firewalls.update",
+ // "parameterOrder": [
+ // "project",
+ // "firewall"
+ // ],
+ // "parameters": {
+ // "firewall": {
+ // "description": "Name of the firewall resource to update.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/firewalls/{firewall}",
+ // "request": {
+ // "$ref": "Firewall"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.aggregatedList":
+
+ type ForwardingRulesAggregatedListCall struct { // builder: optional query params live in opt_ until Do()
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+ // AggregatedList: Retrieves the list of forwarding rules grouped by
+ // scope.
+ func (r *ForwardingRulesService) AggregatedList(project string) *ForwardingRulesAggregatedListCall {
+ c := &ForwardingRulesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+ // Filter sets the optional parameter "filter": Filter expression for
+ // filtering listed resources.
+ func (c *ForwardingRulesAggregatedListCall) Filter(filter string) *ForwardingRulesAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+ // MaxResults sets the optional parameter "maxResults": Maximum count of
+ // results to be returned. Maximum value is 500 and default value is
+ // 500.
+ func (c *ForwardingRulesAggregatedListCall) MaxResults(maxResults int64) *ForwardingRulesAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+ // PageToken sets the optional parameter "pageToken": Tag returned by a
+ // previous list request truncated by maxResults. Used to continue a
+ // previous list request.
+ func (c *ForwardingRulesAggregatedListCall) PageToken(pageToken string) *ForwardingRulesAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesAggregatedListCall) Fields(s ...googleapi.Field) *ForwardingRulesAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesAggregatedListCall) Do() (*ForwardingRuleAggregatedList, error) { // sends GET {project}/aggregated/forwardingRules; decodes one page of results
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/forwardingRules")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ForwardingRuleAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of forwarding rules grouped by scope.",
+ // "httpMethod": "GET",
+ // "id": "compute.forwardingRules.aggregatedList",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/aggregated/forwardingRules",
+ // "response": {
+ // "$ref": "ForwardingRuleAggregatedList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.delete":
+
+ type ForwardingRulesDeleteCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ region string
+ forwardingRule string
+ opt_ map[string]interface{}
+}
+
+ // Delete: Deletes the specified ForwardingRule resource.
+ func (r *ForwardingRulesService) Delete(project string, region string, forwardingRule string) *ForwardingRulesDeleteCall {
+ c := &ForwardingRulesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *ForwardingRulesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesDeleteCall) Do() (*Operation, error) { // sends DELETE on the rule's URL; decodes an Operation
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified ForwardingRule resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.forwardingRules.delete",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.get":
+
+ type ForwardingRulesGetCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ region string
+ forwardingRule string
+ opt_ map[string]interface{}
+}
+
+ // Get: Returns the specified ForwardingRule resource.
+ func (r *ForwardingRulesService) Get(project string, region string, forwardingRule string) *ForwardingRulesGetCall {
+ c := &ForwardingRulesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesGetCall) Fields(s ...googleapi.Field) *ForwardingRulesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesGetCall) Do() (*ForwardingRule, error) { // sends GET on the rule's URL; decodes a ForwardingRule
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ForwardingRule
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified ForwardingRule resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.forwardingRules.get",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "ForwardingRule"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.insert":
+
+ type ForwardingRulesInsertCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ region string
+ forwardingrule *ForwardingRule // request body
+ opt_ map[string]interface{}
+}
+
+ // Insert: Creates a ForwardingRule resource in the specified project
+ // and region using the data included in the request.
+ func (r *ForwardingRulesService) Insert(project string, region string, forwardingrule *ForwardingRule) *ForwardingRulesInsertCall {
+ c := &ForwardingRulesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.forwardingrule = forwardingrule
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesInsertCall) Fields(s ...googleapi.Field) *ForwardingRulesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesInsertCall) Do() (*Operation, error) { // sends POST {project}/regions/{region}/forwardingRules; decodes an Operation
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.forwardingRules.insert",
+ // "parameterOrder": [
+ // "project",
+ // "region"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/forwardingRules",
+ // "request": {
+ // "$ref": "ForwardingRule"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.list":
+
+ type ForwardingRulesListCall struct { // builder: optional query params live in opt_ until Do()
+ s *Service
+ project string
+ region string
+ opt_ map[string]interface{}
+}
+
+ // List: Retrieves the list of ForwardingRule resources available to the
+ // specified project and region.
+ func (r *ForwardingRulesService) List(project string, region string) *ForwardingRulesListCall {
+ c := &ForwardingRulesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ return c
+}
+
+ // Filter sets the optional parameter "filter": Filter expression for
+ // filtering listed resources.
+ func (c *ForwardingRulesListCall) Filter(filter string) *ForwardingRulesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+ // MaxResults sets the optional parameter "maxResults": Maximum count of
+ // results to be returned. Maximum value is 500 and default value is
+ // 500.
+ func (c *ForwardingRulesListCall) MaxResults(maxResults int64) *ForwardingRulesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+ // PageToken sets the optional parameter "pageToken": Tag returned by a
+ // previous list request truncated by maxResults. Used to continue a
+ // previous list request.
+ func (c *ForwardingRulesListCall) PageToken(pageToken string) *ForwardingRulesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesListCall) Fields(s ...googleapi.Field) *ForwardingRulesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesListCall) Do() (*ForwardingRuleList, error) { // sends GET {project}/regions/{region}/forwardingRules; decodes one page of results
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ForwardingRuleList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of ForwardingRule resources available to the specified project and region.",
+ // "httpMethod": "GET",
+ // "id": "compute.forwardingRules.list",
+ // "parameterOrder": [
+ // "project",
+ // "region"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/forwardingRules",
+ // "response": {
+ // "$ref": "ForwardingRuleList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+ // method id "compute.forwardingRules.setTarget":
+
+ type ForwardingRulesSetTargetCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ region string
+ forwardingRule string
+ targetreference *TargetReference // request body naming the new target
+ opt_ map[string]interface{}
+}
+
+ // SetTarget: Changes target url for forwarding rule.
+ func (r *ForwardingRulesService) SetTarget(project string, region string, forwardingRule string, targetreference *TargetReference) *ForwardingRulesSetTargetCall {
+ c := &ForwardingRulesSetTargetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.forwardingRule = forwardingRule
+ c.targetreference = targetreference
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *ForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *ForwardingRulesSetTargetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *ForwardingRulesSetTargetCall) Do() (*Operation, error) { // sends POST .../forwardingRules/{forwardingRule}/setTarget; decodes an Operation
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Changes target url for forwarding rule.",
+ // "httpMethod": "POST",
+ // "id": "compute.forwardingRules.setTarget",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource in which target is to be set.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/forwardingRules/{forwardingRule}/setTarget",
+ // "request": {
+ // "$ref": "TargetReference"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.globalAddresses.delete":
+
+ type GlobalAddressesDeleteCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ address string
+ opt_ map[string]interface{}
+}
+
+ // Delete: Deletes the specified address resource.
+ func (r *GlobalAddressesService) Delete(project string, address string) *GlobalAddressesDeleteCall {
+ c := &GlobalAddressesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.address = address
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *GlobalAddressesDeleteCall) Fields(s ...googleapi.Field) *GlobalAddressesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *GlobalAddressesDeleteCall) Do() (*Operation, error) { // sends DELETE {project}/global/addresses/{address}; decodes an Operation
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "address": c.address,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified address resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalAddresses.delete",
+ // "parameterOrder": [
+ // "project",
+ // "address"
+ // ],
+ // "parameters": {
+ // "address": {
+ // "description": "Name of the address resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses/{address}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+ // method id "compute.globalAddresses.get":
+
+ type GlobalAddressesGetCall struct { // builder: accumulates params until Do() issues the request
+ s *Service
+ project string
+ address string
+ opt_ map[string]interface{}
+}
+
+ // Get: Returns the specified address resource.
+ func (r *GlobalAddressesService) Get(project string, address string) *GlobalAddressesGetCall {
+ c := &GlobalAddressesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.address = address
+ return c
+}
+
+ // Fields allows partial responses to be retrieved.
+ // See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+ // for more information.
+ func (c *GlobalAddressesGetCall) Fields(s ...googleapi.Field) *GlobalAddressesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+ func (c *GlobalAddressesGetCall) Do() (*Address, error) { // sends GET {project}/global/addresses/{address}; decodes an Address
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses/{address}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NewRequest error ignored: method/URL are generator-controlled
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "address": c.address,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Address
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified address resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalAddresses.get",
+ // "parameterOrder": [
+ // "project",
+ // "address"
+ // ],
+ // "parameters": {
+ // "address": {
+ // "description": "Name of the address resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses/{address}",
+ // "response": {
+ // "$ref": "Address"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalAddresses.insert":
+
+// GlobalAddressesInsertCall represents a pending
+// compute.globalAddresses.insert request. opt_ holds optional query
+// parameters (here only "fields") keyed by parameter name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalAddressesInsertCall struct {
+ s *Service
+ project string
+ address *Address
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates an address resource in the specified project using
+// the data included in the request.
+func (r *GlobalAddressesService) Insert(project string, address *Address) *GlobalAddressesInsertCall {
+ c := &GlobalAddressesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.address = address
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesInsertCall) Fields(s ...googleapi.Field) *GlobalAddressesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it JSON-encodes the Address request body, expands
+// the {project} path template, issues the POST, and decodes the response
+// into an *Operation. Transport and non-2xx API errors are returned as-is.
+func (c *GlobalAddressesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.address)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
+ urls += "?" + params.Encode()
+ // Error from NewRequest is ignored by the generator; the URL is built
+ // from BasePath above and is assumed well-formed.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates an address resource in the specified project using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.globalAddresses.insert",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses",
+ // "request": {
+ // "$ref": "Address"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalAddresses.list":
+
+// GlobalAddressesListCall represents a pending
+// compute.globalAddresses.list request. opt_ holds optional query
+// parameters ("filter", "maxResults", "pageToken", "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalAddressesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of global address resources.
+func (r *GlobalAddressesService) List(project string) *GlobalAddressesListCall {
+ c := &GlobalAddressesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *GlobalAddressesListCall) Filter(filter string) *GlobalAddressesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *GlobalAddressesListCall) MaxResults(maxResults int64) *GlobalAddressesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *GlobalAddressesListCall) PageToken(pageToken string) *GlobalAddressesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalAddressesListCall) Fields(s ...googleapi.Field) *GlobalAddressesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it copies any optional parameters set above into
+// the query string, expands {project}, issues the GET, and decodes the
+// response into an *AddressList.
+func (c *GlobalAddressesListCall) Do() (*AddressList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/addresses")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *AddressList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of global address resources.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalAddresses.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/addresses",
+ // "response": {
+ // "$ref": "AddressList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.delete":
+
+// GlobalForwardingRulesDeleteCall represents a pending
+// compute.globalForwardingRules.delete request. opt_ holds optional
+// query parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalForwardingRulesDeleteCall struct {
+ s *Service
+ project string
+ forwardingRule string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified ForwardingRule resource.
+func (r *GlobalForwardingRulesService) Delete(project string, forwardingRule string) *GlobalForwardingRulesDeleteCall {
+ c := &GlobalForwardingRulesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesDeleteCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it expands the {project} and {forwardingRule} path
+// templates, issues the DELETE, and decodes the response into an
+// *Operation describing the asynchronous deletion.
+func (c *GlobalForwardingRulesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified ForwardingRule resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalForwardingRules.delete",
+ // "parameterOrder": [
+ // "project",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.get":
+
+// GlobalForwardingRulesGetCall represents a pending
+// compute.globalForwardingRules.get request. opt_ holds optional query
+// parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalForwardingRulesGetCall struct {
+ s *Service
+ project string
+ forwardingRule string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified ForwardingRule resource.
+func (r *GlobalForwardingRulesService) Get(project string, forwardingRule string) *GlobalForwardingRulesGetCall {
+ c := &GlobalForwardingRulesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.forwardingRule = forwardingRule
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesGetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it expands the {project} and {forwardingRule} path
+// templates, issues the GET, and decodes the response into a
+// *ForwardingRule.
+func (c *GlobalForwardingRulesGetCall) Do() (*ForwardingRule, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ForwardingRule
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified ForwardingRule resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalForwardingRules.get",
+ // "parameterOrder": [
+ // "project",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules/{forwardingRule}",
+ // "response": {
+ // "$ref": "ForwardingRule"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.insert":
+
+// GlobalForwardingRulesInsertCall represents a pending
+// compute.globalForwardingRules.insert request. opt_ holds optional
+// query parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalForwardingRulesInsertCall struct {
+ s *Service
+ project string
+ forwardingrule *ForwardingRule
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a ForwardingRule resource in the specified project
+// and region using the data included in the request.
+func (r *GlobalForwardingRulesService) Insert(project string, forwardingrule *ForwardingRule) *GlobalForwardingRulesInsertCall {
+ c := &GlobalForwardingRulesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.forwardingrule = forwardingrule
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesInsertCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it JSON-encodes the ForwardingRule request body,
+// expands {project}, issues the POST, and decodes the response into an
+// *Operation.
+func (c *GlobalForwardingRulesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.forwardingrule)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a ForwardingRule resource in the specified project and region using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.globalForwardingRules.insert",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules",
+ // "request": {
+ // "$ref": "ForwardingRule"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.list":
+
+// GlobalForwardingRulesListCall represents a pending
+// compute.globalForwardingRules.list request. opt_ holds optional query
+// parameters ("filter", "maxResults", "pageToken", "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalForwardingRulesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of ForwardingRule resources available to the
+// specified project.
+func (r *GlobalForwardingRulesService) List(project string) *GlobalForwardingRulesListCall {
+ c := &GlobalForwardingRulesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *GlobalForwardingRulesListCall) Filter(filter string) *GlobalForwardingRulesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *GlobalForwardingRulesListCall) MaxResults(maxResults int64) *GlobalForwardingRulesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *GlobalForwardingRulesListCall) PageToken(pageToken string) *GlobalForwardingRulesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesListCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it copies any optional parameters set above into
+// the query string, expands {project}, issues the GET, and decodes the
+// response into a *ForwardingRuleList.
+func (c *GlobalForwardingRulesListCall) Do() (*ForwardingRuleList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ForwardingRuleList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of ForwardingRule resources available to the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalForwardingRules.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules",
+ // "response": {
+ // "$ref": "ForwardingRuleList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalForwardingRules.setTarget":
+
+// GlobalForwardingRulesSetTargetCall represents a pending
+// compute.globalForwardingRules.setTarget request. opt_ holds optional
+// query parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalForwardingRulesSetTargetCall struct {
+ s *Service
+ project string
+ forwardingRule string
+ targetreference *TargetReference
+ opt_ map[string]interface{}
+}
+
+// SetTarget: Changes target url for forwarding rule.
+func (r *GlobalForwardingRulesService) SetTarget(project string, forwardingRule string, targetreference *TargetReference) *GlobalForwardingRulesSetTargetCall {
+ c := &GlobalForwardingRulesSetTargetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.forwardingRule = forwardingRule
+ c.targetreference = targetreference
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalForwardingRulesSetTargetCall) Fields(s ...googleapi.Field) *GlobalForwardingRulesSetTargetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it JSON-encodes the TargetReference request body,
+// expands the {project} and {forwardingRule} path templates, issues the
+// POST, and decodes the response into an *Operation.
+func (c *GlobalForwardingRulesSetTargetCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/forwardingRules/{forwardingRule}/setTarget")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "forwardingRule": c.forwardingRule,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Changes target url for forwarding rule.",
+ // "httpMethod": "POST",
+ // "id": "compute.globalForwardingRules.setTarget",
+ // "parameterOrder": [
+ // "project",
+ // "forwardingRule"
+ // ],
+ // "parameters": {
+ // "forwardingRule": {
+ // "description": "Name of the ForwardingRule resource in which target is to be set.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/forwardingRules/{forwardingRule}/setTarget",
+ // "request": {
+ // "$ref": "TargetReference"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalOperations.aggregatedList":
+
+// GlobalOperationsAggregatedListCall represents a pending
+// compute.globalOperations.aggregatedList request. opt_ holds optional
+// query parameters ("filter", "maxResults", "pageToken", "fields") keyed
+// by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalOperationsAggregatedListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// AggregatedList: Retrieves the list of all operations grouped by
+// scope.
+func (r *GlobalOperationsService) AggregatedList(project string) *GlobalOperationsAggregatedListCall {
+ c := &GlobalOperationsAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *GlobalOperationsAggregatedListCall) Filter(filter string) *GlobalOperationsAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *GlobalOperationsAggregatedListCall) MaxResults(maxResults int64) *GlobalOperationsAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *GlobalOperationsAggregatedListCall) PageToken(pageToken string) *GlobalOperationsAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsAggregatedListCall) Fields(s ...googleapi.Field) *GlobalOperationsAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it copies any optional parameters set above into
+// the query string, expands {project}, issues the GET against the
+// aggregated path, and decodes the response into an
+// *OperationAggregatedList.
+func (c *GlobalOperationsAggregatedListCall) Do() (*OperationAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/operations")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *OperationAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of all operations grouped by scope.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalOperations.aggregatedList",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/aggregated/operations",
+ // "response": {
+ // "$ref": "OperationAggregatedList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalOperations.delete":
+
+// GlobalOperationsDeleteCall represents a pending
+// compute.globalOperations.delete request. opt_ holds optional query
+// parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalOperationsDeleteCall struct {
+ s *Service
+ project string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified operation resource.
+func (r *GlobalOperationsService) Delete(project string, operation string) *GlobalOperationsDeleteCall {
+ c := &GlobalOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsDeleteCall) Fields(s ...googleapi.Field) *GlobalOperationsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it expands the {project} and {operation} path
+// templates and issues the DELETE. Unlike the other Do methods in this
+// file it returns only an error — the API defines no response body for
+// this method (note the missing "response" in the JSON below).
+func (c *GlobalOperationsDeleteCall) Do() error {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the specified operation resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.globalOperations.delete",
+ // "parameterOrder": [
+ // "project",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/operations/{operation}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalOperations.get":
+
+// GlobalOperationsGetCall represents a pending
+// compute.globalOperations.get request. opt_ holds optional query
+// parameters (here only "fields") keyed by name.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalOperationsGetCall struct {
+ s *Service
+ project string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Get: Retrieves the specified operation resource.
+func (r *GlobalOperationsService) Get(project string, operation string) *GlobalOperationsGetCall {
+ c := &GlobalOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsGetCall) Fields(s ...googleapi.Field) *GlobalOperationsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: it expands the {project} and {operation} path
+// templates, issues the GET, and decodes the response into an *Operation.
+func (c *GlobalOperationsGetCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the specified operation resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalOperations.get",
+ // "parameterOrder": [
+ // "project",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/operations/{operation}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.globalOperations.list":
+
+// GlobalOperationsListCall represents a pending
+// compute.globalOperations.list request. opt_ holds optional query
+// parameters ("filter", "maxResults", "pageToken", "fields") keyed by
+// name; they are applied when Do is invoked.
+// NOTE(review): generated code (compute-gen.go) — do not edit by hand.
+type GlobalOperationsListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of operation resources contained within the
+// specified project.
+func (r *GlobalOperationsService) List(project string) *GlobalOperationsListCall {
+ c := &GlobalOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *GlobalOperationsListCall) Filter(filter string) *GlobalOperationsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *GlobalOperationsListCall) MaxResults(maxResults int64) *GlobalOperationsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *GlobalOperationsListCall) PageToken(pageToken string) *GlobalOperationsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *GlobalOperationsListCall) Fields(s ...googleapi.Field) *GlobalOperationsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *GlobalOperationsListCall) Do() (*OperationList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/operations")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *OperationList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of operation resources contained within the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.globalOperations.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/operations",
+ // "response": {
+ // "$ref": "OperationList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.delete":
+
+type HttpHealthChecksDeleteCall struct { // generated request builder; opt_ holds optional query params
+ s *Service
+ project string
+ httpHealthCheck string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified HttpHealthCheck resource.
+func (r *HttpHealthChecksService) Delete(project string, httpHealthCheck string) *HttpHealthChecksDeleteCall {
+ c := &HttpHealthChecksDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.httpHealthCheck = httpHealthCheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksDeleteCall) Fields(s ...googleapi.Field) *HttpHealthChecksDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksDeleteCall) Do() (*Operation, error) { // Do issues the DELETE and decodes the JSON reply
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "httpHealthCheck": c.httpHealthCheck,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified HttpHealthCheck resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.httpHealthChecks.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "httpHealthCheck"
+ //   ],
+ //   "parameters": {
+ //     "httpHealthCheck": {
+ //       "description": "Name of the HttpHealthCheck resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.get":
+
+type HttpHealthChecksGetCall struct { // generated request builder; opt_ holds optional query params
+ s *Service
+ project string
+ httpHealthCheck string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified HttpHealthCheck resource.
+func (r *HttpHealthChecksService) Get(project string, httpHealthCheck string) *HttpHealthChecksGetCall {
+ c := &HttpHealthChecksGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.httpHealthCheck = httpHealthCheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksGetCall) Fields(s ...googleapi.Field) *HttpHealthChecksGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksGetCall) Do() (*HttpHealthCheck, error) { // Do issues the GET and decodes the JSON reply
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "httpHealthCheck": c.httpHealthCheck,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *HttpHealthCheck
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified HttpHealthCheck resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.httpHealthChecks.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "httpHealthCheck"
+ //   ],
+ //   "parameters": {
+ //     "httpHealthCheck": {
+ //       "description": "Name of the HttpHealthCheck resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ //   "response": {
+ //     "$ref": "HttpHealthCheck"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.insert":
+
+type HttpHealthChecksInsertCall struct { // generated request builder; httphealthcheck is the request body
+ s *Service
+ project string
+ httphealthcheck *HttpHealthCheck
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a HttpHealthCheck resource in the specified project
+// using the data included in the request.
+func (r *HttpHealthChecksService) Insert(project string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksInsertCall {
+ c := &HttpHealthChecksInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.httphealthcheck = httphealthcheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksInsertCall) Fields(s ...googleapi.Field) *HttpHealthChecksInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksInsertCall) Do() (*Operation, error) { // Do POSTs the JSON-encoded body and decodes the reply
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a HttpHealthCheck resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.httpHealthChecks.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks",
+ //   "request": {
+ //     "$ref": "HttpHealthCheck"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.list":
+
+type HttpHealthChecksListCall struct { // generated request builder; opt_ holds optional query params
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of HttpHealthCheck resources available to
+// the specified project.
+func (r *HttpHealthChecksService) List(project string) *HttpHealthChecksListCall {
+ c := &HttpHealthChecksListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *HttpHealthChecksListCall) Filter(filter string) *HttpHealthChecksListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *HttpHealthChecksListCall) MaxResults(maxResults int64) *HttpHealthChecksListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *HttpHealthChecksListCall) PageToken(pageToken string) *HttpHealthChecksListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksListCall) Fields(s ...googleapi.Field) *HttpHealthChecksListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksListCall) Do() (*HttpHealthCheckList, error) { // Do issues the GET and decodes the JSON reply
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *HttpHealthCheckList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of HttpHealthCheck resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.httpHealthChecks.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks",
+ //   "response": {
+ //     "$ref": "HttpHealthCheckList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.patch":
+
+type HttpHealthChecksPatchCall struct { // generated request builder; httphealthcheck is the request body
+ s *Service
+ project string
+ httpHealthCheck string
+ httphealthcheck *HttpHealthCheck
+ opt_ map[string]interface{}
+}
+
+// Patch: Updates a HttpHealthCheck resource in the specified project
+// using the data included in the request. This method supports patch
+// semantics.
+func (r *HttpHealthChecksService) Patch(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksPatchCall {
+ c := &HttpHealthChecksPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.httpHealthCheck = httpHealthCheck
+ c.httphealthcheck = httphealthcheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksPatchCall) Fields(s ...googleapi.Field) *HttpHealthChecksPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksPatchCall) Do() (*Operation, error) { // Do PATCHes the JSON-encoded body and decodes the reply
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "httpHealthCheck": c.httpHealthCheck,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request. This method supports patch semantics.",
+ //   "httpMethod": "PATCH",
+ //   "id": "compute.httpHealthChecks.patch",
+ //   "parameterOrder": [
+ //     "project",
+ //     "httpHealthCheck"
+ //   ],
+ //   "parameters": {
+ //     "httpHealthCheck": {
+ //       "description": "Name of the HttpHealthCheck resource to update.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ //   "request": {
+ //     "$ref": "HttpHealthCheck"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.httpHealthChecks.update":
+
+type HttpHealthChecksUpdateCall struct { // generated request builder; httphealthcheck is the request body
+ s *Service
+ project string
+ httpHealthCheck string
+ httphealthcheck *HttpHealthCheck
+ opt_ map[string]interface{}
+}
+
+// Update: Updates a HttpHealthCheck resource in the specified project
+// using the data included in the request.
+func (r *HttpHealthChecksService) Update(project string, httpHealthCheck string, httphealthcheck *HttpHealthCheck) *HttpHealthChecksUpdateCall {
+ c := &HttpHealthChecksUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.httpHealthCheck = httpHealthCheck
+ c.httphealthcheck = httphealthcheck
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *HttpHealthChecksUpdateCall) Fields(s ...googleapi.Field) *HttpHealthChecksUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *HttpHealthChecksUpdateCall) Do() (*Operation, error) { // Do PUTs the JSON-encoded body and decodes the reply
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.httphealthcheck)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/httpHealthChecks/{httpHealthCheck}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PUT", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "httpHealthCheck": c.httpHealthCheck,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Updates a HttpHealthCheck resource in the specified project using the data included in the request.",
+ //   "httpMethod": "PUT",
+ //   "id": "compute.httpHealthChecks.update",
+ //   "parameterOrder": [
+ //     "project",
+ //     "httpHealthCheck"
+ //   ],
+ //   "parameters": {
+ //     "httpHealthCheck": {
+ //       "description": "Name of the HttpHealthCheck resource to update.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/httpHealthChecks/{httpHealthCheck}",
+ //   "request": {
+ //     "$ref": "HttpHealthCheck"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.images.delete":
+
+type ImagesDeleteCall struct { // generated request builder; opt_ holds optional query params
+ s *Service
+ project string
+ image string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified image resource.
+func (r *ImagesService) Delete(project string, image string) *ImagesDeleteCall {
+ c := &ImagesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.image = image
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesDeleteCall) Fields(s ...googleapi.Field) *ImagesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ImagesDeleteCall) Do() (*Operation, error) { // Do issues the DELETE and decodes the JSON reply
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "image": c.image,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified image resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.images.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "image"
+ //   ],
+ //   "parameters": {
+ //     "image": {
+ //       "description": "Name of the image resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/images/{image}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.images.deprecate":
+
+type ImagesDeprecateCall struct { // generated request builder; deprecationstatus is the request body
+ s *Service
+ project string
+ image string
+ deprecationstatus *DeprecationStatus
+ opt_ map[string]interface{}
+}
+
+// Deprecate: Sets the deprecation status of an image. If no message
+// body is given, clears the deprecation status instead.
+func (r *ImagesService) Deprecate(project string, image string, deprecationstatus *DeprecationStatus) *ImagesDeprecateCall {
+ c := &ImagesDeprecateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.image = image
+ c.deprecationstatus = deprecationstatus
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesDeprecateCall) Fields(s ...googleapi.Field) *ImagesDeprecateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ImagesDeprecateCall) Do() (*Operation, error) { // Do POSTs the JSON-encoded body and decodes the reply
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.deprecationstatus)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}/deprecate")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "image": c.image,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Sets the deprecation status of an image. If no message body is given, clears the deprecation status instead.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.images.deprecate",
+ //   "parameterOrder": [
+ //     "project",
+ //     "image"
+ //   ],
+ //   "parameters": {
+ //     "image": {
+ //       "description": "Image name.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/images/{image}/deprecate",
+ //   "request": {
+ //     "$ref": "DeprecationStatus"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.images.get":
+
+type ImagesGetCall struct { // generated request builder; opt_ holds optional query params
+ s *Service
+ project string
+ image string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified image resource.
+func (r *ImagesService) Get(project string, image string) *ImagesGetCall {
+ c := &ImagesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.image = image
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesGetCall) Fields(s ...googleapi.Field) *ImagesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ImagesGetCall) Do() (*Image, error) { // Do issues the GET and decodes the JSON reply
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images/{image}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "image": c.image,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Image
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified image resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.images.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "image"
+ //   ],
+ //   "parameters": {
+ //     "image": {
+ //       "description": "Name of the image resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/images/{image}",
+ //   "response": {
+ //     "$ref": "Image"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.images.insert":
+
+type ImagesInsertCall struct { // generated request builder; image is the request body
+ s *Service
+ project string
+ image *Image
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates an image resource in the specified project using the
+// data included in the request.
+func (r *ImagesService) Insert(project string, image *Image) *ImagesInsertCall {
+ c := &ImagesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.image = image
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesInsertCall) Fields(s ...googleapi.Field) *ImagesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ImagesInsertCall) Do() (*Operation, error) { // Do POSTs the JSON-encoded body and decodes the reply
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.image)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): generator discards NewRequest error
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates an image resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.images.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/images",
+ //   "request": {
+ //     "$ref": "Image"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/devstorage.full_control",
+ //     "https://www.googleapis.com/auth/devstorage.read_only",
+ //     "https://www.googleapis.com/auth/devstorage.read_write"
+ //   ]
+ // }
+
+}
+
+// method id "compute.images.list":
+
+type ImagesListCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ opt_ map[string]interface{} // optional query parameters ("filter", "maxResults", "pageToken", "fields")
+}
+
+// List: Retrieves the list of image resources available to the
+// specified project.
+func (r *ImagesService) List(project string) *ImagesListCall {
+ c := &ImagesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *ImagesListCall) Filter(filter string) *ImagesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *ImagesListCall) MaxResults(maxResults int64) *ImagesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *ImagesListCall) PageToken(pageToken string) *ImagesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ImagesListCall) Fields(s ...googleapi.Field) *ImagesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ImagesListCall) Do() (*ImageList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["filter"]; ok { // copy each optional setter value into the query string
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/images") // join BasePath with the method's URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute {project} into the URL path
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *ImageList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **ImageList; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of image resources available to the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.images.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/images",
+ // "response": {
+ // "$ref": "ImageList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceTemplates.delete":
+
+type InstanceTemplatesDeleteCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ instanceTemplate string // required path parameter {instanceTemplate}
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// Delete: Deletes the specified instance template resource.
+func (r *InstanceTemplatesService) Delete(project string, instanceTemplate string) *InstanceTemplatesDeleteCall {
+ c := &InstanceTemplatesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.instanceTemplate = instanceTemplate
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesDeleteCall) Fields(s ...googleapi.Field) *InstanceTemplatesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstanceTemplatesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute path placeholders
+ "project": c.project,
+ "instanceTemplate": c.instanceTemplate,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **Operation; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified instance template resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.instanceTemplates.delete",
+ // "parameterOrder": [
+ // "project",
+ // "instanceTemplate"
+ // ],
+ // "parameters": {
+ // "instanceTemplate": {
+ // "description": "Name of the instance template resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/instanceTemplates/{instanceTemplate}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceTemplates.get":
+
+type InstanceTemplatesGetCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ instanceTemplate string // required path parameter {instanceTemplate}
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// Get: Returns the specified instance template resource.
+func (r *InstanceTemplatesService) Get(project string, instanceTemplate string) *InstanceTemplatesGetCall {
+ c := &InstanceTemplatesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.instanceTemplate = instanceTemplate
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesGetCall) Fields(s ...googleapi.Field) *InstanceTemplatesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstanceTemplatesGetCall) Do() (*InstanceTemplate, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates/{instanceTemplate}") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute path placeholders
+ "project": c.project,
+ "instanceTemplate": c.instanceTemplate,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *InstanceTemplate
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **InstanceTemplate; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified instance template resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.instanceTemplates.get",
+ // "parameterOrder": [
+ // "project",
+ // "instanceTemplate"
+ // ],
+ // "parameters": {
+ // "instanceTemplate": {
+ // "description": "Name of the instance template resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/instanceTemplates/{instanceTemplate}",
+ // "response": {
+ // "$ref": "InstanceTemplate"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceTemplates.insert":
+
+type InstanceTemplatesInsertCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ instancetemplate *InstanceTemplate // request body to be JSON-encoded
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// Insert: Creates an instance template resource in the specified
+// project using the data included in the request.
+func (r *InstanceTemplatesService) Insert(project string, instancetemplate *InstanceTemplate) *InstanceTemplatesInsertCall {
+ c := &InstanceTemplatesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.instancetemplate = instancetemplate
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesInsertCall) Fields(s ...googleapi.Field) *InstanceTemplatesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstanceTemplatesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancetemplate) // JSON-encode the request body (no "data" wrapper)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute {project} into the URL path
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **Operation; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates an instance template resource in the specified project using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.instanceTemplates.insert",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/instanceTemplates",
+ // "request": {
+ // "$ref": "InstanceTemplate"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instanceTemplates.list":
+
+type InstanceTemplatesListCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ opt_ map[string]interface{} // optional query parameters ("filter", "maxResults", "pageToken", "fields")
+}
+
+// List: Retrieves the list of instance template resources contained
+// within the specified project.
+func (r *InstanceTemplatesService) List(project string) *InstanceTemplatesListCall {
+ c := &InstanceTemplatesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *InstanceTemplatesListCall) Filter(filter string) *InstanceTemplatesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *InstanceTemplatesListCall) MaxResults(maxResults int64) *InstanceTemplatesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *InstanceTemplatesListCall) PageToken(pageToken string) *InstanceTemplatesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstanceTemplatesListCall) Fields(s ...googleapi.Field) *InstanceTemplatesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstanceTemplatesListCall) Do() (*InstanceTemplateList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["filter"]; ok { // copy each optional setter value into the query string
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/instanceTemplates") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute {project} into the URL path
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *InstanceTemplateList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **InstanceTemplateList; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of instance template resources contained within the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.instanceTemplates.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/instanceTemplates",
+ // "response": {
+ // "$ref": "InstanceTemplateList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.addAccessConfig":
+
+type InstancesAddAccessConfigCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ zone string // required path parameter {zone}
+ instance string // required path parameter {instance}
+ networkInterface string // required query parameter (location "query", not "path")
+ accessconfig *AccessConfig // request body to be JSON-encoded
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// AddAccessConfig: Adds an access config to an instance's network
+// interface.
+func (r *InstancesService) AddAccessConfig(project string, zone string, instance string, networkInterface string, accessconfig *AccessConfig) *InstancesAddAccessConfigCall {
+ c := &InstancesAddAccessConfigCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.networkInterface = networkInterface
+ c.accessconfig = accessconfig
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesAddAccessConfigCall) Fields(s ...googleapi.Field) *InstancesAddAccessConfigCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstancesAddAccessConfigCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.accessconfig) // JSON-encode the request body (no "data" wrapper)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ params.Set("networkInterface", fmt.Sprintf("%v", c.networkInterface)) // required query parameter, set unconditionally
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/addAccessConfig") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute path placeholders
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **Operation; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Adds an access config to an instance's network interface.",
+ // "httpMethod": "POST",
+ // "id": "compute.instances.addAccessConfig",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instance",
+ // "networkInterface"
+ // ],
+ // "parameters": {
+ // "instance": {
+ // "description": "Instance name.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "networkInterface": {
+ // "description": "Network interface name.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project name.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instances/{instance}/addAccessConfig",
+ // "request": {
+ // "$ref": "AccessConfig"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.aggregatedList":
+
+type InstancesAggregatedListCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ opt_ map[string]interface{} // optional query parameters ("filter", "maxResults", "pageToken", "fields")
+}
+
+// AggregatedList:
+func (r *InstancesService) AggregatedList(project string) *InstancesAggregatedListCall {
+ c := &InstancesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *InstancesAggregatedListCall) Filter(filter string) *InstancesAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *InstancesAggregatedListCall) MaxResults(maxResults int64) *InstancesAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *InstancesAggregatedListCall) PageToken(pageToken string) *InstancesAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesAggregatedListCall) Fields(s ...googleapi.Field) *InstancesAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstancesAggregatedListCall) Do() (*InstanceAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["filter"]; ok { // copy each optional setter value into the query string
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/instances") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute {project} into the URL path
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *InstanceAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **InstanceAggregatedList; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "httpMethod": "GET",
+ // "id": "compute.instances.aggregatedList",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/aggregated/instances",
+ // "response": {
+ // "$ref": "InstanceAggregatedList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.attachDisk":
+
+type InstancesAttachDiskCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ zone string // required path parameter {zone}
+ instance string // required path parameter {instance}
+ attacheddisk *AttachedDisk // request body to be JSON-encoded
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// AttachDisk: Attaches a disk resource to an instance.
+func (r *InstancesService) AttachDisk(project string, zone string, instance string, attacheddisk *AttachedDisk) *InstancesAttachDiskCall {
+ c := &InstancesAttachDiskCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.attacheddisk = attacheddisk
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesAttachDiskCall) Fields(s ...googleapi.Field) *InstancesAttachDiskCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstancesAttachDiskCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.attacheddisk) // JSON-encode the request body (no "data" wrapper)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/attachDisk") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute path placeholders
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **Operation; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Attaches a disk resource to an instance.",
+ // "httpMethod": "POST",
+ // "id": "compute.instances.attachDisk",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instance"
+ // ],
+ // "parameters": {
+ // "instance": {
+ // "description": "Instance name.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project name.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instances/{instance}/attachDisk",
+ // "request": {
+ // "$ref": "AttachedDisk"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.delete":
+
+type InstancesDeleteCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ zone string // required path parameter {zone}
+ instance string // required path parameter {instance}
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// Delete: Deletes the specified instance resource.
+func (r *InstancesService) Delete(project string, zone string, instance string) *InstancesDeleteCall {
+ c := &InstancesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesDeleteCall) Fields(s ...googleapi.Field) *InstancesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstancesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json") // always request a JSON response
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}") // join BasePath with the URI template
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body) // NOTE(review): NewRequest error discarded (generator convention)
+ googleapi.Expand(req.URL, map[string]string{ // substitute path placeholders
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil { // non-2xx statuses become errors here
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil { // decode into **Operation; allocates ret
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified instance resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.instances.delete",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instance"
+ // ],
+ // "parameters": {
+ // "instance": {
+ // "description": "Name of the instance resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instances/{instance}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.deleteAccessConfig":
+
+type InstancesDeleteAccessConfigCall struct {
+ s *Service // parent service; supplies the HTTP client and BasePath
+ project string // required path parameter {project}
+ zone string // required path parameter {zone}
+ instance string // required path parameter {instance}
+ accessConfig string // required query parameter: access config name
+ networkInterface string // required query parameter: network interface name
+ opt_ map[string]interface{} // optional query parameters (currently only "fields")
+}
+
+// DeleteAccessConfig: Deletes an access config from an instance's
+// network interface.
+func (r *InstancesService) DeleteAccessConfig(project string, zone string, instance string, accessConfig string, networkInterface string) *InstancesDeleteAccessConfigCall {
+ c := &InstancesDeleteAccessConfigCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.accessConfig = accessConfig
+ c.networkInterface = networkInterface
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesDeleteAccessConfigCall) Fields(s ...googleapi.Field) *InstancesDeleteAccessConfigCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *InstancesDeleteAccessConfigCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ params.Set("accessConfig", fmt.Sprintf("%v", c.accessConfig))
+ params.Set("networkInterface", fmt.Sprintf("%v", c.networkInterface))
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes an access config from an instance's network interface.",
+ // "httpMethod": "POST",
+ // "id": "compute.instances.deleteAccessConfig",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "instance",
+ // "accessConfig",
+ // "networkInterface"
+ // ],
+ // "parameters": {
+ // "accessConfig": {
+ // "description": "Access config name.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "instance": {
+ // "description": "Instance name.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "networkInterface": {
+ // "description": "Network interface name.",
+ // "location": "query",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Project name.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/instances/{instance}/deleteAccessConfig",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.instances.detachDisk":
+
+// InstancesDetachDiskCall collects the parameters of a
+// compute.instances.detachDisk request until Do is invoked;
+// opt_ holds optional query parameters (currently only "fields").
+type InstancesDetachDiskCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ deviceName string
+ opt_ map[string]interface{}
+}
+
+// DetachDisk: Detaches a disk from an instance.
+func (r *InstancesService) DetachDisk(project string, zone string, instance string, deviceName string) *InstancesDetachDiskCall {
+ c := &InstancesDetachDiskCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.deviceName = deviceName
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesDetachDiskCall) Fields(s ...googleapi.Field) *InstancesDetachDiskCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone/instance are expanded into the URL
+// path, deviceName is sent as a query parameter, the request is POSTed
+// with an empty body, and the JSON response is decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesDetachDiskCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ params.Set("deviceName", fmt.Sprintf("%v", c.deviceName))
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/detachDisk")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Detaches a disk from an instance.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.detachDisk",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance",
+ //     "deviceName"
+ //   ],
+ //   "parameters": {
+ //     "deviceName": {
+ //       "description": "Disk device name to detach.",
+ //       "location": "query",
+ //       "pattern": "\\w[\\w.-]{0,254}",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "instance": {
+ //       "description": "Instance name.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Project name.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/detachDisk",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.get":
+
+// InstancesGetCall collects the parameters of a compute.instances.get
+// request until Do is invoked; opt_ holds optional query parameters
+// (currently only "fields").
+type InstancesGetCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified instance resource.
+func (r *InstancesService) Get(project string, zone string, instance string) *InstancesGetCall {
+ c := &InstancesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesGetCall) Fields(s ...googleapi.Field) *InstancesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone/instance are expanded into the URL
+// path, a GET request is issued, and the JSON response is decoded into
+// an Instance.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesGetCall) Do() (*Instance, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Instance
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified instance resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.instances.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Name of the instance resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}",
+ //   "response": {
+ //     "$ref": "Instance"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.getSerialPortOutput":
+
+// InstancesGetSerialPortOutputCall collects the parameters of a
+// compute.instances.getSerialPortOutput request until Do is invoked;
+// opt_ holds optional query parameters (currently only "fields").
+type InstancesGetSerialPortOutputCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ opt_ map[string]interface{}
+}
+
+// GetSerialPortOutput: Returns the specified instance's serial port
+// output.
+func (r *InstancesService) GetSerialPortOutput(project string, zone string, instance string) *InstancesGetSerialPortOutputCall {
+ c := &InstancesGetSerialPortOutputCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesGetSerialPortOutputCall) Fields(s ...googleapi.Field) *InstancesGetSerialPortOutputCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone/instance are expanded into the URL
+// path, a GET request is issued against the /serialPort sub-resource,
+// and the JSON response is decoded into a SerialPortOutput.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesGetSerialPortOutputCall) Do() (*SerialPortOutput, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/serialPort")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *SerialPortOutput
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified instance's serial port output.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.instances.getSerialPortOutput",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Name of the instance scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/serialPort",
+ //   "response": {
+ //     "$ref": "SerialPortOutput"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.insert":
+
+// InstancesInsertCall collects the parameters of a
+// compute.instances.insert request until Do is invoked; instance is the
+// resource to create (sent as the JSON request body) and opt_ holds
+// optional query parameters (currently only "fields").
+type InstancesInsertCall struct {
+ s *Service
+ project string
+ zone string
+ instance *Instance
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates an instance resource in the specified project using
+// the data included in the request.
+func (r *InstancesService) Insert(project string, zone string, instance *Instance) *InstancesInsertCall {
+ c := &InstancesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesInsertCall) Fields(s ...googleapi.Field) *InstancesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: c.instance is serialized (without a data
+// wrapper) as the JSON request body, project/zone are expanded into the
+// URL path, the request is POSTed with Content-Type application/json,
+// and the JSON response is decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instance)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates an instance resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances",
+ //   "request": {
+ //     "$ref": "Instance"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.list":
+
+// InstancesListCall collects the parameters of a compute.instances.list
+// request until Do is invoked; opt_ holds the optional query parameters
+// ("filter", "maxResults", "pageToken", "fields") set via the builder
+// methods below.
+type InstancesListCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of instance resources contained within the
+// specified zone.
+func (r *InstancesService) List(project string, zone string) *InstancesListCall {
+ c := &InstancesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *InstancesListCall) Filter(filter string) *InstancesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *InstancesListCall) MaxResults(maxResults int64) *InstancesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *InstancesListCall) PageToken(pageToken string) *InstancesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesListCall) Fields(s ...googleapi.Field) *InstancesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone are expanded into the URL path,
+// any optional parameters set in opt_ are added to the query string, a
+// GET request is issued, and the JSON response is decoded into an
+// InstanceList (one page of results; see PageToken for continuation).
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesListCall) Do() (*InstanceList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *InstanceList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of instance resources contained within the specified zone.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.instances.list",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances",
+ //   "response": {
+ //     "$ref": "InstanceList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.reset":
+
+// InstancesResetCall collects the parameters of a
+// compute.instances.reset request until Do is invoked; opt_ holds
+// optional query parameters (currently only "fields").
+type InstancesResetCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ opt_ map[string]interface{}
+}
+
+// Reset: Performs a hard reset on the instance.
+func (r *InstancesService) Reset(project string, zone string, instance string) *InstancesResetCall {
+ c := &InstancesResetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesResetCall) Fields(s ...googleapi.Field) *InstancesResetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone/instance are expanded into the URL
+// path, the request is POSTed with an empty body to the /reset
+// sub-resource, and the JSON response is decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesResetCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/reset")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Performs a hard reset on the instance.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.reset",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Name of the instance scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/reset",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.setDiskAutoDelete":
+
+// InstancesSetDiskAutoDeleteCall collects the parameters of a
+// compute.instances.setDiskAutoDelete request until Do is invoked;
+// opt_ holds optional query parameters (currently only "fields").
+type InstancesSetDiskAutoDeleteCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ autoDelete bool
+ deviceName string
+ opt_ map[string]interface{}
+}
+
+// SetDiskAutoDelete: Sets the auto-delete flag for a disk attached to
+// an instance
+func (r *InstancesService) SetDiskAutoDelete(project string, zone string, instance string, autoDelete bool, deviceName string) *InstancesSetDiskAutoDeleteCall {
+ c := &InstancesSetDiskAutoDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.autoDelete = autoDelete
+ c.deviceName = deviceName
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesSetDiskAutoDeleteCall) Fields(s ...googleapi.Field) *InstancesSetDiskAutoDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: project/zone/instance are expanded into the URL
+// path, autoDelete and deviceName are sent as query parameters, the
+// request is POSTed with an empty body, and the JSON response is
+// decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesSetDiskAutoDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ params.Set("autoDelete", fmt.Sprintf("%v", c.autoDelete))
+ params.Set("deviceName", fmt.Sprintf("%v", c.deviceName))
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Sets the auto-delete flag for a disk attached to an instance",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.setDiskAutoDelete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance",
+ //     "autoDelete",
+ //     "deviceName"
+ //   ],
+ //   "parameters": {
+ //     "autoDelete": {
+ //       "description": "Whether to auto-delete the disk when the instance is deleted.",
+ //       "location": "query",
+ //       "required": true,
+ //       "type": "boolean"
+ //     },
+ //     "deviceName": {
+ //       "description": "Disk device name to modify.",
+ //       "location": "query",
+ //       "pattern": "\\w[\\w.-]{0,254}",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "instance": {
+ //       "description": "Instance name.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Project name.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/setDiskAutoDelete",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.setMetadata":
+
+// InstancesSetMetadataCall collects the parameters of a
+// compute.instances.setMetadata request until Do is invoked; metadata
+// is sent as the JSON request body and opt_ holds optional query
+// parameters (currently only "fields").
+type InstancesSetMetadataCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ metadata *Metadata
+ opt_ map[string]interface{}
+}
+
+// SetMetadata: Sets metadata for the specified instance to the data
+// included in the request.
+func (r *InstancesService) SetMetadata(project string, zone string, instance string, metadata *Metadata) *InstancesSetMetadataCall {
+ c := &InstancesSetMetadataCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.metadata = metadata
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesSetMetadataCall) Fields(s ...googleapi.Field) *InstancesSetMetadataCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: c.metadata is serialized (without a data
+// wrapper) as the JSON request body, project/zone/instance are expanded
+// into the URL path, the request is POSTed with Content-Type
+// application/json, and the JSON response is decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesSetMetadataCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setMetadata")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Sets metadata for the specified instance to the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.setMetadata",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Name of the instance scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/setMetadata",
+ //   "request": {
+ //     "$ref": "Metadata"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.setScheduling":
+
+// InstancesSetSchedulingCall collects the parameters of a
+// compute.instances.setScheduling request until Do is invoked;
+// scheduling is sent as the JSON request body and opt_ holds optional
+// query parameters (currently only "fields").
+type InstancesSetSchedulingCall struct {
+ s *Service
+ project string
+ zone string
+ instance string
+ scheduling *Scheduling
+ opt_ map[string]interface{}
+}
+
+// SetScheduling: Sets an instance's scheduling options.
+func (r *InstancesService) SetScheduling(project string, zone string, instance string, scheduling *Scheduling) *InstancesSetSchedulingCall {
+ c := &InstancesSetSchedulingCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.instance = instance
+ c.scheduling = scheduling
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesSetSchedulingCall) Fields(s ...googleapi.Field) *InstancesSetSchedulingCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the call: c.scheduling is serialized (without a data
+// wrapper) as the JSON request body, project/zone/instance are expanded
+// into the URL path, the request is POSTed with Content-Type
+// application/json, and the JSON response is decoded into an Operation.
+// NOTE(review): the http.NewRequest error is discarded by the generator;
+// a malformed URL would instead surface from c.s.client.Do.
+func (c *InstancesSetSchedulingCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.scheduling)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setScheduling")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "instance": c.instance,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Sets an instance's scheduling options.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.instances.setScheduling",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "instance"
+ //   ],
+ //   "parameters": {
+ //     "instance": {
+ //       "description": "Instance name.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Project name.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/instances/{instance}/setScheduling",
+ //   "request": {
+ //     "$ref": "Scheduling"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.instances.setTags":
+
+// InstancesSetTagsCall holds the state of a compute.instances.setTags
+// request; build it with InstancesService.SetTags, then execute it with Do.
+type InstancesSetTagsCall struct {
+	s        *Service               // parent service (HTTP client + base path)
+	project  string                 // required: project scoping the request
+	zone     string                 // required: zone scoping the request
+	instance string                 // required: target instance name
+	tags     *Tags                  // request body, sent as JSON
+	opt_     map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// SetTags: Sets tags for the specified instance to the data included in
+// the request. Call Do on the returned call to execute it.
+func (r *InstancesService) SetTags(project string, zone string, instance string, tags *Tags) *InstancesSetTagsCall {
+	return &InstancesSetTagsCall{
+		s:        r.s,
+		opt_:     make(map[string]interface{}),
+		project:  project,
+		zone:     zone,
+		instance: instance,
+		tags:     tags,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *InstancesSetTagsCall) Fields(s ...googleapi.Field) *InstancesSetTagsCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.instances.setTags request and returns the resulting
+// asynchronous Operation. The trailing comment block is the API discovery
+// metadata emitted by the code generator.
+func (c *InstancesSetTagsCall) Do() (*Operation, error) {
+	var body io.Reader = nil
+	// Encode the Tags request body as plain JSON (no data wrapper).
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.tags)
+	if err != nil {
+		return nil, err
+	}
+	ctype := "application/json"
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/instances/{instance}/setTags")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("POST", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project":  c.project,
+		"zone":     c.zone,
+		"instance": c.instance,
+	})
+	req.Header.Set("Content-Type", ctype)
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Operation
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Sets tags for the specified instance to the data included in the request.",
+	//   "httpMethod": "POST",
+	//   "id": "compute.instances.setTags",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "instance"
+	//   ],
+	//   "parameters": {
+	//     "instance": {
+	//       "description": "Name of the instance scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/instances/{instance}/setTags",
+	//   "request": {
+	//     "$ref": "Tags"
+	//   },
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.licenses.get":
+
+// LicensesGetCall holds the state of a compute.licenses.get request;
+// build it with LicensesService.Get, then execute it with Do.
+type LicensesGetCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	license string                 // required: name of the license to return
+	opt_    map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Get: Returns the specified license resource. Call Do on the returned
+// call to execute it.
+func (r *LicensesService) Get(project string, license string) *LicensesGetCall {
+	return &LicensesGetCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+		license: license,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *LicensesGetCall) Fields(s ...googleapi.Field) *LicensesGetCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.licenses.get request and returns the decoded
+// License. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *LicensesGetCall) Do() (*License, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/licenses/{license}")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"license": c.license,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *License
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified license resource.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.licenses.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "license"
+	//   ],
+	//   "parameters": {
+	//     "license": {
+	//       "description": "Name of the license resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/licenses/{license}",
+	//   "response": {
+	//     "$ref": "License"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.machineTypes.aggregatedList":
+
+// MachineTypesAggregatedListCall holds the state of a
+// compute.machineTypes.aggregatedList request; build it with
+// MachineTypesService.AggregatedList, then execute it with Do.
+type MachineTypesAggregatedListCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	opt_    map[string]interface{} // optional query parameters: filter, maxResults, pageToken, fields
+}
+
+// AggregatedList: Retrieves the list of machine type resources grouped
+// by scope. Call Do on the returned call to execute it.
+func (r *MachineTypesService) AggregatedList(project string) *MachineTypesAggregatedListCall {
+	return &MachineTypesAggregatedListCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+	}
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+// The value is passed through verbatim as the "filter" query parameter
+// when Do builds the request.
+func (c *MachineTypesAggregatedListCall) Filter(filter string) *MachineTypesAggregatedListCall {
+	c.opt_["filter"] = filter
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+// The value is sent as the "maxResults" query parameter when Do builds
+// the request.
+func (c *MachineTypesAggregatedListCall) MaxResults(maxResults int64) *MachineTypesAggregatedListCall {
+	c.opt_["maxResults"] = maxResults
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+// The value is sent as the "pageToken" query parameter when Do builds
+// the request.
+func (c *MachineTypesAggregatedListCall) PageToken(pageToken string) *MachineTypesAggregatedListCall {
+	c.opt_["pageToken"] = pageToken
+	return c
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesAggregatedListCall) Fields(s ...googleapi.Field) *MachineTypesAggregatedListCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.machineTypes.aggregatedList request and returns the
+// decoded MachineTypeAggregatedList. The trailing comment block is the API
+// discovery metadata emitted by the code generator.
+func (c *MachineTypesAggregatedListCall) Do() (*MachineTypeAggregatedList, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	// Copy the optional query parameters that were set on the call.
+	if v, ok := c.opt_["filter"]; ok {
+		params.Set("filter", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["maxResults"]; ok {
+		params.Set("maxResults", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["pageToken"]; ok {
+		params.Set("pageToken", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/machineTypes")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *MachineTypeAggregatedList
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves the list of machine type resources grouped by scope.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.machineTypes.aggregatedList",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Optional. Filter expression for filtering listed resources.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "default": "500",
+	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "maximum": "500",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/aggregated/machineTypes",
+	//   "response": {
+	//     "$ref": "MachineTypeAggregatedList"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.machineTypes.get":
+
+// MachineTypesGetCall holds the state of a compute.machineTypes.get
+// request; build it with MachineTypesService.Get, then execute it with Do.
+type MachineTypesGetCall struct {
+	s           *Service               // parent service (HTTP client + base path)
+	project     string                 // required: project scoping the request
+	zone        string                 // required: zone scoping the request
+	machineType string                 // required: name of the machine type to return
+	opt_        map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Get: Returns the specified machine type resource. Call Do on the
+// returned call to execute it.
+func (r *MachineTypesService) Get(project string, zone string, machineType string) *MachineTypesGetCall {
+	return &MachineTypesGetCall{
+		s:           r.s,
+		opt_:        make(map[string]interface{}),
+		project:     project,
+		zone:        zone,
+		machineType: machineType,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesGetCall) Fields(s ...googleapi.Field) *MachineTypesGetCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.machineTypes.get request and returns the decoded
+// MachineType. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *MachineTypesGetCall) Do() (*MachineType, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes/{machineType}")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project":     c.project,
+		"zone":        c.zone,
+		"machineType": c.machineType,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *MachineType
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified machine type resource.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.machineTypes.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone",
+	//     "machineType"
+	//   ],
+	//   "parameters": {
+	//     "machineType": {
+	//       "description": "Name of the machine type resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/machineTypes/{machineType}",
+	//   "response": {
+	//     "$ref": "MachineType"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.machineTypes.list":
+
+// MachineTypesListCall holds the state of a compute.machineTypes.list
+// request; build it with MachineTypesService.List, then execute it with Do.
+type MachineTypesListCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	zone    string                 // required: zone scoping the request
+	opt_    map[string]interface{} // optional query parameters: filter, maxResults, pageToken, fields
+}
+
+// List: Retrieves the list of machine type resources available to the
+// specified project. Call Do on the returned call to execute it.
+func (r *MachineTypesService) List(project string, zone string) *MachineTypesListCall {
+	return &MachineTypesListCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+		zone:    zone,
+	}
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+// The value is passed through verbatim as the "filter" query parameter
+// when Do builds the request.
+func (c *MachineTypesListCall) Filter(filter string) *MachineTypesListCall {
+	c.opt_["filter"] = filter
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+// The value is sent as the "maxResults" query parameter when Do builds
+// the request.
+func (c *MachineTypesListCall) MaxResults(maxResults int64) *MachineTypesListCall {
+	c.opt_["maxResults"] = maxResults
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+// The value is sent as the "pageToken" query parameter when Do builds
+// the request.
+func (c *MachineTypesListCall) PageToken(pageToken string) *MachineTypesListCall {
+	c.opt_["pageToken"] = pageToken
+	return c
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *MachineTypesListCall) Fields(s ...googleapi.Field) *MachineTypesListCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.machineTypes.list request and returns the decoded
+// MachineTypeList. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *MachineTypesListCall) Do() (*MachineTypeList, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	// Copy the optional query parameters that were set on the call.
+	if v, ok := c.opt_["filter"]; ok {
+		params.Set("filter", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["maxResults"]; ok {
+		params.Set("maxResults", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["pageToken"]; ok {
+		params.Set("pageToken", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/machineTypes")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"zone":    c.zone,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *MachineTypeList
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves the list of machine type resources available to the specified project.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.machineTypes.list",
+	//   "parameterOrder": [
+	//     "project",
+	//     "zone"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Optional. Filter expression for filtering listed resources.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "default": "500",
+	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "maximum": "500",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zone": {
+	//       "description": "Name of the zone scoping this request.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/zones/{zone}/machineTypes",
+	//   "response": {
+	//     "$ref": "MachineTypeList"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.networks.delete":
+
+// NetworksDeleteCall holds the state of a compute.networks.delete
+// request; build it with NetworksService.Delete, then execute it with Do.
+type NetworksDeleteCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	network string                 // required: name of the network to delete
+	opt_    map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Delete: Deletes the specified network resource. Call Do on the
+// returned call to execute it.
+func (r *NetworksService) Delete(project string, network string) *NetworksDeleteCall {
+	return &NetworksDeleteCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+		network: network,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksDeleteCall) Fields(s ...googleapi.Field) *NetworksDeleteCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.networks.delete request and returns the resulting
+// asynchronous Operation. The trailing comment block is the API discovery
+// metadata emitted by the code generator.
+func (c *NetworksDeleteCall) Do() (*Operation, error) {
+	var body io.Reader = nil // DELETE: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("DELETE", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"network": c.network,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Operation
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Deletes the specified network resource.",
+	//   "httpMethod": "DELETE",
+	//   "id": "compute.networks.delete",
+	//   "parameterOrder": [
+	//     "project",
+	//     "network"
+	//   ],
+	//   "parameters": {
+	//     "network": {
+	//       "description": "Name of the network resource to delete.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/networks/{network}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.networks.get":
+
+// NetworksGetCall holds the state of a compute.networks.get request;
+// build it with NetworksService.Get, then execute it with Do.
+type NetworksGetCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	network string                 // required: name of the network to return
+	opt_    map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Get: Returns the specified network resource. Call Do on the returned
+// call to execute it.
+func (r *NetworksService) Get(project string, network string) *NetworksGetCall {
+	return &NetworksGetCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+		network: network,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksGetCall) Fields(s ...googleapi.Field) *NetworksGetCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.networks.get request and returns the decoded
+// Network. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *NetworksGetCall) Do() (*Network, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks/{network}")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+		"network": c.network,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Network
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified network resource.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.networks.get",
+	//   "parameterOrder": [
+	//     "project",
+	//     "network"
+	//   ],
+	//   "parameters": {
+	//     "network": {
+	//       "description": "Name of the network resource to return.",
+	//       "location": "path",
+	//       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/networks/{network}",
+	//   "response": {
+	//     "$ref": "Network"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.networks.insert":
+
+// NetworksInsertCall holds the state of a compute.networks.insert
+// request; build it with NetworksService.Insert, then execute it with Do.
+type NetworksInsertCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	network *Network               // request body, sent as JSON
+	opt_    map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Insert: Creates a network resource in the specified project using the
+// data included in the request. Call Do on the returned call to execute it.
+func (r *NetworksService) Insert(project string, network *Network) *NetworksInsertCall {
+	return &NetworksInsertCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+		network: network,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksInsertCall) Fields(s ...googleapi.Field) *NetworksInsertCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.networks.insert request and returns the resulting
+// asynchronous Operation. The trailing comment block is the API discovery
+// metadata emitted by the code generator.
+func (c *NetworksInsertCall) Do() (*Operation, error) {
+	var body io.Reader = nil
+	// Encode the Network request body as plain JSON (no data wrapper).
+	body, err := googleapi.WithoutDataWrapper.JSONReader(c.network)
+	if err != nil {
+		return nil, err
+	}
+	ctype := "application/json"
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("POST", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("Content-Type", ctype)
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Operation
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Creates a network resource in the specified project using the data included in the request.",
+	//   "httpMethod": "POST",
+	//   "id": "compute.networks.insert",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/networks",
+	//   "request": {
+	//     "$ref": "Network"
+	//   },
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute"
+	//   ]
+	// }
+
+}
+
+// method id "compute.networks.list":
+
+// NetworksListCall holds the state of a compute.networks.list request;
+// build it with NetworksService.List, then execute it with Do.
+type NetworksListCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: project scoping the request
+	opt_    map[string]interface{} // optional query parameters: filter, maxResults, pageToken, fields
+}
+
+// List: Retrieves the list of network resources available to the
+// specified project. Call Do on the returned call to execute it.
+func (r *NetworksService) List(project string) *NetworksListCall {
+	return &NetworksListCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+	}
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+// The value is passed through verbatim as the "filter" query parameter
+// when Do builds the request.
+func (c *NetworksListCall) Filter(filter string) *NetworksListCall {
+	c.opt_["filter"] = filter
+	return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+// The value is sent as the "maxResults" query parameter when Do builds
+// the request.
+func (c *NetworksListCall) MaxResults(maxResults int64) *NetworksListCall {
+	c.opt_["maxResults"] = maxResults
+	return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+// The value is sent as the "pageToken" query parameter when Do builds
+// the request.
+func (c *NetworksListCall) PageToken(pageToken string) *NetworksListCall {
+	c.opt_["pageToken"] = pageToken
+	return c
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *NetworksListCall) Fields(s ...googleapi.Field) *NetworksListCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.networks.list request and returns the decoded
+// NetworkList. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *NetworksListCall) Do() (*NetworkList, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	// Copy the optional query parameters that were set on the call.
+	if v, ok := c.opt_["filter"]; ok {
+		params.Set("filter", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["maxResults"]; ok {
+		params.Set("maxResults", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["pageToken"]; ok {
+		params.Set("pageToken", fmt.Sprintf("%v", v))
+	}
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/networks")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *NetworkList
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Retrieves the list of network resources available to the specified project.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.networks.list",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "filter": {
+	//       "description": "Optional. Filter expression for filtering listed resources.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "maxResults": {
+	//       "default": "500",
+	//       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+	//       "format": "uint32",
+	//       "location": "query",
+	//       "maximum": "500",
+	//       "minimum": "0",
+	//       "type": "integer"
+	//     },
+	//     "pageToken": {
+	//       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+	//       "location": "query",
+	//       "type": "string"
+	//     },
+	//     "project": {
+	//       "description": "Name of the project scoping this request.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}/global/networks",
+	//   "response": {
+	//     "$ref": "NetworkList"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.projects.get":
+
+// ProjectsGetCall holds the state of a compute.projects.get request;
+// build it with ProjectsService.Get, then execute it with Do.
+type ProjectsGetCall struct {
+	s       *Service               // parent service (HTTP client + base path)
+	project string                 // required: name of the project to retrieve
+	opt_    map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Get: Returns the specified project resource. Call Do on the returned
+// call to execute it.
+func (r *ProjectsService) Get(project string) *ProjectsGetCall {
+	return &ProjectsGetCall{
+		s:       r.s,
+		opt_:    make(map[string]interface{}),
+		project: project,
+	}
+}
+
+// Fields selects which fields of the response should be returned (partial
+// response). See
+// https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsGetCall) Fields(s ...googleapi.Field) *ProjectsGetCall {
+	combined := googleapi.CombineFields(s)
+	c.opt_["fields"] = combined
+	return c
+}
+
+// Do issues the compute.projects.get request and returns the decoded
+// Project. The trailing comment block is the API discovery metadata
+// emitted by the code generator.
+func (c *ProjectsGetCall) Do() (*Project, error) {
+	var body io.Reader = nil // GET: no request body
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{project}")
+	urls += "?" + params.Encode()
+	req, _ := http.NewRequest("GET", urls, body) // error discarded by generator: method and URL are known-valid here
+	googleapi.Expand(req.URL, map[string]string{
+		"project": c.project,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Project
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Returns the specified project resource.",
+	//   "httpMethod": "GET",
+	//   "id": "compute.projects.get",
+	//   "parameterOrder": [
+	//     "project"
+	//   ],
+	//   "parameters": {
+	//     "project": {
+	//       "description": "Name of the project resource to retrieve.",
+	//       "location": "path",
+	//       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{project}",
+	//   "response": {
+	//     "$ref": "Project"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/compute",
+	//     "https://www.googleapis.com/auth/compute.readonly"
+	//   ]
+	// }
+
+}
+
+// method id "compute.projects.setCommonInstanceMetadata":
+
+// ProjectsSetCommonInstanceMetadataCall holds the state of a
+// compute.projects.setCommonInstanceMetadata request; build it with
+// ProjectsService.SetCommonInstanceMetadata, then execute it with Do.
+type ProjectsSetCommonInstanceMetadataCall struct {
+	s        *Service               // parent service (HTTP client + base path)
+	project  string                 // required: project scoping the request
+	metadata *Metadata              // request body, sent as JSON
+	opt_     map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// SetCommonInstanceMetadata: Sets metadata common to all instances
+// within the specified project using the data included in the request.
+// Call Do on the returned call to execute it.
+func (r *ProjectsService) SetCommonInstanceMetadata(project string, metadata *Metadata) *ProjectsSetCommonInstanceMetadataCall {
+	return &ProjectsSetCommonInstanceMetadataCall{
+		s:        r.s,
+		opt_:     make(map[string]interface{}),
+		project:  project,
+		metadata: metadata,
+	}
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSetCommonInstanceMetadataCall) Fields(s ...googleapi.Field) *ProjectsSetCommonInstanceMetadataCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsSetCommonInstanceMetadataCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.metadata)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setCommonInstanceMetadata")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Sets metadata common to all instances within the specified project using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.projects.setCommonInstanceMetadata",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/setCommonInstanceMetadata",
+ // "request": {
+ // "$ref": "Metadata"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.projects.setUsageExportBucket":
+
+type ProjectsSetUsageExportBucketCall struct {
+ s *Service
+ project string
+ usageexportlocation *UsageExportLocation
+ opt_ map[string]interface{}
+}
+
+// SetUsageExportBucket: Sets usage export location
+func (r *ProjectsService) SetUsageExportBucket(project string, usageexportlocation *UsageExportLocation) *ProjectsSetUsageExportBucketCall {
+ c := &ProjectsSetUsageExportBucketCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.usageexportlocation = usageexportlocation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsSetUsageExportBucketCall) Fields(s ...googleapi.Field) *ProjectsSetUsageExportBucketCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsSetUsageExportBucketCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.usageexportlocation)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/setUsageExportBucket")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Sets usage export location",
+ // "httpMethod": "POST",
+ // "id": "compute.projects.setUsageExportBucket",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/setUsageExportBucket",
+ // "request": {
+ // "$ref": "UsageExportLocation"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/devstorage.full_control",
+ // "https://www.googleapis.com/auth/devstorage.read_only",
+ // "https://www.googleapis.com/auth/devstorage.read_write"
+ // ]
+ // }
+
+}
+
+// method id "compute.regionOperations.delete":
+
+type RegionOperationsDeleteCall struct {
+ s *Service
+ project string
+ region string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified region-specific operation resource.
+func (r *RegionOperationsService) Delete(project string, region string, operation string) *RegionOperationsDeleteCall {
+ c := &RegionOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionOperationsDeleteCall) Fields(s ...googleapi.Field) *RegionOperationsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RegionOperationsDeleteCall) Do() error {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the specified region-specific operation resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.regionOperations.delete",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/operations/{operation}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.regionOperations.get":
+
+type RegionOperationsGetCall struct {
+ s *Service
+ project string
+ region string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Get: Retrieves the specified region-specific operation resource.
+func (r *RegionOperationsService) Get(project string, region string, operation string) *RegionOperationsGetCall {
+ c := &RegionOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionOperationsGetCall) Fields(s ...googleapi.Field) *RegionOperationsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RegionOperationsGetCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the specified region-specific operation resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.regionOperations.get",
+ // "parameterOrder": [
+ // "project",
+ // "region",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/operations/{operation}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.regionOperations.list":
+
+type RegionOperationsListCall struct {
+ s *Service
+ project string
+ region string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of operation resources contained within the
+// specified region.
+func (r *RegionOperationsService) List(project string, region string) *RegionOperationsListCall {
+ c := &RegionOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *RegionOperationsListCall) Filter(filter string) *RegionOperationsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *RegionOperationsListCall) MaxResults(maxResults int64) *RegionOperationsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *RegionOperationsListCall) PageToken(pageToken string) *RegionOperationsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionOperationsListCall) Fields(s ...googleapi.Field) *RegionOperationsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RegionOperationsListCall) Do() (*OperationList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/operations")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *OperationList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of operation resources contained within the specified region.",
+ // "httpMethod": "GET",
+ // "id": "compute.regionOperations.list",
+ // "parameterOrder": [
+ // "project",
+ // "region"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}/operations",
+ // "response": {
+ // "$ref": "OperationList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.regions.get":
+
+type RegionsGetCall struct {
+ s *Service
+ project string
+ region string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified region resource.
+func (r *RegionsService) Get(project string, region string) *RegionsGetCall {
+ c := &RegionsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionsGetCall) Fields(s ...googleapi.Field) *RegionsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RegionsGetCall) Do() (*Region, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Region
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified region resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.regions.get",
+ // "parameterOrder": [
+ // "project",
+ // "region"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "region": {
+ // "description": "Name of the region resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions/{region}",
+ // "response": {
+ // "$ref": "Region"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.regions.list":
+
+type RegionsListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of region resources available to the
+// specified project.
+func (r *RegionsService) List(project string) *RegionsListCall {
+ c := &RegionsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *RegionsListCall) Filter(filter string) *RegionsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *RegionsListCall) MaxResults(maxResults int64) *RegionsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *RegionsListCall) PageToken(pageToken string) *RegionsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RegionsListCall) Fields(s ...googleapi.Field) *RegionsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RegionsListCall) Do() (*RegionList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *RegionList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of region resources available to the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.regions.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/regions",
+ // "response": {
+ // "$ref": "RegionList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.routes.delete":
+
+type RoutesDeleteCall struct {
+ s *Service
+ project string
+ route string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified route resource.
+func (r *RoutesService) Delete(project string, route string) *RoutesDeleteCall {
+ c := &RoutesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.route = route
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesDeleteCall) Fields(s ...googleapi.Field) *RoutesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RoutesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "route": c.route,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the specified route resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.routes.delete",
+ // "parameterOrder": [
+ // "project",
+ // "route"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "route": {
+ // "description": "Name of the route resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/routes/{route}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.routes.get":
+
+type RoutesGetCall struct {
+ s *Service
+ project string
+ route string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified route resource.
+func (r *RoutesService) Get(project string, route string) *RoutesGetCall {
+ c := &RoutesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.route = route
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesGetCall) Fields(s ...googleapi.Field) *RoutesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RoutesGetCall) Do() (*Route, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes/{route}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "route": c.route,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Route
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified route resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.routes.get",
+ // "parameterOrder": [
+ // "project",
+ // "route"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "route": {
+ // "description": "Name of the route resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/routes/{route}",
+ // "response": {
+ // "$ref": "Route"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.routes.insert":
+
+type RoutesInsertCall struct {
+ s *Service
+ project string
+ route *Route
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a route resource in the specified project using the
+// data included in the request.
+func (r *RoutesService) Insert(project string, route *Route) *RoutesInsertCall {
+ c := &RoutesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.route = route
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesInsertCall) Fields(s ...googleapi.Field) *RoutesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *RoutesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.route)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a route resource in the specified project using the data included in the request.",
+ // "httpMethod": "POST",
+ // "id": "compute.routes.insert",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/routes",
+ // "request": {
+ // "$ref": "Route"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.routes.list":
+
+type RoutesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of route resources available to the
+// specified project.
+func (r *RoutesService) List(project string) *RoutesListCall {
+ c := &RoutesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *RoutesListCall) Filter(filter string) *RoutesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *RoutesListCall) MaxResults(maxResults int64) *RoutesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *RoutesListCall) PageToken(pageToken string) *RoutesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *RoutesListCall) Fields(s ...googleapi.Field) *RoutesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.routes.list" request: it resolves the URL
+// against the service BasePath, applies any optional parameters recorded
+// in opt_, and decodes the JSON response into a RouteList.
+func (c *RoutesListCall) Do() (*RouteList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/routes")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *RouteList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Retrieves the list of route resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.routes.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/routes",
+ //   "response": {
+ //     "$ref": "RouteList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.snapshots.delete":
+
+// SnapshotsDeleteCall holds the state for a single
+// compute.snapshots.delete request; build it with SnapshotsService.Delete
+// and execute it with Do.
+type SnapshotsDeleteCall struct {
+ s *Service
+ project string
+ snapshot string
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Delete: Deletes the specified persistent disk snapshot resource.
+func (r *SnapshotsService) Delete(project string, snapshot string) *SnapshotsDeleteCall {
+ c := &SnapshotsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.snapshot = snapshot
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SnapshotsDeleteCall) Fields(s ...googleapi.Field) *SnapshotsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.snapshots.delete" request and decodes the
+// JSON response into an Operation.
+func (c *SnapshotsDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "snapshot": c.snapshot,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Deletes the specified persistent disk snapshot resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.snapshots.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "snapshot"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "snapshot": {
+ //       "description": "Name of the persistent disk snapshot resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/snapshots/{snapshot}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.snapshots.get":
+
+// SnapshotsGetCall holds the state for a single compute.snapshots.get
+// request; build it with SnapshotsService.Get and execute it with Do.
+type SnapshotsGetCall struct {
+ s *Service
+ project string
+ snapshot string
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Get: Returns the specified persistent disk snapshot resource.
+func (r *SnapshotsService) Get(project string, snapshot string) *SnapshotsGetCall {
+ c := &SnapshotsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.snapshot = snapshot
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SnapshotsGetCall) Fields(s ...googleapi.Field) *SnapshotsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.snapshots.get" request and decodes the JSON
+// response into a Snapshot.
+func (c *SnapshotsGetCall) Do() (*Snapshot, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots/{snapshot}")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "snapshot": c.snapshot,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Snapshot
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Returns the specified persistent disk snapshot resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.snapshots.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "snapshot"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "snapshot": {
+ //       "description": "Name of the persistent disk snapshot resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/snapshots/{snapshot}",
+ //   "response": {
+ //     "$ref": "Snapshot"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.snapshots.list":
+
+// SnapshotsListCall holds the state for a single compute.snapshots.list
+// request; build it with SnapshotsService.List, refine it with the
+// fluent setters below, and execute it with Do.
+type SnapshotsListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{} // optional URL query parameters (filter, maxResults, pageToken, fields)
+}
+
+// List: Retrieves the list of persistent disk snapshot resources
+// contained within the specified project.
+func (r *SnapshotsService) List(project string) *SnapshotsListCall {
+ c := &SnapshotsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *SnapshotsListCall) Filter(filter string) *SnapshotsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *SnapshotsListCall) MaxResults(maxResults int64) *SnapshotsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *SnapshotsListCall) PageToken(pageToken string) *SnapshotsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *SnapshotsListCall) Fields(s ...googleapi.Field) *SnapshotsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.snapshots.list" request: it resolves the URL
+// against the service BasePath, applies any optional parameters recorded
+// in opt_, and decodes the JSON response into a SnapshotList.
+func (c *SnapshotsListCall) Do() (*SnapshotList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/snapshots")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *SnapshotList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Retrieves the list of persistent disk snapshot resources contained within the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.snapshots.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/snapshots",
+ //   "response": {
+ //     "$ref": "SnapshotList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetHttpProxies.delete":
+
+// TargetHttpProxiesDeleteCall holds the state for a single
+// compute.targetHttpProxies.delete request; build it with
+// TargetHttpProxiesService.Delete and execute it with Do.
+type TargetHttpProxiesDeleteCall struct {
+ s *Service
+ project string
+ targetHttpProxy string
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Delete: Deletes the specified TargetHttpProxy resource.
+func (r *TargetHttpProxiesService) Delete(project string, targetHttpProxy string) *TargetHttpProxiesDeleteCall {
+ c := &TargetHttpProxiesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.targetHttpProxy = targetHttpProxy
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesDeleteCall) Fields(s ...googleapi.Field) *TargetHttpProxiesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetHttpProxies.delete" request and decodes
+// the JSON response into an Operation.
+func (c *TargetHttpProxiesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "targetHttpProxy": c.targetHttpProxy,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Deletes the specified TargetHttpProxy resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.targetHttpProxies.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "targetHttpProxy"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetHttpProxy": {
+ //       "description": "Name of the TargetHttpProxy resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetHttpProxies.get":
+
+// TargetHttpProxiesGetCall holds the state for a single
+// compute.targetHttpProxies.get request; build it with
+// TargetHttpProxiesService.Get and execute it with Do.
+type TargetHttpProxiesGetCall struct {
+ s *Service
+ project string
+ targetHttpProxy string
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Get: Returns the specified TargetHttpProxy resource.
+func (r *TargetHttpProxiesService) Get(project string, targetHttpProxy string) *TargetHttpProxiesGetCall {
+ c := &TargetHttpProxiesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.targetHttpProxy = targetHttpProxy
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesGetCall) Fields(s ...googleapi.Field) *TargetHttpProxiesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetHttpProxies.get" request and decodes
+// the JSON response into a TargetHttpProxy.
+func (c *TargetHttpProxiesGetCall) Do() (*TargetHttpProxy, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies/{targetHttpProxy}")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "targetHttpProxy": c.targetHttpProxy,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetHttpProxy
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Returns the specified TargetHttpProxy resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetHttpProxies.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "targetHttpProxy"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetHttpProxy": {
+ //       "description": "Name of the TargetHttpProxy resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/targetHttpProxies/{targetHttpProxy}",
+ //   "response": {
+ //     "$ref": "TargetHttpProxy"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetHttpProxies.insert":
+
+// TargetHttpProxiesInsertCall holds the state for a single
+// compute.targetHttpProxies.insert request; build it with
+// TargetHttpProxiesService.Insert and execute it with Do.
+type TargetHttpProxiesInsertCall struct {
+ s *Service
+ project string
+ targethttpproxy *TargetHttpProxy // request body, JSON-encoded by Do
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Insert: Creates a TargetHttpProxy resource in the specified project
+// using the data included in the request.
+func (r *TargetHttpProxiesService) Insert(project string, targethttpproxy *TargetHttpProxy) *TargetHttpProxiesInsertCall {
+ c := &TargetHttpProxiesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.targethttpproxy = targethttpproxy
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesInsertCall) Fields(s ...googleapi.Field) *TargetHttpProxiesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetHttpProxies.insert" request: it JSON-
+// encodes the TargetHttpProxy body, POSTs it, and decodes the JSON
+// response into an Operation.
+func (c *TargetHttpProxiesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targethttpproxy)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Creates a TargetHttpProxy resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetHttpProxies.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/targetHttpProxies",
+ //   "request": {
+ //     "$ref": "TargetHttpProxy"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetHttpProxies.list":
+
+// TargetHttpProxiesListCall holds the state for a single
+// compute.targetHttpProxies.list request; build it with
+// TargetHttpProxiesService.List, refine it with the fluent setters
+// below, and execute it with Do.
+type TargetHttpProxiesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{} // optional URL query parameters (filter, maxResults, pageToken, fields)
+}
+
+// List: Retrieves the list of TargetHttpProxy resources available to
+// the specified project.
+func (r *TargetHttpProxiesService) List(project string) *TargetHttpProxiesListCall {
+ c := &TargetHttpProxiesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *TargetHttpProxiesListCall) Filter(filter string) *TargetHttpProxiesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *TargetHttpProxiesListCall) MaxResults(maxResults int64) *TargetHttpProxiesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *TargetHttpProxiesListCall) PageToken(pageToken string) *TargetHttpProxiesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesListCall) Fields(s ...googleapi.Field) *TargetHttpProxiesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetHttpProxies.list" request: it resolves
+// the URL against the service BasePath, applies any optional parameters
+// recorded in opt_, and decodes the JSON response into a
+// TargetHttpProxyList.
+func (c *TargetHttpProxiesListCall) Do() (*TargetHttpProxyList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/targetHttpProxies")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetHttpProxyList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Retrieves the list of TargetHttpProxy resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetHttpProxies.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/targetHttpProxies",
+ //   "response": {
+ //     "$ref": "TargetHttpProxyList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetHttpProxies.setUrlMap":
+
+// TargetHttpProxiesSetUrlMapCall holds the state for a single
+// compute.targetHttpProxies.setUrlMap request; build it with
+// TargetHttpProxiesService.SetUrlMap and execute it with Do.
+type TargetHttpProxiesSetUrlMapCall struct {
+ s *Service
+ project string
+ targetHttpProxy string
+ urlmapreference *UrlMapReference // request body, JSON-encoded by Do
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// SetUrlMap: Changes the URL map for TargetHttpProxy.
+func (r *TargetHttpProxiesService) SetUrlMap(project string, targetHttpProxy string, urlmapreference *UrlMapReference) *TargetHttpProxiesSetUrlMapCall {
+ c := &TargetHttpProxiesSetUrlMapCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.targetHttpProxy = targetHttpProxy
+ c.urlmapreference = urlmapreference
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetHttpProxiesSetUrlMapCall) Fields(s ...googleapi.Field) *TargetHttpProxiesSetUrlMapCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetHttpProxies.setUrlMap" request: it
+// JSON-encodes the UrlMapReference body, POSTs it, and decodes the JSON
+// response into an Operation. Note the request path has no "/global/"
+// segment, matching the discovery metadata below.
+func (c *TargetHttpProxiesSetUrlMapCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapreference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "targetHttpProxy": c.targetHttpProxy,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Changes the URL map for TargetHttpProxy.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetHttpProxies.setUrlMap",
+ //   "parameterOrder": [
+ //     "project",
+ //     "targetHttpProxy"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetHttpProxy": {
+ //       "description": "Name of the TargetHttpProxy resource whose URL map is to be set.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/targetHttpProxies/{targetHttpProxy}/setUrlMap",
+ //   "request": {
+ //     "$ref": "UrlMapReference"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetInstances.aggregatedList":
+
+// TargetInstancesAggregatedListCall holds the state for a single
+// compute.targetInstances.aggregatedList request; build it with
+// TargetInstancesService.AggregatedList, refine it with the fluent
+// setters below, and execute it with Do.
+type TargetInstancesAggregatedListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{} // optional URL query parameters (filter, maxResults, pageToken, fields)
+}
+
+// AggregatedList: Retrieves the list of target instances grouped by
+// scope.
+func (r *TargetInstancesService) AggregatedList(project string) *TargetInstancesAggregatedListCall {
+ c := &TargetInstancesAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *TargetInstancesAggregatedListCall) Filter(filter string) *TargetInstancesAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *TargetInstancesAggregatedListCall) MaxResults(maxResults int64) *TargetInstancesAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *TargetInstancesAggregatedListCall) PageToken(pageToken string) *TargetInstancesAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesAggregatedListCall) Fields(s ...googleapi.Field) *TargetInstancesAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetInstances.aggregatedList" request: it
+// resolves the URL against the service BasePath, applies any optional
+// parameters recorded in opt_, and decodes the JSON response into a
+// TargetInstanceAggregatedList.
+func (c *TargetInstancesAggregatedListCall) Do() (*TargetInstanceAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetInstances")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetInstanceAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Retrieves the list of target instances grouped by scope.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetInstances.aggregatedList",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/aggregated/targetInstances",
+ //   "response": {
+ //     "$ref": "TargetInstanceAggregatedList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetInstances.delete":
+
+// TargetInstancesDeleteCall holds the state for a single
+// compute.targetInstances.delete request; build it with
+// TargetInstancesService.Delete and execute it with Do.
+type TargetInstancesDeleteCall struct {
+ s *Service
+ project string
+ zone string
+ targetInstance string
+ opt_ map[string]interface{} // optional URL query parameters (e.g. "fields")
+}
+
+// Delete: Deletes the specified TargetInstance resource.
+func (r *TargetInstancesService) Delete(project string, zone string, targetInstance string) *TargetInstancesDeleteCall {
+ c := &TargetInstancesDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.targetInstance = targetInstance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesDeleteCall) Fields(s ...googleapi.Field) *TargetInstancesDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the "compute.targetInstances.delete" request and decodes
+// the JSON response into an Operation.
+func (c *TargetInstancesDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
+ urls += "?" + params.Encode()
+ // NewRequest error is dropped by the generator; the method and
+ // generated URL are assumed well-formed.
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "targetInstance": c.targetInstance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // The commented JSON below is the discovery-document metadata from
+ // which this method was generated (reference only).
+ // {
+ //   "description": "Deletes the specified TargetInstance resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.targetInstances.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "targetInstance"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetInstance": {
+ //       "description": "Name of the TargetInstance resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetInstances.get":
+
+// TargetInstancesGetCall holds the parameters for the
+// "compute.targetInstances.get" call; opt_ carries optional query
+// parameters such as "fields".
+type TargetInstancesGetCall struct {
+ s *Service
+ project string
+ zone string
+ targetInstance string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified TargetInstance resource.
+func (r *TargetInstancesService) Get(project string, zone string, targetInstance string) *TargetInstancesGetCall {
+ c := &TargetInstancesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.targetInstance = targetInstance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesGetCall) Fields(s ...googleapi.Field) *TargetInstancesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *TargetInstance. The commented block after the return is the API
+// discovery metadata for this method, emitted verbatim by the generator.
+func (c *TargetInstancesGetCall) Do() (*TargetInstance, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances/{targetInstance}")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "targetInstance": c.targetInstance,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetInstance
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified TargetInstance resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetInstances.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone",
+ //     "targetInstance"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetInstance": {
+ //       "description": "Name of the TargetInstance resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/targetInstances/{targetInstance}",
+ //   "response": {
+ //     "$ref": "TargetInstance"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetInstances.insert":
+
+// TargetInstancesInsertCall holds the parameters for the
+// "compute.targetInstances.insert" call; targetinstance is the request
+// body and opt_ carries optional query parameters such as "fields".
+type TargetInstancesInsertCall struct {
+ s *Service
+ project string
+ zone string
+ targetinstance *TargetInstance
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a TargetInstance resource in the specified project
+// and zone using the data included in the request.
+func (r *TargetInstancesService) Insert(project string, zone string, targetinstance *TargetInstance) *TargetInstancesInsertCall {
+ c := &TargetInstancesInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.targetinstance = targetinstance
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesInsertCall) Fields(s ...googleapi.Field) *TargetInstancesInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do JSON-encodes the TargetInstance (without a data wrapper), issues
+// the HTTP POST request, and decodes the response into an *Operation.
+// The commented block after the return is the API discovery metadata.
+func (c *TargetInstancesInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetinstance)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Creates a TargetInstance resource in the specified project and zone using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetInstances.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/targetInstances",
+ //   "request": {
+ //     "$ref": "TargetInstance"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetInstances.list":
+
+// TargetInstancesListCall holds the parameters for the
+// "compute.targetInstances.list" call; opt_ carries the optional query
+// parameters ("filter", "maxResults", "pageToken", "fields").
+type TargetInstancesListCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of TargetInstance resources available to the
+// specified project and zone.
+func (r *TargetInstancesService) List(project string, zone string) *TargetInstancesListCall {
+ c := &TargetInstancesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *TargetInstancesListCall) Filter(filter string) *TargetInstancesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *TargetInstancesListCall) MaxResults(maxResults int64) *TargetInstancesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *TargetInstancesListCall) PageToken(pageToken string) *TargetInstancesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetInstancesListCall) Fields(s ...googleapi.Field) *TargetInstancesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *TargetInstanceList. The commented block after the return is the API
+// discovery metadata for this method, emitted verbatim by the generator.
+func (c *TargetInstancesListCall) Do() (*TargetInstanceList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/targetInstances")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetInstanceList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of TargetInstance resources available to the specified project and zone.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetInstances.list",
+ //   "parameterOrder": [
+ //     "project",
+ //     "zone"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "zone": {
+ //       "description": "Name of the zone scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/zones/{zone}/targetInstances",
+ //   "response": {
+ //     "$ref": "TargetInstanceList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.addHealthCheck":
+
+// TargetPoolsAddHealthCheckCall holds the parameters for the
+// "compute.targetPools.addHealthCheck" call; the request struct is the
+// POST body and opt_ carries optional query parameters.
+type TargetPoolsAddHealthCheckCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest
+ opt_ map[string]interface{}
+}
+
+// AddHealthCheck: Adds health check URL to targetPool.
+func (r *TargetPoolsService) AddHealthCheck(project string, region string, targetPool string, targetpoolsaddhealthcheckrequest *TargetPoolsAddHealthCheckRequest) *TargetPoolsAddHealthCheckCall {
+ c := &TargetPoolsAddHealthCheckCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.targetpoolsaddhealthcheckrequest = targetpoolsaddhealthcheckrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsAddHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsAddHealthCheckCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do JSON-encodes the request (without a data wrapper), issues the HTTP
+// POST, and decodes the response into an *Operation. The commented
+// block after the return is the API discovery metadata.
+func (c *TargetPoolsAddHealthCheckCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddhealthcheckrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Adds health check URL to targetPool.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.addHealthCheck",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to which health_check_url is to be added.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/addHealthCheck",
+ //   "request": {
+ //     "$ref": "TargetPoolsAddHealthCheckRequest"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.addInstance":
+
+// TargetPoolsAddInstanceCall holds the parameters for the
+// "compute.targetPools.addInstance" call; the request struct is the
+// POST body and opt_ carries optional query parameters.
+type TargetPoolsAddInstanceCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest
+ opt_ map[string]interface{}
+}
+
+// AddInstance: Adds instance url to targetPool.
+func (r *TargetPoolsService) AddInstance(project string, region string, targetPool string, targetpoolsaddinstancerequest *TargetPoolsAddInstanceRequest) *TargetPoolsAddInstanceCall {
+ c := &TargetPoolsAddInstanceCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.targetpoolsaddinstancerequest = targetpoolsaddinstancerequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsAddInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsAddInstanceCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do JSON-encodes the request (without a data wrapper), issues the HTTP
+// POST, and decodes the response into an *Operation. The commented
+// block after the return is the API discovery metadata.
+func (c *TargetPoolsAddInstanceCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsaddinstancerequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/addInstance")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Adds instance url to targetPool.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.addInstance",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to which instance_url is to be added.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/addInstance",
+ //   "request": {
+ //     "$ref": "TargetPoolsAddInstanceRequest"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.aggregatedList":
+
+// TargetPoolsAggregatedListCall holds the parameters for the
+// "compute.targetPools.aggregatedList" call; opt_ carries the optional
+// query parameters ("filter", "maxResults", "pageToken", "fields").
+type TargetPoolsAggregatedListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// AggregatedList: Retrieves the list of target pools grouped by scope.
+func (r *TargetPoolsService) AggregatedList(project string) *TargetPoolsAggregatedListCall {
+ c := &TargetPoolsAggregatedListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *TargetPoolsAggregatedListCall) Filter(filter string) *TargetPoolsAggregatedListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *TargetPoolsAggregatedListCall) MaxResults(maxResults int64) *TargetPoolsAggregatedListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *TargetPoolsAggregatedListCall) PageToken(pageToken string) *TargetPoolsAggregatedListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsAggregatedListCall) Fields(s ...googleapi.Field) *TargetPoolsAggregatedListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *TargetPoolAggregatedList. The commented block after the return is
+// the API discovery metadata, emitted verbatim by the generator.
+func (c *TargetPoolsAggregatedListCall) Do() (*TargetPoolAggregatedList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/aggregated/targetPools")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetPoolAggregatedList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Retrieves the list of target pools grouped by scope.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetPools.aggregatedList",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/aggregated/targetPools",
+ //   "response": {
+ //     "$ref": "TargetPoolAggregatedList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.delete":
+
+// TargetPoolsDeleteCall holds the parameters for the
+// "compute.targetPools.delete" call; opt_ carries optional query
+// parameters such as "fields".
+type TargetPoolsDeleteCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified TargetPool resource.
+func (r *TargetPoolsService) Delete(project string, region string, targetPool string) *TargetPoolsDeleteCall {
+ c := &TargetPoolsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsDeleteCall) Fields(s ...googleapi.Field) *TargetPoolsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP DELETE request and decodes the JSON response into
+// an *Operation. The commented block after the return is the API
+// discovery metadata for this method, emitted verbatim by the generator.
+func (c *TargetPoolsDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Deletes the specified TargetPool resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.targetPools.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.get":
+
+// TargetPoolsGetCall holds the parameters for the
+// "compute.targetPools.get" call; opt_ carries optional query
+// parameters such as "fields".
+type TargetPoolsGetCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified TargetPool resource.
+func (r *TargetPoolsService) Get(project string, region string, targetPool string) *TargetPoolsGetCall {
+ c := &TargetPoolsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsGetCall) Fields(s ...googleapi.Field) *TargetPoolsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do issues the HTTP GET request and decodes the JSON response into a
+// *TargetPool. The commented block after the return is the API
+// discovery metadata for this method, emitted verbatim by the generator.
+func (c *TargetPoolsGetCall) Do() (*TargetPool, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetPool
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Returns the specified TargetPool resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetPools.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}",
+ //   "response": {
+ //     "$ref": "TargetPool"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.getHealth":
+
+// TargetPoolsGetHealthCall holds the parameters for the
+// "compute.targetPools.getHealth" call; instancereference is the POST
+// body and opt_ carries optional query parameters.
+type TargetPoolsGetHealthCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ instancereference *InstanceReference
+ opt_ map[string]interface{}
+}
+
+// GetHealth: Gets the most recent health check results for each IP for
+// the given instance that is referenced by given TargetPool.
+func (r *TargetPoolsService) GetHealth(project string, region string, targetPool string, instancereference *InstanceReference) *TargetPoolsGetHealthCall {
+ c := &TargetPoolsGetHealthCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.instancereference = instancereference
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsGetHealthCall) Fields(s ...googleapi.Field) *TargetPoolsGetHealthCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do JSON-encodes the InstanceReference (without a data wrapper),
+// issues the HTTP POST, and decodes the response into a
+// *TargetPoolInstanceHealth. The commented block after the return is
+// the API discovery metadata.
+func (c *TargetPoolsGetHealthCall) Do() (*TargetPoolInstanceHealth, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.instancereference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/getHealth")
+ urls += "?" + params.Encode()
+ // NOTE(review): the error from http.NewRequest is discarded; the
+ // generator assumes the resolved URL is always valid.
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetPoolInstanceHealth
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ //   "description": "Gets the most recent health check results for each IP for the given instance that is referenced by given TargetPool.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.getHealth",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to which the queried instance belongs.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/getHealth",
+ //   "request": {
+ //     "$ref": "InstanceReference"
+ //   },
+ //   "response": {
+ //     "$ref": "TargetPoolInstanceHealth"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.insert":
+
+// TargetPoolsInsertCall holds the parameters of a pending
+// "compute.targetPools.insert" request; it is created by
+// TargetPoolsService.Insert and issued by its Do method.
+type TargetPoolsInsertCall struct {
+ s *Service
+ project string
+ region string
+ targetpool *TargetPool
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a TargetPool resource in the specified project and
+// region using the data included in the request.
+func (r *TargetPoolsService) Insert(project string, region string, targetpool *TargetPool) *TargetPoolsInsertCall {
+ c := &TargetPoolsInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetpool = targetpool
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsInsertCall) Fields(s ...googleapi.Field) *TargetPoolsInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared insert call: it POSTs the TargetPool as a JSON
+// body, checks the HTTP status, and decodes the response into a *Operation.
+func (c *TargetPoolsInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpool)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Creates a TargetPool resource in the specified project and region using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.insert",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools",
+ //   "request": {
+ //     "$ref": "TargetPool"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.list":
+
+// TargetPoolsListCall holds the parameters of a pending
+// "compute.targetPools.list" request; it is created by
+// TargetPoolsService.List and issued by its Do method.
+type TargetPoolsListCall struct {
+ s *Service
+ project string
+ region string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of TargetPool resources available to the
+// specified project and region.
+func (r *TargetPoolsService) List(project string, region string) *TargetPoolsListCall {
+ c := &TargetPoolsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *TargetPoolsListCall) Filter(filter string) *TargetPoolsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *TargetPoolsListCall) MaxResults(maxResults int64) *TargetPoolsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *TargetPoolsListCall) PageToken(pageToken string) *TargetPoolsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsListCall) Fields(s ...googleapi.Field) *TargetPoolsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared list call: it GETs the collection URL (with any
+// optional query parameters that were set), checks the HTTP status, and
+// decodes the response into a *TargetPoolList.
+func (c *TargetPoolsListCall) Do() (*TargetPoolList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *TargetPoolList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Retrieves the list of TargetPool resources available to the specified project and region.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.targetPools.list",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools",
+ //   "response": {
+ //     "$ref": "TargetPoolList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.removeHealthCheck":
+
+// TargetPoolsRemoveHealthCheckCall holds the parameters of a pending
+// "compute.targetPools.removeHealthCheck" request; it is created by
+// TargetPoolsService.RemoveHealthCheck and issued by its Do method.
+type TargetPoolsRemoveHealthCheckCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest
+ opt_ map[string]interface{}
+}
+
+// RemoveHealthCheck: Removes health check URL from targetPool.
+func (r *TargetPoolsService) RemoveHealthCheck(project string, region string, targetPool string, targetpoolsremovehealthcheckrequest *TargetPoolsRemoveHealthCheckRequest) *TargetPoolsRemoveHealthCheckCall {
+ c := &TargetPoolsRemoveHealthCheckCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.targetpoolsremovehealthcheckrequest = targetpoolsremovehealthcheckrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsRemoveHealthCheckCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveHealthCheckCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared removeHealthCheck call: it POSTs the request
+// body as JSON, checks the HTTP status, and decodes the response into a
+// *Operation.
+func (c *TargetPoolsRemoveHealthCheckCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremovehealthcheckrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Removes health check URL from targetPool.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.removeHealthCheck",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to which health_check_url is to be removed.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/removeHealthCheck",
+ //   "request": {
+ //     "$ref": "TargetPoolsRemoveHealthCheckRequest"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.removeInstance":
+
+// TargetPoolsRemoveInstanceCall holds the parameters of a pending
+// "compute.targetPools.removeInstance" request; it is created by
+// TargetPoolsService.RemoveInstance and issued by its Do method.
+type TargetPoolsRemoveInstanceCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest
+ opt_ map[string]interface{}
+}
+
+// RemoveInstance: Removes instance URL from targetPool.
+func (r *TargetPoolsService) RemoveInstance(project string, region string, targetPool string, targetpoolsremoveinstancerequest *TargetPoolsRemoveInstanceRequest) *TargetPoolsRemoveInstanceCall {
+ c := &TargetPoolsRemoveInstanceCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.targetpoolsremoveinstancerequest = targetpoolsremoveinstancerequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsRemoveInstanceCall) Fields(s ...googleapi.Field) *TargetPoolsRemoveInstanceCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared removeInstance call: it POSTs the request body
+// as JSON, checks the HTTP status, and decodes the response into a
+// *Operation.
+func (c *TargetPoolsRemoveInstanceCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetpoolsremoveinstancerequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/removeInstance")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Removes instance URL from targetPool.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.removeInstance",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource to which instance_url is to be removed.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/removeInstance",
+ //   "request": {
+ //     "$ref": "TargetPoolsRemoveInstanceRequest"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.targetPools.setBackup":
+
+// TargetPoolsSetBackupCall holds the parameters of a pending
+// "compute.targetPools.setBackup" request; it is created by
+// TargetPoolsService.SetBackup and issued by its Do method.
+type TargetPoolsSetBackupCall struct {
+ s *Service
+ project string
+ region string
+ targetPool string
+ targetreference *TargetReference
+ opt_ map[string]interface{}
+}
+
+// SetBackup: Changes backup pool configurations.
+func (r *TargetPoolsService) SetBackup(project string, region string, targetPool string, targetreference *TargetReference) *TargetPoolsSetBackupCall {
+ c := &TargetPoolsSetBackupCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.region = region
+ c.targetPool = targetPool
+ c.targetreference = targetreference
+ return c
+}
+
+// FailoverRatio sets the optional parameter "failoverRatio": New
+// failoverRatio value for the containing target pool.
+func (c *TargetPoolsSetBackupCall) FailoverRatio(failoverRatio float64) *TargetPoolsSetBackupCall {
+ c.opt_["failoverRatio"] = failoverRatio
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *TargetPoolsSetBackupCall) Fields(s ...googleapi.Field) *TargetPoolsSetBackupCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared setBackup call: it POSTs the TargetReference as
+// a JSON body (with the optional failoverRatio query parameter), checks the
+// HTTP status, and decodes the response into a *Operation.
+func (c *TargetPoolsSetBackupCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.targetreference)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["failoverRatio"]; ok {
+ params.Set("failoverRatio", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/regions/{region}/targetPools/{targetPool}/setBackup")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "region": c.region,
+ "targetPool": c.targetPool,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Changes backup pool configurations.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.targetPools.setBackup",
+ //   "parameterOrder": [
+ //     "project",
+ //     "region",
+ //     "targetPool"
+ //   ],
+ //   "parameters": {
+ //     "failoverRatio": {
+ //       "description": "New failoverRatio value for the containing target pool.",
+ //       "format": "float",
+ //       "location": "query",
+ //       "type": "number"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "region": {
+ //       "description": "Name of the region scoping this request.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "targetPool": {
+ //       "description": "Name of the TargetPool resource for which the backup is to be set.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/regions/{region}/targetPools/{targetPool}/setBackup",
+ //   "request": {
+ //     "$ref": "TargetReference"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.delete":
+
+// UrlMapsDeleteCall holds the parameters of a pending
+// "compute.urlMaps.delete" request; it is created by UrlMapsService.Delete
+// and issued by its Do method.
+type UrlMapsDeleteCall struct {
+ s *Service
+ project string
+ urlMap string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified UrlMap resource.
+func (r *UrlMapsService) Delete(project string, urlMap string) *UrlMapsDeleteCall {
+ c := &UrlMapsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlMap = urlMap
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsDeleteCall) Fields(s ...googleapi.Field) *UrlMapsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared delete call: it sends a DELETE request with no
+// body, checks the HTTP status, and decodes the response into a *Operation.
+func (c *UrlMapsDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "urlMap": c.urlMap,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Deletes the specified UrlMap resource.",
+ //   "httpMethod": "DELETE",
+ //   "id": "compute.urlMaps.delete",
+ //   "parameterOrder": [
+ //     "project",
+ //     "urlMap"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "urlMap": {
+ //       "description": "Name of the UrlMap resource to delete.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/urlMaps/{urlMap}",
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.get":
+
+// UrlMapsGetCall holds the parameters of a pending "compute.urlMaps.get"
+// request; it is created by UrlMapsService.Get and issued by its Do method.
+type UrlMapsGetCall struct {
+ s *Service
+ project string
+ urlMap string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified UrlMap resource.
+func (r *UrlMapsService) Get(project string, urlMap string) *UrlMapsGetCall {
+ c := &UrlMapsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlMap = urlMap
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsGetCall) Fields(s ...googleapi.Field) *UrlMapsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared get call: it GETs the resource URL, checks the
+// HTTP status, and decodes the response into a *UrlMap.
+func (c *UrlMapsGetCall) Do() (*UrlMap, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "urlMap": c.urlMap,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *UrlMap
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Returns the specified UrlMap resource.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.urlMaps.get",
+ //   "parameterOrder": [
+ //     "project",
+ //     "urlMap"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "urlMap": {
+ //       "description": "Name of the UrlMap resource to return.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/urlMaps/{urlMap}",
+ //   "response": {
+ //     "$ref": "UrlMap"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.insert":
+
+// UrlMapsInsertCall holds the parameters of a pending
+// "compute.urlMaps.insert" request; it is created by UrlMapsService.Insert
+// and issued by its Do method.
+type UrlMapsInsertCall struct {
+ s *Service
+ project string
+ urlmap *UrlMap
+ opt_ map[string]interface{}
+}
+
+// Insert: Creates a UrlMap resource in the specified project using the
+// data included in the request.
+func (r *UrlMapsService) Insert(project string, urlmap *UrlMap) *UrlMapsInsertCall {
+ c := &UrlMapsInsertCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlmap = urlmap
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsInsertCall) Fields(s ...googleapi.Field) *UrlMapsInsertCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared insert call: it POSTs the UrlMap as a JSON body,
+// checks the HTTP status, and decodes the response into a *Operation.
+func (c *UrlMapsInsertCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Creates a UrlMap resource in the specified project using the data included in the request.",
+ //   "httpMethod": "POST",
+ //   "id": "compute.urlMaps.insert",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/urlMaps",
+ //   "request": {
+ //     "$ref": "UrlMap"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.list":
+
+// UrlMapsListCall holds the parameters of a pending "compute.urlMaps.list"
+// request; it is created by UrlMapsService.List and issued by its Do method.
+type UrlMapsListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of UrlMap resources available to the
+// specified project.
+func (r *UrlMapsService) List(project string) *UrlMapsListCall {
+ c := &UrlMapsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *UrlMapsListCall) Filter(filter string) *UrlMapsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *UrlMapsListCall) MaxResults(maxResults int64) *UrlMapsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *UrlMapsListCall) PageToken(pageToken string) *UrlMapsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsListCall) Fields(s ...googleapi.Field) *UrlMapsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared list call: it GETs the collection URL (with any
+// optional query parameters that were set), checks the HTTP status, and
+// decodes the response into a *UrlMapList.
+func (c *UrlMapsListCall) Do() (*UrlMapList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *UrlMapList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Retrieves the list of UrlMap resources available to the specified project.",
+ //   "httpMethod": "GET",
+ //   "id": "compute.urlMaps.list",
+ //   "parameterOrder": [
+ //     "project"
+ //   ],
+ //   "parameters": {
+ //     "filter": {
+ //       "description": "Optional. Filter expression for filtering listed resources.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "maxResults": {
+ //       "default": "500",
+ //       "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ //       "format": "uint32",
+ //       "location": "query",
+ //       "maximum": "500",
+ //       "minimum": "0",
+ //       "type": "integer"
+ //     },
+ //     "pageToken": {
+ //       "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ //       "location": "query",
+ //       "type": "string"
+ //     },
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/urlMaps",
+ //   "response": {
+ //     "$ref": "UrlMapList"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute",
+ //     "https://www.googleapis.com/auth/compute.readonly"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.patch":
+
+// UrlMapsPatchCall holds the parameters of a pending
+// "compute.urlMaps.patch" request; it is created by UrlMapsService.Patch
+// and issued by its Do method.
+type UrlMapsPatchCall struct {
+ s *Service
+ project string
+ urlMap string
+ urlmap *UrlMap
+ opt_ map[string]interface{}
+}
+
+// Patch: Update the entire content of the UrlMap resource. This method
+// supports patch semantics.
+func (r *UrlMapsService) Patch(project string, urlMap string, urlmap *UrlMap) *UrlMapsPatchCall {
+ c := &UrlMapsPatchCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlMap = urlMap
+ c.urlmap = urlmap
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsPatchCall) Fields(s ...googleapi.Field) *UrlMapsPatchCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the prepared patch call: it sends the UrlMap as the JSON body
+// of a PATCH request, checks the HTTP status, and decodes the response into
+// a *Operation.
+func (c *UrlMapsPatchCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PATCH", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "urlMap": c.urlMap,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // Commented-out method metadata emitted by the API code generator (never executed):
+ // {
+ //   "description": "Update the entire content of the UrlMap resource. This method supports patch semantics.",
+ //   "httpMethod": "PATCH",
+ //   "id": "compute.urlMaps.patch",
+ //   "parameterOrder": [
+ //     "project",
+ //     "urlMap"
+ //   ],
+ //   "parameters": {
+ //     "project": {
+ //       "description": "Name of the project scoping this request.",
+ //       "location": "path",
+ //       "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ //       "required": true,
+ //       "type": "string"
+ //     },
+ //     "urlMap": {
+ //       "description": "Name of the UrlMap resource to update.",
+ //       "location": "path",
+ //       "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ //       "required": true,
+ //       "type": "string"
+ //     }
+ //   },
+ //   "path": "{project}/global/urlMaps/{urlMap}",
+ //   "request": {
+ //     "$ref": "UrlMap"
+ //   },
+ //   "response": {
+ //     "$ref": "Operation"
+ //   },
+ //   "scopes": [
+ //     "https://www.googleapis.com/auth/compute"
+ //   ]
+ // }
+
+}
+
+// method id "compute.urlMaps.update":
+
+type UrlMapsUpdateCall struct {
+ s *Service
+ project string
+ urlMap string
+ urlmap *UrlMap
+ opt_ map[string]interface{}
+}
+
+// Update: Update the entire content of the UrlMap resource.
+func (r *UrlMapsService) Update(project string, urlMap string, urlmap *UrlMap) *UrlMapsUpdateCall {
+ c := &UrlMapsUpdateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlMap = urlMap
+ c.urlmap = urlmap
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsUpdateCall) Fields(s ...googleapi.Field) *UrlMapsUpdateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *UrlMapsUpdateCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmap)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("PUT", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "urlMap": c.urlMap,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Update the entire content of the UrlMap resource.",
+ // "httpMethod": "PUT",
+ // "id": "compute.urlMaps.update",
+ // "parameterOrder": [
+ // "project",
+ // "urlMap"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "urlMap": {
+ // "description": "Name of the UrlMap resource to update.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/urlMaps/{urlMap}",
+ // "request": {
+ // "$ref": "UrlMap"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.urlMaps.validate":
+
+type UrlMapsValidateCall struct {
+ s *Service
+ project string
+ urlMap string
+ urlmapsvalidaterequest *UrlMapsValidateRequest
+ opt_ map[string]interface{}
+}
+
+// Validate: Run static validation for the UrlMap. In particular, the
+// tests of the provided UrlMap will be run. Calling this method does
+// NOT create the UrlMap.
+func (r *UrlMapsService) Validate(project string, urlMap string, urlmapsvalidaterequest *UrlMapsValidateRequest) *UrlMapsValidateCall {
+ c := &UrlMapsValidateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.urlMap = urlMap
+ c.urlmapsvalidaterequest = urlmapsvalidaterequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *UrlMapsValidateCall) Fields(s ...googleapi.Field) *UrlMapsValidateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *UrlMapsValidateCall) Do() (*UrlMapsValidateResponse, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.urlmapsvalidaterequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/global/urlMaps/{urlMap}/validate")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "urlMap": c.urlMap,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *UrlMapsValidateResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Run static validation for the UrlMap. In particular, the tests of the provided UrlMap will be run. Calling this method does NOT create the UrlMap.",
+ // "httpMethod": "POST",
+ // "id": "compute.urlMaps.validate",
+ // "parameterOrder": [
+ // "project",
+ // "urlMap"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "urlMap": {
+ // "description": "Name of the UrlMap resource to be validated as.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/global/urlMaps/{urlMap}/validate",
+ // "request": {
+ // "$ref": "UrlMapsValidateRequest"
+ // },
+ // "response": {
+ // "$ref": "UrlMapsValidateResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.zoneOperations.delete":
+
+type ZoneOperationsDeleteCall struct {
+ s *Service
+ project string
+ zone string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the specified zone-specific operation resource.
+func (r *ZoneOperationsService) Delete(project string, zone string, operation string) *ZoneOperationsDeleteCall {
+ c := &ZoneOperationsDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsDeleteCall) Fields(s ...googleapi.Field) *ZoneOperationsDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ZoneOperationsDeleteCall) Do() error {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return err
+ }
+ return nil
+ // {
+ // "description": "Deletes the specified zone-specific operation resource.",
+ // "httpMethod": "DELETE",
+ // "id": "compute.zoneOperations.delete",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to delete.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/operations/{operation}",
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute"
+ // ]
+ // }
+
+}
+
+// method id "compute.zoneOperations.get":
+
+type ZoneOperationsGetCall struct {
+ s *Service
+ project string
+ zone string
+ operation string
+ opt_ map[string]interface{}
+}
+
+// Get: Retrieves the specified zone-specific operation resource.
+func (r *ZoneOperationsService) Get(project string, zone string, operation string) *ZoneOperationsGetCall {
+ c := &ZoneOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ c.operation = operation
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsGetCall) Fields(s ...googleapi.Field) *ZoneOperationsGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ZoneOperationsGetCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations/{operation}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ "operation": c.operation,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the specified zone-specific operation resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.zoneOperations.get",
+ // "parameterOrder": [
+ // "project",
+ // "zone",
+ // "operation"
+ // ],
+ // "parameters": {
+ // "operation": {
+ // "description": "Name of the operation resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/operations/{operation}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.zoneOperations.list":
+
+type ZoneOperationsListCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of operation resources contained within the
+// specified zone.
+func (r *ZoneOperationsService) List(project string, zone string) *ZoneOperationsListCall {
+ c := &ZoneOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *ZoneOperationsListCall) Filter(filter string) *ZoneOperationsListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *ZoneOperationsListCall) MaxResults(maxResults int64) *ZoneOperationsListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *ZoneOperationsListCall) PageToken(pageToken string) *ZoneOperationsListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZoneOperationsListCall) Fields(s ...googleapi.Field) *ZoneOperationsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ZoneOperationsListCall) Do() (*OperationList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}/operations")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *OperationList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of operation resources contained within the specified zone.",
+ // "httpMethod": "GET",
+ // "id": "compute.zoneOperations.list",
+ // "parameterOrder": [
+ // "project",
+ // "zone"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone scoping this request.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}/operations",
+ // "response": {
+ // "$ref": "OperationList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.zones.get":
+
+type ZonesGetCall struct {
+ s *Service
+ project string
+ zone string
+ opt_ map[string]interface{}
+}
+
+// Get: Returns the specified zone resource.
+func (r *ZonesService) Get(project string, zone string) *ZonesGetCall {
+ c := &ZonesGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ c.zone = zone
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZonesGetCall) Fields(s ...googleapi.Field) *ZonesGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ZonesGetCall) Do() (*Zone, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones/{zone}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ "zone": c.zone,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Zone
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Returns the specified zone resource.",
+ // "httpMethod": "GET",
+ // "id": "compute.zones.get",
+ // "parameterOrder": [
+ // "project",
+ // "zone"
+ // ],
+ // "parameters": {
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zone": {
+ // "description": "Name of the zone resource to return.",
+ // "location": "path",
+ // "pattern": "[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones/{zone}",
+ // "response": {
+ // "$ref": "Zone"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
+
+// method id "compute.zones.list":
+
+type ZonesListCall struct {
+ s *Service
+ project string
+ opt_ map[string]interface{}
+}
+
+// List: Retrieves the list of zone resources available to the specified
+// project.
+func (r *ZonesService) List(project string) *ZonesListCall {
+ c := &ZonesListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.project = project
+ return c
+}
+
+// Filter sets the optional parameter "filter": Filter expression for
+// filtering listed resources.
+func (c *ZonesListCall) Filter(filter string) *ZonesListCall {
+ c.opt_["filter"] = filter
+ return c
+}
+
+// MaxResults sets the optional parameter "maxResults": Maximum count of
+// results to be returned. Maximum value is 500 and default value is
+// 500.
+func (c *ZonesListCall) MaxResults(maxResults int64) *ZonesListCall {
+ c.opt_["maxResults"] = maxResults
+ return c
+}
+
+// PageToken sets the optional parameter "pageToken": Tag returned by a
+// previous list request truncated by maxResults. Used to continue a
+// previous list request.
+func (c *ZonesListCall) PageToken(pageToken string) *ZonesListCall {
+ c.opt_["pageToken"] = pageToken
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ZonesListCall) Fields(s ...googleapi.Field) *ZonesListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ZonesListCall) Do() (*ZoneList, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["filter"]; ok {
+ params.Set("filter", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["maxResults"]; ok {
+ params.Set("maxResults", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["pageToken"]; ok {
+ params.Set("pageToken", fmt.Sprintf("%v", v))
+ }
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{project}/zones")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "project": c.project,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ZoneList
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Retrieves the list of zone resources available to the specified project.",
+ // "httpMethod": "GET",
+ // "id": "compute.zones.list",
+ // "parameterOrder": [
+ // "project"
+ // ],
+ // "parameters": {
+ // "filter": {
+ // "description": "Optional. Filter expression for filtering listed resources.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "maxResults": {
+ // "default": "500",
+ // "description": "Optional. Maximum count of results to be returned. Maximum value is 500 and default value is 500.",
+ // "format": "uint32",
+ // "location": "query",
+ // "maximum": "500",
+ // "minimum": "0",
+ // "type": "integer"
+ // },
+ // "pageToken": {
+ // "description": "Optional. Tag returned by a previous list request truncated by maxResults. Used to continue a previous list request.",
+ // "location": "query",
+ // "type": "string"
+ // },
+ // "project": {
+ // "description": "Name of the project scoping this request.",
+ // "location": "path",
+ // "pattern": "(?:(?:[-a-z0-9]{1,63}\\.)*(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?):)?(?:[0-9]{1,19}|(?:[a-z](?:[-a-z0-9]{0,61}[a-z0-9])?))",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{project}/zones",
+ // "response": {
+ // "$ref": "ZoneList"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/compute",
+ // "https://www.googleapis.com/auth/compute.readonly"
+ // ]
+ // }
+
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-api.json b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-api.json
new file mode 100644
index 000000000000..bca5a313cfe8
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-api.json
@@ -0,0 +1,579 @@
+{
+ "kind": "discovery#restDescription",
+ "etag": "\"l66ggWbucbkBw9Lpos72oziyefE/ZrZBeDfQYPqAxFURJt0IhCOLUHQ\"",
+ "discoveryVersion": "v1",
+ "id": "container:v1beta1",
+ "name": "container",
+ "version": "v1beta1",
+ "revision": "20141103",
+ "title": "Google Container Engine API",
+ "description": "The Google Container Engine API is used for building and managing container based applications, powered by the open source Kubernetes technology.",
+ "ownerDomain": "google.com",
+ "ownerName": "Google",
+ "icons": {
+ "x16": "http://www.google.com/images/icons/product/search-16.gif",
+ "x32": "http://www.google.com/images/icons/product/search-32.gif"
+ },
+ "protocol": "rest",
+ "baseUrl": "https://www.googleapis.com/container/v1beta1/projects/",
+ "basePath": "/container/v1beta1/projects/",
+ "rootUrl": "https://www.googleapis.com/",
+ "servicePath": "container/v1beta1/projects/",
+ "batchPath": "batch",
+ "parameters": {
+ "alt": {
+ "type": "string",
+ "description": "Data format for the response.",
+ "default": "json",
+ "enum": [
+ "json"
+ ],
+ "enumDescriptions": [
+ "Responses with Content-Type of application/json"
+ ],
+ "location": "query"
+ },
+ "fields": {
+ "type": "string",
+ "description": "Selector specifying which fields to include in a partial response.",
+ "location": "query"
+ },
+ "key": {
+ "type": "string",
+ "description": "API key. Your API key identifies your project and provides you with API access, quota, and reports. Required unless you provide an OAuth 2.0 token.",
+ "location": "query"
+ },
+ "oauth_token": {
+ "type": "string",
+ "description": "OAuth 2.0 token for the current user.",
+ "location": "query"
+ },
+ "prettyPrint": {
+ "type": "boolean",
+ "description": "Returns response with indentations and line breaks.",
+ "default": "true",
+ "location": "query"
+ },
+ "quotaUser": {
+ "type": "string",
+ "description": "Available to use for quota purposes for server-side applications. Can be any arbitrary string assigned to a user, but should not exceed 40 characters. Overrides userIp if both are provided.",
+ "location": "query"
+ },
+ "userIp": {
+ "type": "string",
+ "description": "IP address of the site where the request originates. Use this if you want to enforce per-user limits.",
+ "location": "query"
+ }
+ },
+ "auth": {
+ "oauth2": {
+ "scopes": {
+ "https://www.googleapis.com/auth/cloud-platform": {
+ "description": "View and manage your data across Google Cloud Platform services"
+ }
+ }
+ }
+ },
+ "schemas": {
+ "Cluster": {
+ "id": "Cluster",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.Cluster",
+ "properties": {
+ "clusterApiVersion": {
+ "type": "string",
+ "description": "The API version of the Kubernetes master and kubelets running in this cluster. Allowed value is 0.4.2, or leave blank to pick up the latest stable release."
+ },
+ "containerIpv4Cidr": {
+ "type": "string",
+ "description": "[Output only] The IP addresses of the container pods in this cluster, in CIDR notation (e.g. 1.2.3.4/29)."
+ },
+ "creationTimestamp": {
+ "type": "string",
+ "description": "[Output only] The time the cluster was created, in RFC3339 text format."
+ },
+ "description": {
+ "type": "string",
+ "description": "An optional description of this cluster."
+ },
+ "endpoint": {
+ "type": "string",
+ "description": "[Output only] The IP address of this cluster's Kubernetes master. The endpoint can be accessed from the internet at https://username:password@endpoint/.\n\nSee the masterAuth property of this resource for username and password information."
+ },
+ "masterAuth": {
+ "$ref": "MasterAuth",
+ "description": "The HTTP basic authentication information for accessing the master. Because the master endpoint is open to the internet, you should create a strong password."
+ },
+ "name": {
+ "type": "string",
+ "description": "The name of this cluster. The name must be unique within this project and zone, and can be up to 40 characters with the following restrictions: \n- Lowercase letters, numbers, and hyphens only.\n- Must start with a letter.\n- Must end with a number or a letter."
+ },
+ "nodeConfig": {
+ "$ref": "NodeConfig",
+ "description": "The machine type and image to use for all nodes in this cluster. See the descriptions of the child properties of nodeConfig."
+ },
+ "nodeRoutingPrefixSize": {
+ "type": "integer",
+ "description": "[Output only] The size of the address space on each node for hosting containers.",
+ "format": "int32"
+ },
+ "numNodes": {
+ "type": "integer",
+ "description": "The number of nodes to create in this cluster. You must ensure that your Compute Engine resource quota is sufficient for this number of instances plus one (to include the master). You must also have available firewall and routes quota.",
+ "format": "int32"
+ },
+ "servicesIpv4Cidr": {
+ "type": "string",
+ "description": "[Output only] The IP addresses of the Kubernetes services in this cluster, in CIDR notation (e.g. 1.2.3.4/29). Service addresses are always in the 10.0.0.0/16 range."
+ },
+ "status": {
+ "type": "string",
+ "description": "[Output only] The current status of this cluster.",
+ "enum": [
+ "error",
+ "provisioning",
+ "running",
+ "stopping"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ "",
+ ""
+ ]
+ },
+ "statusMessage": {
+ "type": "string",
+ "description": "[Output only] Additional information about the current status of this cluster, if available."
+ },
+ "zone": {
+ "type": "string",
+ "description": "[Output only] The name of the Google Compute Engine zone in which the cluster resides."
+ }
+ }
+ },
+ "CreateClusterRequest": {
+ "id": "CreateClusterRequest",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.CreateClusterRequest",
+ "properties": {
+ "cluster": {
+ "$ref": "Cluster",
+ "description": "A cluster resource."
+ }
+ }
+ },
+ "ListAggregatedClustersResponse": {
+ "id": "ListAggregatedClustersResponse",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.ListAggregatedClustersResponse",
+ "properties": {
+ "clusters": {
+ "type": "array",
+ "description": "A list of clusters in the project, across all zones.",
+ "items": {
+ "$ref": "Cluster"
+ }
+ }
+ }
+ },
+ "ListAggregatedOperationsResponse": {
+ "id": "ListAggregatedOperationsResponse",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.ListAggregatedOperationsResponse",
+ "properties": {
+ "operations": {
+ "type": "array",
+ "description": "A list of operations in the project, across all zones.",
+ "items": {
+ "$ref": "Operation"
+ }
+ }
+ }
+ },
+ "ListClustersResponse": {
+ "id": "ListClustersResponse",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.ListClustersResponse",
+ "properties": {
+ "clusters": {
+ "type": "array",
+ "description": "A list of clusters in the project in the specified zone.",
+ "items": {
+ "$ref": "Cluster"
+ }
+ }
+ }
+ },
+ "ListOperationsResponse": {
+ "id": "ListOperationsResponse",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.ListOperationsResponse",
+ "properties": {
+ "operations": {
+ "type": "array",
+ "description": "A list of operations in the project in the specified zone.",
+ "items": {
+ "$ref": "Operation"
+ }
+ }
+ }
+ },
+ "MasterAuth": {
+ "id": "MasterAuth",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.MasterAuth",
+ "properties": {
+ "password": {
+ "type": "string",
+ "description": "The password to use when accessing the Kubernetes master endpoint."
+ },
+ "user": {
+ "type": "string",
+ "description": "The username to use when accessing the Kubernetes master endpoint."
+ }
+ }
+ },
+ "NodeConfig": {
+ "id": "NodeConfig",
+ "type": "object",
+ "externalTypeName": "container.v1beta1.NodeConfig",
+ "properties": {
+ "machineType": {
+ "type": "string",
+ "description": "The name of a Google Compute Engine machine type (e.g. n1-standard-1).\n\nIf unspecified, the default machine type is n1-standard-1."
+ },
+ "sourceImage": {
+ "type": "string",
+ "description": "The fully-specified name of a Google Compute Engine image. For example: https://www.googleapis.com/compute/v1/projects/debian-cloud/global/images/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version date).\n\nIf specifying an image, you are responsible for ensuring its compatibility with the Debian 7 backports image. We recommend leaving this field blank to accept the default backports-debian-7-wheezy value."
+ }
+ }
+ },
+ "Operation": {
+ "id": "Operation",
+ "type": "object",
+ "description": "Defines the operation resource. All fields are output only.",
+ "externalTypeName": "container.v1beta1.Operation",
+ "properties": {
+ "errorMessage": {
+ "type": "string",
+ "description": "If an error has occurred, a textual description of the error."
+ },
+ "name": {
+ "type": "string",
+ "description": "The server-assigned ID for this operation. If the operation is fulfilled upfront, it may not have a resource name."
+ },
+ "operationType": {
+ "type": "string",
+ "description": "The operation type.",
+ "enum": [
+ "createCluster",
+ "deleteCluster"
+ ],
+ "enumDescriptions": [
+ "",
+ ""
+ ]
+ },
+ "status": {
+ "type": "string",
+ "description": "The current status of the operation.",
+ "enum": [
+ "done",
+ "pending",
+ "running"
+ ],
+ "enumDescriptions": [
+ "",
+ "",
+ ""
+ ]
+ },
+ "target": {
+ "type": "string",
+ "description": "[Optional] The URL of the cluster resource that this operation is associated with."
+ },
+ "zone": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the operation is taking place."
+ }
+ }
+ }
+ },
+ "resources": {
+ "projects": {
+ "resources": {
+ "clusters": {
+ "methods": {
+ "list": {
+ "id": "container.projects.clusters.list",
+ "path": "{projectId}/clusters",
+ "httpMethod": "GET",
+ "description": "Lists all clusters owned by a project across all zones.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "response": {
+ "$ref": "ListAggregatedClustersResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "operations": {
+ "methods": {
+ "list": {
+ "id": "container.projects.operations.list",
+ "path": "{projectId}/operations",
+ "httpMethod": "GET",
+ "description": "Lists all operations in a project, across all zones.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId"
+ ],
+ "response": {
+ "$ref": "ListAggregatedOperationsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "zones": {
+ "resources": {
+ "clusters": {
+ "methods": {
+ "create": {
+ "id": "container.projects.zones.clusters.create",
+ "path": "{projectId}/zones/{zoneId}/clusters",
+ "httpMethod": "POST",
+ "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, a route named k8s-iproute-10-xx-0-0 is created to track that the cluster's 10.xx.0.0/16 CIDR has been assigned.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId"
+ ],
+ "request": {
+ "$ref": "CreateClusterRequest"
+ },
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "delete": {
+ "id": "container.projects.zones.clusters.delete",
+ "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
+ "httpMethod": "DELETE",
+ "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
+ "parameters": {
+ "clusterId": {
+ "type": "string",
+ "description": "The name of the cluster to delete.",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId",
+ "clusterId"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "get": {
+ "id": "container.projects.zones.clusters.get",
+ "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
+ "httpMethod": "GET",
+ "description": "Gets a specific cluster.",
+ "parameters": {
+ "clusterId": {
+ "type": "string",
+ "description": "The name of the cluster to retrieve.",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId",
+ "clusterId"
+ ],
+ "response": {
+ "$ref": "Cluster"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "list": {
+ "id": "container.projects.zones.clusters.list",
+ "path": "{projectId}/zones/{zoneId}/clusters",
+ "httpMethod": "GET",
+ "description": "Lists all clusters owned by a project in the specified zone.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId"
+ ],
+ "response": {
+ "$ref": "ListClustersResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ },
+ "operations": {
+ "methods": {
+ "get": {
+ "id": "container.projects.zones.operations.get",
+ "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
+ "httpMethod": "GET",
+ "description": "Gets the specified operation.",
+ "parameters": {
+ "operationId": {
+ "type": "string",
+ "description": "The server-assigned name of the operation.",
+ "required": true,
+ "location": "path"
+ },
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId",
+ "operationId"
+ ],
+ "response": {
+ "$ref": "Operation"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ },
+ "list": {
+ "id": "container.projects.zones.operations.list",
+ "path": "{projectId}/zones/{zoneId}/operations",
+ "httpMethod": "GET",
+ "description": "Lists all operations in a project in a specific zone.",
+ "parameters": {
+ "projectId": {
+ "type": "string",
+ "description": "The Google Developers Console project ID or project number.",
+ "required": true,
+ "location": "path"
+ },
+ "zoneId": {
+ "type": "string",
+ "description": "The name of the Google Compute Engine zone to return operations for.",
+ "required": true,
+ "location": "path"
+ }
+ },
+ "parameterOrder": [
+ "projectId",
+ "zoneId"
+ ],
+ "response": {
+ "$ref": "ListOperationsResponse"
+ },
+ "scopes": [
+ "https://www.googleapis.com/auth/cloud-platform"
+ ]
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-gen.go b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-gen.go
new file mode 100644
index 000000000000..c9fce64414bb
--- /dev/null
+++ b/Godeps/_workspace/src/code.google.com/p/google-api-go-client/container/v1beta1/v1beta1/container-gen.go
@@ -0,0 +1,1007 @@
+// Package container provides access to the Google Container Engine API.
+//
+// Usage example:
+//
+// import "code.google.com/p/google-api-go-client/container/v1beta1"
+// ...
+// containerService, err := container.New(oauthHttpClient)
+package container
+
+import (
+ "bytes"
+ "code.google.com/p/google-api-go-client/googleapi"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+)
+
+// Always reference these packages, just in case the auto-generated code
+// below doesn't.
+var _ = bytes.NewBuffer
+var _ = strconv.Itoa
+var _ = fmt.Sprintf
+var _ = json.NewDecoder
+var _ = io.Copy
+var _ = url.Parse
+var _ = googleapi.Version
+var _ = errors.New
+var _ = strings.Replace
+
+const apiId = "container:v1beta1"
+const apiName = "container"
+const apiVersion = "v1beta1"
+const basePath = "https://www.googleapis.com/container/v1beta1/projects/"
+
+// OAuth2 scopes used by this API.
+const (
+ // View and manage your data across Google Cloud Platform services
+ CloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
+)
+
+func New(client *http.Client) (*Service, error) {
+ if client == nil {
+ return nil, errors.New("client is nil")
+ }
+ s := &Service{client: client, BasePath: basePath}
+ s.Projects = NewProjectsService(s)
+ return s, nil
+}
+
+type Service struct {
+ client *http.Client
+ BasePath string // API endpoint base URL
+
+ Projects *ProjectsService
+}
+
+func NewProjectsService(s *Service) *ProjectsService {
+ rs := &ProjectsService{s: s}
+ rs.Clusters = NewProjectsClustersService(s)
+ rs.Operations = NewProjectsOperationsService(s)
+ rs.Zones = NewProjectsZonesService(s)
+ return rs
+}
+
+type ProjectsService struct {
+ s *Service
+
+ Clusters *ProjectsClustersService
+
+ Operations *ProjectsOperationsService
+
+ Zones *ProjectsZonesService
+}
+
+func NewProjectsClustersService(s *Service) *ProjectsClustersService {
+ rs := &ProjectsClustersService{s: s}
+ return rs
+}
+
+type ProjectsClustersService struct {
+ s *Service
+}
+
+func NewProjectsOperationsService(s *Service) *ProjectsOperationsService {
+ rs := &ProjectsOperationsService{s: s}
+ return rs
+}
+
+type ProjectsOperationsService struct {
+ s *Service
+}
+
+func NewProjectsZonesService(s *Service) *ProjectsZonesService {
+ rs := &ProjectsZonesService{s: s}
+ rs.Clusters = NewProjectsZonesClustersService(s)
+ rs.Operations = NewProjectsZonesOperationsService(s)
+ return rs
+}
+
+type ProjectsZonesService struct {
+ s *Service
+
+ Clusters *ProjectsZonesClustersService
+
+ Operations *ProjectsZonesOperationsService
+}
+
+func NewProjectsZonesClustersService(s *Service) *ProjectsZonesClustersService {
+ rs := &ProjectsZonesClustersService{s: s}
+ return rs
+}
+
+type ProjectsZonesClustersService struct {
+ s *Service
+}
+
+func NewProjectsZonesOperationsService(s *Service) *ProjectsZonesOperationsService {
+ rs := &ProjectsZonesOperationsService{s: s}
+ return rs
+}
+
+type ProjectsZonesOperationsService struct {
+ s *Service
+}
+
+type Cluster struct {
+ // ClusterApiVersion: The API version of the Kubernetes master and
+ // kubelets running in this cluster. Allowed value is 0.4.2, or leave
+ // blank to pick up the latest stable release.
+ ClusterApiVersion string `json:"clusterApiVersion,omitempty"`
+
+ // ContainerIpv4Cidr: [Output only] The IP addresses of the container
+ // pods in this cluster, in CIDR notation (e.g. 1.2.3.4/29).
+ ContainerIpv4Cidr string `json:"containerIpv4Cidr,omitempty"`
+
+ // CreationTimestamp: [Output only] The time the cluster was created, in
+ // RFC3339 text format.
+ CreationTimestamp string `json:"creationTimestamp,omitempty"`
+
+ // Description: An optional description of this cluster.
+ Description string `json:"description,omitempty"`
+
+ // Endpoint: [Output only] The IP address of this cluster's Kubernetes
+ // master. The endpoint can be accessed from the internet at
+ // https://username:password@endpoint/.
+ //
+ // See the masterAuth property of
+ // this resource for username and password information.
+ Endpoint string `json:"endpoint,omitempty"`
+
+ // MasterAuth: The HTTP basic authentication information for accessing
+ // the master. Because the master endpoint is open to the internet, you
+ // should create a strong password.
+ MasterAuth *MasterAuth `json:"masterAuth,omitempty"`
+
+ // Name: The name of this cluster. The name must be unique within this
+ // project and zone, and can be up to 40 characters with the following
+ // restrictions:
+ // - Lowercase letters, numbers, and hyphens only.
+ // -
+ // Must start with a letter.
+ // - Must end with a number or a letter.
+ Name string `json:"name,omitempty"`
+
+ // NodeConfig: The machine type and image to use for all nodes in this
+ // cluster. See the descriptions of the child properties of nodeConfig.
+ NodeConfig *NodeConfig `json:"nodeConfig,omitempty"`
+
+ // NodeRoutingPrefixSize: [Output only] The size of the address space on
+ // each node for hosting containers.
+ NodeRoutingPrefixSize int64 `json:"nodeRoutingPrefixSize,omitempty"`
+
+ // NumNodes: The number of nodes to create in this cluster. You must
+ // ensure that your Compute Engine resource quota is sufficient for this
+ // number of instances plus one (to include the master). You must also
+ // have available firewall and routes quota.
+ NumNodes int64 `json:"numNodes,omitempty"`
+
+ // ServicesIpv4Cidr: [Output only] The IP addresses of the Kubernetes
+ // services in this cluster, in CIDR notation (e.g. 1.2.3.4/29).
+ // Service addresses are always in the 10.0.0.0/16 range.
+ ServicesIpv4Cidr string `json:"servicesIpv4Cidr,omitempty"`
+
+ // Status: [Output only] The current status of this cluster.
+ Status string `json:"status,omitempty"`
+
+ // StatusMessage: [Output only] Additional information about the current
+ // status of this cluster, if available.
+ StatusMessage string `json:"statusMessage,omitempty"`
+
+ // Zone: [Output only] The name of the Google Compute Engine zone in
+ // which the cluster resides.
+ Zone string `json:"zone,omitempty"`
+}
+
+type CreateClusterRequest struct {
+ // Cluster: A cluster resource.
+ Cluster *Cluster `json:"cluster,omitempty"`
+}
+
+type ListAggregatedClustersResponse struct {
+ // Clusters: A list of clusters in the project, across all zones.
+ Clusters []*Cluster `json:"clusters,omitempty"`
+}
+
+type ListAggregatedOperationsResponse struct {
+ // Operations: A list of operations in the project, across all zones.
+ Operations []*Operation `json:"operations,omitempty"`
+}
+
+type ListClustersResponse struct {
+ // Clusters: A list of clusters in the project in the specified zone.
+ Clusters []*Cluster `json:"clusters,omitempty"`
+}
+
+type ListOperationsResponse struct {
+ // Operations: A list of operations in the project in the specified
+ // zone.
+ Operations []*Operation `json:"operations,omitempty"`
+}
+
+type MasterAuth struct {
+ // Password: The password to use when accessing the Kubernetes master
+ // endpoint.
+ Password string `json:"password,omitempty"`
+
+ // User: The username to use when accessing the Kubernetes master
+ // endpoint.
+ User string `json:"user,omitempty"`
+}
+
+type NodeConfig struct {
+ // MachineType: The name of a Google Compute Engine machine type (e.g.
+ // n1-standard-1).
+ //
+ // If unspecified, the default machine type is
+ // n1-standard-1.
+ MachineType string `json:"machineType,omitempty"`
+
+ // SourceImage: The fully-specified name of a Google Compute Engine
+ // image. For example:
+ // https://www.googleapis.com/compute/v1/projects/debian-cloud/global/ima
+ // ges/backports-debian-7-wheezy-vYYYYMMDD (where YYYMMDD is the version
+ // date).
+ //
+ // If specifying an image, you are responsible for ensuring its
+ // compatibility with the Debian 7 backports image. We recommend leaving
+ // this field blank to accept the default backports-debian-7-wheezy
+ // value.
+ SourceImage string `json:"sourceImage,omitempty"`
+}
+
+type Operation struct {
+ // ErrorMessage: If an error has occurred, a textual description of the
+ // error.
+ ErrorMessage string `json:"errorMessage,omitempty"`
+
+ // Name: The server-assigned ID for this operation. If the operation is
+ // fulfilled upfront, it may not have a resource name.
+ Name string `json:"name,omitempty"`
+
+ // OperationType: The operation type.
+ OperationType string `json:"operationType,omitempty"`
+
+ // Status: The current status of the operation.
+ Status string `json:"status,omitempty"`
+
+ // Target: [Optional] The URL of the cluster resource that this
+ // operation is associated with.
+ Target string `json:"target,omitempty"`
+
+ // Zone: The name of the Google Compute Engine zone in which the
+ // operation is taking place.
+ Zone string `json:"zone,omitempty"`
+}
+
+// method id "container.projects.clusters.list":
+
+type ProjectsClustersListCall struct {
+ s *Service
+ projectId string
+ opt_ map[string]interface{}
+}
+
+// List: Lists all clusters owned by a project across all zones.
+func (r *ProjectsClustersService) List(projectId string) *ProjectsClustersListCall {
+ c := &ProjectsClustersListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsClustersListCall) Fields(s ...googleapi.Field) *ProjectsClustersListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsClustersListCall) Do() (*ListAggregatedClustersResponse, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/clusters")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ListAggregatedClustersResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all clusters owned by a project across all zones.",
+ // "httpMethod": "GET",
+ // "id": "container.projects.clusters.list",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "The Google Developers Console project ID or project number.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{projectId}/clusters",
+ // "response": {
+ // "$ref": "ListAggregatedClustersResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "container.projects.operations.list":
+
+type ProjectsOperationsListCall struct {
+ s *Service
+ projectId string
+ opt_ map[string]interface{}
+}
+
+// List: Lists all operations in a project, across all zones.
+func (r *ProjectsOperationsService) List(projectId string) *ProjectsOperationsListCall {
+ c := &ProjectsOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsOperationsListCall) Fields(s ...googleapi.Field) *ProjectsOperationsListCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsOperationsListCall) Do() (*ListAggregatedOperationsResponse, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/operations")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("GET", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *ListAggregatedOperationsResponse
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Lists all operations in a project, across all zones.",
+ // "httpMethod": "GET",
+ // "id": "container.projects.operations.list",
+ // "parameterOrder": [
+ // "projectId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "The Google Developers Console project ID or project number.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{projectId}/operations",
+ // "response": {
+ // "$ref": "ListAggregatedOperationsResponse"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "container.projects.zones.clusters.create":
+
+type ProjectsZonesClustersCreateCall struct {
+ s *Service
+ projectId string
+ zoneId string
+ createclusterrequest *CreateClusterRequest
+ opt_ map[string]interface{}
+}
+
+// Create: Creates a cluster, consisting of the specified number and
+// type of Google Compute Engine instances, plus a Kubernetes master
+// instance.
+//
+// The cluster is created in the project's default
+// network.
+//
+// A firewall is added that allows traffic into port 443 on
+// the master, which enables HTTPS. A firewall and a route is added for
+// each node to allow the containers on that node to communicate with
+// all other instances in the cluster.
+//
+// Finally, a route named
+// k8s-iproute-10-xx-0-0 is created to track that the cluster's
+// 10.xx.0.0/16 CIDR has been assigned.
+func (r *ProjectsZonesClustersService) Create(projectId string, zoneId string, createclusterrequest *CreateClusterRequest) *ProjectsZonesClustersCreateCall {
+ c := &ProjectsZonesClustersCreateCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.zoneId = zoneId
+ c.createclusterrequest = createclusterrequest
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesClustersCreateCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersCreateCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsZonesClustersCreateCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ body, err := googleapi.WithoutDataWrapper.JSONReader(c.createclusterrequest)
+ if err != nil {
+ return nil, err
+ }
+ ctype := "application/json"
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("POST", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "zoneId": c.zoneId,
+ })
+ req.Header.Set("Content-Type", ctype)
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Creates a cluster, consisting of the specified number and type of Google Compute Engine instances, plus a Kubernetes master instance.\n\nThe cluster is created in the project's default network.\n\nA firewall is added that allows traffic into port 443 on the master, which enables HTTPS. A firewall and a route is added for each node to allow the containers on that node to communicate with all other instances in the cluster.\n\nFinally, a route named k8s-iproute-10-xx-0-0 is created to track that the cluster's 10.xx.0.0/16 CIDR has been assigned.",
+ // "httpMethod": "POST",
+ // "id": "container.projects.zones.clusters.create",
+ // "parameterOrder": [
+ // "projectId",
+ // "zoneId"
+ // ],
+ // "parameters": {
+ // "projectId": {
+ // "description": "The Google Developers Console project ID or project number.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zoneId": {
+ // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{projectId}/zones/{zoneId}/clusters",
+ // "request": {
+ // "$ref": "CreateClusterRequest"
+ // },
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "container.projects.zones.clusters.delete":
+
+type ProjectsZonesClustersDeleteCall struct {
+ s *Service
+ projectId string
+ zoneId string
+ clusterId string
+ opt_ map[string]interface{}
+}
+
+// Delete: Deletes the cluster, including the Kubernetes master and all
+// worker nodes.
+//
+// Firewalls and routes that were configured at cluster
+// creation are also deleted.
+func (r *ProjectsZonesClustersService) Delete(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersDeleteCall {
+ c := &ProjectsZonesClustersDeleteCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.zoneId = zoneId
+ c.clusterId = clusterId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesClustersDeleteCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersDeleteCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+func (c *ProjectsZonesClustersDeleteCall) Do() (*Operation, error) {
+ var body io.Reader = nil
+ params := make(url.Values)
+ params.Set("alt", "json")
+ if v, ok := c.opt_["fields"]; ok {
+ params.Set("fields", fmt.Sprintf("%v", v))
+ }
+ urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
+ urls += "?" + params.Encode()
+ req, _ := http.NewRequest("DELETE", urls, body)
+ googleapi.Expand(req.URL, map[string]string{
+ "projectId": c.projectId,
+ "zoneId": c.zoneId,
+ "clusterId": c.clusterId,
+ })
+ req.Header.Set("User-Agent", "google-api-go-client/0.5")
+ res, err := c.s.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer googleapi.CloseBody(res)
+ if err := googleapi.CheckResponse(res); err != nil {
+ return nil, err
+ }
+ var ret *Operation
+ if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+ return nil, err
+ }
+ return ret, nil
+ // {
+ // "description": "Deletes the cluster, including the Kubernetes master and all worker nodes.\n\nFirewalls and routes that were configured at cluster creation are also deleted.",
+ // "httpMethod": "DELETE",
+ // "id": "container.projects.zones.clusters.delete",
+ // "parameterOrder": [
+ // "projectId",
+ // "zoneId",
+ // "clusterId"
+ // ],
+ // "parameters": {
+ // "clusterId": {
+ // "description": "The name of the cluster to delete.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "projectId": {
+ // "description": "The Google Developers Console project ID or project number.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // },
+ // "zoneId": {
+ // "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+ // "location": "path",
+ // "required": true,
+ // "type": "string"
+ // }
+ // },
+ // "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
+ // "response": {
+ // "$ref": "Operation"
+ // },
+ // "scopes": [
+ // "https://www.googleapis.com/auth/cloud-platform"
+ // ]
+ // }
+
+}
+
+// method id "container.projects.zones.clusters.get":
+
+type ProjectsZonesClustersGetCall struct {
+ s *Service
+ projectId string
+ zoneId string
+ clusterId string
+ opt_ map[string]interface{}
+}
+
+// Get: Gets a specific cluster.
+func (r *ProjectsZonesClustersService) Get(projectId string, zoneId string, clusterId string) *ProjectsZonesClustersGetCall {
+ c := &ProjectsZonesClustersGetCall{s: r.s, opt_: make(map[string]interface{})}
+ c.projectId = projectId
+ c.zoneId = zoneId
+ c.clusterId = clusterId
+ return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+func (c *ProjectsZonesClustersGetCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersGetCall {
+ c.opt_["fields"] = googleapi.CombineFields(s)
+ return c
+}
+
+// Do executes the container.projects.zones.clusters.get request and
+// decodes the response into a Cluster. It returns an error if the
+// request cannot be constructed, the HTTP round trip fails, the server
+// responds with a non-2xx status, or the body cannot be decoded.
+func (c *ProjectsZonesClustersGetCall) Do() (*Cluster, error) {
+	var body io.Reader = nil
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters/{clusterId}")
+	urls += "?" + params.Encode()
+	// Check the error instead of discarding it: a malformed BasePath
+	// would otherwise leave req nil and panic at req.Header.Set below.
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"zoneId":    c.zoneId,
+		"clusterId": c.clusterId,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Cluster
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets a specific cluster.",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.zones.clusters.get",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zoneId",
+	//     "clusterId"
+	//   ],
+	//   "parameters": {
+	//     "clusterId": {
+	//       "description": "The name of the cluster to retrieve.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "The Google Developers Console project ID or project number.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zoneId": {
+	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{projectId}/zones/{zoneId}/clusters/{clusterId}",
+	//   "response": {
+	//     "$ref": "Cluster"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "container.projects.zones.clusters.list":
+
+// ProjectsZonesClustersListCall holds the state of a pending
+// container.projects.zones.clusters.list request; build it with
+// ProjectsZonesClustersService.List and execute it with Do.
+type ProjectsZonesClustersListCall struct {
+	s         *Service               // service that owns the HTTP client and base path
+	projectId string                 // Developers Console project ID or number (URL path parameter)
+	zoneId    string                 // Compute Engine zone to list clusters in (URL path parameter)
+	opt_      map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// List: Lists all clusters owned by a project in the specified zone.
+// projectId and zoneId are substituted into the request URL path when
+// Do is called; nothing is sent until then.
+func (r *ProjectsZonesClustersService) List(projectId string, zoneId string) *ProjectsZonesClustersListCall {
+	c := &ProjectsZonesClustersListCall{s: r.s, opt_: make(map[string]interface{})}
+	c.projectId = projectId
+	c.zoneId = zoneId
+	return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+// The combined selector is stored under opt_["fields"] and emitted as
+// the "fields" query parameter by Do.
+func (c *ProjectsZonesClustersListCall) Fields(s ...googleapi.Field) *ProjectsZonesClustersListCall {
+	c.opt_["fields"] = googleapi.CombineFields(s)
+	return c
+}
+
+// Do executes the container.projects.zones.clusters.list request and
+// decodes the response into a ListClustersResponse. It returns an error
+// if the request cannot be constructed, the HTTP round trip fails, the
+// server responds with a non-2xx status, or the body cannot be decoded.
+func (c *ProjectsZonesClustersListCall) Do() (*ListClustersResponse, error) {
+	var body io.Reader = nil
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/clusters")
+	urls += "?" + params.Encode()
+	// Check the error instead of discarding it: a malformed BasePath
+	// would otherwise leave req nil and panic at req.Header.Set below.
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"zoneId":    c.zoneId,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *ListClustersResponse
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Lists all clusters owned by a project in the specified zone.",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.zones.clusters.list",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zoneId"
+	//   ],
+	//   "parameters": {
+	//     "projectId": {
+	//       "description": "The Google Developers Console project ID or project number.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zoneId": {
+	//       "description": "The name of the Google Compute Engine zone in which the cluster resides.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{projectId}/zones/{zoneId}/clusters",
+	//   "response": {
+	//     "$ref": "ListClustersResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "container.projects.zones.operations.get":
+
+// ProjectsZonesOperationsGetCall holds the state of a pending
+// container.projects.zones.operations.get request; build it with
+// ProjectsZonesOperationsService.Get and execute it with Do.
+type ProjectsZonesOperationsGetCall struct {
+	s           *Service               // service that owns the HTTP client and base path
+	projectId   string                 // Developers Console project ID or number (URL path parameter)
+	zoneId      string                 // Compute Engine zone of the operation (URL path parameter)
+	operationId string                 // server-assigned operation name (URL path parameter)
+	opt_        map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// Get: Gets the specified operation.
+// projectId, zoneId and operationId are substituted into the request
+// URL path when Do is called; nothing is sent until then.
+func (r *ProjectsZonesOperationsService) Get(projectId string, zoneId string, operationId string) *ProjectsZonesOperationsGetCall {
+	c := &ProjectsZonesOperationsGetCall{s: r.s, opt_: make(map[string]interface{})}
+	c.projectId = projectId
+	c.zoneId = zoneId
+	c.operationId = operationId
+	return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+// The combined selector is stored under opt_["fields"] and emitted as
+// the "fields" query parameter by Do.
+func (c *ProjectsZonesOperationsGetCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsGetCall {
+	c.opt_["fields"] = googleapi.CombineFields(s)
+	return c
+}
+
+// Do executes the container.projects.zones.operations.get request and
+// decodes the response into an Operation. It returns an error if the
+// request cannot be constructed, the HTTP round trip fails, the server
+// responds with a non-2xx status, or the body cannot be decoded.
+func (c *ProjectsZonesOperationsGetCall) Do() (*Operation, error) {
+	var body io.Reader = nil
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations/{operationId}")
+	urls += "?" + params.Encode()
+	// Check the error instead of discarding it: a malformed BasePath
+	// would otherwise leave req nil and panic at req.Header.Set below.
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId":   c.projectId,
+		"zoneId":      c.zoneId,
+		"operationId": c.operationId,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *Operation
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Gets the specified operation.",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.zones.operations.get",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zoneId",
+	//     "operationId"
+	//   ],
+	//   "parameters": {
+	//     "operationId": {
+	//       "description": "The server-assigned name of the operation.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "projectId": {
+	//       "description": "The Google Developers Console project ID or project number.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zoneId": {
+	//       "description": "The name of the Google Compute Engine zone in which the operation resides. This is always the same zone as the cluster with which the operation is associated.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{projectId}/zones/{zoneId}/operations/{operationId}",
+	//   "response": {
+	//     "$ref": "Operation"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
+
+// method id "container.projects.zones.operations.list":
+
+// ProjectsZonesOperationsListCall holds the state of a pending
+// container.projects.zones.operations.list request; build it with
+// ProjectsZonesOperationsService.List and execute it with Do.
+type ProjectsZonesOperationsListCall struct {
+	s         *Service               // service that owns the HTTP client and base path
+	projectId string                 // Developers Console project ID or number (URL path parameter)
+	zoneId    string                 // Compute Engine zone to list operations for (URL path parameter)
+	opt_      map[string]interface{} // optional query parameters, e.g. "fields"
+}
+
+// List: Lists all operations in a project in a specific zone.
+// projectId and zoneId are substituted into the request URL path when
+// Do is called; nothing is sent until then.
+func (r *ProjectsZonesOperationsService) List(projectId string, zoneId string) *ProjectsZonesOperationsListCall {
+	c := &ProjectsZonesOperationsListCall{s: r.s, opt_: make(map[string]interface{})}
+	c.projectId = projectId
+	c.zoneId = zoneId
+	return c
+}
+
+// Fields allows partial responses to be retrieved.
+// See https://developers.google.com/gdata/docs/2.0/basics#PartialResponse
+// for more information.
+// The combined selector is stored under opt_["fields"] and emitted as
+// the "fields" query parameter by Do.
+func (c *ProjectsZonesOperationsListCall) Fields(s ...googleapi.Field) *ProjectsZonesOperationsListCall {
+	c.opt_["fields"] = googleapi.CombineFields(s)
+	return c
+}
+
+// Do executes the container.projects.zones.operations.list request and
+// decodes the response into a ListOperationsResponse. It returns an
+// error if the request cannot be constructed, the HTTP round trip
+// fails, the server responds with a non-2xx status, or the body cannot
+// be decoded.
+func (c *ProjectsZonesOperationsListCall) Do() (*ListOperationsResponse, error) {
+	var body io.Reader = nil
+	params := make(url.Values)
+	params.Set("alt", "json")
+	if v, ok := c.opt_["fields"]; ok {
+		params.Set("fields", fmt.Sprintf("%v", v))
+	}
+	urls := googleapi.ResolveRelative(c.s.BasePath, "{projectId}/zones/{zoneId}/operations")
+	urls += "?" + params.Encode()
+	// Check the error instead of discarding it: a malformed BasePath
+	// would otherwise leave req nil and panic at req.Header.Set below.
+	req, err := http.NewRequest("GET", urls, body)
+	if err != nil {
+		return nil, err
+	}
+	googleapi.Expand(req.URL, map[string]string{
+		"projectId": c.projectId,
+		"zoneId":    c.zoneId,
+	})
+	req.Header.Set("User-Agent", "google-api-go-client/0.5")
+	res, err := c.s.client.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer googleapi.CloseBody(res)
+	if err := googleapi.CheckResponse(res); err != nil {
+		return nil, err
+	}
+	var ret *ListOperationsResponse
+	if err := json.NewDecoder(res.Body).Decode(&ret); err != nil {
+		return nil, err
+	}
+	return ret, nil
+	// {
+	//   "description": "Lists all operations in a project in a specific zone.",
+	//   "httpMethod": "GET",
+	//   "id": "container.projects.zones.operations.list",
+	//   "parameterOrder": [
+	//     "projectId",
+	//     "zoneId"
+	//   ],
+	//   "parameters": {
+	//     "projectId": {
+	//       "description": "The Google Developers Console project ID or project number.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     },
+	//     "zoneId": {
+	//       "description": "The name of the Google Compute Engine zone to return operations for.",
+	//       "location": "path",
+	//       "required": true,
+	//       "type": "string"
+	//     }
+	//   },
+	//   "path": "{projectId}/zones/{zoneId}/operations",
+	//   "response": {
+	//     "$ref": "ListOperationsResponse"
+	//   },
+	//   "scopes": [
+	//     "https://www.googleapis.com/auth/cloud-platform"
+	//   ]
+	// }
+
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/conversion.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/conversion.go
index 6fc3c7d239fe..2537718d1df0 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/conversion.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/conversion.go
@@ -17,8 +17,10 @@ limitations under the License.
package api
import (
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
// Codec is the identity codec for this package - it can only convert itself
@@ -27,6 +29,16 @@ var Codec = runtime.CodecFor(Scheme, "")
func init() {
Scheme.AddConversionFuncs(
+ func(in *util.Time, out *util.Time, s conversion.Scope) error {
+ // Cannot deep copy these, because time.Time has unexported fields.
+ *out = *in
+ return nil
+ },
+ func(in *resource.Quantity, out *resource.Quantity, s conversion.Scope) error {
+ // Cannot deep copy these, because inf.Dec has unexported fields.
+ *out = *in.Copy()
+ return nil
+ },
// Convert ContainerManifest to BoundPod
func(in *ContainerManifest, out *BoundPod, s conversion.Scope) error {
out.Spec.Containers = in.Containers
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/errors.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/errors.go
index 8fbcd822a50e..88df9357c6c7 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/errors.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors/errors.go
@@ -102,7 +102,7 @@ func NewForbidden(kind, name string, err error) error {
Kind: kind,
ID: name,
},
- Message: fmt.Sprintf("%s %q is forbidden", kind, name),
+ Message: fmt.Sprintf("%s %q is forbidden: %v", kind, name, err),
}}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers.go
index 75ceff3a7cf7..5dff4d7c02b4 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers.go
@@ -18,7 +18,6 @@ package api
import (
"reflect"
- "strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
@@ -60,27 +59,4 @@ var Semantic = conversion.EqualitiesOrDie(
}
return a.Amount.Cmp(b.Amount) == 0
},
- pullPoliciesEqual,
)
-
-// TODO: Address these per #1502
-
-func IsPullAlways(p PullPolicy) bool {
- return pullPoliciesEqual(p, PullAlways)
-}
-
-func IsPullNever(p PullPolicy) bool {
- return pullPoliciesEqual(p, PullNever)
-}
-
-func IsPullIfNotPresent(p PullPolicy) bool {
- // Default to pull if not present
- if len(p) == 0 {
- return true
- }
- return pullPoliciesEqual(p, PullIfNotPresent)
-}
-
-func pullPoliciesEqual(p1, p2 PullPolicy) bool {
- return strings.ToLower(string(p1)) == strings.ToLower(string(p2))
-}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers_test.go
index 5d15ced93c00..c8cde624c31c 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/helpers_test.go
@@ -59,8 +59,6 @@ func TestSemantic(t *testing.T) {
true,
},
{resource.MustParse("2m"), resource.MustParse("1m"), false},
- {PullPolicy("NEVER"), PullPolicy("neveR"), true},
- {PullPolicy("NEVER"), PullPolicy("neveRi"), false},
}
for index, item := range table {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest/latest_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest/latest_test.go
index 56ae1a26e502..894043841482 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest/latest_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest/latest_test.go
@@ -131,6 +131,10 @@ var apiObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 1).Funcs(
c.RandString(): c.RandString(),
}
},
+ func(p *internal.PullPolicy, c fuzz.Continue) {
+ policies := []internal.PullPolicy{internal.PullAlways, internal.PullNever, internal.PullIfNotPresent}
+ *p = policies[c.Rand.Intn(len(policies))]
+ },
)
func TestInternalRoundTrip(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta.go
index 1b7da4d2674e..4bdfcdacd737 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta.go
@@ -23,7 +23,7 @@ import (
// FillObjectMetaSystemFields populates fields that are managed by the system on ObjectMeta.
func FillObjectMetaSystemFields(ctx Context, meta *ObjectMeta) {
meta.CreationTimestamp = util.Now()
- meta.UID = util.NewUUID().String()
+ meta.UID = util.NewUUID()
}
// HasObjectMetaSystemFieldValues returns true if fields that are managed by the system on ObjectMeta have values.
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/interfaces.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/interfaces.go
index def9a269d266..17161d1e4716 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/interfaces.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/interfaces.go
@@ -18,6 +18,7 @@ package meta
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
// VersionInterfaces contains the interfaces one should use for dealing with types of a particular version.
@@ -31,6 +32,7 @@ type VersionInterfaces struct {
// internal API objects. Attempting to set or retrieve a field on an object that does
// not support that field (Name, UID, Namespace on lists) will be a no-op and return
// a default value.
+// TODO: rename to ObjectInterface when we clear up these interfaces.
type Interface interface {
TypeInterface
@@ -38,8 +40,8 @@ type Interface interface {
SetNamespace(namespace string)
Name() string
SetName(name string)
- UID() string
- SetUID(uid string)
+ UID() types.UID
+ SetUID(uid types.UID)
ResourceVersion() string
SetResourceVersion(version string)
SelfLink() string
@@ -51,8 +53,6 @@ type Interface interface {
}
// TypeInterface exposes the type and APIVersion of versioned or internal API objects.
-// TODO: remove the need for this interface by refactoring runtime encoding to avoid
-// needing this object.
type TypeInterface interface {
APIVersion() string
SetAPIVersion(version string)
@@ -79,8 +79,8 @@ type MetadataAccessor interface {
Name(obj runtime.Object) (string, error)
SetName(obj runtime.Object, name string) error
- UID(obj runtime.Object) (string, error)
- SetUID(obj runtime.Object, uid string) error
+ UID(obj runtime.Object) (types.UID, error)
+ SetUID(obj runtime.Object, uid types.UID) error
SelfLink(obj runtime.Object) (string, error)
SetSelfLink(obj runtime.Object, selfLink string) error
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta.go
index e8f9e9e5c0a0..1353083474d3 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta.go
@@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/conversion"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
// Accessor takes an arbitary object pointer and returns meta.Interface.
@@ -75,6 +76,9 @@ func Accessor(obj interface{}) (Interface, error) {
// TypeAccessor returns an interface that allows retrieving and modifying the APIVersion
// and Kind of an in-memory internal object.
+// TODO: this interface is used to test code that does not have ObjectMeta or ListMeta
+// in round tripping (objects which can use apiVersion/kind, but do not fit the Kube
+// api conventions).
func TypeAccessor(obj interface{}) (TypeInterface, error) {
v, err := conversion.EnforcePtr(obj)
if err != nil {
@@ -174,7 +178,7 @@ func (resourceAccessor) SetName(obj runtime.Object, name string) error {
return nil
}
-func (resourceAccessor) UID(obj runtime.Object) (string, error) {
+func (resourceAccessor) UID(obj runtime.Object) (types.UID, error) {
accessor, err := Accessor(obj)
if err != nil {
return "", err
@@ -182,7 +186,7 @@ func (resourceAccessor) UID(obj runtime.Object) (string, error) {
return accessor.UID(), nil
}
-func (resourceAccessor) SetUID(obj runtime.Object, uid string) error {
+func (resourceAccessor) SetUID(obj runtime.Object, uid types.UID) error {
accessor, err := Accessor(obj)
if err != nil {
return err
@@ -264,7 +268,7 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e
type genericAccessor struct {
namespace *string
name *string
- uid *string
+ uid *types.UID
apiVersion *string
kind *string
resourceVersion *string
@@ -301,14 +305,14 @@ func (a genericAccessor) SetName(name string) {
*a.name = name
}
-func (a genericAccessor) UID() string {
+func (a genericAccessor) UID() types.UID {
if a.uid == nil {
return ""
}
return *a.uid
}
-func (a genericAccessor) SetUID(uid string) {
+func (a genericAccessor) SetUID(uid types.UID) {
if a.uid == nil {
return
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta_test.go
index 2309c11f365b..b357fca60d86 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/meta_test.go
@@ -63,7 +63,7 @@ func TestGenericTypeMeta(t *testing.T) {
if e, a := "foo", accessor.Name(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
- if e, a := "uid", accessor.UID(); e != a {
+ if e, a := "uid", string(accessor.UID()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := "a", accessor.APIVersion(); e != a {
@@ -79,6 +79,17 @@ func TestGenericTypeMeta(t *testing.T) {
t.Errorf("expected %v, got %v", e, a)
}
+ typeAccessor, err := TypeAccessor(&j)
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+ if e, a := "a", accessor.APIVersion(); e != a {
+ t.Errorf("expected %v, got %v", e, a)
+ }
+ if e, a := "b", accessor.Kind(); e != a {
+ t.Errorf("expected %v, got %v", e, a)
+ }
+
accessor.SetNamespace("baz")
accessor.SetName("bar")
accessor.SetUID("other")
@@ -109,6 +120,15 @@ func TestGenericTypeMeta(t *testing.T) {
if e, a := "google.com", j.SelfLink; e != a {
t.Errorf("expected %v, got %v", e, a)
}
+
+ typeAccessor.SetAPIVersion("d")
+ typeAccessor.SetKind("e")
+ if e, a := "d", j.APIVersion; e != a {
+ t.Errorf("expected %v, got %v", e, a)
+ }
+ if e, a := "e", j.Kind; e != a {
+ t.Errorf("expected %v, got %v", e, a)
+ }
}
type InternalTypeMeta struct {
@@ -162,7 +182,7 @@ func TestGenericTypeMetaAccessor(t *testing.T) {
if err != nil {
t.Errorf("unexpected error: %v", err)
}
- if e, a := "uid", uid; e != a {
+ if e, a := "uid", string(uid); e != a {
t.Errorf("expected %v, got %v", e, a)
}
apiVersion, err := accessor.APIVersion(j)
@@ -311,7 +331,7 @@ func TestGenericObjectMeta(t *testing.T) {
if e, a := "foo", accessor.Name(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
- if e, a := "uid", accessor.UID(); e != a {
+ if e, a := "uid", string(accessor.UID()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := "a", accessor.APIVersion(); e != a {
@@ -403,7 +423,7 @@ func TestGenericListMeta(t *testing.T) {
if e, a := "", accessor.Name(); e != a {
t.Errorf("expected %v, got %v", e, a)
}
- if e, a := "", accessor.UID(); e != a {
+ if e, a := "", string(accessor.UID()); e != a {
t.Errorf("expected %v, got %v", e, a)
}
if e, a := "a", accessor.APIVersion(); e != a {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/restmapper_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/restmapper_test.go
index 02eb507777f1..62b251111829 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/restmapper_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta/restmapper_test.go
@@ -115,7 +115,7 @@ func TestKindToResource(t *testing.T) {
for i, testCase := range testCases {
plural, singular := kindToResource(testCase.Kind, testCase.MixedCase)
if singular != testCase.Singular || plural != testCase.Plural {
- t.Errorf("%d: unexpected plural and signular: %s %s", i, plural, singular)
+ t.Errorf("%d: unexpected plural and singular: %s %s", i, plural, singular)
}
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/register.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/register.go
index 298c62cea18f..5c138425b187 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/register.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/register.go
@@ -25,9 +25,9 @@ var Scheme = runtime.NewScheme()
func init() {
Scheme.AddKnownTypes("",
- &PodContainerInfo{},
- &PodList{},
&Pod{},
+ &PodList{},
+ &PodStatusResult{},
&ReplicationControllerList{},
&ReplicationController{},
&ServiceList{},
@@ -55,9 +55,9 @@ func init() {
Scheme.AddKnownTypeWithName("", "ServerOpList", &OperationList{})
}
-func (*PodContainerInfo) IsAnAPIObject() {}
func (*Pod) IsAnAPIObject() {}
func (*PodList) IsAnAPIObject() {}
+func (*PodStatusResult) IsAnAPIObject() {}
func (*ReplicationController) IsAnAPIObject() {}
func (*ReplicationControllerList) IsAnAPIObject() {}
func (*Service) IsAnAPIObject() {}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity.go
index 71c89fe12af8..7ec8bf9c4cdb 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity.go
@@ -18,12 +18,12 @@ package resource
import (
"errors"
- "flag"
"fmt"
"math/big"
"regexp"
"strings"
+ flag "github.com/spf13/pflag"
"speter.net/go/exp/math/dec/inf"
)
@@ -386,6 +386,7 @@ type qFlag struct {
dest *Quantity
}
+// Sets the value of the internal Quantity. (used by flag & pflag)
func (qf qFlag) Set(val string) error {
q, err := ParseQuantity(val)
if err != nil {
@@ -396,10 +397,16 @@ func (qf qFlag) Set(val string) error {
return nil
}
+// Converts the value of the internal Quantity to a string. (used by flag & pflag)
func (qf qFlag) String() string {
return qf.dest.String()
}
+// States the type of flag this is (Quantity). (used by pflag)
+func (qf qFlag) Type() string {
+ return "quantity"
+}
+
// QuantityFlag is a helper that makes a quantity flag (using standard flag package).
// Will panic if defaultValue is not a valid quantity.
func QuantityFlag(flagName, defaultValue, description string) *Quantity {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity_test.go
index 9b0ab7281b13..c550acfc6059 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource/quantity_test.go
@@ -22,6 +22,7 @@ import (
"testing"
fuzz "github.com/google/gofuzz"
+ "github.com/spf13/pflag"
"speter.net/go/exp/math/dec/inf"
)
@@ -487,3 +488,10 @@ func TestQFlagSet(t *testing.T) {
t.Errorf("Unexpected result %v != %v", e, a)
}
}
+
+func TestQFlagIsPFlag(t *testing.T) {
+ var pfv pflag.Value = qFlag{}
+ if e, a := "quantity", pfv.Type(); e != a {
+ t.Errorf("Unexpected result %v != %v", e, a)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/serialization_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/serialization_test.go
index c05d13b5ffec..7873819fee2d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/serialization_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/serialization_test.go
@@ -19,7 +19,6 @@ package api_test
import (
"encoding/json"
- "flag"
"math/rand"
"reflect"
"strconv"
@@ -37,6 +36,7 @@ import (
docker "github.com/fsouza/go-dockerclient"
fuzz "github.com/google/gofuzz"
+ flag "github.com/spf13/pflag"
"speter.net/go/exp/math/dec/inf"
)
@@ -146,7 +146,6 @@ func fuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
c.RandString(): c.RandString(),
}
},
-
func(q *resource.Quantity, c fuzz.Continue) {
// Real Quantity fuzz testing is done elsewhere;
// this limited subset of functionality survives
@@ -156,6 +155,10 @@ func fuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
//q.Amount.SetScale(inf.Scale(-c.Intn(12)))
q.Amount.SetUnscaled(c.Int63n(1000))
},
+ func(p *api.PullPolicy, c fuzz.Continue) {
+ policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
+ *p = policies[c.Rand.Intn(len(policies))]
+ },
)
return f
}
@@ -163,7 +166,7 @@ func fuzzerFor(t *testing.T, version string, src rand.Source) *fuzz.Fuzzer {
func fuzzInternalObject(t *testing.T, forVersion string, item runtime.Object, seed int64) runtime.Object {
fuzzerFor(t, forVersion, rand.NewSource(seed)).Fuzz(item)
- j, err := meta.Accessor(item)
+ j, err := meta.TypeAccessor(item)
if err != nil {
t.Fatalf("Unexpected error %v for %#v", err, item)
}
@@ -264,7 +267,7 @@ func TestRoundTripTypes(t *testing.T) {
if err != nil {
t.Fatalf("Couldn't make a %v? %v", kind, err)
}
- if _, err := meta.Accessor(item); err != nil {
+ if _, err := meta.TypeAccessor(item); err != nil {
t.Fatalf("%q is not a TypeMeta and cannot be tested - add it to nonRoundTrippableTypes: %v", kind, err)
}
roundTripSame(t, item)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/types.go
index b30de6c32c5d..80b41de867bd 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/types.go
@@ -19,6 +19,7 @@ package api
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
@@ -93,7 +94,7 @@ type ObjectMeta struct {
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
- UID string `json:"uid,omitempty"`
+ UID types.UID `json:"uid,omitempty"`
// An opaque value that represents the version of this resource. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
@@ -144,18 +145,19 @@ type Volume struct {
// Source represents the location and type of a volume to mount.
// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
- Source *VolumeSource `json:"source"`
+ Source VolumeSource `json:"source,omitempty"`
}
-// VolumeSource represents the source location of a valume to mount.
+// VolumeSource represents the source location of a volume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
- // HostDir represents a pre-existing directory on the host machine that is directly
- // exposed to the container. This is generally used for system agents or other privileged
- // things that are allowed to see the host machine. Most containers will NOT need this.
+ // HostPath represents file or directory on the host machine that is
+ // directly exposed to the container. This is generally used for system
+ // agents or other privileged things that are allowed to see the host
+ // machine. Most containers will NOT need this.
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
- HostDir *HostDir `json:"hostDir"`
+ HostPath *HostPath `json:"hostPath"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
EmptyDir *EmptyDir `json:"emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
@@ -165,8 +167,8 @@ type VolumeSource struct {
GitRepo *GitRepo `json:"gitRepo"`
}
-// HostDir represents bare host directory volume.
-type HostDir struct {
+// HostPath represents bare host directory volume.
+type HostPath struct {
Path string `json:"path"`
}
@@ -289,11 +291,11 @@ type PullPolicy string
const (
// PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.
- PullAlways PullPolicy = "PullAlways"
+ PullAlways PullPolicy = "Always"
// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
- PullNever PullPolicy = "PullNever"
+ PullNever PullPolicy = "Never"
// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
- PullIfNotPresent PullPolicy = "PullIfNotPresent"
+ PullIfNotPresent PullPolicy = "IfNotPresent"
)
// Container represents a single container that is expected to be run on the host.
@@ -417,6 +419,7 @@ type ContainerStatus struct {
type PodInfo map[string]ContainerStatus
// PodContainerInfo is a wrapper for PodInfo that can be encode/decoded
+// DEPRECATED: Replaced with PodStatusResult
type PodContainerInfo struct {
TypeMeta `json:",inline"`
ObjectMeta `json:"metadata,omitempty"`
@@ -502,6 +505,15 @@ type PodStatus struct {
Info PodInfo `json:"info,omitempty"`
}
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
+type PodStatusResult struct {
+ TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ Status PodStatus `json:"status,omitempty"`
+}
+
// Pod is a collection of containers, used as either input (create, update) or as output (list, get).
type Pod struct {
TypeMeta `json:",inline"`
@@ -1004,12 +1016,12 @@ type OperationList struct {
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
- Kind string `json:"kind,omitempty"`
- Namespace string `json:"namespace,omitempty"`
- Name string `json:"name,omitempty"`
- UID string `json:"uid,omitempty"`
- APIVersion string `json:"apiVersion,omitempty"`
- ResourceVersion string `json:"resourceVersion,omitempty"`
+ Kind string `json:"kind,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Name string `json:"name,omitempty"`
+ UID types.UID `json:"uid,omitempty"`
+ APIVersion string `json:"apiVersion,omitempty"`
+ ResourceVersion string `json:"resourceVersion,omitempty"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain information to identify the sub-object. For example, if the object
@@ -1038,18 +1050,9 @@ type Event struct {
// Required. The object that this event is about.
InvolvedObject ObjectReference `json:"involvedObject,omitempty"`
- // Should be a short, machine understandable string that describes the current condition
- // of the referred object. This should not give the reason for being in this state.
- // Examples: "Running", "CantStart", "CantSchedule", "Deleted".
- // It's OK for components to make up conditions to report here, but the same string should
- // always be used for the same conditions.
- // TODO: define a way of making sure these are consistent and don't collide.
- // TODO: provide exact specification for format.
- Condition string `json:"condition,omitempty"`
-
// Optional; this should be a short, machine understandable string that gives the reason
- // for the transition into the object's current condition. For example, if ObjectCondition is
- // "CantStart", StatusReason might be "ImageNotFound".
+ // for this event being generated. For example, if the event is reporting that a container
+ // can't start, the Reason might be "ImageNotFound".
// TODO: provide exact specification for format.
Reason string `json:"reason,omitempty"`
@@ -1085,7 +1088,7 @@ type ContainerManifest struct {
// TODO: UUID on Manifest is deprecated in the future once we are done
// with the API refactoring. It is required for now to determine the instance
// of a Pod.
- UUID string `json:"uuid,omitempty"`
+ UUID types.UID `json:"uuid,omitempty"`
Volumes []Volume `json:"volumes"`
Containers []Container `json:"containers"`
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/unversioned.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/unversioned.go
index 302c0eaa1a82..c1604b5612f8 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/unversioned.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/unversioned.go
@@ -24,3 +24,9 @@ package api
type APIVersions struct {
Versions []string `json:"versions"`
}
+
+// RootPaths lists the paths available at root.
+// For example: "/healthz", "/api".
+type RootPaths struct {
+ Paths []string `json:"paths"`
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion.go
index 9a0535c1fccc..eaeb5de893de 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion.go
@@ -314,6 +314,30 @@ func init() {
}
return nil
},
+ func(in *newer.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
+ if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.ObjectMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.Status, &out.State, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *PodStatusResult, out *newer.PodStatusResult, s conversion.Scope) error {
+ if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.TypeMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.State, &out.Status, 0); err != nil {
+ return err
+ }
+ return nil
+ },
func(in *newer.ReplicationController, out *ReplicationController, s conversion.Scope) error {
if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
@@ -562,7 +586,6 @@ func init() {
return nil
},
- // Event Status <-> Condition
// Event Source <-> Source.Component
// Event Host <-> Source.Host
// TODO: remove this when it becomes possible to specify a field name conversion on a specific type
@@ -573,7 +596,6 @@ func init() {
if err := s.Convert(&in.ObjectMeta, &out.TypeMeta, 0); err != nil {
return err
}
- out.Status = in.Condition
out.Reason = in.Reason
out.Message = in.Message
out.Source = in.Source.Component
@@ -588,7 +610,6 @@ func init() {
if err := s.Convert(&in.TypeMeta, &out.ObjectMeta, 0); err != nil {
return err
}
- out.Condition = in.Status
out.Reason = in.Reason
out.Message = in.Message
out.Source.Component = in.Source
@@ -651,6 +672,74 @@ func init() {
}
return nil
},
+
+ // VolumeSource's HostDir is deprecated in favor of HostPath.
+ // TODO: It would be great if I could just map field names to
+ // convert or else maybe say "convert all members of this
+ // struct" and then fix up only the stuff that changed.
+ func(in *newer.VolumeSource, out *VolumeSource, s conversion.Scope) error {
+ if err := s.Convert(&in.EmptyDir, &out.EmptyDir, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GitRepo, &out.GitRepo, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.HostPath, &out.HostDir, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error {
+ if err := s.Convert(&in.EmptyDir, &out.EmptyDir, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GitRepo, &out.GitRepo, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.HostDir, &out.HostPath, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+
+ func(in *newer.PullPolicy, out *PullPolicy, s conversion.Scope) error {
+ switch *in {
+ case newer.PullAlways:
+ *out = PullAlways
+ case newer.PullNever:
+ *out = PullNever
+ case newer.PullIfNotPresent:
+ *out = PullIfNotPresent
+ case "":
+ *out = ""
+ default:
+ // Let unknown values through - they will get caught by validation
+ *out = PullPolicy(*in)
+ }
+ return nil
+ },
+ func(in *PullPolicy, out *newer.PullPolicy, s conversion.Scope) error {
+ switch *in {
+ case PullAlways:
+ *out = newer.PullAlways
+ case PullNever:
+ *out = newer.PullNever
+ case PullIfNotPresent:
+ *out = newer.PullIfNotPresent
+ case "":
+ *out = ""
+ default:
+ // Let unknown values through - they will get caught by validation
+ *out = newer.PullPolicy(*in)
+ }
+ return nil
+ },
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion_test.go
index a3a5a8cbed98..17a120d526be 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/conversion_test.go
@@ -270,3 +270,49 @@ func TestServiceEmptySelector(t *testing.T) {
t.Errorf("unexpected selector: %#v", obj)
}
}
+
+func TestPullPolicyConversion(t *testing.T) {
+ table := []struct {
+ versioned current.PullPolicy
+ internal newer.PullPolicy
+ }{
+ {
+ versioned: current.PullAlways,
+ internal: newer.PullAlways,
+ }, {
+ versioned: current.PullNever,
+ internal: newer.PullNever,
+ }, {
+ versioned: current.PullIfNotPresent,
+ internal: newer.PullIfNotPresent,
+ }, {
+ versioned: "",
+ internal: "",
+ }, {
+ versioned: "invalid value",
+ internal: "invalid value",
+ },
+ }
+ for _, item := range table {
+ var got newer.PullPolicy
+ err := Convert(&item.versioned, &got)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ continue
+ }
+ if e, a := item.internal, got; e != a {
+ t.Errorf("Expected: %q, got %q", e, a)
+ }
+ }
+ for _, item := range table {
+ var got current.PullPolicy
+ err := Convert(&item.internal, &got)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ continue
+ }
+ if e, a := item.versioned, got; e != a {
+ t.Errorf("Expected: %q, got %q", e, a)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/register.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/register.go
index 6f936305d2c7..9d5e19bc8e87 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/register.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/register.go
@@ -27,7 +27,7 @@ var Codec = runtime.CodecFor(api.Scheme, "v1beta1")
func init() {
api.Scheme.AddKnownTypes("v1beta1",
&Pod{},
- &PodContainerInfo{},
+ &PodStatusResult{},
&PodList{},
&ReplicationController{},
&ReplicationControllerList{},
@@ -57,7 +57,7 @@ func init() {
}
func (*Pod) IsAnAPIObject() {}
-func (*PodContainerInfo) IsAnAPIObject() {}
+func (*PodStatusResult) IsAnAPIObject() {}
func (*PodList) IsAnAPIObject() {}
func (*ReplicationController) IsAnAPIObject() {}
func (*ReplicationControllerList) IsAnAPIObject() {}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/types.go
index 77e19b6b9b50..1f11c12bbd7b 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1/types.go
@@ -18,6 +18,7 @@ package v1beta1
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
@@ -56,7 +57,7 @@ type ContainerManifest struct {
// TODO: UUID on Manifext is deprecated in the future once we are done
// with the API refactory. It is required for now to determine the instance
// of a Pod.
- UUID string `json:"uuid,omitempty" description:"manifest UUID"`
+ UUID types.UID `json:"uuid,omitempty" description:"manifest UUID"`
Volumes []Volume `json:"volumes" description:"list of volumes that can be mounted by containers belonging to the pod"`
Containers []Container `json:"containers" description:"list of containers belonging to the pod"`
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" description:"restart policy for all containers within the pod; one of RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyNever"`
@@ -78,7 +79,7 @@ type Volume struct {
// Source represents the location and type of a volume to mount.
// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
- Source *VolumeSource `json:"source" description:"location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir"`
+ Source VolumeSource `json:"source,omitempty" description:"location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir"`
}
// VolumeSource represents the source location of a valume to mount.
@@ -89,7 +90,7 @@ type VolumeSource struct {
// things that are allowed to see the host machine. Most containers will NOT need this.
// TODO(jonesdl) We need to restrict who can use host directory mounts and
// who can/can not mount host directories as read/write.
- HostDir *HostDir `json:"hostDir" description:"pre-existing host directory; generally for privileged system daemons or other agents tied to the host"`
+ HostDir *HostPath `json:"hostDir" description:"pre-existing host file or directory; generally for privileged system daemons or other agents tied to the host"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
EmptyDir *EmptyDir `json:"emptyDir" description:"temporary directory that shares a pod's lifetime"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
@@ -99,8 +100,8 @@ type VolumeSource struct {
GitRepo *GitRepo `json:"gitRepo" description:"git repository at a particular revision"`
}
-// HostDir represents bare host directory volume.
-type HostDir struct {
+// HostPath represents bare host directory volume.
+type HostPath struct {
Path string `json:"path" description:"path of the directory on the host"`
}
@@ -297,7 +298,7 @@ type Lifecycle struct {
type TypeMeta struct {
Kind string `json:"kind,omitempty" description:"kind of object, in CamelCase"`
ID string `json:"id,omitempty" description:"name of the object; must be a DNS_SUBDOMAIN and unique among all objects of the same kind within the same namespace; used in resource URLs"`
- UID string `json:"uid,omitempty" description:"UUID assigned by the system upon creation, unique across space and time"`
+ UID types.UID `json:"uid,omitempty" description:"UUID assigned by the system upon creation, unique across space and time"`
CreationTimestamp util.Time `json:"creationTimestamp,omitempty" description:"RFC 3339 date and time at which the object was created; recorded by the system; null for lists"`
SelfLink string `json:"selfLink,omitempty" description:"URL for the object"`
ResourceVersion uint64 `json:"resourceVersion,omitempty" description:"string that identifies the internal version of this object that can be used by clients to determine when objects have changed; value must be treated as opaque by clients and passed unmodified back to the server"`
@@ -411,6 +412,11 @@ type PodState struct {
Info PodInfo `json:"info,omitempty" description:"map of container name to container status"`
}
+type PodStatusResult struct {
+ TypeMeta `json:",inline"`
+ State PodState `json:"state,omitempty" description:"current state of the pod"`
+}
+
// PodList is a list of Pods.
type PodList struct {
TypeMeta `json:",inline"`
@@ -776,12 +782,12 @@ type ServerOpList struct {
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
- Kind string `json:"kind,omitempty" description:"kind of the referent"`
- Namespace string `json:"namespace,omitempty" description:"namespace of the referent"`
- ID string `json:"name,omitempty" description:"id of the referent"`
- UID string `json:"uid,omitempty" description:"uid of the referent"`
- APIVersion string `json:"apiVersion,omitempty" description:"API version of the referent"`
- ResourceVersion string `json:"resourceVersion,omitempty" description:"specific resourceVersion to which this reference is made, if any"`
+ Kind string `json:"kind,omitempty" description:"kind of the referent"`
+ Namespace string `json:"namespace,omitempty" description:"namespace of the referent"`
+ ID string `json:"name,omitempty" description:"id of the referent"`
+ UID types.UID `json:"uid,omitempty" description:"uid of the referent"`
+ APIVersion string `json:"apiVersion,omitempty" description:"API version of the referent"`
+ ResourceVersion string `json:"resourceVersion,omitempty" description:"specific resourceVersion to which this reference is made, if any"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain information to identify the sub-object. For example, if the object
@@ -809,6 +815,7 @@ type Event struct {
// always be used for the same status.
// TODO: define a way of making sure these are consistent and don't collide.
// TODO: provide exact specification for format.
+ // DEPRECATED: Status (a.k.a Condition) value will be ignored.
Status string `json:"status,omitempty" description:"short, machine understandable string that describes the current status of the referred object"`
// Optional; this should be a short, machine understandable string that gives the reason
@@ -825,7 +832,7 @@ type Event struct {
// TODO: provide exact specification for format.
Source string `json:"source,omitempty" description:"component reporting this event; short machine understandable string"`
// Host name on which the event is generated.
- Host string `json:"host,omitempty"`
+ Host string `json:"host,omitempty" description:"host name on which this event was generated"`
// The time at which the client recorded the event. (Time of server receipt is in TypeMeta.)
Timestamp util.Time `json:"timestamp,omitempty" description:"time at which the client recorded the event"`
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion.go
index 9a316b425e96..3666e9314c84 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion.go
@@ -345,6 +345,31 @@ func init() {
return nil
},
+ func(in *newer.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
+ if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.ObjectMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.Status, &out.State, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *PodStatusResult, out *newer.PodStatusResult, s conversion.Scope) error {
+ if err := s.Convert(&in.TypeMeta, &out.TypeMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.TypeMeta, &out.ObjectMeta, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.State, &out.Status, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+
func(in *newer.PodSpec, out *PodState, s conversion.Scope) error {
if err := s.Convert(&in, &out.Manifest, 0); err != nil {
return err
@@ -489,7 +514,6 @@ func init() {
if err := s.Convert(&in.ObjectMeta, &out.TypeMeta, 0); err != nil {
return err
}
- out.Status = in.Condition
out.Reason = in.Reason
out.Message = in.Message
out.Source = in.Source.Component
@@ -504,7 +528,6 @@ func init() {
if err := s.Convert(&in.TypeMeta, &out.ObjectMeta, 0); err != nil {
return err
}
- out.Condition = in.Status
out.Reason = in.Reason
out.Message = in.Message
out.Source.Component = in.Source
@@ -567,6 +590,69 @@ func init() {
}
return nil
},
+ func(in *newer.VolumeSource, out *VolumeSource, s conversion.Scope) error {
+ if err := s.Convert(&in.EmptyDir, &out.EmptyDir, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GitRepo, &out.GitRepo, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.HostPath, &out.HostDir, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+ func(in *VolumeSource, out *newer.VolumeSource, s conversion.Scope) error {
+ if err := s.Convert(&in.EmptyDir, &out.EmptyDir, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GitRepo, &out.GitRepo, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.GCEPersistentDisk, &out.GCEPersistentDisk, 0); err != nil {
+ return err
+ }
+ if err := s.Convert(&in.HostDir, &out.HostPath, 0); err != nil {
+ return err
+ }
+ return nil
+ },
+
+ func(in *newer.PullPolicy, out *PullPolicy, s conversion.Scope) error {
+ switch *in {
+ case newer.PullAlways:
+ *out = PullAlways
+ case newer.PullNever:
+ *out = PullNever
+ case newer.PullIfNotPresent:
+ *out = PullIfNotPresent
+ case "":
+ *out = ""
+ default:
+ // Let unknown values through - they will get caught by validation
+ *out = PullPolicy(*in)
+ }
+ return nil
+ },
+ func(in *PullPolicy, out *newer.PullPolicy, s conversion.Scope) error {
+ switch *in {
+ case PullAlways:
+ *out = newer.PullAlways
+ case PullNever:
+ *out = newer.PullNever
+ case PullIfNotPresent:
+ *out = newer.PullIfNotPresent
+ case "":
+ *out = ""
+ default:
+ // Let unknown values through - they will get caught by validation
+ *out = newer.PullPolicy(*in)
+ }
+ return nil
+ },
)
if err != nil {
// If one of the conversion functions is malformed, detect it immediately.
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion_test.go
index 8e3c3ebc7756..2b04256d9bf2 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/conversion_test.go
@@ -100,3 +100,49 @@ func TestNodeConversion(t *testing.T) {
t.Errorf("unexpected encoding: %s - %#v", m["kind"], string(data))
}
}
+
+func TestPullPolicyConversion(t *testing.T) {
+ table := []struct {
+ versioned current.PullPolicy
+ internal newer.PullPolicy
+ }{
+ {
+ versioned: current.PullAlways,
+ internal: newer.PullAlways,
+ }, {
+ versioned: current.PullNever,
+ internal: newer.PullNever,
+ }, {
+ versioned: current.PullIfNotPresent,
+ internal: newer.PullIfNotPresent,
+ }, {
+ versioned: "",
+ internal: "",
+ }, {
+ versioned: "invalid value",
+ internal: "invalid value",
+ },
+ }
+ for _, item := range table {
+ var got newer.PullPolicy
+ err := newer.Scheme.Convert(&item.versioned, &got)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ continue
+ }
+ if e, a := item.internal, got; e != a {
+ t.Errorf("Expected: %q, got %q", e, a)
+ }
+ }
+ for _, item := range table {
+ var got current.PullPolicy
+ err := newer.Scheme.Convert(&item.internal, &got)
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ continue
+ }
+ if e, a := item.versioned, got; e != a {
+ t.Errorf("Expected: %q, got %q", e, a)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/register.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/register.go
index 121f3d20a8e3..7682d9ad246d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/register.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/register.go
@@ -27,7 +27,7 @@ var Codec = runtime.CodecFor(api.Scheme, "v1beta2")
func init() {
api.Scheme.AddKnownTypes("v1beta2",
&Pod{},
- &PodContainerInfo{},
+ &PodStatusResult{},
&PodList{},
&ReplicationController{},
&ReplicationControllerList{},
@@ -57,7 +57,7 @@ func init() {
}
func (*Pod) IsAnAPIObject() {}
-func (*PodContainerInfo) IsAnAPIObject() {}
+func (*PodStatusResult) IsAnAPIObject() {}
func (*PodList) IsAnAPIObject() {}
func (*ReplicationController) IsAnAPIObject() {}
func (*ReplicationControllerList) IsAnAPIObject() {}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/types.go
index e8c26dab202c..3f1ef0bbacb1 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta2/types.go
@@ -18,6 +18,7 @@ package v1beta2
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
@@ -52,7 +53,7 @@ type Volume struct {
// Source represents the location and type of a volume to mount.
// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
- Source *VolumeSource `json:"source" description:"location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir"`
+ Source VolumeSource `json:"source,omitempty" description:"location and type of volume to mount; at most one of HostDir, EmptyDir, GCEPersistentDisk, or GitRepo; default is EmptyDir"`
}
// VolumeSource represents the source location of a valume to mount.
@@ -63,7 +64,7 @@ type VolumeSource struct {
// things that are allowed to see the host machine. Most containers will NOT need this.
// TODO(jonesdl) We need to restrict who can use host directory mounts and
// who can/can not mount host directories as read/write.
- HostDir *HostDir `json:"hostDir" description:"pre-existing host directory; generally for privileged system daemons or other agents tied to the host"`
+ HostDir *HostPath `json:"hostDir" description:"pre-existing host file or directory; generally for privileged system daemons or other agents tied to the host"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
EmptyDir *EmptyDir `json:"emptyDir" description:"temporary directory that shares a pod's lifetime"`
// A persistent disk that is mounted to the
@@ -73,8 +74,8 @@ type VolumeSource struct {
GitRepo *GitRepo `json:"gitRepo" description:"git repository at a particular revision"`
}
-// HostDir represents bare host directory volume.
-type HostDir struct {
+// HostPath represents bare host directory volume.
+type HostPath struct {
Path string `json:"path" description:"path of the directory on the host"`
}
@@ -260,7 +261,7 @@ type Lifecycle struct {
type TypeMeta struct {
Kind string `json:"kind,omitempty" description:"kind of object, in CamelCase"`
ID string `json:"id,omitempty" description:"name of the object; must be a DNS_SUBDOMAIN and unique among all objects of the same kind within the same namespace; used in resource URLs"`
- UID string `json:"uid,omitempty" description:"UUID assigned by the system upon creation, unique across space and time"`
+ UID types.UID `json:"uid,omitempty" description:"UUID assigned by the system upon creation, unique across space and time"`
CreationTimestamp util.Time `json:"creationTimestamp,omitempty" description:"RFC 3339 date and time at which the object was created; recorded by the system; null for lists"`
SelfLink string `json:"selfLink,omitempty" description:"URL for the object"`
ResourceVersion uint64 `json:"resourceVersion,omitempty" description:"string that identifies the internal version of this object that can be used by clients to determine when objects have changed; value must be treated as opaque by clients and passed unmodified back to the server"`
@@ -374,6 +375,11 @@ type PodState struct {
Info PodInfo `json:"info,omitempty" description:"map of container name to container status"`
}
+type PodStatusResult struct {
+ TypeMeta `json:",inline"`
+ State PodState `json:"state,omitempty" description:"current state of the pod"`
+}
+
// PodList is a list of Pods.
type PodList struct {
TypeMeta `json:",inline"`
@@ -749,12 +755,12 @@ type ServerOpList struct {
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
- Kind string `json:"kind,omitempty" description:"kind of the referent"`
- Namespace string `json:"namespace,omitempty" description:"namespace of the referent"`
- ID string `json:"name,omitempty" description:"id of the referent"`
- UID string `json:"uid,omitempty" description:"uid of the referent"`
- APIVersion string `json:"apiVersion,omitempty" description:"API version of the referent"`
- ResourceVersion string `json:"resourceVersion,omitempty" description:"specific resourceVersion to which this reference is made, if any"`
+ Kind string `json:"kind,omitempty" description:"kind of the referent"`
+ Namespace string `json:"namespace,omitempty" description:"namespace of the referent"`
+ ID string `json:"name,omitempty" description:"id of the referent"`
+ UID types.UID `json:"uid,omitempty" description:"uid of the referent"`
+ APIVersion string `json:"apiVersion,omitempty" description:"API version of the referent"`
+ ResourceVersion string `json:"resourceVersion,omitempty" description:"specific resourceVersion to which this reference is made, if any"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain information to identify the sub-object. For example, if the object
@@ -782,6 +788,7 @@ type Event struct {
// always be used for the same status.
// TODO: define a way of making sure these are consistent and don't collide.
// TODO: provide exact specification for format.
+ // DEPRECATED: Status (a.k.a Condition) value will be ignored.
Status string `json:"status,omitempty" description:"short, machine understandable string that describes the current status of the referred object"`
// Optional; this should be a short, machine understandable string that gives the reason
@@ -799,7 +806,7 @@ type Event struct {
Source string `json:"source,omitempty" description:"component reporting this event; short machine understandable string"`
// Host name on which the event is generated.
- Host string `json:"host,omitempty"`
+ Host string `json:"host,omitempty" description:"host name on which this event was generated"`
// The time at which the client recorded the event. (Time of server receipt is in TypeMeta.)
Timestamp util.Time `json:"timestamp,omitempty" description:"time at which the client recorded the event"`
@@ -824,7 +831,7 @@ type ContainerManifest struct {
// TODO: UUID on Manifext is deprecated in the future once we are done
// with the API refactory. It is required for now to determine the instance
// of a Pod.
- UUID string `json:"uuid,omitempty" description:"manifest UUID"`
+ UUID types.UID `json:"uuid,omitempty" description:"manifest UUID"`
Volumes []Volume `json:"volumes" description:"list of volumes that can be mounted by containers belonging to the pod"`
Containers []Container `json:"containers" description:"list of containers belonging to the pod"`
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty" description:"restart policy for all containers within the pod; one of RestartPolicyAlways, RestartPolicyOnFailure, RestartPolicyNever"`
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/register.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/register.go
index 3bb97847189b..6594cb67e895 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/register.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/register.go
@@ -26,9 +26,9 @@ var Codec = runtime.CodecFor(api.Scheme, "v1beta3")
func init() {
api.Scheme.AddKnownTypes("v1beta3",
- &PodContainerInfo{},
&Pod{},
&PodList{},
+ &PodStatusResult{},
&PodTemplate{},
&PodTemplateList{},
&BoundPod{},
@@ -56,9 +56,9 @@ func init() {
api.Scheme.AddKnownTypeWithName("v1beta3", "ServerOpList", &OperationList{})
}
-func (*PodContainerInfo) IsAnAPIObject() {}
func (*Pod) IsAnAPIObject() {}
func (*PodList) IsAnAPIObject() {}
+func (*PodStatusResult) IsAnAPIObject() {}
func (*PodTemplate) IsAnAPIObject() {}
func (*PodTemplateList) IsAnAPIObject() {}
func (*BoundPod) IsAnAPIObject() {}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/types.go
index cca9fed96d32..a02b1145a4fa 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta3/types.go
@@ -19,6 +19,7 @@ package v1beta3
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
@@ -93,7 +94,7 @@ type ObjectMeta struct {
// UID is the unique in time and space value for this object. It is typically generated by
// the server on successful creation of a resource and is not allowed to change on PUT
// operations.
- UID string `json:"uid,omitempty"`
+ UID types.UID `json:"uid,omitempty"`
// An opaque value that represents the version of this resource. May be used for optimistic
// concurrency, change detection, and the watch operation on a resource or set of resources.
@@ -138,7 +139,7 @@ const (
// // TODO: UUID on Manifest is deprecated in the future once we are done
// // with the API refactoring. It is required for now to determine the instance
// // of a Pod.
-// UUID string `json:"uuid,omitempty"`
+// UUID types.UID `json:"uuid,omitempty"`
// Volumes []Volume `json:"volumes"`
// Containers []Container `json:"containers"`
// RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
@@ -163,18 +164,19 @@ type Volume struct {
// Source represents the location and type of a volume to mount.
// This is optional for now. If not specified, the Volume is implied to be an EmptyDir.
// This implied behavior is deprecated and will be removed in a future version.
- Source *VolumeSource `json:"source"`
+ Source VolumeSource `json:"source,omitempty"`
}
// VolumeSource represents the source location of a valume to mount.
// Only one of its members may be specified.
type VolumeSource struct {
- // HostDir represents a pre-existing directory on the host machine that is directly
- // exposed to the container. This is generally used for system agents or other privileged
- // things that are allowed to see the host machine. Most containers will NOT need this.
+ // HostPath represents a pre-existing file or directory on the host
+ // machine that is directly exposed to the container. This is generally
+ // used for system agents or other privileged things that are allowed
+ // to see the host machine. Most containers will NOT need this.
// TODO(jonesdl) We need to restrict who can use host directory mounts and who can/can not
// mount host directories as read/write.
- HostDir *HostDir `json:"hostDir"`
+ HostPath *HostPath `json:"hostPath"`
// EmptyDir represents a temporary directory that shares a pod's lifetime.
EmptyDir *EmptyDir `json:"emptyDir"`
// GCEPersistentDisk represents a GCE Disk resource that is attached to a
@@ -184,8 +186,8 @@ type VolumeSource struct {
GitRepo *GitRepo `json:"gitRepo"`
}
-// HostDir represents bare host directory volume.
-type HostDir struct {
+// HostPath represents bare host directory volume.
+type HostPath struct {
Path string `json:"path"`
}
@@ -307,11 +309,11 @@ type PullPolicy string
const (
// PullAlways means that kubelet always attempts to pull the latest image. Container will fail If the pull fails.
- PullAlways PullPolicy = "PullAlways"
+ PullAlways PullPolicy = "Always"
// PullNever means that kubelet never pulls an image, but only uses a local image. Container will fail if the image isn't present
- PullNever PullPolicy = "PullNever"
+ PullNever PullPolicy = "Never"
// PullIfNotPresent means that kubelet pulls if the image isn't present on disk. Container will fail if the image isn't present and the pull fails.
- PullIfNotPresent PullPolicy = "PullIfNotPresent"
+ PullIfNotPresent PullPolicy = "IfNotPresent"
)
// Container represents a single container that is expected to be run on the host.
@@ -434,13 +436,6 @@ type ContainerStatus struct {
// PodInfo contains one entry for every container with available info.
type PodInfo map[string]ContainerStatus
-// PodContainerInfo is a wrapper for PodInfo that can be encode/decoded
-type PodContainerInfo struct {
- TypeMeta `json:",inline"`
- ObjectMeta `json:"metadata,omitempty"`
- ContainerInfo PodInfo `json:"containerInfo" description:"information about each container in this pod"`
-}
-
type RestartPolicyAlways struct{}
// TODO(dchen1107): Define what kinds of failures should restart.
@@ -510,6 +505,15 @@ type PodStatus struct {
Info PodInfo `json:"info,omitempty"`
}
+// PodStatusResult is a wrapper for PodStatus returned by kubelet that can be encode/decoded
+type PodStatusResult struct {
+ TypeMeta `json:",inline"`
+ ObjectMeta `json:"metadata,omitempty"`
+ // Status represents the current information about a pod. This data may not be up
+ // to date.
+ Status PodStatus `json:"status,omitempty"`
+}
+
// Pod is a collection of containers that can run on a host. This resource is created
// by clients and scheduled onto hosts. BoundPod represents the state of this resource
// to hosts.
@@ -995,12 +999,12 @@ type OperationList struct {
// ObjectReference contains enough information to let you inspect or modify the referred object.
type ObjectReference struct {
- Kind string `json:"kind,omitempty"`
- Namespace string `json:"namespace,omitempty"`
- Name string `json:"name,omitempty"`
- UID string `json:"uid,omitempty"`
- APIVersion string `json:"apiVersion,omitempty"`
- ResourceVersion string `json:"resourceVersion,omitempty"`
+ Kind string `json:"kind,omitempty"`
+ Namespace string `json:"namespace,omitempty"`
+ Name string `json:"name,omitempty"`
+ UID types.UID `json:"uid,omitempty"`
+ APIVersion string `json:"apiVersion,omitempty"`
+ ResourceVersion string `json:"resourceVersion,omitempty"`
// Optional. If referring to a piece of an object instead of an entire object, this string
// should contain information to identify the sub-object. For example, if the object
@@ -1029,18 +1033,8 @@ type Event struct {
// Required. The object that this event is about.
InvolvedObject ObjectReference `json:"involvedObject,omitempty"`
- // Should be a short, machine understandable string that describes the current condition
- // of the referred object. This should not give the reason for being in this state.
- // Examples: "Running", "CantStart", "CantSchedule", "Deleted".
- // It's OK for components to make up conditions to report here, but the same string should
- // always be used for the same conditions.
- // TODO: define a way of making sure these are consistent and don't collide.
- // TODO: provide exact specification for format.
- Condition string `json:"condition,omitempty"`
-
// Optional; this should be a short, machine understandable string that gives the reason
- // for the transition into the object's current condition. For example, if ObjectCondition is
- // "CantStart", StatusReason might be "ImageNotFound".
+ // for this event being generated.
// TODO: provide exact specification for format.
Reason string `json:"reason,omitempty"`
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/schema_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/schema_test.go
index 693429d2c6b1..30862e6cf001 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/schema_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/schema_test.go
@@ -118,6 +118,10 @@ var apiObjectFuzzer = fuzz.New().NilChance(.5).NumElements(1, 1).Funcs(
c.RandString(): c.RandString(),
}
},
+ func(p *api.PullPolicy, c fuzz.Continue) {
+ policies := []api.PullPolicy{api.PullAlways, api.PullNever, api.PullIfNotPresent}
+ *p = policies[c.Rand.Intn(len(policies))]
+ },
)
func TestLoad(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation.go
index 80b059bce6bf..b3b0f01bb3cf 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation.go
@@ -40,14 +40,7 @@ func validateVolumes(volumes []api.Volume) (util.StringSet, errs.ValidationError
allNames := util.StringSet{}
for i := range volumes {
vol := &volumes[i] // so we can set default values
- el := errs.ValidationErrorList{}
- if vol.Source == nil {
- // TODO: Enforce that a source is set once we deprecate the implied form.
- vol.Source = &api.VolumeSource{
- EmptyDir: &api.EmptyDir{},
- }
- }
- el = validateSource(vol.Source).Prefix("source")
+ el := validateSource(&vol.Source).Prefix("source")
if len(vol.Name) == 0 {
el = append(el, errs.NewFieldRequired("name", vol.Name))
} else if !util.IsDNSLabel(vol.Name) {
@@ -67,9 +60,9 @@ func validateVolumes(volumes []api.Volume) (util.StringSet, errs.ValidationError
func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
numVolumes := 0
allErrs := errs.ValidationErrorList{}
- if source.HostDir != nil {
+ if source.HostPath != nil {
numVolumes++
- allErrs = append(allErrs, validateHostDir(source.HostDir).Prefix("hostDirectory")...)
+ allErrs = append(allErrs, validateHostPath(source.HostPath).Prefix("hostPath")...)
}
if source.EmptyDir != nil {
numVolumes++
@@ -77,22 +70,25 @@ func validateSource(source *api.VolumeSource) errs.ValidationErrorList {
}
if source.GitRepo != nil {
numVolumes++
- allErrs = append(allErrs, validateGitRepo(source.GitRepo)...)
+ allErrs = append(allErrs, validateGitRepo(source.GitRepo).Prefix("gitRepo")...)
}
if source.GCEPersistentDisk != nil {
numVolumes++
- allErrs = append(allErrs, validateGCEPersistentDisk(source.GCEPersistentDisk)...)
+ allErrs = append(allErrs, validateGCEPersistentDisk(source.GCEPersistentDisk).Prefix("persistentDisk")...)
}
- if numVolumes != 1 {
+ if numVolumes == 0 {
+ // TODO: Enforce that a source is set once we deprecate the implied form.
+ source.EmptyDir = &api.EmptyDir{}
+ } else if numVolumes != 1 {
allErrs = append(allErrs, errs.NewFieldInvalid("", source, "exactly 1 volume type is required"))
}
return allErrs
}
-func validateHostDir(hostDir *api.HostDir) errs.ValidationErrorList {
+func validateHostPath(hostDir *api.HostPath) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if hostDir.Path == "" {
- allErrs = append(allErrs, errs.NewNotFound("path", hostDir.Path))
+ allErrs = append(allErrs, errs.NewFieldRequired("path", hostDir.Path))
}
return allErrs
}
@@ -100,7 +96,7 @@ func validateHostDir(hostDir *api.HostDir) errs.ValidationErrorList {
func validateGitRepo(gitRepo *api.GitRepo) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if gitRepo.Repository == "" {
- allErrs = append(allErrs, errs.NewFieldRequired("gitRepo.Repository", gitRepo.Repository))
+ allErrs = append(allErrs, errs.NewFieldRequired("repository", gitRepo.Repository))
}
return allErrs
}
@@ -108,13 +104,13 @@ func validateGitRepo(gitRepo *api.GitRepo) errs.ValidationErrorList {
func validateGCEPersistentDisk(PD *api.GCEPersistentDisk) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
if PD.PDName == "" {
- allErrs = append(allErrs, errs.NewFieldRequired("PD.PDName", PD.PDName))
+ allErrs = append(allErrs, errs.NewFieldRequired("pdName", PD.PDName))
}
if PD.FSType == "" {
- allErrs = append(allErrs, errs.NewFieldRequired("PD.FSType", PD.FSType))
+ allErrs = append(allErrs, errs.NewFieldRequired("fsType", PD.FSType))
}
if PD.Partition < 0 || PD.Partition > 255 {
- allErrs = append(allErrs, errs.NewFieldInvalid("PD.Partition", PD.Partition, ""))
+ allErrs = append(allErrs, errs.NewFieldInvalid("partition", PD.Partition, ""))
}
return allErrs
}
@@ -266,6 +262,29 @@ func validateLifecycle(lifecycle *api.Lifecycle) errs.ValidationErrorList {
return allErrs
}
+// TODO(dchen1107): Move this along with other defaulting values
+func validatePullPolicyWithDefault(ctr *api.Container) errs.ValidationErrorList {
+ allErrors := errs.ValidationErrorList{}
+
+ switch ctr.ImagePullPolicy {
+ case "":
+ // TODO(dchen1107): Move ParseImageName code to pkg/util
+ parts := strings.Split(ctr.Image, ":")
+ // Check image tag
+ if parts[len(parts)-1] == "latest" {
+ ctr.ImagePullPolicy = api.PullAlways
+ } else {
+ ctr.ImagePullPolicy = api.PullIfNotPresent
+ }
+ case api.PullAlways, api.PullIfNotPresent, api.PullNever:
+ break
+ default:
+ allErrors = append(allErrors, errs.NewFieldNotSupported("", ctr.ImagePullPolicy))
+ }
+
+ return allErrors
+}
+
func validateContainers(containers []api.Container, volumes util.StringSet) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
@@ -294,6 +313,7 @@ func validateContainers(containers []api.Container, volumes util.StringSet) errs
cErrs = append(cErrs, validatePorts(ctr.Ports).Prefix("ports")...)
cErrs = append(cErrs, validateEnv(ctr.Env).Prefix("env")...)
cErrs = append(cErrs, validateVolumeMounts(ctr.VolumeMounts, volumes).Prefix("volumeMounts")...)
+ cErrs = append(cErrs, validatePullPolicyWithDefault(ctr).Prefix("pullPolicy")...)
allErrs = append(allErrs, cErrs.PrefixIndex(i)...)
}
// Check for colliding ports across all containers.
@@ -398,6 +418,7 @@ func ValidatePodSpec(spec *api.PodSpec) errs.ValidationErrorList {
return allErrs
}
+// ValidateLabels validates that a set of labels are correctly defined.
func ValidateLabels(labels map[string]string, field string) errs.ValidationErrorList {
allErrs := errs.ValidationErrorList{}
for k := range labels {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation_test.go
index 2757f268ff0b..1e0975ab2631 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation/validation_test.go
@@ -53,7 +53,7 @@ func TestValidateLabels(t *testing.T) {
{"1.2.3.4/5678": "bar"},
}
for i := range successCases {
- errs := validateLabels(successCases[i], "field")
+ errs := ValidateLabels(successCases[i], "field")
if len(errs) != 0 {
t.Errorf("case[%d] expected success, got %#v", i, errs)
}
@@ -67,7 +67,7 @@ func TestValidateLabels(t *testing.T) {
{strings.Repeat("a", 254): "bar"},
}
for i := range errorCases {
- errs := validateLabels(errorCases[i], "field")
+ errs := ValidateLabels(errorCases[i], "field")
if len(errs) != 1 {
t.Errorf("case[%d] expected failure", i)
}
@@ -77,11 +77,11 @@ func TestValidateLabels(t *testing.T) {
func TestValidateVolumes(t *testing.T) {
successCase := []api.Volume{
{Name: "abc"},
- {Name: "123", Source: &api.VolumeSource{HostDir: &api.HostDir{"/mnt/path2"}}},
- {Name: "abc-123", Source: &api.VolumeSource{HostDir: &api.HostDir{"/mnt/path3"}}},
- {Name: "empty", Source: &api.VolumeSource{EmptyDir: &api.EmptyDir{}}},
- {Name: "gcepd", Source: &api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{"my-PD", "ext4", 1, false}}},
- {Name: "gitrepo", Source: &api.VolumeSource{GitRepo: &api.GitRepo{"my-repo", "hashstring"}}},
+ {Name: "123", Source: api.VolumeSource{HostPath: &api.HostPath{"/mnt/path2"}}},
+ {Name: "abc-123", Source: api.VolumeSource{HostPath: &api.HostPath{"/mnt/path3"}}},
+ {Name: "empty", Source: api.VolumeSource{EmptyDir: &api.EmptyDir{}}},
+ {Name: "gcepd", Source: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{"my-PD", "ext4", 1, false}}},
+ {Name: "gitrepo", Source: api.VolumeSource{GitRepo: &api.GitRepo{"my-repo", "hashstring"}}},
}
names, errs := validateVolumes(successCase)
if len(errs) != 0 {
@@ -220,6 +220,54 @@ func TestValidateVolumeMounts(t *testing.T) {
}
}
+func TestValidatePullPolicy(t *testing.T) {
+ type T struct {
+ Container api.Container
+ ExpectedPolicy api.PullPolicy
+ }
+ testCases := map[string]T{
+ "NotPresent1": {
+ api.Container{Name: "abc", Image: "image:latest", ImagePullPolicy: "IfNotPresent"},
+ api.PullIfNotPresent,
+ },
+ "NotPresent2": {
+ api.Container{Name: "abc1", Image: "image", ImagePullPolicy: "IfNotPresent"},
+ api.PullIfNotPresent,
+ },
+ "Always1": {
+ api.Container{Name: "123", Image: "image:latest", ImagePullPolicy: "Always"},
+ api.PullAlways,
+ },
+ "Always2": {
+ api.Container{Name: "1234", Image: "image", ImagePullPolicy: "Always"},
+ api.PullAlways,
+ },
+ "Never1": {
+ api.Container{Name: "abc-123", Image: "image:latest", ImagePullPolicy: "Never"},
+ api.PullNever,
+ },
+ "Never2": {
+ api.Container{Name: "abc-1234", Image: "image", ImagePullPolicy: "Never"},
+ api.PullNever,
+ },
+ "DefaultToNotPresent": {api.Container{Name: "notPresent", Image: "image"}, api.PullIfNotPresent},
+ "DefaultToNotPresent2": {api.Container{Name: "notPresent1", Image: "image:sometag"}, api.PullIfNotPresent},
+ "DefaultToAlways1": {api.Container{Name: "always", Image: "image:latest"}, api.PullAlways},
+ "DefaultToAlways2": {api.Container{Name: "always", Image: "foo.bar.com:5000/my/image:latest"}, api.PullAlways},
+ }
+ for k, v := range testCases {
+ ctr := &v.Container
+ errs := validatePullPolicyWithDefault(ctr)
+ if len(errs) != 0 {
+ t.Errorf("case[%s] expected success, got %#v", k, errs)
+ }
+ if ctr.ImagePullPolicy != v.ExpectedPolicy {
+ t.Errorf("case[%s] expected policy %v, got %v", k, v.ExpectedPolicy, ctr.ImagePullPolicy)
+ }
+ }
+
+}
+
func TestValidateContainers(t *testing.T) {
volumes := util.StringSet{}
capabilities.SetForTests(capabilities.Capabilities{
@@ -366,8 +414,8 @@ func TestValidateManifest(t *testing.T) {
{
Version: "v1beta1",
ID: "abc",
- Volumes: []api.Volume{{Name: "vol1", Source: &api.VolumeSource{HostDir: &api.HostDir{"/mnt/vol1"}}},
- {Name: "vol2", Source: &api.VolumeSource{HostDir: &api.HostDir{"/mnt/vol2"}}}},
+ Volumes: []api.Volume{{Name: "vol1", Source: api.VolumeSource{HostPath: &api.HostPath{"/mnt/vol1"}}},
+ {Name: "vol2", Source: api.VolumeSource{HostPath: &api.HostPath{"/mnt/vol2"}}}},
Containers: []api.Container{
{
Name: "abc",
@@ -878,7 +926,10 @@ func TestValidateService(t *testing.T) {
},
existing: api.ServiceList{
Items: []api.Service{
- {Spec: api.ServiceSpec{Port: 80, CreateExternalLoadBalancer: true}},
+ {
+ ObjectMeta: api.ObjectMeta{Name: "def123", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{Port: 80, CreateExternalLoadBalancer: true},
+ },
},
},
numErrs: 1,
@@ -895,7 +946,10 @@ func TestValidateService(t *testing.T) {
},
existing: api.ServiceList{
Items: []api.Service{
- {Spec: api.ServiceSpec{Port: 80}},
+ {
+ ObjectMeta: api.ObjectMeta{Name: "def123", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{Port: 80},
+ },
},
},
numErrs: 0,
@@ -911,7 +965,10 @@ func TestValidateService(t *testing.T) {
},
existing: api.ServiceList{
Items: []api.Service{
- {Spec: api.ServiceSpec{Port: 80, CreateExternalLoadBalancer: true}},
+ {
+ ObjectMeta: api.ObjectMeta{Name: "def123", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{Port: 80, CreateExternalLoadBalancer: true},
+ },
},
},
numErrs: 0,
@@ -927,7 +984,10 @@ func TestValidateService(t *testing.T) {
},
existing: api.ServiceList{
Items: []api.Service{
- {Spec: api.ServiceSpec{Port: 80}},
+ {
+ ObjectMeta: api.ObjectMeta{Name: "def123", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{Port: 80},
+ },
},
},
numErrs: 0,
@@ -1001,7 +1061,7 @@ func TestValidateReplicationController(t *testing.T) {
invalidVolumePodTemplate := api.PodTemplate{
Spec: api.PodTemplateSpec{
Spec: api.PodSpec{
- Volumes: []api.Volume{{Name: "gcepd", Source: &api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{"my-PD", "ext4", 1, false}}}},
+ Volumes: []api.Volume{{Name: "gcepd", Source: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{"my-PD", "ext4", 1, false}}}},
},
},
}
@@ -1074,7 +1134,7 @@ func TestValidateReplicationController(t *testing.T) {
Selector: validSelector,
},
},
- "read-write presistent disk": {
+ "read-write persistent disk": {
ObjectMeta: api.ObjectMeta{Name: "abc"},
Spec: api.ReplicationControllerSpec{
Selector: validSelector,
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/apiserver.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/apiserver.go
index 6d6ef4274b7c..70eec069e378 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/apiserver.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/apiserver.go
@@ -62,9 +62,9 @@ func Handle(storage map[string]RESTStorage, codec runtime.Codec, root string, ve
group := NewAPIGroupVersion(storage, codec, prefix, selfLinker, admissionControl)
container := restful.NewContainer()
mux := container.ServeMux
- group.InstallREST(container, root, version)
+ group.InstallREST(container, mux, root, version)
ws := new(restful.WebService)
- InstallSupport(container, ws)
+ InstallSupport(mux, ws)
container.Add(ws)
return &defaultAPIServer{mux, group}
}
@@ -103,28 +103,25 @@ func indirectArbitraryPointer(ptrToObject interface{}) interface{} {
return reflect.Indirect(reflect.ValueOf(ptrToObject)).Interface()
}
-func registerResourceHandlers(ws *restful.WebService, version string, path string, storage RESTStorage, kinds map[string]reflect.Type, h restful.RouteFunction, namespaceScope bool) {
- glog.V(3).Infof("Installing /%s/%s\n", version, path)
+func registerResourceHandlers(ws *restful.WebService, version string, path string, storage RESTStorage, h restful.RouteFunction, namespaceScope bool) error {
object := storage.New()
_, kind, err := api.Scheme.ObjectVersionAndKind(object)
if err != nil {
- glog.Warningf("error getting kind: %v\n", err)
- return
+ return err
}
versionedPtr, err := api.Scheme.New(version, kind)
if err != nil {
- glog.Warningf("error making object: %v\n", err)
- return
+ return err
}
versionedObject := indirectArbitraryPointer(versionedPtr)
- glog.V(3).Infoln("type: ", reflect.TypeOf(versionedObject))
// See github.com/emicklei/go-restful/blob/master/jsr311.go for routing logic
// and status-code behavior
if namespaceScope {
path = "ns/{namespace}/" + path
}
- glog.V(3).Infof("Installing version=/%s, kind=/%s, path=/%s\n", version, kind, path)
+
+ glog.V(5).Infof("Installing version=/%s, kind=/%s, path=/%s", version, kind, path)
nameParam := ws.PathParameter("name", "name of the "+kind).DataType("string")
namespaceParam := ws.PathParameter("namespace", "object name and auth scope, such as for teams and projects").DataType("string")
@@ -149,11 +146,10 @@ func registerResourceHandlers(ws *restful.WebService, version string, path strin
_, listKind, err := api.Scheme.ObjectVersionAndKind(list)
versionedListPtr, err := api.Scheme.New(version, listKind)
if err != nil {
- glog.Errorf("error making list object: %v\n", err)
- return
+ return err
}
versionedList := indirectArbitraryPointer(versionedListPtr)
- glog.V(3).Infoln("type: ", reflect.TypeOf(versionedList))
+ glog.V(5).Infoln("type: ", reflect.TypeOf(versionedList))
ws.Route(listRoute.Returns(http.StatusOK, "OK", versionedList))
} else {
ws.Route(listRoute.Returns(http.StatusMethodNotAllowed, "listing objects is not supported", nil))
@@ -192,6 +188,8 @@ func registerResourceHandlers(ws *restful.WebService, version string, path strin
} else {
ws.Route(deleteRoute.Returns(http.StatusMethodNotAllowed, "deleting objects is not supported", nil))
}
+
+ return nil
}
// Adds the given param to the given route builder if shouldAdd is true. Does nothing if shouldAdd is false.
@@ -205,7 +203,7 @@ func addParamIf(b *restful.RouteBuilder, parameter *restful.Parameter, shouldAdd
// InstallREST registers the REST handlers (storage, watch, and operations) into a restful Container.
// It is expected that the provided path root prefix will serve all operations. Root MUST NOT end
// in a slash. A restful WebService is created for the group and version.
-func (g *APIGroupVersion) InstallREST(container *restful.Container, root string, version string) {
+func (g *APIGroupVersion) InstallREST(container *restful.Container, mux Mux, root string, version string) error {
prefix := path.Join(root, version)
restHandler := &g.handler
strippedHandler := http.StripPrefix(prefix, restHandler)
@@ -233,9 +231,6 @@ func (g *APIGroupVersion) InstallREST(container *restful.Container, root string,
// TODO: add scheme to APIGroupVersion rather than using api.Scheme
- kinds := api.Scheme.KnownTypes(version)
- glog.V(4).Infof("InstallREST: %v kinds: %#v", version, kinds)
-
// TODO: #2057: Return API resources on "/".
// TODO: Add status documentation using Returns()
@@ -262,15 +257,17 @@ func (g *APIGroupVersion) InstallREST(container *restful.Container, root string,
for path, storage := range g.handler.storage {
// register legacy patterns where namespace is optional in path
- registerResourceHandlers(ws, version, path, storage, kinds, h, false)
+ if err := registerResourceHandlers(ws, version, path, storage, h, false); err != nil {
+ return err
+ }
// register pattern where namespace is required in path
- registerResourceHandlers(ws, version, path, storage, kinds, h, true)
+ if err := registerResourceHandlers(ws, version, path, storage, h, true); err != nil {
+ return err
+ }
}
// TODO: port the rest of these. Sadly, if we don't, we'll have inconsistent
// API behavior, as well as lack of documentation
- mux := container.ServeMux
-
// Note: update GetAttribs() when adding a handler.
mux.Handle(prefix+"/watch/", http.StripPrefix(prefix+"/watch/", watchHandler))
mux.Handle(prefix+"/proxy/", http.StripPrefix(prefix+"/proxy/", proxyHandler))
@@ -279,6 +276,8 @@ func (g *APIGroupVersion) InstallREST(container *restful.Container, root string,
mux.Handle(prefix+"/operations/", http.StripPrefix(prefix+"/operations/", opHandler))
container.Add(ws)
+
+ return nil
}
// TODO: Convert to go-restful
@@ -295,9 +294,9 @@ func InstallValidator(mux Mux, servers func() map[string]Server) {
// TODO: document all handlers
// InstallSupport registers the APIServer support functions
-func InstallSupport(container *restful.Container, ws *restful.WebService) {
+func InstallSupport(mux Mux, ws *restful.WebService) {
// TODO: convert healthz to restful and remove container arg
- healthz.InstallHandler(container.ServeMux)
+ healthz.InstallHandler(mux)
// Set up a service to return the git code version.
ws.Path("/version")
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/index.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/index.go
index 3b05c5270f2e..0b61665a1060 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/index.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/index.go
@@ -17,11 +17,24 @@ limitations under the License.
package apiserver
import (
- "io"
"net/http"
+ "sort"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+
+ "github.com/emicklei/go-restful"
)
-// handleIndex is the root index page for Kubernetes.
-func HandleIndex(w http.ResponseWriter, r *http.Request) {
- io.WriteString(w, "Welcome to Kubernetes")
+func IndexHandler(container *restful.Container, muxHelper *MuxHelper) func(http.ResponseWriter, *http.Request) {
+ return func(w http.ResponseWriter, r *http.Request) {
+ var handledPaths []string
+ // Extract the paths handled using restful.WebService
+ for _, ws := range container.RegisteredWebServices() {
+ handledPaths = append(handledPaths, ws.RootPath())
+ }
+ // Extract the paths handled using mux handler.
+ handledPaths = append(handledPaths, muxHelper.RegisteredPaths...)
+ sort.Strings(handledPaths)
+ writeRawJSON(http.StatusOK, api.RootPaths{Paths: handledPaths}, w)
+ }
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/mux_helper.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/mux_helper.go
new file mode 100644
index 000000000000..e95f0719cdbe
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/mux_helper.go
@@ -0,0 +1,37 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package apiserver
+
+import (
+ "net/http"
+)
+
+// Offers additional functionality over ServeMux, for ex: supports listing registered paths.
+type MuxHelper struct {
+ Mux Mux
+ RegisteredPaths []string
+}
+
+func (m *MuxHelper) Handle(path string, handler http.Handler) {
+ m.RegisteredPaths = append(m.RegisteredPaths, path)
+ m.Mux.Handle(path, handler)
+}
+
+func (m *MuxHelper) HandleFunc(path string, handler func(http.ResponseWriter, *http.Request)) {
+ m.RegisteredPaths = append(m.RegisteredPaths, path)
+ m.Mux.HandleFunc(path, handler)
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy.go
index edaed7423b9f..360ef4171524 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy.go
@@ -96,6 +96,12 @@ func (r *ProxyHandler) ServeHTTP(w http.ResponseWriter, req *http.Request) {
if len(parts) > 2 {
proxyParts := parts[2:]
rest = strings.Join(proxyParts, "/")
+ if strings.HasSuffix(req.URL.Path, "/") {
+ // The original path had a trailing slash, which has been stripped
+ // by KindAndNamespace(). We should add it back because some
+ // servers (like etcd) require it.
+ rest = rest + "/"
+ }
}
storage, ok := r.storage[kind]
if !ok {
@@ -163,6 +169,11 @@ type proxyTransport struct {
}
func (t *proxyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
+ // Add reverse proxy headers.
+ req.Header.Set("X-Forwarded-Uri", t.proxyPathPrepend+req.URL.Path)
+ req.Header.Set("X-Forwarded-Host", t.proxyHost)
+ req.Header.Set("X-Forwarded-Proto", t.proxyScheme)
+
resp, err := http.DefaultTransport.RoundTrip(req)
if err != nil {
@@ -174,7 +185,9 @@ func (t *proxyTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return resp, nil
}
- if resp.Header.Get("Content-Type") != "text/html" {
+ cType := resp.Header.Get("Content-Type")
+ cType = strings.TrimSpace(strings.SplitN(cType, ";", 2)[0])
+ if cType != "text/html" {
// Do nothing, simply pass through
return resp, nil
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy_test.go
index fdc9954af959..99f3ce66f8df 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver/proxy_test.go
@@ -52,53 +52,81 @@ func fmtHTML(in string) string {
return string(out.Bytes())
}
-func TestProxyTransport_fixLinks(t *testing.T) {
+func TestProxyTransport(t *testing.T) {
testTransport := &proxyTransport{
proxyScheme: "http",
proxyHost: "foo.com",
- proxyPathPrepend: "/proxy/minion/minion1:10250/",
+ proxyPathPrepend: "/proxy/minion/minion1:10250",
}
testTransport2 := &proxyTransport{
proxyScheme: "https",
proxyHost: "foo.com",
- proxyPathPrepend: "/proxy/minion/minion1:8080/",
+ proxyPathPrepend: "/proxy/minion/minion1:8080",
}
table := map[string]struct {
- input string
- sourceURL string
- transport *proxyTransport
- output string
+ input string
+ sourceURL string
+ transport *proxyTransport
+ output string
+ contentType string
+ forwardedURI string
}{
"normal": {
- input: `kubelet.loggoogle.log
`,
- sourceURL: "http://myminion.com/logs/log.log",
- transport: testTransport,
- output: `kubelet.loggoogle.log
`,
+ input: `kubelet.loggoogle.log
`,
+ sourceURL: "http://myminion.com/logs/log.log",
+ transport: testTransport,
+ output: `kubelet.loggoogle.log
`,
+ contentType: "text/html",
+ forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+ },
+ "content-type charset": {
+ input: `kubelet.loggoogle.log
`,
+ sourceURL: "http://myminion.com/logs/log.log",
+ transport: testTransport,
+ output: `kubelet.loggoogle.log
`,
+ contentType: "text/html; charset=utf-8",
+ forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
+ },
+ "content-type passthrough": {
+ input: `kubelet.loggoogle.log
`,
+ sourceURL: "http://myminion.com/logs/log.log",
+ transport: testTransport,
+ output: `kubelet.loggoogle.log
`,
+ contentType: "text/plain",
+ forwardedURI: "/proxy/minion/minion1:10250/logs/log.log",
},
"subdir": {
- input: `kubelet.loggoogle.log`,
- sourceURL: "http://myminion.com/whatever/apt/somelog.log",
- transport: testTransport2,
- output: `kubelet.loggoogle.log`,
+ input: `kubelet.loggoogle.log`,
+ sourceURL: "http://myminion.com/whatever/apt/somelog.log",
+ transport: testTransport2,
+ output: `kubelet.loggoogle.log`,
+ contentType: "text/html",
+ forwardedURI: "/proxy/minion/minion1:8080/whatever/apt/somelog.log",
},
"image": {
- input: `
`,
- sourceURL: "http://myminion.com/",
- transport: testTransport,
- output: `
`,
+ input: `
`,
+ sourceURL: "http://myminion.com/",
+ transport: testTransport,
+ output: `
`,
+ contentType: "text/html",
+ forwardedURI: "/proxy/minion/minion1:10250/",
},
"abs": {
- input: ``,
- sourceURL: "http://myminion.com/any/path/",
- transport: testTransport,
- output: ``,
+ input: ``,
+ sourceURL: "http://myminion.com/any/path/",
+ transport: testTransport,
+ output: ``,
+ contentType: "text/html",
+ forwardedURI: "/proxy/minion/minion1:10250/any/path/",
},
"abs but same host": {
- input: ``,
- sourceURL: "http://myminion.com/any/path/",
- transport: testTransport,
- output: ``,
+ input: ``,
+ sourceURL: "http://myminion.com/any/path/",
+ transport: testTransport,
+ output: ``,
+ contentType: "text/html",
+ forwardedURI: "/proxy/minion/minion1:10250/any/path/",
},
}
@@ -106,22 +134,40 @@ func TestProxyTransport_fixLinks(t *testing.T) {
// Canonicalize the html so we can diff.
item.input = fmtHTML(item.input)
item.output = fmtHTML(item.output)
- req := &http.Request{
- Method: "GET",
- URL: parseURLOrDie(item.sourceURL),
- }
- resp := &http.Response{
- Status: "200 OK",
- StatusCode: http.StatusOK,
- Body: ioutil.NopCloser(strings.NewReader(item.input)),
- Close: true,
+
+ server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ // Check request headers.
+ if got, want := r.Header.Get("X-Forwarded-Uri"), item.forwardedURI; got != want {
+ t.Errorf("%v: X-Forwarded-Uri = %q, want %q", name, got, want)
+ }
+ if got, want := r.Header.Get("X-Forwarded-Host"), item.transport.proxyHost; got != want {
+ t.Errorf("%v: X-Forwarded-Host = %q, want %q", name, got, want)
+ }
+ if got, want := r.Header.Get("X-Forwarded-Proto"), item.transport.proxyScheme; got != want {
+ t.Errorf("%v: X-Forwarded-Proto = %q, want %q", name, got, want)
+ }
+
+ // Send response.
+ w.Header().Set("Content-Type", item.contentType)
+ fmt.Fprint(w, item.input)
+ }))
+ // Replace source URL with our test server address.
+ sourceURL := parseURLOrDie(item.sourceURL)
+ serverURL := parseURLOrDie(server.URL)
+ item.input = strings.Replace(item.input, sourceURL.Host, serverURL.Host, -1)
+ sourceURL.Host = serverURL.Host
+
+ req, err := http.NewRequest("GET", sourceURL.String(), nil)
+ if err != nil {
+ t.Errorf("%v: Unexpected error: %v", name, err)
+ continue
}
- updatedResp, err := item.transport.fixLinks(req, resp)
+ resp, err := item.transport.RoundTrip(req)
if err != nil {
t.Errorf("%v: Unexpected error: %v", name, err)
continue
}
- body, err := ioutil.ReadAll(updatedResp.Body)
+ body, err := ioutil.ReadAll(resp.Body)
if err != nil {
t.Errorf("%v: Unexpected error: %v", name, err)
continue
@@ -129,6 +175,7 @@ func TestProxyTransport_fixLinks(t *testing.T) {
if e, a := item.output, string(body); e != a {
t.Errorf("%v: expected %v, but got %v", name, e, a)
}
+ server.Close()
}
}
@@ -147,6 +194,7 @@ func TestProxy(t *testing.T) {
{"PUT", "/some/dir/id", "different question", "answer", "text/css", "default"},
{"DELETE", "/some/dir/id", "", "ok", "text/css", "default"},
{"GET", "/some/dir/id", "", "answer", "text/css", "other"},
+ {"GET", "/trailing/slash/", "", "answer", "text/css", "default"},
}
for _, item := range table {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listers.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listers.go
index f98dff6e4a2d..a8eb8f57700b 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listers.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listers.go
@@ -75,17 +75,41 @@ func (s *StoreToNodeLister) GetNodeInfo(id string) (*api.Node, error) {
return nil, fmt.Errorf("minion '%v' is not in cache", id)
}
-// StoreToServiceLister makes a Store have the List method of the client.ServiceInterface
+// StoreToServiceLister makes a Store that has the List method of the client.ServiceInterface
// The Store must contain (only) Services.
type StoreToServiceLister struct {
Store
}
-func (s *StoreToServiceLister) List() (svcs api.ServiceList, err error) {
+func (s *StoreToServiceLister) List() (services api.ServiceList, err error) {
for _, m := range s.Store.List() {
- svcs.Items = append(svcs.Items, *(m.(*api.Service)))
+ services.Items = append(services.Items, *(m.(*api.Service)))
}
- return svcs, nil
+ return services, nil
+}
+
+// TODO: Move this back to scheduler as a helper function that takes a Store,
+// rather than a method of StoreToServiceLister.
+func (s *StoreToServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) {
+ var selector labels.Selector
+ var service api.Service
+
+ for _, m := range s.Store.List() {
+ service = *m.(*api.Service)
+ // consider only services that are in the same namespace as the pod
+ if service.Namespace != pod.Namespace {
+ continue
+ }
+ selector = labels.Set(service.Spec.Selector).AsSelector()
+ if selector.Matches(labels.Set(pod.Labels)) {
+ services = append(services, service)
+ }
+ }
+ if len(services) == 0 {
+ err = fmt.Errorf("Could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+
+ return
}
// TODO: add StoreToEndpointsLister for use in kube-proxy.
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listwatch.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listwatch.go
index 75ac25d73c4a..f6e724e2a3f6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listwatch.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache/listwatch.go
@@ -24,7 +24,7 @@ import (
)
// ListWatch knows how to list and watch a set of apiserver resources. It satisfies the ListerWatcher interface.
-// It is a convenience function for users of NewReflector, etc.
+// It is a convenience function for users of NewReflector, etc. Client must not be nil.
type ListWatch struct {
Client *client.Client
FieldSelector labels.Selector
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/types.go
index 5b90969f5e97..d4dbce9d5475 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/types.go
@@ -83,6 +83,8 @@ type Context struct {
AuthInfo string `json:"user"`
// Namespace is the default namespace to use on unspecified requests
Namespace string `json:"namespace,omitempty"`
+ // NamespacePath is the path to a kubernetes ns file (~/.kubernetes_ns)
+ NamespacePath string `json:"namespace-path,omitempty"`
// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
Extensions map[string]runtime.EmbeddedObject `json:"extensions,omitempty"`
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/v1/types.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/v1/types.go
index b5dcf83c54de..7e31d14a4f23 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/v1/types.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api/v1/types.go
@@ -83,6 +83,8 @@ type Context struct {
AuthInfo string `json:"user"`
// Namespace is the default namespace to use on unspecified requests
Namespace string `json:"namespace,omitempty"`
+ // NamespacePath is the path to a kubernetes ns file (~/.kubernetes_ns)
+ NamespacePath string `json:"namespace-path,omitempty"`
// Extensions holds additional information. This is useful for extenders so that reads and writes don't clobber unknown fields
Extensions []NamedExtension `json:"extensions,omitempty"`
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config.go
index 9b05bc8ef1fa..05c4cfcdff0d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config.go
@@ -25,6 +25,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/clientauth"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
)
@@ -39,9 +40,12 @@ var (
// ClientConfig is used to make it easy to get an api server client
type ClientConfig interface {
+ // RawConfig returns the merged result of all overrides
RawConfig() (clientcmdapi.Config, error)
// ClientConfig returns a complete client config
ClientConfig() (*client.Config, error)
+ // Namespace returns the namespace resulting from the merged result of all overrides
+ Namespace() (string, error)
}
// DirectClientConfig is a ClientConfig interface that is backed by a clientcmdapi.Config, options overrides, and an optional fallbackReader for auth information
@@ -229,6 +233,35 @@ func canIdentifyUser(config client.Config) bool {
}
+// Namespace implements ClientConfig
+func (config DirectClientConfig) Namespace() (string, error) {
+ if err := config.ConfirmUsable(); err != nil {
+ return "", err
+ }
+
+ configContext := config.getContext()
+
+ if len(configContext.Namespace) != 0 {
+ return configContext.Namespace, nil
+ }
+
+ if len(configContext.NamespacePath) != 0 {
+ nsInfo, err := kubectl.LoadNamespaceInfo(configContext.NamespacePath)
+ if err != nil {
+ return "", err
+ }
+
+ return nsInfo.Namespace, nil
+ }
+
+ // if nothing was specified, try the default file
+ nsInfo, err := kubectl.LoadNamespaceInfo(os.Getenv("HOME") + "/.kubernetes_ns")
+ if err != nil {
+ return "", err
+ }
+ return nsInfo.Namespace, nil
+}
+
// ConfirmUsable looks a particular context and determines if that particular part of the config is useable. There might still be errors in the config,
// but no errors in the sections requested or referenced. It does not return early so that it can find as many errors as possible.
func (config DirectClientConfig) ConfirmUsable() error {
@@ -251,21 +284,30 @@ func (config DirectClientConfig) getContextName() string {
}
func (config DirectClientConfig) getAuthInfoName() string {
- if len(config.overrides.AuthInfoName) != 0 {
- return config.overrides.AuthInfoName
+ if len(config.overrides.Context.AuthInfo) != 0 {
+ return config.overrides.Context.AuthInfo
}
return config.getContext().AuthInfo
}
func (config DirectClientConfig) getClusterName() string {
- if len(config.overrides.ClusterName) != 0 {
- return config.overrides.ClusterName
+ if len(config.overrides.Context.Cluster) != 0 {
+ return config.overrides.Context.Cluster
}
return config.getContext().Cluster
}
func (config DirectClientConfig) getContext() clientcmdapi.Context {
- return config.config.Contexts[config.getContextName()]
+ contexts := config.config.Contexts
+ contextName := config.getContextName()
+
+ var mergedContext clientcmdapi.Context
+ if configContext, exists := contexts[contextName]; exists {
+ mergo.Merge(&mergedContext, configContext)
+ }
+ mergo.Merge(&mergedContext, config.overrides.Context)
+
+ return mergedContext
}
func (config DirectClientConfig) getAuthInfo() clientcmdapi.AuthInfo {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config_test.go
index d5a973c7a716..171bf70f3aa7 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/client_config_test.go
@@ -48,6 +48,24 @@ func createValidTestConfig() *clientcmdapi.Config {
return config
}
+func TestMergeContext(t *testing.T) {
+ const namespace = "overriden-namespace"
+
+ config := createValidTestConfig()
+ clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{
+ Context: clientcmdapi.Context{
+ Namespace: namespace,
+ },
+ })
+
+ actual, err := clientBuilder.Namespace()
+ if err != nil {
+ t.Errorf("Unexpected error: %v", err)
+ }
+
+ matchStringArg(namespace, actual, t)
+}
+
func TestCreateClean(t *testing.T) {
config := createValidTestConfig()
clientBuilder := NewNonInteractiveClientConfig(*config, "clean", &ConfigOverrides{})
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/merged_client_builder.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/merged_client_builder.go
index ffbe4ab68e30..f6bc5c4556dd 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/merged_client_builder.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/merged_client_builder.go
@@ -78,3 +78,13 @@ func (config DeferredLoadingClientConfig) ClientConfig() (*client.Config, error)
return mergedClientConfig.ClientConfig()
}
+
+// Namespace implements ClientConfig
+func (config DeferredLoadingClientConfig) Namespace() (string, error) {
+ mergedKubeConfig, err := config.createClientConfig()
+ if err != nil {
+ return "", err
+ }
+
+ return mergedKubeConfig.Namespace()
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
index 5534142ddffb..f6309c98c5ec 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
@@ -27,10 +27,8 @@ import (
type ConfigOverrides struct {
AuthInfo clientcmdapi.AuthInfo
ClusterInfo clientcmdapi.Cluster
- Namespace string
+ Context clientcmdapi.Context
CurrentContext string
- ClusterName string
- AuthInfoName string
}
// ConfigOverrideFlags holds the flag names to be used for binding command line flags. Notice that this structure tightly
@@ -38,41 +36,50 @@ type ConfigOverrides struct {
type ConfigOverrideFlags struct {
AuthOverrideFlags AuthOverrideFlags
ClusterOverrideFlags ClusterOverrideFlags
- Namespace string
+ ContextOverrideFlags ContextOverrideFlags
CurrentContext string
- ClusterName string
- AuthInfoName string
}
// AuthOverrideFlags holds the flag names to be used for binding command line flags for AuthInfo objects
type AuthOverrideFlags struct {
AuthPath string
+ AuthPathShort string
ClientCertificate string
ClientKey string
Token string
}
+// ContextOverrideFlags holds the flag names to be used for binding command line flags for Context objects
+type ContextOverrideFlags struct {
+ ClusterName string
+ AuthInfoName string
+ Namespace string
+ NamespacePath string
+}
+
// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects
type ClusterOverrideFlags struct {
APIServer string
+ APIServerShort string
APIVersion string
CertificateAuthority string
InsecureSkipTLSVerify string
}
const (
- FlagClusterName = "cluster"
- FlagAuthInfoName = "user"
- FlagContext = "context"
- FlagNamespace = "namespace"
- FlagAPIServer = "server"
- FlagAPIVersion = "api-version"
- FlagAuthPath = "auth-path"
- FlagInsecure = "insecure-skip-tls-verify"
- FlagCertFile = "client-certificate"
- FlagKeyFile = "client-key"
- FlagCAFile = "certificate-authority"
- FlagBearerToken = "token"
+ FlagClusterName = "cluster"
+ FlagAuthInfoName = "user"
+ FlagContext = "context"
+ FlagNamespace = "namespace"
+ FlagNamespacePath = "ns-path"
+ FlagAPIServer = "server"
+ FlagAPIVersion = "api-version"
+ FlagAuthPath = "auth-path"
+ FlagInsecure = "insecure-skip-tls-verify"
+ FlagCertFile = "client-certificate"
+ FlagKeyFile = "client-key"
+ FlagCAFile = "certificate-authority"
+ FlagBearerToken = "token"
)
// RecommendedAuthOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
@@ -100,17 +107,24 @@ func RecommendedConfigOverrideFlags(prefix string) ConfigOverrideFlags {
return ConfigOverrideFlags{
AuthOverrideFlags: RecommendedAuthOverrideFlags(prefix),
ClusterOverrideFlags: RecommendedClusterOverrideFlags(prefix),
- Namespace: prefix + FlagNamespace,
+ ContextOverrideFlags: RecommendedContextOverrideFlags(prefix),
CurrentContext: prefix + FlagContext,
- ClusterName: prefix + FlagClusterName,
- AuthInfoName: prefix + FlagAuthInfoName,
+ }
+}
+
+// RecommendedContextOverrideFlags is a convenience method to return recommended flag names prefixed with a string of your choosing
+func RecommendedContextOverrideFlags(prefix string) ContextOverrideFlags {
+ return ContextOverrideFlags{
+ ClusterName: prefix + FlagClusterName,
+ AuthInfoName: prefix + FlagAuthInfoName,
+ Namespace: prefix + FlagNamespace,
+ NamespacePath: prefix + FlagNamespacePath,
}
}
// BindAuthInfoFlags is a convenience method to bind the specified flags to their associated variables
func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, flagNames AuthOverrideFlags) {
- // TODO short flag names are impossible to prefix, decide whether to keep them or not
- flags.StringVarP(&authInfo.AuthPath, flagNames.AuthPath, "a", "", "Path to the auth info file. If missing, prompt the user. Only used if using https.")
+ flags.StringVarP(&authInfo.AuthPath, flagNames.AuthPath, flagNames.AuthPathShort, "", "Path to the auth info file. If missing, prompt the user. Only used if using https.")
flags.StringVar(&authInfo.ClientCertificate, flagNames.ClientCertificate, "", "Path to a client key file for TLS.")
flags.StringVar(&authInfo.ClientKey, flagNames.ClientKey, "", "Path to a client key file for TLS.")
flags.StringVar(&authInfo.Token, flagNames.Token, "", "Bearer token for authentication to the API server.")
@@ -118,8 +132,7 @@ func BindAuthInfoFlags(authInfo *clientcmdapi.AuthInfo, flags *pflag.FlagSet, fl
// BindClusterFlags is a convenience method to bind the specified flags to their associated variables
func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, flagNames ClusterOverrideFlags) {
- // TODO short flag names are impossible to prefix, decide whether to keep them or not
- flags.StringVarP(&clusterInfo.Server, flagNames.APIServer, "s", "", "The address of the Kubernetes API server")
+ flags.StringVarP(&clusterInfo.Server, flagNames.APIServer, flagNames.APIServerShort, "", "The address of the Kubernetes API server")
flags.StringVar(&clusterInfo.APIVersion, flagNames.APIVersion, "", "The API version to use when talking to the server")
flags.StringVar(&clusterInfo.CertificateAuthority, flagNames.CertificateAuthority, "", "Path to a cert. file for the certificate authority.")
flags.BoolVar(&clusterInfo.InsecureSkipTLSVerify, flagNames.InsecureSkipTLSVerify, false, "If true, the server's certificate will not be checked for validity. This will make your HTTPS connections insecure.")
@@ -129,9 +142,14 @@ func BindClusterFlags(clusterInfo *clientcmdapi.Cluster, flags *pflag.FlagSet, f
func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNames ConfigOverrideFlags) {
BindAuthInfoFlags(&overrides.AuthInfo, flags, flagNames.AuthOverrideFlags)
BindClusterFlags(&overrides.ClusterInfo, flags, flagNames.ClusterOverrideFlags)
- // TODO not integrated yet
- // flags.StringVar(&overrides.Namespace, flagNames.Namespace, "", "If present, the namespace scope for this CLI request.")
+ BindContextFlags(&overrides.Context, flags, flagNames.ContextOverrideFlags)
flags.StringVar(&overrides.CurrentContext, flagNames.CurrentContext, "", "The name of the kubeconfig context to use")
- flags.StringVar(&overrides.ClusterName, flagNames.ClusterName, "", "The name of the kubeconfig cluster to use")
- flags.StringVar(&overrides.AuthInfoName, flagNames.AuthInfoName, "", "The name of the kubeconfig user to use")
+}
+
+// BindContextFlags is a convenience method to bind the specified flags to their associated variables
+func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
+ flags.StringVar(&contextInfo.Cluster, flagNames.ClusterName, "", "The name of the kubeconfig cluster to use")
+ flags.StringVar(&contextInfo.AuthInfo, flagNames.AuthInfoName, "", "The name of the kubeconfig user to use")
+ flags.StringVar(&contextInfo.Namespace, flagNames.Namespace, "", "If present, the namespace scope for this CLI request.")
+ flags.StringVar(&contextInfo.NamespacePath, flagNames.NamespacePath, "", "Path to the namespace info file that holds the namespace context to use for CLI requests.")
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/validation.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/validation.go
index 6a2f63f0fbac..aa9b9e64e7d5 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/validation.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/validation.go
@@ -180,7 +180,7 @@ func validateContext(contextName string, context clientcmdapi.Context, config cl
}
if (len(context.Namespace) != 0) && !util.IsDNS952Label(context.Namespace) {
- validationErrors = append(validationErrors, fmt.Errorf("namespace, %v, for context %v, does not conform to the kubernetest DNS952 rules", context.Namespace, contextName))
+ validationErrors = append(validationErrors, fmt.Errorf("namespace, %v, for context %v, does not conform to the kubernetes DNS952 rules", context.Namespace, contextName))
}
return validationErrors
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events.go
index 11464056adf6..6a68b7f6ceba 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events.go
@@ -136,7 +136,7 @@ func (e *events) Search(objOrRef runtime.Object) (*api.EventList, error) {
fields["involvedObject.name"] = ref.Name
}
if ref.UID != "" {
- fields["involvedObject.uid"] = ref.UID
+ fields["involvedObject.uid"] = string(ref.UID)
}
return e.List(labels.Everything(), fields.AsSelector())
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events_test.go
index d21c606a2d5c..513b7a84ba8e 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/events_test.go
@@ -63,7 +63,6 @@ func TestEventCreate(t *testing.T) {
timeStamp := util.Now()
event := &api.Event{
//namespace: namespace{"default"},
- Condition: "Running",
InvolvedObject: *objReference,
Timestamp: timeStamp,
}
@@ -98,7 +97,6 @@ func TestEventGet(t *testing.T) {
}
timeStamp := util.Now()
event := &api.Event{
- Condition: "Running",
InvolvedObject: *objReference,
Timestamp: timeStamp,
}
@@ -136,7 +134,6 @@ func TestEventList(t *testing.T) {
eventList := &api.EventList{
Items: []api.Event{
{
- Condition: "Running",
InvolvedObject: *objReference,
Timestamp: timeStamp,
},
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/helper_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/helper_test.go
index 5ed4a6cb2320..6eeaaf185daa 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/helper_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/helper_test.go
@@ -21,14 +21,138 @@ import (
"testing"
)
+const (
+ rootCACert = `-----BEGIN CERTIFICATE-----
+MIIC4DCCAcqgAwIBAgIBATALBgkqhkiG9w0BAQswIzEhMB8GA1UEAwwYMTAuMTMu
+MTI5LjEwNkAxNDIxMzU5MDU4MB4XDTE1MDExNTIxNTczN1oXDTE2MDExNTIxNTcz
+OFowIzEhMB8GA1UEAwwYMTAuMTMuMTI5LjEwNkAxNDIxMzU5MDU4MIIBIjANBgkq
+hkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAunDRXGwsiYWGFDlWH6kjGun+PshDGeZX
+xtx9lUnL8pIRWH3wX6f13PO9sktaOWW0T0mlo6k2bMlSLlSZgG9H6og0W6gLS3vq
+s4VavZ6DbXIwemZG2vbRwsvR+t4G6Nbwelm6F8RFnA1Fwt428pavmNQ/wgYzo+T1
+1eS+HiN4ACnSoDSx3QRWcgBkB1g6VReofVjx63i0J+w8Q/41L9GUuLqquFxu6ZnH
+60vTB55lHgFiDLjA1FkEz2dGvGh/wtnFlRvjaPC54JH2K1mPYAUXTreoeJtLJKX0
+ycoiyB24+zGCniUmgIsmQWRPaOPircexCp1BOeze82BT1LCZNTVaxQIDAQABoyMw
+ITAOBgNVHQ8BAf8EBAMCAKQwDwYDVR0TAQH/BAUwAwEB/zALBgkqhkiG9w0BAQsD
+ggEBADMxsUuAFlsYDpF4fRCzXXwrhbtj4oQwcHpbu+rnOPHCZupiafzZpDu+rw4x
+YGPnCb594bRTQn4pAu3Ac18NbLD5pV3uioAkv8oPkgr8aUhXqiv7KdDiaWm6sbAL
+EHiXVBBAFvQws10HMqMoKtO8f1XDNAUkWduakR/U6yMgvOPwS7xl0eUTqyRB6zGb
+K55q2dejiFWaFqB/y78txzvz6UlOZKE44g2JAVoJVM6kGaxh33q8/FmrL4kuN3ut
+W+MmJCVDvd4eEqPwbp7146ZWTqpIJ8lvA6wuChtqV8lhAPka2hD/LMqY8iXNmfXD
+uml0obOEy+ON91k+SWTJ3ggmF/U=
+-----END CERTIFICATE-----`
+
+ certData = `-----BEGIN CERTIFICATE-----
+MIIC6jCCAdSgAwIBAgIBCzALBgkqhkiG9w0BAQswIzEhMB8GA1UEAwwYMTAuMTMu
+MTI5LjEwNkAxNDIxMzU5MDU4MB4XDTE1MDExNTIyMDEzMVoXDTE2MDExNTIyMDEz
+MlowGzEZMBcGA1UEAxMQb3BlbnNoaWZ0LWNsaWVudDCCASIwDQYJKoZIhvcNAQEB
+BQADggEPADCCAQoCggEBAKtdhz0+uCLXw5cSYns9rU/XifFSpb/x24WDdrm72S/v
+b9BPYsAStiP148buylr1SOuNi8sTAZmlVDDIpIVwMLff+o2rKYDicn9fjbrTxTOj
+lI4pHJBH+JU3AJ0tbajupioh70jwFS0oYpwtneg2zcnE2Z4l6mhrj2okrc5Q1/X2
+I2HChtIU4JYTisObtin10QKJX01CLfYXJLa8upWzKZ4/GOcHG+eAV3jXWoXidtjb
+1Usw70amoTZ6mIVCkiu1QwCoa8+ycojGfZhvqMsAp1536ZcCul+Na+AbCv4zKS7F
+kQQaImVrXdUiFansIoofGlw/JNuoKK6ssVpS5Ic3pgcCAwEAAaM1MDMwDgYDVR0P
+AQH/BAQDAgCgMBMGA1UdJQQMMAoGCCsGAQUFBwMCMAwGA1UdEwEB/wQCMAAwCwYJ
+KoZIhvcNAQELA4IBAQCKLREH7bXtXtZ+8vI6cjD7W3QikiArGqbl36bAhhWsJLp/
+p/ndKz39iFNaiZ3GlwIURWOOKx3y3GA0x9m8FR+Llthf0EQ8sUjnwaknWs0Y6DQ3
+jjPFZOpV3KPCFrdMJ3++E3MgwFC/Ih/N2ebFX9EcV9Vcc6oVWMdwT0fsrhu683rq
+6GSR/3iVX1G/pmOiuaR0fNUaCyCfYrnI4zHBDgSfnlm3vIvN2lrsR/DQBakNL8DJ
+HBgKxMGeUPoneBv+c8DMXIL0EhaFXRlBv9QW45/GiAIOuyFJ0i6hCtGZpJjq4OpQ
+BRjCI+izPzFTjsxD4aORE+WOkyWFCGPWKfNejfw0
+-----END CERTIFICATE-----`
+
+ keyData = `-----BEGIN RSA PRIVATE KEY-----
+MIIEowIBAAKCAQEAq12HPT64ItfDlxJiez2tT9eJ8VKlv/HbhYN2ubvZL+9v0E9i
+wBK2I/Xjxu7KWvVI642LyxMBmaVUMMikhXAwt9/6jaspgOJyf1+NutPFM6OUjikc
+kEf4lTcAnS1tqO6mKiHvSPAVLShinC2d6DbNycTZniXqaGuPaiStzlDX9fYjYcKG
+0hTglhOKw5u2KfXRAolfTUIt9hcktry6lbMpnj8Y5wcb54BXeNdaheJ22NvVSzDv
+RqahNnqYhUKSK7VDAKhrz7JyiMZ9mG+oywCnXnfplwK6X41r4BsK/jMpLsWRBBoi
+ZWtd1SIVqewiih8aXD8k26gorqyxWlLkhzemBwIDAQABAoIBAD2XYRs3JrGHQUpU
+FkdbVKZkvrSY0vAZOqBTLuH0zUv4UATb8487anGkWBjRDLQCgxH+jucPTrztekQK
+aW94clo0S3aNtV4YhbSYIHWs1a0It0UdK6ID7CmdWkAj6s0T8W8lQT7C46mWYVLm
+5mFnCTHi6aB42jZrqmEpC7sivWwuU0xqj3Ml8kkxQCGmyc9JjmCB4OrFFC8NNt6M
+ObvQkUI6Z3nO4phTbpxkE1/9dT0MmPIF7GhHVzJMS+EyyRYUDllZ0wvVSOM3qZT0
+JMUaBerkNwm9foKJ1+dv2nMKZZbJajv7suUDCfU44mVeaEO+4kmTKSGCGjjTBGkr
+7L1ySDECgYEA5ElIMhpdBzIivCuBIH8LlUeuzd93pqssO1G2Xg0jHtfM4tz7fyeI
+cr90dc8gpli24dkSxzLeg3Tn3wIj/Bu64m2TpZPZEIlukYvgdgArmRIPQVxerYey
+OkrfTNkxU1HXsYjLCdGcGXs5lmb+K/kuTcFxaMOs7jZi7La+jEONwf8CgYEAwCs/
+rUOOA0klDsWWisbivOiNPII79c9McZCNBqncCBfMUoiGe8uWDEO4TFHN60vFuVk9
+8PkwpCfvaBUX+ajvbafIfHxsnfk1M04WLGCeqQ/ym5Q4sQoQOcC1b1y9qc/xEWfg
+nIUuia0ukYRpl7qQa3tNg+BNFyjypW8zukUAC/kCgYB1/Kojuxx5q5/oQVPrx73k
+2bevD+B3c+DYh9MJqSCNwFtUpYIWpggPxoQan4LwdsmO0PKzocb/ilyNFj4i/vII
+NToqSc/WjDFpaDIKyuu9oWfhECye45NqLWhb/6VOuu4QA/Nsj7luMhIBehnEAHW+
+GkzTKM8oD1PxpEG3nPKXYQKBgQC6AuMPRt3XBl1NkCrpSBy/uObFlFaP2Enpf39S
+3OZ0Gv0XQrnSaL1kP8TMcz68rMrGX8DaWYsgytstR4W+jyy7WvZwsUu+GjTJ5aMG
+77uEcEBpIi9CBzivfn7hPccE8ZgqPf+n4i6q66yxBJflW5xhvafJqDtW2LcPNbW/
+bvzdmQKBgExALRUXpq+5dbmkdXBHtvXdRDZ6rVmrnjy4nI5bPw+1GqQqk6uAR6B/
+F6NmLCQOO4PDG/cuatNHIr2FrwTmGdEL6ObLUGWn9Oer9gJhHVqqsY5I4sEPo4XX
+stR0Yiw0buV6DL/moUO0HIM9Bjh96HJp+LxiIS6UCdIhMPp5HoQa
+-----END RSA PRIVATE KEY-----`
+)
+
func TestTransportFor(t *testing.T) {
testCases := map[string]struct {
Config *Config
Err bool
+ TLS bool
Default bool
}{
"default transport": {
- Config: &Config{},
+ Default: true,
+ Config: &Config{},
+ },
+
+ "ca transport": {
+ TLS: true,
+ Config: &Config{
+ CAData: []byte(rootCACert),
+ },
+ },
+ "bad ca file transport": {
+ Err: true,
+ Config: &Config{
+ CAFile: "invalid file",
+ },
+ },
+ "ca data overriding bad ca file transport": {
+ TLS: true,
+ Config: &Config{
+ CAData: []byte(rootCACert),
+ CAFile: "invalid file",
+ },
+ },
+
+ "cert transport": {
+ TLS: true,
+ Config: &Config{
+ CertData: []byte(certData),
+ KeyData: []byte(keyData),
+ CAData: []byte(rootCACert),
+ },
+ },
+ "bad cert data transport": {
+ Err: true,
+ Config: &Config{
+ CertData: []byte(certData),
+ KeyData: []byte("bad key data"),
+ CAData: []byte(rootCACert),
+ },
+ },
+ "bad file cert transport": {
+ Err: true,
+ Config: &Config{
+ CertData: []byte(certData),
+ KeyFile: "invalid file",
+ CAData: []byte(rootCACert),
+ },
+ },
+ "key data overriding bad file cert transport": {
+ TLS: true,
+ Config: &Config{
+ CertData: []byte(certData),
+ KeyData: []byte(keyData),
+ KeyFile: "invalid file",
+ CAData: []byte(rootCACert),
+ },
},
}
for k, testCase := range testCases {
@@ -41,8 +165,26 @@ func TestTransportFor(t *testing.T) {
t.Errorf("%s: unexpected error: %v", k, err)
continue
}
- if testCase.Default && transport != http.DefaultTransport {
+
+ switch {
+ case testCase.Default && transport != http.DefaultTransport:
t.Errorf("%s: expected the default transport, got %#v", k, transport)
+ continue
+ case !testCase.Default && transport == http.DefaultTransport:
+ t.Errorf("%s: expected non-default transport, got %#v", k, transport)
+ continue
+ }
+
+ // We only know how to check TLSConfig on http.Transports
+ if transport, ok := transport.(*http.Transport); ok {
+ switch {
+ case testCase.TLS && transport.TLSClientConfig == nil:
+ t.Errorf("%s: expected TLSClientConfig, got %#v", k, transport)
+ continue
+ case !testCase.TLS && transport.TLSClientConfig != nil:
+ t.Errorf("%s: expected no TLSClientConfig, got %#v", k, transport)
+ continue
+ }
}
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet.go
index 6a8fdaae4aab..8c1c5b7186b3 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet.go
@@ -46,9 +46,9 @@ type KubeletHealthChecker interface {
// PodInfoGetter is an interface for things that can get information about a pod's containers.
// Injectable for easy testing.
type PodInfoGetter interface {
- // GetPodInfo returns information about all containers which are part
- // Returns an api.PodInfo, or an error if one occurs.
- GetPodInfo(host, podNamespace, podID string) (api.PodContainerInfo, error)
+ // GetPodStatus returns information about all containers which are part
+ // Returns an api.PodStatus, or an error if one occurs.
+ GetPodStatus(host, podNamespace, podID string) (api.PodStatusResult, error)
}
// HTTPKubeletClient is the default implementation of PodInfoGetter and KubeletHealthchecker, accesses the kubelet over HTTP.
@@ -113,7 +113,7 @@ func (c *HTTPKubeletClient) url(host string) string {
}
// GetPodInfo gets information about the specified pod.
-func (c *HTTPKubeletClient) GetPodInfo(host, podNamespace, podID string) (api.PodContainerInfo, error) {
+func (c *HTTPKubeletClient) GetPodStatus(host, podNamespace, podID string) (api.PodStatusResult, error) {
request, err := http.NewRequest(
"GET",
fmt.Sprintf(
@@ -122,28 +122,28 @@ func (c *HTTPKubeletClient) GetPodInfo(host, podNamespace, podID string) (api.Po
podID,
podNamespace),
nil)
- info := api.PodContainerInfo{}
+ status := api.PodStatusResult{}
if err != nil {
- return info, err
+ return status, err
}
response, err := c.Client.Do(request)
if err != nil {
- return info, err
+ return status, err
}
defer response.Body.Close()
if response.StatusCode == http.StatusNotFound {
- return info, ErrPodInfoNotAvailable
+ return status, ErrPodInfoNotAvailable
}
body, err := ioutil.ReadAll(response.Body)
if err != nil {
- return info, err
+ return status, err
}
// Check that this data can be unmarshalled
- err = latest.Codec.DecodeInto(body, &info)
+ err = latest.Codec.DecodeInto(body, &status)
if err != nil {
- return info, err
+ return status, err
}
- return info, nil
+ return status, nil
}
func (c *HTTPKubeletClient) HealthCheck(host string) (health.Status, error) {
@@ -156,8 +156,8 @@ func (c *HTTPKubeletClient) HealthCheck(host string) (health.Status, error) {
type FakeKubeletClient struct{}
// GetPodInfo is a fake implementation of PodInfoGetter.GetPodInfo.
-func (c FakeKubeletClient) GetPodInfo(host, podNamespace string, podID string) (api.PodContainerInfo, error) {
- return api.PodContainerInfo{}, errors.New("Not Implemented")
+func (c FakeKubeletClient) GetPodStatus(host, podNamespace string, podID string) (api.PodStatusResult, error) {
+ return api.PodStatusResult{}, errors.New("Not Implemented")
}
func (c FakeKubeletClient) HealthCheck(host string) (health.Status, error) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet_test.go
index dcdb9185d84f..a36b2986ec21 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/kubelet_test.go
@@ -31,9 +31,12 @@ import (
)
func TestHTTPKubeletClient(t *testing.T) {
- expectObj := api.PodContainerInfo{
- ContainerInfo: map[string]api.ContainerStatus{
- "myID": {},
+ expectObj := api.PodStatusResult{
+ Status: api.PodStatus{
+ Info: map[string]api.ContainerStatus{
+ "myID1": {},
+ "myID2": {},
+ },
},
}
body, err := json.Marshal(expectObj)
@@ -64,13 +67,13 @@ func TestHTTPKubeletClient(t *testing.T) {
Client: http.DefaultClient,
Port: uint(port),
}
- gotObj, err := podInfoGetter.GetPodInfo(parts[0], api.NamespaceDefault, "foo")
+ gotObj, err := podInfoGetter.GetPodStatus(parts[0], api.NamespaceDefault, "foo")
if err != nil {
t.Errorf("unexpected error: %v", err)
}
// reflect.DeepEqual(expectObj, gotObj) doesn't handle blank times well
- if len(gotObj.ContainerInfo) != len(expectObj.ContainerInfo) {
+ if len(gotObj.Status.Info) != len(expectObj.Status.Info) {
t.Errorf("Unexpected response. Expected: %#v, received %#v", expectObj, gotObj)
}
}
@@ -109,7 +112,7 @@ func TestHTTPKubeletClientNotFound(t *testing.T) {
Client: http.DefaultClient,
Port: uint(port),
}
- _, err = podInfoGetter.GetPodInfo(parts[0], api.NamespaceDefault, "foo")
+ _, err = podInfoGetter.GetPodStatus(parts[0], api.NamespaceDefault, "foo")
if err != ErrPodInfoNotAvailable {
t.Errorf("Expected %#v, Got %#v", ErrPodInfoNotAvailable, err)
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event.go
index 91b93cdc48de..d85306b8b716 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event.go
@@ -18,6 +18,7 @@ package record
import (
"fmt"
+ "math/rand"
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -30,8 +31,9 @@ import (
"github.com/golang/glog"
)
-// retryEventSleep is the time between record failures to retry. Available for test alteration.
-var retryEventSleep = 1 * time.Second
+const maxTriesPerEvent = 12
+
+var sleepDuration = 10 * time.Second
// EventRecorder knows how to store events (client.Client implements it.)
// EventRecorder must respect the namespace that will be embedded in 'event'.
@@ -46,54 +48,73 @@ type EventRecorder interface {
// or used to stop recording, if desired.
// TODO: make me an object with parameterizable queue length and retry interval
func StartRecording(recorder EventRecorder, source api.EventSource) watch.Interface {
+ // The default math/rand package functions aren't thread safe, so create a
+ // new Rand object for each StartRecording call.
+ randGen := rand.New(rand.NewSource(time.Now().UnixNano()))
return GetEvents(func(event *api.Event) {
// Make a copy before modification, because there could be multiple listeners.
// Events are safe to copy like this.
eventCopy := *event
event = &eventCopy
event.Source = source
- try := 0
+
+ tries := 0
for {
- try++
- _, err := recorder.Create(event)
- if err == nil {
+ if recordEvent(recorder, event) {
break
}
- // If we can't contact the server, then hold everything while we keep trying.
- // Otherwise, something about the event is malformed and we should abandon it.
- giveUp := false
- switch err.(type) {
- case *client.RequestConstructionError:
- // We will construct the request the same next time, so don't keep trying.
- giveUp = true
- case *errors.StatusError:
- // This indicates that the server understood and rejected our request.
- giveUp = true
- case *errors.UnexpectedObjectError:
- // We don't expect this; it implies the server's response didn't match a
- // known pattern. Go ahead and retry.
- default:
- // This case includes actual http transport errors. Go ahead and retry.
- }
- if giveUp {
- glog.Errorf("Unable to write event '%#v': '%v' (will not retry!)", event, err)
+ tries++
+ if tries >= maxTriesPerEvent {
+ glog.Errorf("Unable to write event '%#v' (retry limit exceeded!)", event)
break
}
- if try >= 3 {
- glog.Errorf("Unable to write event '%#v': '%v' (retry limit exceeded!)", event, err)
- break
+ // Randomize the first sleep so that various clients won't all be
+ // synced up if the master goes down.
+ if tries == 1 {
+ time.Sleep(time.Duration(float64(sleepDuration) * randGen.Float64()))
+ } else {
+ time.Sleep(sleepDuration)
}
- glog.Errorf("Unable to write event: '%v' (will retry in 1 second)", err)
- time.Sleep(retryEventSleep)
}
})
}
+// recordEvent attempts to write event to recorder. It returns true if the event
+// was successfully recorded or discarded, false if it should be retried.
+func recordEvent(recorder EventRecorder, event *api.Event) bool {
+ _, err := recorder.Create(event)
+ if err == nil {
+ return true
+ }
+ // If we can't contact the server, then hold everything while we keep trying.
+ // Otherwise, something about the event is malformed and we should abandon it.
+ giveUp := false
+ switch err.(type) {
+ case *client.RequestConstructionError:
+ // We will construct the request the same next time, so don't keep trying.
+ giveUp = true
+ case *errors.StatusError:
+ // This indicates that the server understood and rejected our request.
+ giveUp = true
+ case *errors.UnexpectedObjectError:
+ // We don't expect this; it implies the server's response didn't match a
+ // known pattern. Go ahead and retry.
+ default:
+ // This case includes actual http transport errors. Go ahead and retry.
+ }
+ if giveUp {
+ glog.Errorf("Unable to write event '%#v': '%v' (will not retry!)", event, err)
+ return true
+ }
+ glog.Errorf("Unable to write event: '%v' (may retry after sleeping)", err)
+ return false
+}
+
// StartLogging just logs local events, using the given logging function. The
// return value can be ignored or used to stop logging, if desired.
func StartLogging(logf func(format string, args ...interface{})) watch.Interface {
return GetEvents(func(e *api.Event) {
- logf("Event(%#v): status: '%v', reason: '%v' %v", e.InvolvedObject, e.Condition, e.Reason, e.Message)
+ logf("Event(%#v): reason: '%v' %v", e.InvolvedObject, e.Reason, e.Message)
})
}
@@ -120,24 +141,23 @@ func GetEvents(f func(*api.Event)) watch.Interface {
return w
}
-const queueLen = 1000
+const maxQueuedEvents = 1000
-var events = watch.NewBroadcaster(queueLen)
+var events = watch.NewBroadcaster(maxQueuedEvents, watch.DropIfChannelFull)
// Event constructs an event from the given information and puts it in the queue for sending.
// 'object' is the object this event is about. Event will make a reference-- or you may also
// pass a reference to the object directly.
-// 'condition' is the new condition of the object. 'reason' is the reason it now has this status.
-// Both 'condition' and 'reason' should be short and unique; they will be used to automate
-// handling of events, so imagine people writing switch statements to handle them. You want to
-// make that easy.
+// 'reason' is the reason this event is generated. 'reason' should be short and unique; it will
+// be used to automate handling of events, so imagine people writing switch statements to
+// handle them. You want to make that easy.
// 'message' is intended to be human readable.
//
// The resulting event will be created in the same namespace as the reference object.
-func Event(object runtime.Object, condition, reason, message string) {
+func Event(object runtime.Object, reason, message string) {
ref, err := api.GetReference(object)
if err != nil {
- glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v' '%v'", object, err, condition, reason, message)
+ glog.Errorf("Could not construct reference to: '%#v' due to: '%v'. Will not report event: '%v' '%v'", object, err, reason, message)
return
}
t := util.Now()
@@ -148,7 +168,6 @@ func Event(object runtime.Object, condition, reason, message string) {
Namespace: ref.Namespace,
},
InvolvedObject: *ref,
- Condition: condition,
Reason: reason,
Message: message,
Timestamp: t,
@@ -158,6 +177,6 @@ func Event(object runtime.Object, condition, reason, message string) {
}
// Eventf is just like Event, but with Sprintf for the message field.
-func Eventf(object runtime.Object, status, reason, messageFmt string, args ...interface{}) {
- Event(object, status, reason, fmt.Sprintf(messageFmt, args...))
+func Eventf(object runtime.Object, reason, messageFmt string, args ...interface{}) {
+ Event(object, reason, fmt.Sprintf(messageFmt, args...))
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event_test.go
index 0d64ebaa7a64..ff317376942c 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/record/event_test.go
@@ -19,9 +19,9 @@ package record
import (
"fmt"
"reflect"
+ "strconv"
"strings"
"testing"
- "time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
@@ -31,7 +31,8 @@ import (
)
func init() {
- retryEventSleep = 1 * time.Microsecond
+ // Don't bother sleeping between retries.
+ sleepDuration = 0
}
type testEventRecorder struct {
@@ -64,16 +65,15 @@ func TestEventf(t *testing.T) {
t.Fatal(err)
}
table := []struct {
- obj runtime.Object
- status, reason string
- messageFmt string
- elements []interface{}
- expect *api.Event
- expectLog string
+ obj runtime.Object
+ reason string
+ messageFmt string
+ elements []interface{}
+ expect *api.Event
+ expectLog string
}{
{
obj: testRef,
- status: "Running",
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@@ -90,16 +90,14 @@ func TestEventf(t *testing.T) {
APIVersion: "v1beta1",
FieldPath: "desiredState.manifest.containers[2]",
},
- Condition: "Running",
- Reason: "Started",
- Message: "some verbose message: 1",
- Source: api.EventSource{Component: "eventTest"},
+ Reason: "Started",
+ Message: "some verbose message: 1",
+ Source: api.EventSource{Component: "eventTest"},
},
- expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:"desiredState.manifest.containers[2]"}): status: 'Running', reason: 'Started' some verbose message: 1`,
+ expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:"desiredState.manifest.containers[2]"}): reason: 'Started' some verbose message: 1`,
},
{
obj: testPod,
- status: "Running",
reason: "Started",
messageFmt: "some verbose message: %v",
elements: []interface{}{1},
@@ -115,12 +113,11 @@ func TestEventf(t *testing.T) {
UID: "bar",
APIVersion: "v1beta1",
},
- Condition: "Running",
- Reason: "Started",
- Message: "some verbose message: 1",
- Source: api.EventSource{Component: "eventTest"},
+ Reason: "Started",
+ Message: "some verbose message: 1",
+ Source: api.EventSource{Component: "eventTest"},
},
- expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:""}): status: 'Running', reason: 'Started' some verbose message: 1`,
+ expectLog: `Event(api.ObjectReference{Kind:"Pod", Namespace:"baz", Name:"foo", UID:"bar", APIVersion:"v1beta1", ResourceVersion:"", FieldPath:""}): reason: 'Started' some verbose message: 1`,
},
}
@@ -155,7 +152,7 @@ func TestEventf(t *testing.T) {
called <- struct{}{}
})
- Eventf(item.obj, item.status, item.reason, item.messageFmt, item.elements...)
+ Eventf(item.obj, item.reason, item.messageFmt, item.elements...)
<-called
<-called
@@ -192,12 +189,12 @@ func TestWriteEventError(t *testing.T) {
},
"retry1": {
timesToSendError: 1000,
- attemptsWanted: 3,
+ attemptsWanted: 12,
err: &errors.UnexpectedObjectError{},
},
"retry2": {
timesToSendError: 1000,
- attemptsWanted: 3,
+ attemptsWanted: 12,
err: fmt.Errorf("A weird error"),
},
"succeedEventually": {
@@ -231,9 +228,9 @@ func TestWriteEventError(t *testing.T) {
).Stop()
for caseName := range table {
- Event(ref, "Status", "Reason", caseName)
+ Event(ref, "Reason", caseName)
}
- Event(ref, "Status", "Reason", "finished")
+ Event(ref, "Reason", "finished")
<-done
for caseName, item := range table {
@@ -242,3 +239,54 @@ func TestWriteEventError(t *testing.T) {
}
}
}
+
+func TestLotsOfEvents(t *testing.T) {
+ recorderCalled := make(chan struct{})
+ loggerCalled := make(chan struct{})
+
+ // Fail each event a few times to ensure there's some load on the tested code.
+ var counts [1000]int
+ testEvents := testEventRecorder{
+ OnEvent: func(event *api.Event) (*api.Event, error) {
+ num, err := strconv.Atoi(event.Message)
+ if err != nil {
+ t.Error(err)
+ return event, nil
+ }
+ counts[num]++
+ if counts[num] < 5 {
+ return nil, fmt.Errorf("fake error")
+ }
+ recorderCalled <- struct{}{}
+ return event, nil
+ },
+ }
+ recorder := StartRecording(&testEvents, api.EventSource{Component: "eventTest"})
+ logger := StartLogging(func(formatter string, args ...interface{}) {
+ loggerCalled <- struct{}{}
+ })
+
+ ref := &api.ObjectReference{
+ Kind: "Pod",
+ Name: "foo",
+ Namespace: "baz",
+ UID: "bar",
+ APIVersion: "v1beta1",
+ }
+ for i := 0; i < maxQueuedEvents; i++ {
+ go Event(ref, "Reason", strconv.Itoa(i))
+ }
+ // Make sure no events were dropped by either of the listeners.
+ for i := 0; i < maxQueuedEvents; i++ {
+ <-recorderCalled
+ <-loggerCalled
+ }
+ // Make sure that every event was attempted 5 times
+ for i := 0; i < maxQueuedEvents; i++ {
+ if counts[i] < 5 {
+ t.Errorf("Only attempted to record event '%d' %d times.", i, counts[i])
+ }
+ }
+ recorder.Stop()
+ logger.Stop()
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/restclient_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/restclient_test.go
index d0e2e1ebb6fd..073e1c4bb8cf 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/restclient_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/restclient_test.go
@@ -102,7 +102,7 @@ func TestSetDefaults(t *testing.T) {
case err != nil:
continue
}
- if *val != testCase.After {
+ if !reflect.DeepEqual(*val, testCase.After) {
t.Errorf("unexpected result object: %#v", val)
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce/gce.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce/gce.go
index 5916918f9cbe..65e62f6d915d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce/gce.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce/gce.go
@@ -32,10 +32,11 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/resource"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
- "code.google.com/p/goauth2/compute/serviceaccount"
compute "code.google.com/p/google-api-go-client/compute/v1"
container "code.google.com/p/google-api-go-client/container/v1beta1"
"github.com/golang/glog"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
)
// GCECloud is an implementation of Interface, TCPLoadBalancer and Instances for Google Compute Engine.
@@ -109,10 +110,7 @@ func newGCECloud() (*GCECloud, error) {
if err != nil {
return nil, err
}
- client, err := serviceaccount.NewClient(&serviceaccount.Options{})
- if err != nil {
- return nil, err
- }
+ client := oauth2.NewClient(oauth2.NoContext, google.ComputeTokenSource(""))
svc, err := compute.New(client)
if err != nil {
return nil, err
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication_controller.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication_controller.go
index 49c026b86813..b347d94c72c8 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication_controller.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/controller/replication_controller.go
@@ -21,9 +21,11 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
"github.com/golang/glog"
)
@@ -122,6 +124,10 @@ func (rm *ReplicationManager) watchControllers(resourceVersion *string) {
// that called us call us again.
return
}
+ if event.Type == watch.Error {
+ glog.Errorf("error from watch during sync: %v", errors.FromObject(event.Object))
+ continue
+ }
glog.V(4).Infof("Got watch: %#v", event)
rc, ok := event.Object.(*api.ReplicationController)
if !ok {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter.go
index 753d95c74f27..1e2c2c42ab22 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter.go
@@ -394,20 +394,29 @@ func (c *Converter) convert(sv, dv reflect.Value, scope *scope) error {
func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
dt, st := dv.Type(), sv.Type()
+ if !dv.CanSet() {
+ return scope.error("Cannot set dest. (Tried to deep copy something with unexported fields?)")
+ }
+
if !scope.flags.IsSet(AllowDifferentFieldTypeNames) && c.NameFunc(dt) != c.NameFunc(st) {
return scope.error(
"type names don't match (%v, %v), and no conversion 'func (%v, %v) error' registered.",
c.NameFunc(st), c.NameFunc(dt), st, dt)
}
- // This should handle all simple types.
- if st.AssignableTo(dt) {
- dv.Set(sv)
- return nil
- }
- if st.ConvertibleTo(dt) {
- dv.Set(sv.Convert(dt))
- return nil
+ switch st.Kind() {
+ case reflect.Map, reflect.Ptr, reflect.Slice, reflect.Interface, reflect.Struct:
+ // Don't copy these via assignment/conversion!
+ default:
+ // This should handle all simple types.
+ if st.AssignableTo(dt) {
+ dv.Set(sv)
+ return nil
+ }
+ if st.ConvertibleTo(dt) {
+ dv.Set(sv.Convert(dt))
+ return nil
+ }
}
if c.Debug != nil {
@@ -466,6 +475,18 @@ func (c *Converter) defaultConvert(sv, dv reflect.Value, scope *scope) error {
}
dv.SetMapIndex(dk, dkv)
}
+ case reflect.Interface:
+ if sv.IsNil() {
+ // Don't copy a nil interface!
+ dv.Set(reflect.Zero(dt))
+ return nil
+ }
+ tmpdv := reflect.New(sv.Elem().Type()).Elem()
+ if err := c.convert(sv.Elem(), tmpdv, scope); err != nil {
+ return err
+ }
+ dv.Set(reflect.ValueOf(tmpdv.Interface()))
+ return nil
default:
return scope.error("couldn't copy '%v' into '%v'; didn't understand types", st, dt)
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter_test.go
index d1b3e2ba0341..2ca555aac17e 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/converter_test.go
@@ -66,6 +66,46 @@ func TestConverter_DefaultConvert(t *testing.T) {
}
}
+func TestConverter_DeepCopy(t *testing.T) {
+ type A struct {
+ Foo *string
+ Bar []string
+ Baz interface{}
+ Qux map[string]string
+ }
+ c := NewConverter()
+ c.Debug = t
+
+ foo, baz := "foo", "baz"
+ x := A{
+ Foo: &foo,
+ Bar: []string{"bar"},
+ Baz: &baz,
+ Qux: map[string]string{"qux": "qux"},
+ }
+ y := A{}
+
+ if err := c.Convert(&x, &y, 0, nil); err != nil {
+ t.Fatalf("unexpected error %v", err)
+ }
+ *x.Foo = "foo2"
+ x.Bar[0] = "bar2"
+ *x.Baz.(*string) = "baz2"
+ x.Qux["qux"] = "qux2"
+ if e, a := *x.Foo, *y.Foo; e == a {
+ t.Errorf("expected difference between %v and %v", e, a)
+ }
+ if e, a := x.Bar, y.Bar; reflect.DeepEqual(e, a) {
+ t.Errorf("expected difference between %v and %v", e, a)
+ }
+ if e, a := *x.Baz.(*string), *y.Baz.(*string); e == a {
+ t.Errorf("expected difference between %v and %v", e, a)
+ }
+ if e, a := x.Qux, y.Qux; reflect.DeepEqual(e, a) {
+ t.Errorf("expected difference between %v and %v", e, a)
+ }
+}
+
func TestConverter_CallsRegisteredFunctions(t *testing.T) {
type A struct {
Foo string
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/scheme_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/scheme_test.go
index 3d2c3bbc5cfb..40bf1e9c81fb 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/scheme_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/conversion/scheme_test.go
@@ -18,7 +18,6 @@ package conversion
import (
"encoding/json"
- "flag"
"fmt"
"reflect"
"strings"
@@ -28,6 +27,7 @@ import (
"github.com/ghodss/yaml"
"github.com/google/gofuzz"
+ flag "github.com/spf13/pflag"
)
var fuzzIters = flag.Int("fuzz_iters", 50, "How many fuzzing iterations to do.")
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider/config.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider/config.go
index 03999093f7e6..b44750409d39 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider/config.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider/config.go
@@ -51,7 +51,6 @@ func ReadDockerConfigFile() (cfg DockerConfig, err error) {
if err != nil {
glog.Errorf("while trying to canonicalize %s: %v", dockerConfigFileLocation, err)
}
- absDockerConfigFileLocation, err = filepath.Abs(dockerConfigFileLocation)
glog.V(2).Infof("looking for .dockercfg at %s", absDockerConfigFileLocation)
contents, err := ioutil.ReadFile(absDockerConfigFileLocation)
if os.IsNotExist(err) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec.go
index 579875ac9701..9f0ad4434c12 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec.go
@@ -21,13 +21,14 @@ import (
"strings"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/golang/glog"
)
const defaultHealthyOutput = "ok"
type CommandRunner interface {
- RunInContainer(podFullName, uuid, containerName string, cmd []string) ([]byte, error)
+ RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
}
type ExecHealthChecker struct {
@@ -38,11 +39,11 @@ func NewExecHealthChecker(runner CommandRunner) HealthChecker {
return &ExecHealthChecker{runner}
}
-func (e *ExecHealthChecker) HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (Status, error) {
+func (e *ExecHealthChecker) HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (Status, error) {
if container.LivenessProbe.Exec == nil {
return Unknown, fmt.Errorf("missing exec parameters")
}
- data, err := e.runner.RunInContainer(podFullName, podUUID, container.Name, container.LivenessProbe.Exec.Command)
+ data, err := e.runner.RunInContainer(podFullName, podUID, container.Name, container.LivenessProbe.Exec.Command)
glog.V(1).Infof("container %s health check response: %s", podFullName, string(data))
if err != nil {
return Unknown, err
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec_test.go
index 462379a741c1..a9aa2199c894 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/exec_test.go
@@ -22,6 +22,7 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
type FakeExec struct {
@@ -30,7 +31,7 @@ type FakeExec struct {
err error
}
-func (f *FakeExec) RunInContainer(podFullName, uuid, container string, cmd []string) ([]byte, error) {
+func (f *FakeExec) RunInContainer(podFullName string, uid types.UID, container string, cmd []string) ([]byte, error) {
f.cmd = cmd
return f.out, f.err
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/health.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/health.go
index 61260b30ceec..64233e166ee8 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/health.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/health.go
@@ -20,6 +20,7 @@ import (
"sync"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/golang/glog"
)
@@ -35,7 +36,7 @@ const (
// HealthChecker defines an abstract interface for checking container health.
type HealthChecker interface {
- HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (Status, error)
+ HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (Status, error)
CanCheck(probe *api.LivenessProbe) bool
}
@@ -78,13 +79,13 @@ func (m *muxHealthChecker) findCheckerFor(probe *api.LivenessProbe) HealthChecke
// HealthCheck delegates the health-checking of the container to one of the bundled implementations.
// If there is no health checker that can check container it returns Unknown, nil.
-func (m *muxHealthChecker) HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (Status, error) {
+func (m *muxHealthChecker) HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (Status, error) {
checker := m.findCheckerFor(container.LivenessProbe)
if checker == nil {
glog.Warningf("Failed to find health checker for %s %+v", container.Name, container.LivenessProbe)
return Unknown, nil
}
- return checker.HealthCheck(podFullName, podUUID, status, container)
+ return checker.HealthCheck(podFullName, podUID, status, container)
}
func (m *muxHealthChecker) CanCheck(probe *api.LivenessProbe) bool {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/http.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/http.go
index 3e7998132c17..f5b447938871 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/http.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/http.go
@@ -24,6 +24,7 @@ import (
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)
@@ -105,7 +106,7 @@ func DoHTTPCheck(url string, client HTTPGetInterface) (Status, error) {
}
// HealthCheck checks if the container is healthy by trying sending HTTP Get requests to the container.
-func (h *HTTPHealthChecker) HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (Status, error) {
+func (h *HTTPHealthChecker) HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (Status, error) {
host, port, path, err := getURLParts(status, container)
if err != nil {
return Unknown, err
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/tcp.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/tcp.go
index 4d0cdc9305f8..039e82d140cd 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/tcp.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/health/tcp.go
@@ -22,6 +22,7 @@ import (
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)
@@ -74,7 +75,7 @@ func DoTCPCheck(addr string) (Status, error) {
return Healthy, nil
}
-func (t *TCPHealthChecker) HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (Status, error) {
+func (t *TCPHealthChecker) HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (Status, error) {
host, port, err := getTCPAddrParts(status, container)
if err != nil {
return Unknown, err
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer.go
index 094e97f2c4b0..08d3d7be30ff 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer.go
@@ -143,7 +143,7 @@ var replicationControllerColumns = []string{"Name", "Image(s)", "Selector", "Rep
var serviceColumns = []string{"Name", "Labels", "Selector", "IP", "Port"}
var minionColumns = []string{"Minion identifier", "Labels"}
var statusColumns = []string{"Status"}
-var eventColumns = []string{"Name", "Kind", "Condition", "Reason", "Message"}
+var eventColumns = []string{"Name", "Kind", "Reason", "Message"}
// addDefaultHandlers adds print handlers for default Kubernetes types.
func (h *HumanReadablePrinter) addDefaultHandlers() {
@@ -269,10 +269,9 @@ func printStatus(status *api.Status, w io.Writer) error {
func printEvent(event *api.Event, w io.Writer) error {
_, err := fmt.Fprintf(
- w, "%s\t%s\t%s\t%s\t%s\n",
+ w, "%s\t%s\t%s\t%s\n",
event.InvolvedObject.Name,
event.InvolvedObject.Kind,
- event.Condition,
event.Reason,
event.Message,
)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer_test.go
index d3144b248d9c..19db87de636f 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubecfg/resource_printer_test.go
@@ -77,7 +77,7 @@ func TestYAMLPrinterPrint(t *testing.T) {
var objOut api.Pod
err = yaml.Unmarshal([]byte(buf.String()), &objOut)
if err != nil {
- t.Errorf("Unexpeted error: %#v", err)
+ t.Errorf("Unexpected error: %#v", err)
}
if !reflect.DeepEqual(obj, &objOut) {
t.Errorf("Unexpected inequality: %#v vs %#v", obj, &objOut)
@@ -100,7 +100,7 @@ func TestIdentityPrinter(t *testing.T) {
printer.PrintObj(obj, buff)
objOut, err := latest.Codec.Decode([]byte(buff.String()))
if err != nil {
- t.Errorf("Unexpeted error: %#v", err)
+ t.Errorf("Unexpected error: %#v", err)
}
if !reflect.DeepEqual(obj, objOut) {
t.Errorf("Unexpected inequality: %#v vs %#v", obj, objOut)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd.go
index b4b400d3e655..2a94c6efc35a 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd.go
@@ -62,8 +62,14 @@ type Factory struct {
Describer func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Describer, error)
// Returns a Printer for formatting objects of the given type or an error.
Printer func(cmd *cobra.Command, mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error)
+ // Returns a Resizer for changing the size of the specified RESTMapping type or an error
+ Resizer func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Resizer, error)
+ // Returns a Reaper for gracefully shutting down resources.
+ Reaper func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Reaper, error)
// Returns a schema that can validate objects stored on disk.
Validator func(*cobra.Command) (validation.Schema, error)
+ // Returns the default namespace to use in cases where no other namespace is specified
+ DefaultNamespace func(cmd *cobra.Command) (string, error)
}
// NewFactory creates a factory with the default Kubernetes resources defined
@@ -71,6 +77,7 @@ type Factory struct {
// if optionalClientConfig is not nil, then this factory will make use of it.
func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
mapper := kubectl.ShortcutExpander{latest.RESTMapper}
+
flags := pflag.NewFlagSet("", pflag.ContinueOnError)
clientConfig := optionalClientConfig
@@ -88,8 +95,11 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
flags: flags,
Object: func(cmd *cobra.Command) (meta.RESTMapper, runtime.ObjectTyper) {
- version := GetFlagString(cmd, "api-version")
- return kubectl.OutputVersionMapper{mapper, version}, api.Scheme
+ cfg, err := clientConfig.ClientConfig()
+ checkErr(err)
+ cmdApiVersion := cfg.Version
+
+ return kubectl.OutputVersionMapper{mapper, cmdApiVersion}, api.Scheme
},
Client: func(cmd *cobra.Command) (*client.Client, error) {
return clients.ClientForVersion("")
@@ -118,6 +128,20 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
Printer: func(cmd *cobra.Command, mapping *meta.RESTMapping, noHeaders bool) (kubectl.ResourcePrinter, error) {
return kubectl.NewHumanReadablePrinter(noHeaders), nil
},
+ Resizer: func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Resizer, error) {
+ client, err := clients.ClientForVersion(mapping.APIVersion)
+ if err != nil {
+ return nil, err
+ }
+ return kubectl.ResizerFor(mapping.Kind, client)
+ },
+ Reaper: func(cmd *cobra.Command, mapping *meta.RESTMapping) (kubectl.Reaper, error) {
+ client, err := clients.ClientForVersion(mapping.APIVersion)
+ if err != nil {
+ return nil, err
+ }
+ return kubectl.ReaperFor(mapping.Kind, client)
+ },
Validator: func(cmd *cobra.Command) (validation.Schema, error) {
if GetFlagBool(cmd, "validate") {
client, err := clients.ClientForVersion("")
@@ -128,6 +152,9 @@ func NewFactory(optionalClientConfig clientcmd.ClientConfig) *Factory {
}
return validation.NullSchema{}, nil
},
+ DefaultNamespace: func(cmd *cobra.Command) (string, error) {
+ return clientConfig.Namespace()
+ },
}
}
@@ -136,6 +163,10 @@ func (f *Factory) BindFlags(flags *pflag.FlagSet) {
// any flags defined by external projects (not part of pflags)
util.AddAllFlagsToPFlagSet(flags)
+ // This is necessary as github.com/spf13/cobra doesn't support "global"
+ // pflags currently. See https://github.com/spf13/cobra/issues/44.
+ util.AddPFlagSetToPFlagSet(pflag.CommandLine, flags)
+
if f.flags != nil {
f.flags.VisitAll(func(flag *pflag.Flag) {
flags.AddFlag(flag)
@@ -147,8 +178,6 @@ func (f *Factory) BindFlags(flags *pflag.FlagSet) {
// TODO Add a verbose flag that turns on glog logging. Probably need a way
// to do that automatically for every subcommand.
flags.BoolVar(&f.clients.matchVersion, FlagMatchBinaryVersion, false, "Require server version to match client version")
- flags.String("ns-path", os.Getenv("HOME")+"/.kubernetes_ns", "Path to the namespace info file that holds the namespace context to use for CLI requests.")
- flags.StringP("namespace", "n", "", "If present, the namespace scope for this CLI request.")
flags.Bool("validate", false, "If true, use a schema to validate the input before sending it")
}
@@ -179,8 +208,10 @@ Find more information at https://github.com/GoogleCloudPlatform/kubernetes.`,
cmds.AddCommand(NewCmdNamespace(out))
cmds.AddCommand(f.NewCmdLog(out))
cmds.AddCommand(f.NewCmdRollingUpdate(out))
+ cmds.AddCommand(f.NewCmdResize(out))
cmds.AddCommand(f.NewCmdRunContainer(out))
+ cmds.AddCommand(f.NewCmdStop(out))
return cmds
}
@@ -223,7 +254,12 @@ func DefaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
flags.StringVar(&loadingRules.CommandLinePath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
overrides := &clientcmd.ConfigOverrides{}
- clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
+ flagNames := clientcmd.RecommendedConfigOverrideFlags("")
+ // short flagnames are disabled by default. These are here for compatibility with existing scripts
+ flagNames.AuthOverrideFlags.AuthPathShort = "a"
+ flagNames.ClusterOverrideFlags.APIServerShort = "s"
+
+ clientcmd.BindOverrideFlags(overrides, flags, flagNames)
clientConfig := clientcmd.NewInteractiveDeferredLoadingClientConfig(loadingRules, overrides, os.Stdin)
return clientConfig
@@ -252,38 +288,6 @@ func runHelp(cmd *cobra.Command, args []string) {
cmd.Help()
}
-// GetKubeNamespace returns the value of the namespace a
-// user provided on the command line or use the default
-// namespace.
-func GetKubeNamespace(cmd *cobra.Command) string {
- result := api.NamespaceDefault
- if ns := GetFlagString(cmd, "namespace"); len(ns) > 0 {
- result = ns
- glog.V(2).Infof("Using namespace from -ns flag")
- } else {
- nsPath := GetFlagString(cmd, "ns-path")
- nsInfo, err := kubectl.LoadNamespaceInfo(nsPath)
- if err != nil {
- glog.Fatalf("Error loading current namespace: %v", err)
- }
- result = nsInfo.Namespace
- }
- glog.V(2).Infof("Using namespace %s", result)
- return result
-}
-
-// GetExplicitKubeNamespace returns the value of the namespace a
-// user explicitly provided on the command line, or false if no
-// such namespace was specified.
-func GetExplicitKubeNamespace(cmd *cobra.Command) (string, bool) {
- if ns := GetFlagString(cmd, "namespace"); len(ns) > 0 {
- return ns, true
- }
- // TODO: determine when --ns-path is set but equal to the default
- // value and return its value and true.
- return "", false
-}
-
type clientSwaggerSchema struct {
c *client.Client
t runtime.ObjectTyper
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd_test.go
index 317c46c9174c..50dc9315a376 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/cmd_test.go
@@ -26,6 +26,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
. "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource"
@@ -92,13 +93,15 @@ func (t *testDescriber) Describe(namespace, name string) (output string, err err
}
type testFactory struct {
- Mapper meta.RESTMapper
- Typer runtime.ObjectTyper
- Client kubectl.RESTClient
- Describer kubectl.Describer
- Printer kubectl.ResourcePrinter
- Validator validation.Schema
- Err error
+ Mapper meta.RESTMapper
+ Typer runtime.ObjectTyper
+ Client kubectl.RESTClient
+ Describer kubectl.Describer
+ Printer kubectl.ResourcePrinter
+ Validator validation.Schema
+ Namespace string
+ ClientConfig *client.Config
+ Err error
}
func NewTestFactory() (*Factory, *testFactory, runtime.Codec) {
@@ -124,6 +127,12 @@ func NewTestFactory() (*Factory, *testFactory, runtime.Codec) {
Validator: func(cmd *cobra.Command) (validation.Schema, error) {
return t.Validator, t.Err
},
+ DefaultNamespace: func(cmd *cobra.Command) (string, error) {
+ return t.Namespace, t.Err
+ },
+ ClientConfig: func(cmd *cobra.Command) (*client.Config, error) {
+ return t.ClientConfig, t.Err
+ },
}, t, codec
}
@@ -147,6 +156,12 @@ func NewAPIFactory() (*Factory, *testFactory, runtime.Codec) {
Validator: func(cmd *cobra.Command) (validation.Schema, error) {
return t.Validator, t.Err
},
+ DefaultNamespace: func(cmd *cobra.Command) (string, error) {
+ return t.Namespace, t.Err
+ },
+ ClientConfig: func(cmd *cobra.Command) (*client.Config, error) {
+ return t.ClientConfig, t.Err
+ },
}, t, latest.Codec
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/config/config.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/config/config.go
index e73fb8416353..4c6bf23a69cb 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/config/config.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/config/config.go
@@ -47,7 +47,7 @@ func NewCmdConfig(out io.Writer) *cobra.Command {
}
// file paths are common to all sub commands
- cmd.PersistentFlags().BoolVar(&pathOptions.local, "local", true, "use the .kubeconfig in the currect directory")
+ cmd.PersistentFlags().BoolVar(&pathOptions.local, "local", true, "use the .kubeconfig in the current directory")
cmd.PersistentFlags().BoolVar(&pathOptions.global, "global", false, "use the .kubeconfig from "+os.Getenv("HOME"))
cmd.PersistentFlags().StringVar(&pathOptions.specifiedFile, "kubeconfig", "", "use a particular .kubeconfig file")
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create.go
index ea97f8ae60eb..1038560eed69 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create.go
@@ -47,10 +47,13 @@ Examples:
schema, err := f.Validator(cmd)
checkErr(err)
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
mapper, typer := f.Object(cmd)
r := resource.NewBuilder(mapper, typer, ClientMapperForCommand(cmd, f)).
ContinueOnError().
- NamespaceParam(GetKubeNamespace(cmd)).RequireNamespace().
+ NamespaceParam(cmdNamespace).RequireNamespace().
FilenameParam(flags.Filenames...).
Flatten().
Do()
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create_test.go
index a8b6fa0289df..2253b9f74b00 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/create_test.go
@@ -41,10 +41,10 @@ func TestCreateObject(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdCreate(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Run(cmd, []string{})
@@ -73,10 +73,10 @@ func TestCreateMultipleObject(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdCreate(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.json")
cmd.Run(cmd, []string{})
@@ -107,10 +107,10 @@ func TestCreateDirectory(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdCreate(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook")
cmd.Run(cmd, []string{})
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete.go
index a5bbf75291d8..94704056388d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete.go
@@ -20,7 +20,6 @@ import (
"fmt"
"io"
- "github.com/golang/glog"
"github.com/spf13/cobra"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
@@ -59,10 +58,13 @@ Examples:
$ kubectl delete pod 1234-56-7890-234234-456456
`,
Run: func(cmd *cobra.Command, args []string) {
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
mapper, typer := f.Object(cmd)
r := resource.NewBuilder(mapper, typer, ClientMapperForCommand(cmd, f)).
ContinueOnError().
- NamespaceParam(GetKubeNamespace(cmd)).DefaultNamespace().
+ NamespaceParam(cmdNamespace).DefaultNamespace().
FilenameParam(flags.Filenames...).
SelectorParam(GetFlagString(cmd, "selector")).
ResourceTypeOrNameArgs(args...).
@@ -70,7 +72,7 @@ Examples:
Do()
found := 0
- r.IgnoreErrors(errors.IsNotFound).Visit(func(r *resource.Info) error {
+ err = r.IgnoreErrors(errors.IsNotFound).Visit(func(r *resource.Info) error {
found++
if err := resource.NewHelper(r.Client, r.Mapping).Delete(r.Namespace, r.Name); err != nil {
return err
@@ -78,8 +80,9 @@ Examples:
fmt.Fprintf(out, "%s\n", r.Name)
return nil
})
+ checkErr(err)
if found == 0 {
- glog.V(2).Infof("No resource(s) found")
+ fmt.Fprintf(cmd.Out(), "No resources found\n")
}
},
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete_test.go
index 3447895c5950..3885d2f9fdd0 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/delete_test.go
@@ -22,6 +22,7 @@ import (
"strings"
"testing"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)
@@ -42,10 +43,10 @@ func TestDeleteObject(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Run(cmd, []string{})
@@ -70,10 +71,10 @@ func TestDeleteObjectIgnoreNotFound(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Run(cmd, []string{})
@@ -82,6 +83,37 @@ func TestDeleteObjectIgnoreNotFound(t *testing.T) {
}
}
+func TestDeleteNoObjects(t *testing.T) {
+ f, tf, codec := NewAPIFactory()
+ tf.Printer = &testPrinter{}
+ tf.Client = &client.FakeRESTClient{
+ Codec: codec,
+ Client: client.HTTPClientFunc(func(req *http.Request) (*http.Response, error) {
+ switch p, m := req.URL.Path, req.Method; {
+ case p == "/ns/test/pods" && m == "GET":
+ return &http.Response{StatusCode: 200, Body: objBody(codec, &api.PodList{})}, nil
+ default:
+ t.Fatalf("unexpected request: %#v\n%#v", req.URL, req)
+ return nil, nil
+ }
+ }),
+ }
+ tf.Namespace = "test"
+ buf := bytes.NewBuffer([]byte{})
+ stderr := bytes.NewBuffer([]byte{})
+
+ cmd := f.NewCmdDelete(buf)
+ cmd.SetOutput(stderr)
+ cmd.Run(cmd, []string{"pods"})
+
+ if buf.String() != "" {
+ t.Errorf("unexpected output: %s", buf.String())
+ }
+ if stderr.String() != "No resources found\n" {
+ t.Errorf("unexpected output: %s", stderr.String())
+ }
+}
+
func TestDeleteMultipleObject(t *testing.T) {
pods, svc := testData()
@@ -101,10 +133,10 @@ func TestDeleteMultipleObject(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.json")
cmd.Run(cmd, []string{})
@@ -133,10 +165,10 @@ func TestDeleteMultipleObjectIgnoreMissing(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook/redis-master.json")
cmd.Flags().Set("filename", "../../../examples/guestbook/frontend-service.json")
cmd.Run(cmd, []string{})
@@ -167,10 +199,10 @@ func TestDeleteDirectory(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("filename", "../../../examples/guestbook")
cmd.Run(cmd, []string{})
@@ -208,10 +240,10 @@ func TestDeleteMultipleSelector(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDelete(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("selector", "a=b")
cmd.Run(cmd, []string{"pods,services"})
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe.go
index e5d27984ab2b..7950d57cb732 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe.go
@@ -32,8 +32,11 @@ func (f *Factory) NewCmdDescribe(out io.Writer) *cobra.Command {
This command joins many API calls together to form a detailed description of a
given resource.`,
Run: func(cmd *cobra.Command, args []string) {
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
mapper, _ := f.Object(cmd)
- mapping, namespace, name := ResourceFromArgs(cmd, args, mapper)
+ mapping, namespace, name := ResourceFromArgs(cmd, args, mapper, cmdNamespace)
describer, err := f.Describer(cmd, mapping)
checkErr(err)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe_test.go
index 914aa2097ddf..994a38e0f809 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/describe_test.go
@@ -34,14 +34,13 @@ func TestDescribeUnknownSchemaObject(t *testing.T) {
Codec: codec,
Resp: &http.Response{StatusCode: 200, Body: objBody(codec, &internalType{Name: "foo"})},
}
+ tf.Namespace = "non-default"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdDescribe(buf)
- cmd.Flags().String("api-version", "default", "")
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"type", "foo"})
- if d.Name != "foo" || d.Namespace != "test" {
+ if d.Name != "foo" || d.Namespace != "non-default" {
t.Errorf("unexpected describer: %#v", d)
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/factory_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/factory_test.go
new file mode 100644
index 000000000000..d3a9efc41767
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/factory_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
+ clientcmdapi "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/api"
+)
+
+func TestNewFactoryDefaultFlagBindings(t *testing.T) {
+ factory := NewFactory(nil)
+
+ if !factory.flags.HasFlags() {
+ t.Errorf("Expected flags, but didn't get any")
+ }
+}
+
+func TestNewFactoryNoFlagBindings(t *testing.T) {
+ clientConfig := clientcmd.NewDefaultClientConfig(*clientcmdapi.NewConfig(), &clientcmd.ConfigOverrides{})
+ factory := NewFactory(clientConfig)
+
+ if factory.flags.HasFlags() {
+ t.Errorf("Expected zero flags, but got %v", factory.flags)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get.go
index 01989534e829..273fc1d86880 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get.go
@@ -62,7 +62,7 @@ Examples:
AddPrinterFlags(cmd)
cmd.Flags().StringP("selector", "l", "", "Selector (label query) to filter on")
cmd.Flags().BoolP("watch", "w", false, "After listing/getting the requested object, watch for changes.")
- cmd.Flags().Bool("watch-only", false, "Watch for changes to the requseted object(s), without listing/getting first.")
+ cmd.Flags().Bool("watch-only", false, "Watch for changes to the requested object(s), without listing/getting first.")
return cmd
}
@@ -73,11 +73,14 @@ func RunGet(f *Factory, out io.Writer, cmd *cobra.Command, args []string) {
selector := GetFlagString(cmd, "selector")
mapper, typer := f.Object(cmd)
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
// handle watch separately since we cannot watch multiple resource types
isWatch, isWatchOnly := GetFlagBool(cmd, "watch"), GetFlagBool(cmd, "watch-only")
if isWatch || isWatchOnly {
r := resource.NewBuilder(mapper, typer, ClientMapperForCommand(cmd, f)).
- NamespaceParam(GetKubeNamespace(cmd)).DefaultNamespace().
+ NamespaceParam(cmdNamespace).DefaultNamespace().
SelectorParam(selector).
ResourceTypeOrNameArgs(args...).
SingleResourceType().
@@ -113,7 +116,7 @@ func RunGet(f *Factory, out io.Writer, cmd *cobra.Command, args []string) {
}
b := resource.NewBuilder(mapper, typer, ClientMapperForCommand(cmd, f)).
- NamespaceParam(GetKubeNamespace(cmd)).DefaultNamespace().
+ NamespaceParam(cmdNamespace).DefaultNamespace().
SelectorParam(selector).
ResourceTypeOrNameArgs(args...).
Latest()
@@ -121,8 +124,12 @@ func RunGet(f *Factory, out io.Writer, cmd *cobra.Command, args []string) {
checkErr(err)
if generic {
+ clientConfig, err := f.ClientConfig(cmd)
+ checkErr(err)
+ defaultVersion := clientConfig.Version
+
// the outermost object will be converted to the output-version
- version := outputVersion(cmd)
+ version := outputVersion(cmd, defaultVersion)
if len(version) == 0 {
// TODO: add a new ResourceBuilder mode for Object() that attempts to ensure the objects
// are in the appropriate version if one exists (and if not, use the best effort).
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get_test.go
index 9ea81450883d..6bf792d115c3 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/get_test.go
@@ -69,12 +69,12 @@ func TestGetUnknownSchemaObject(t *testing.T) {
Codec: codec,
Resp: &http.Response{StatusCode: 200, Body: objBody(codec, &internalType{Name: "foo"})},
}
+ tf.Namespace = "test"
+ tf.ClientConfig = &client.Config{Version: latest.Version}
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("api-version", "default", "")
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"type", "foo"})
expected := &internalType{Name: "foo"}
@@ -98,11 +98,11 @@ func TestGetSchemaObject(t *testing.T) {
Codec: codec,
Resp: &http.Response{StatusCode: 200, Body: objBody(codec, &api.ReplicationController{ObjectMeta: api.ObjectMeta{Name: "foo"}})},
}
+ tf.Namespace = "test"
+ tf.ClientConfig = &client.Config{Version: "v1beta3"}
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
- cmd.Flags().String("api-version", "v1beta3", "")
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"replicationcontrollers", "foo"})
if !strings.Contains(buf.String(), "\"foo\"") {
@@ -119,11 +119,11 @@ func TestGetObjects(t *testing.T) {
Codec: codec,
Resp: &http.Response{StatusCode: 200, Body: objBody(codec, &pods.Items[0])},
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"pods", "foo"})
expected := []runtime.Object{&pods.Items[0]}
@@ -145,11 +145,11 @@ func TestGetListObjects(t *testing.T) {
Codec: codec,
Resp: &http.Response{StatusCode: 200, Body: objBody(codec, pods)},
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"pods"})
expected := []runtime.Object{pods}
@@ -181,11 +181,11 @@ func TestGetMultipleTypeObjects(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Run(cmd, []string{"pods,services"})
expected := []runtime.Object{pods, svc}
@@ -217,12 +217,12 @@ func TestGetMultipleTypeObjectsAsList(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
+ tf.ClientConfig = &client.Config{Version: "v1beta1"}
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
- cmd.Flags().String("api-version", "v1beta1", "")
cmd.Flags().Set("output", "json")
cmd.Run(cmd, []string{"pods,services"})
@@ -269,11 +269,11 @@ func TestGetMultipleTypeObjectsWithSelector(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("selector", "a=b")
cmd.Run(cmd, []string{"pods,services"})
@@ -345,11 +345,11 @@ func TestWatchSelector(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("watch", "true")
cmd.Flags().Set("selector", "a=b")
@@ -384,11 +384,11 @@ func TestWatchResource(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("watch", "true")
cmd.Run(cmd, []string{"pods", "foo"})
@@ -422,11 +422,11 @@ func TestWatchOnlyResource(t *testing.T) {
}
}),
}
+ tf.Namespace = "test"
buf := bytes.NewBuffer([]byte{})
cmd := f.NewCmdGet(buf)
cmd.SetOutput(buf)
- cmd.Flags().String("namespace", "test", "")
cmd.Flags().Set("watch-only", "true")
cmd.Run(cmd, []string{"pods", "foo"})
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers.go
index 942c95e33e80..aa3c7053cdc8 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers.go
@@ -17,6 +17,7 @@ limitations under the License.
package cmd
import (
+ "encoding/json"
"fmt"
"io/ioutil"
"net/http"
@@ -26,7 +27,10 @@ import (
"strings"
"time"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/golang/glog"
+ "github.com/imdario/mergo"
"github.com/spf13/cobra"
)
@@ -171,3 +175,37 @@ func ReadConfigDataFromLocation(location string) ([]byte, error) {
return data, nil
}
}
+
+func Merge(dst runtime.Object, fragment, kind string) error {
+ // Ok, this is a little hairy, we'd rather not force the user to specify a kind for their JSON
+ // So we pull it into a map, add the Kind field, and then reserialize.
+ // We also pull the apiVersion for proper parsing
+ var intermediate interface{}
+ if err := json.Unmarshal([]byte(fragment), &intermediate); err != nil {
+ return err
+ }
+ dataMap, ok := intermediate.(map[string]interface{})
+ if !ok {
+ return fmt.Errorf("Expected a map, found something else: %s", fragment)
+ }
+ version, found := dataMap["apiVersion"]
+ if !found {
+ return fmt.Errorf("Inline JSON requires an apiVersion field")
+ }
+ versionString, ok := version.(string)
+ if !ok {
+ return fmt.Errorf("apiVersion must be a string")
+ }
+ codec := runtime.CodecFor(api.Scheme, versionString)
+
+ dataMap["kind"] = kind
+ data, err := json.Marshal(intermediate)
+ if err != nil {
+ return err
+ }
+ src, err := codec.Decode(data)
+ if err != nil {
+ return err
+ }
+ return mergo.Merge(dst, src)
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers_test.go
new file mode 100644
index 000000000000..57c229539ead
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/helpers_test.go
@@ -0,0 +1,108 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+)
+
+func TestMerge(t *testing.T) {
+ tests := []struct {
+ obj runtime.Object
+ fragment string
+ expected runtime.Object
+ expectErr bool
+ }{
+ {
+ obj: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ },
+ },
+ fragment: "{ \"apiVersion\": \"v1beta1\" }",
+ expected: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ },
+ },
+ },
+ {
+ obj: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ },
+ },
+ fragment: "{ \"apiVersion\": \"v1beta1\", \"id\": \"baz\", \"desiredState\": { \"host\": \"bar\" } }",
+ expected: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "baz",
+ },
+ Spec: api.PodSpec{
+ Host: "bar",
+ },
+ },
+ },
+ {
+ obj: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ },
+ },
+ fragment: "{ \"apiVersion\": \"v1beta3\", \"spec\": { \"volumes\": [ {\"name\": \"v1\"}, {\"name\": \"v2\"} ] } }",
+ expected: &api.Pod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ },
+ Spec: api.PodSpec{
+ Volumes: []api.Volume{
+ {
+ Name: "v1",
+ },
+ {
+ Name: "v2",
+ },
+ },
+ },
+ },
+ },
+ {
+ obj: &api.Pod{},
+ fragment: "invalid json",
+ expected: &api.Pod{},
+ expectErr: true,
+ },
+ }
+
+ for _, test := range tests {
+ err := Merge(test.obj, test.fragment, "Pod")
+ if !test.expectErr {
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ } else if !reflect.DeepEqual(test.obj, test.expected) {
+ t.Errorf("\nexpected:\n%v\nsaw:\n%v", test.expected, test.obj)
+ }
+ }
+ if test.expectErr && err == nil {
+ t.Errorf("unexpected non-error")
+ }
+ }
+
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/log.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/log.go
index 1e88248e2f4c..f1067172b5d1 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/log.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/log.go
@@ -43,7 +43,8 @@ Examples:
usageError(cmd, "log []")
}
- namespace := GetKubeNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
client, err := f.Client(cmd)
checkErr(err)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing.go
index c2d4ab2486ed..3f3b5ad5bae7 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing.go
@@ -36,35 +36,30 @@ func AddPrinterFlags(cmd *cobra.Command) {
}
// PrintObject prints an api object given command line flags to modify the output format
-func PrintObject(cmd *cobra.Command, obj runtime.Object, f *Factory, out io.Writer) {
+func PrintObject(cmd *cobra.Command, obj runtime.Object, f *Factory, out io.Writer) error {
mapper, _ := f.Object(cmd)
_, kind, err := api.Scheme.ObjectVersionAndKind(obj)
- checkErr(err)
+ if err != nil {
+ return err
+ }
mapping, err := mapper.RESTMapping(kind)
- checkErr(err)
-
- printer, ok, err := PrinterForCommand(cmd)
- checkErr(err)
+ if err != nil {
+ return err
+ }
- if ok {
- version := outputVersion(cmd)
- if len(version) == 0 {
- version = mapping.APIVersion
- }
- if len(version) == 0 {
- checkErr(fmt.Errorf("you must specify an output-version when using this output format"))
- }
- printer = kubectl.NewVersionedPrinter(printer, mapping.ObjectConvertor, version)
+ printer, err := PrinterForMapping(f, cmd, mapping)
+ if err != nil {
+ return err
}
- printer.PrintObj(obj, out)
+ return printer.PrintObj(obj, out)
}
// outputVersion returns the preferred output version for generic content (JSON, YAML, or templates)
-func outputVersion(cmd *cobra.Command) string {
+func outputVersion(cmd *cobra.Command, defaultVersion string) string {
outputVersion := GetFlagString(cmd, "output-version")
if len(outputVersion) == 0 {
- outputVersion = GetFlagString(cmd, "api-version")
+ outputVersion = defaultVersion
}
return outputVersion
}
@@ -89,7 +84,11 @@ func PrinterForMapping(f *Factory, cmd *cobra.Command, mapping *meta.RESTMapping
return nil, err
}
if ok {
- version := outputVersion(cmd)
+ clientConfig, err := f.ClientConfig(cmd)
+ checkErr(err)
+ defaultVersion := clientConfig.Version
+
+ version := outputVersion(cmd, defaultVersion)
if len(version) == 0 {
version = mapping.APIVersion
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing_test.go
new file mode 100644
index 000000000000..9dc8d06d99be
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/printing_test.go
@@ -0,0 +1,67 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd_test
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
+ . "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
+)
+
+func ExamplePrintReplicationController() {
+ f, tf, codec := NewAPIFactory()
+ tf.Printer = kubectl.NewHumanReadablePrinter(false)
+ tf.Client = &client.FakeRESTClient{
+ Codec: codec,
+ Client: nil,
+ }
+ cmd := f.NewCmdRunContainer(os.Stdout)
+ ctrl := &api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ Labels: map[string]string{"foo": "bar"},
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 1,
+ Selector: map[string]string{"foo": "bar"},
+ Template: &api.PodTemplateSpec{
+ ObjectMeta: api.ObjectMeta{
+ Labels: map[string]string{"foo": "bar"},
+ },
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ Image: "someimage",
+ },
+ },
+ },
+ },
+ },
+ }
+ err := PrintObject(cmd, ctrl, f, os.Stdout)
+ if err != nil {
+ fmt.Printf("Unexpected error: %v", err)
+ }
+ // Output:
+ // CONTROLLER CONTAINER(S) IMAGE(S) SELECTOR REPLICAS
+ // foo foo someimage foo=bar 1
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resize.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resize.go
new file mode 100644
index 000000000000..2a5476634275
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resize.go
@@ -0,0 +1,72 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
+ "github.com/spf13/cobra"
+)
+
+func (f *Factory) NewCmdResize(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "resize [--resource-version=<version>] [--current-replicas=<count>] --replicas=<count> <resource> <id>",
+ Short: "Set a new size for a resizable resource (currently only Replication Controllers)",
+ Long: `Set a new size for a resizable resource (currently only Replication Controllers)
+
+Resize also allows users to specify one or more preconditions for the resize action.
+The new size is specified by --replicas=<count>. You can also specify an optional precondition.
+The two currently supported options are --current-replicas or --resource-version.
+If a precondition is specified, it is validated before the resize is attempted, and it is
+guaranteed that the precondition holds true when the resize is sent to the server.
+
+Examples:
+ $ kubectl resize --replicas=3 replicationcontrollers foo
+ resized
+
+ # will only execute if the current size is 2
+ $ kubectl resize --current-replicas=2 --replicas=3 replicationcontrollers foo
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ count := GetFlagInt(cmd, "replicas")
+ if len(args) != 2 || count < 0 {
+ usageError(cmd, "--replicas=<count> <resource> <id>")
+ }
+
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
+ mapper, _ := f.Object(cmd)
+ mapping, namespace, name := ResourceFromArgs(cmd, args, mapper, cmdNamespace)
+
+ resizer, err := f.Resizer(cmd, mapping)
+ checkErr(err)
+
+ resourceVersion := GetFlagString(cmd, "resource-version")
+ currentSize := GetFlagInt(cmd, "current-replicas")
+ s, err := resizer.Resize(namespace, name, &kubectl.ResizePrecondition{currentSize, resourceVersion}, uint(count))
+ checkErr(err)
+ fmt.Fprintf(out, "%s\n", s)
+ },
+ }
+ cmd.Flags().String("resource-version", "", "Precondition for resource version. Requires that the current resource version match this value in order to resize")
+ cmd.Flags().Int("current-replicas", -1, "Precondition for current size. Requires that the current size of the replication controller match this value in order to resize")
+ cmd.Flags().Int("replicas", -1, "The new desired number of replicas. Required.")
+ return cmd
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resource.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resource.go
index 060e9f966d7d..ab440996786b 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resource.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/resource.go
@@ -41,18 +41,20 @@ func ResourcesFromArgsOrFile(
clientBuilder func(cmd *cobra.Command, mapping *meta.RESTMapping) (resource.RESTClient, error),
schema validation.Schema,
requireNames bool,
+ cmdNamespace,
+ cmdVersion string,
) resource.Visitor {
// handling filename & resource id
if len(selector) == 0 {
if requireNames || len(filename) > 0 {
- mapping, namespace, name := ResourceFromArgsOrFile(cmd, args, filename, typer, mapper, schema)
+ mapping, namespace, name := ResourceFromArgsOrFile(cmd, args, filename, typer, mapper, schema, cmdNamespace, cmdVersion)
client, err := clientBuilder(cmd, mapping)
checkErr(err)
return resource.NewInfo(client, mapping, namespace, name)
}
if len(args) == 2 {
- mapping, namespace, name := ResourceOrTypeFromArgs(cmd, args, mapper)
+ mapping, namespace, name := ResourceOrTypeFromArgs(cmd, args, mapper, cmdNamespace, cmdVersion)
client, err := clientBuilder(cmd, mapping)
checkErr(err)
return resource.NewInfo(client, mapping, namespace, name)
@@ -62,7 +64,7 @@ func ResourcesFromArgsOrFile(
labelSelector, err := labels.ParseSelector(selector)
checkErr(err)
- namespace := GetKubeNamespace(cmd)
+ namespace := cmdNamespace
visitors := resource.VisitorList{}
if len(args) < 1 {
@@ -94,7 +96,7 @@ func ResourcesFromArgsOrFile(
// ResourceFromArgsOrFile expects two arguments or a valid file with a given type, and extracts
// the fields necessary to uniquely locate a resource. Displays a usageError if that contract is
// not satisfied, or a generic error if any other problems occur.
-func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string, typer runtime.ObjectTyper, mapper meta.RESTMapper, schema validation.Schema) (mapping *meta.RESTMapping, namespace, name string) {
+func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string, typer runtime.ObjectTyper, mapper meta.RESTMapper, schema validation.Schema, cmdNamespace, cmdVersion string) (mapping *meta.RESTMapping, namespace, name string) {
// If command line args are passed in, use those preferentially.
if len(args) > 0 && len(args) != 2 {
usageError(cmd, "If passing in command line parameters, must be resource and name")
@@ -102,7 +104,7 @@ func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string,
if len(args) == 2 {
resource := args[0]
- namespace = GetKubeNamespace(cmd)
+ namespace = cmdNamespace
name = args[1]
if len(name) == 0 || len(resource) == 0 {
usageError(cmd, "Must specify filename or command line params")
@@ -113,8 +115,7 @@ func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string,
// The error returned by mapper is "no resource defined", which is a usage error
usageError(cmd, err.Error())
}
- version := GetFlagString(cmd, "api-version")
- mapping, err = mapper.RESTMapping(kind, version, defaultVersion)
+ mapping, err = mapper.RESTMapping(kind, cmdVersion, defaultVersion)
checkErr(err)
return
}
@@ -123,7 +124,7 @@ func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string,
usageError(cmd, "Must specify filename or command line params")
}
- mapping, namespace, name, _ = ResourceFromFile(cmd, filename, typer, mapper, schema)
+ mapping, namespace, name, _ = ResourceFromFile(filename, typer, mapper, schema, cmdVersion)
if len(name) == 0 {
checkErr(fmt.Errorf("the resource in the provided file has no name (or ID) defined"))
}
@@ -134,13 +135,13 @@ func ResourceFromArgsOrFile(cmd *cobra.Command, args []string, filename string,
// ResourceFromArgs expects two arguments with a given type, and extracts the fields necessary
// to uniquely locate a resource. Displays a usageError if that contract is not satisfied, or
// a generic error if any other problems occur.
-func ResourceFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTMapper) (mapping *meta.RESTMapping, namespace, name string) {
+func ResourceFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTMapper, cmdNamespace string) (mapping *meta.RESTMapping, namespace, name string) {
if len(args) != 2 {
usageError(cmd, "Must provide resource and name command line params")
}
resource := args[0]
- namespace = GetKubeNamespace(cmd)
+ namespace = cmdNamespace
name = args[1]
if len(name) == 0 || len(resource) == 0 {
usageError(cmd, "Must provide resource and name command line params")
@@ -157,7 +158,7 @@ func ResourceFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTMapper)
// ResourceFromArgs expects two arguments with a given type, and extracts the fields necessary
// to uniquely locate a resource. Displays a usageError if that contract is not satisfied, or
// a generic error if any other problems occur.
-func ResourceOrTypeFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTMapper) (mapping *meta.RESTMapping, namespace, name string) {
+func ResourceOrTypeFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTMapper, cmdNamespace, cmdVersion string) (mapping *meta.RESTMapping, namespace, name string) {
if len(args) == 0 || len(args) > 2 {
usageError(cmd, "Must provide resource or a resource and name as command line params")
}
@@ -167,7 +168,7 @@ func ResourceOrTypeFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTM
usageError(cmd, "Must provide resource or a resource and name as command line params")
}
- namespace = GetKubeNamespace(cmd)
+ namespace = cmdNamespace
if len(args) == 2 {
name = args[1]
if len(name) == 0 {
@@ -178,8 +179,7 @@ func ResourceOrTypeFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTM
defaultVersion, kind, err := mapper.VersionAndKindForResource(resource)
checkErr(err)
- version := GetFlagString(cmd, "api-version")
- mapping, err = mapper.RESTMapping(kind, version, defaultVersion)
+ mapping, err = mapper.RESTMapping(kind, cmdVersion, defaultVersion)
checkErr(err)
return
@@ -188,7 +188,7 @@ func ResourceOrTypeFromArgs(cmd *cobra.Command, args []string, mapper meta.RESTM
// ResourceFromFile retrieves the name and namespace from a valid file. If the file does not
// resolve to a known type an error is returned. The returned mapping can be used to determine
// the correct REST endpoint to modify this resource with.
-func ResourceFromFile(cmd *cobra.Command, filename string, typer runtime.ObjectTyper, mapper meta.RESTMapper, schema validation.Schema) (mapping *meta.RESTMapping, namespace, name string, data []byte) {
+func ResourceFromFile(filename string, typer runtime.ObjectTyper, mapper meta.RESTMapper, schema validation.Schema, cmdVersion string) (mapping *meta.RESTMapping, namespace, name string, data []byte) {
configData, err := ReadConfigData(filename)
checkErr(err)
data = configData
@@ -218,20 +218,18 @@ func ResourceFromFile(cmd *cobra.Command, filename string, typer runtime.ObjectT
checkErr(err)
// if the preferred API version differs, get a different mapper
- version := GetFlagString(cmd, "api-version")
- if version != objVersion {
- mapping, err = mapper.RESTMapping(kind, version)
+ if cmdVersion != objVersion {
+ mapping, err = mapper.RESTMapping(kind, cmdVersion)
checkErr(err)
}
return
}
-// CompareNamespaceFromFile returns an error if the namespace the user has provided on the CLI
+// CompareNamespace returns an error if the namespace the user has provided on the CLI
// or via the default namespace file does not match the namespace of an input file. This
// prevents a user from unintentionally updating the wrong namespace.
-func CompareNamespaceFromFile(cmd *cobra.Command, namespace string) error {
- defaultNamespace := GetKubeNamespace(cmd)
+func CompareNamespace(defaultNamespace, namespace string) error {
if len(namespace) > 0 {
if defaultNamespace != namespace {
return fmt.Errorf("the namespace from the provided file %q does not match the namespace %q. You must pass '--namespace=%s' to perform this operation.", namespace, defaultNamespace, namespace)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/rollingupdate.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/rollingupdate.go
index 1f20afb82391..4cf57c565ba5 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/rollingupdate.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/rollingupdate.go
@@ -34,8 +34,8 @@ const (
func (f *Factory) NewCmdRollingUpdate(out io.Writer) *cobra.Command {
cmd := &cobra.Command{
Use: "rollingupdate -f ",
- Short: "Perform a rolling update of the given replicationController",
- Long: `Perform a rolling update of the given replicationController.",
+ Short: "Perform a rolling update of the given ReplicationController",
+ Long: `Perform a rolling update of the given ReplicationController.
Replaces named controller with new controller, updating one pod at a time to use the
new PodTemplate. The new-controller.json must specify the same namespace as the
@@ -61,12 +61,24 @@ $ cat frontend-v2.json | kubectl rollingupdate frontend-v1 -f -
oldName := args[0]
schema, err := f.Validator(cmd)
checkErr(err)
+
+ clientConfig, err := f.ClientConfig(cmd)
+ checkErr(err)
+ cmdApiVersion := clientConfig.Version
+
mapper, typer := f.Object(cmd)
- mapping, namespace, newName, data := ResourceFromFile(cmd, filename, typer, mapper, schema)
+ mapping, namespace, newName, data := ResourceFromFile(filename, typer, mapper, schema, cmdApiVersion)
if mapping.Kind != "ReplicationController" {
usageError(cmd, "%s does not specify a valid ReplicationController", filename)
}
- err = CompareNamespaceFromFile(cmd, namespace)
+ if oldName == newName {
+ usageError(cmd, "%s cannot have the same name as the existing ReplicationController %s",
+ filename, oldName)
+ }
+
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+ err = CompareNamespace(cmdNamespace, namespace)
checkErr(err)
client, err := f.Client(cmd)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/run.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/run.go
index f2f4ba0af049..05a2d9056cee 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/run.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/run.go
@@ -27,7 +27,7 @@ import (
func (f *Factory) NewCmdRunContainer(out io.Writer) *cobra.Command {
cmd := &cobra.Command{
- Use: "run-container --image= [--replicas=replicas] [--dry-run=]",
+ Use: "run-container <name> --image=<image> [--replicas=replicas] [--dry-run=<bool>] [--overrides=<inline-json>]",
Short: "Run a particular image on the cluster.",
Long: `Create and run a particular image, possibly replicated.
Creates a replication controller to manage the created container(s)
@@ -38,15 +38,20 @@ Examples:
$ kubectl run-container nginx --image=dockerfile/nginx --replicas=5
-
+
$ kubectl run-container nginx --image=dockerfile/nginx --dry-run
- `,
+
+
+ $ kubectl run-container nginx --image=dockerfile/nginx --overrides='{ "apiVersion": "v1beta1", "desiredState": { ... } }'
+ is required for run")
}
- namespace := GetKubeNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
client, err := f.Client(cmd)
checkErr(err)
@@ -56,7 +61,7 @@ Examples:
usageError(cmd, fmt.Sprintf("Generator: %s not found.", generator))
}
names := generator.ParamNames()
- params, err := kubectl.MakeParams(cmd, names)
+ params := kubectl.MakeParams(cmd, names)
params["name"] = args[0]
err = kubectl.ValidateParams(names, params)
@@ -65,12 +70,19 @@ Examples:
controller, err := generator.Generate(params)
checkErr(err)
+ inline := GetFlagString(cmd, "overrides")
+ if len(inline) > 0 {
+ Merge(controller, inline, "ReplicationController")
+ }
+
// TODO: extract this flag to a central location, when such a location exists.
if !GetFlagBool(cmd, "dry-run") {
controller, err = client.ReplicationControllers(namespace).Create(controller.(*api.ReplicationController))
checkErr(err)
}
- PrintObject(cmd, controller, f, out)
+
+ err = PrintObject(cmd, controller, f, out)
+ checkErr(err)
},
}
AddPrinterFlags(cmd)
@@ -79,5 +91,6 @@ Examples:
cmd.Flags().IntP("replicas", "r", 1, "Number of replicas to create for this container. Default 1")
cmd.Flags().Bool("dry-run", false, "If true, only print the object that would be sent, don't actually do anything")
cmd.Flags().StringP("labels", "l", "", "Labels to apply to the pod(s) created by this call to run.")
+ cmd.Flags().String("overrides", "", "An inline JSON override for the generated object. If this is non-empty, it is parsed and used to override the generated object. Requires that the object supply a valid apiVersion field.")
return cmd
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/stop.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/stop.go
new file mode 100644
index 000000000000..17e2782f3d01
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/stop.go
@@ -0,0 +1,56 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package cmd
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/spf13/cobra"
+)
+
+func (f *Factory) NewCmdStop(out io.Writer) *cobra.Command {
+ cmd := &cobra.Command{
+ Use: "stop <resource> <id>",
+ Short: "Gracefully shutdown a resource",
+ Long: `Gracefully shutdown a resource
+
+Attempts to shutdown and delete a resource that supports graceful termination.
+If the resource is resizable it will be resized to 0 before deletion.
+
+Examples:
+ $ kubectl stop replicationcontroller foo
+ foo stopped
+`,
+ Run: func(cmd *cobra.Command, args []string) {
+ if len(args) != 2 {
+ usageError(cmd, "<resource> <id>")
+ }
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ mapper, _ := f.Object(cmd)
+ mapping, namespace, name := ResourceFromArgs(cmd, args, mapper, cmdNamespace)
+
+ reaper, err := f.Reaper(cmd, mapping)
+ checkErr(err)
+
+ s, err := reaper.Stop(namespace, name)
+ checkErr(err)
+ fmt.Fprintf(out, "%s\n", s)
+ },
+ }
+ return cmd
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/update.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/update.go
index 13403b20bf15..6b191533f8d6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/update.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd/update.go
@@ -37,27 +37,78 @@ Examples:
$ cat pod.json | kubectl update -f -
- `,
+
+
+ $ kubectl update pods my-pod --patch='{ "apiVersion": "v1beta1", "desiredState": { "manifest": [{ "cpu": 100 }]}}'
+ `,
Run: func(cmd *cobra.Command, args []string) {
filename := GetFlagString(cmd, "filename")
- if len(filename) == 0 {
- usageError(cmd, "Must specify filename to update")
+ patch := GetFlagString(cmd, "patch")
+ if len(filename) == 0 && len(patch) == 0 {
+ usageError(cmd, "Must specify --filename or --patch to update")
}
- schema, err := f.Validator(cmd)
- checkErr(err)
- mapper, typer := f.Object(cmd)
- mapping, namespace, name, data := ResourceFromFile(cmd, filename, typer, mapper, schema)
- client, err := f.RESTClient(cmd, mapping)
- checkErr(err)
-
- err = CompareNamespaceFromFile(cmd, namespace)
- checkErr(err)
-
- err = resource.NewHelper(client, mapping).Update(namespace, name, true, data)
- checkErr(err)
+ if len(filename) != 0 && len(patch) != 0 {
+ usageError(cmd, "Can not specify both --filename and --patch")
+ }
+ var name string
+ if len(filename) > 0 {
+ name = updateWithFile(cmd, f, filename)
+ } else {
+ name = updateWithPatch(cmd, args, f, patch)
+ }
+
fmt.Fprintf(out, "%s\n", name)
},
}
cmd.Flags().StringP("filename", "f", "", "Filename or URL to file to use to update the resource")
+ cmd.Flags().String("patch", "", "A JSON document to override the existing resource. The resource is downloaded, then patched with the JSON, then updated.")
return cmd
}
+
+func updateWithPatch(cmd *cobra.Command, args []string, f *Factory, patch string) string {
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+
+ mapper, _ := f.Object(cmd)
+ mapping, namespace, name := ResourceFromArgs(cmd, args, mapper, cmdNamespace)
+ client, err := f.RESTClient(cmd, mapping)
+ checkErr(err)
+
+ helper := resource.NewHelper(client, mapping)
+ obj, err := helper.Get(namespace, name)
+ checkErr(err)
+
+ Merge(obj, patch, mapping.Kind)
+
+ data, err := helper.Codec.Encode(obj)
+ checkErr(err)
+
+ err = helper.Update(namespace, name, true, data)
+ checkErr(err)
+ return name
+}
+
+func updateWithFile(cmd *cobra.Command, f *Factory, filename string) string {
+ schema, err := f.Validator(cmd)
+ checkErr(err)
+ mapper, typer := f.Object(cmd)
+
+ clientConfig, err := f.ClientConfig(cmd)
+ checkErr(err)
+ cmdApiVersion := clientConfig.Version
+
+ mapping, namespace, name, data := ResourceFromFile(filename, typer, mapper, schema, cmdApiVersion)
+
+ client, err := f.RESTClient(cmd, mapping)
+ checkErr(err)
+
+ cmdNamespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
+ err = CompareNamespace(cmdNamespace, namespace)
+ checkErr(err)
+
+ err = resource.NewHelper(client, mapping).Update(namespace, name, true, data)
+ checkErr(err)
+
+ return name
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/describe.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/describe.go
index f1a69ed519bf..3ebb5ca9ccbd 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/describe.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/describe.go
@@ -200,13 +200,12 @@ func describeEvents(el *api.EventList, w io.Writer) {
return
}
sort.Sort(SortableEvents(el.Items))
- fmt.Fprint(w, "Events:\nTime\tFrom\tSubobjectPath\tCondition\tReason\tMessage\n")
+ fmt.Fprint(w, "Events:\nTime\tFrom\tSubobjectPath\tReason\tMessage\n")
for _, e := range el.Items {
fmt.Fprintf(w, "%s\t%v\t%v\t%v\t%v\t%v\n",
e.Timestamp.Time.Format(time.RFC1123Z),
e.Source,
e.InvolvedObject.FieldPath,
- e.Condition,
e.Reason,
e.Message)
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate.go
index 74cb86ad33d8..1344c5efd014 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate.go
@@ -58,7 +58,7 @@ func ValidateParams(paramSpec []GeneratorParam, params map[string]string) error
}
// MakeParams is a utility that creates generator parameters from a command line
-func MakeParams(cmd *cobra.Command, params []GeneratorParam) (map[string]string, error) {
+func MakeParams(cmd *cobra.Command, params []GeneratorParam) map[string]string {
result := map[string]string{}
for ix := range params {
f := cmd.Flags().Lookup(params[ix].Name)
@@ -66,5 +66,5 @@ func MakeParams(cmd *cobra.Command, params []GeneratorParam) (map[string]string,
result[params[ix].Name] = f.Value.String()
}
}
- return result, nil
+ return result
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate_test.go
new file mode 100644
index 000000000000..6802f9658d26
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/generate_test.go
@@ -0,0 +1,114 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/spf13/cobra"
+)
+
+func TestValidateParams(t *testing.T) {
+ tests := []struct {
+ paramSpec []GeneratorParam
+ params map[string]string
+ valid bool
+ }{
+ {
+ paramSpec: []GeneratorParam{},
+ params: map[string]string{},
+ valid: true,
+ },
+ {
+ paramSpec: []GeneratorParam{
+ {Name: "foo"},
+ },
+ params: map[string]string{},
+ valid: true,
+ },
+ {
+ paramSpec: []GeneratorParam{
+ {Name: "foo", Required: true},
+ },
+ params: map[string]string{
+ "foo": "bar",
+ },
+ valid: true,
+ },
+ {
+ paramSpec: []GeneratorParam{
+ {Name: "foo", Required: true},
+ },
+ params: map[string]string{
+ "baz": "blah",
+ "foo": "bar",
+ },
+ valid: true,
+ },
+ {
+ paramSpec: []GeneratorParam{
+ {Name: "foo", Required: true},
+ {Name: "baz", Required: true},
+ },
+ params: map[string]string{
+ "baz": "blah",
+ "foo": "bar",
+ },
+ valid: true,
+ },
+ {
+ paramSpec: []GeneratorParam{
+ {Name: "foo", Required: true},
+ {Name: "baz", Required: true},
+ },
+ params: map[string]string{
+ "foo": "bar",
+ },
+ valid: false,
+ },
+ }
+ for _, test := range tests {
+ err := ValidateParams(test.paramSpec, test.params)
+ if test.valid && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if !test.valid && err == nil {
+ t.Errorf("unexpected non-error")
+ }
+ }
+}
+
+func TestMakeParams(t *testing.T) {
+ cmd := &cobra.Command{}
+ cmd.Flags().String("foo", "bar", "")
+ cmd.Flags().String("baz", "", "")
+ cmd.Flags().Set("baz", "blah")
+
+ paramSpec := []GeneratorParam{
+ {Name: "foo", Required: true},
+ {Name: "baz", Required: true},
+ }
+ expected := map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ }
+ params := MakeParams(cmd, paramSpec)
+ if !reflect.DeepEqual(params, expected) {
+ t.Errorf("\nexpected:\n%v\nsaw:\n%v", expected, params)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize.go
new file mode 100644
index 000000000000..ff3f866a631f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize.go
@@ -0,0 +1,93 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "fmt"
+ "strconv"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+)
+
+// ResizePrecondition describes a condition that must be true for the resize to take place
+// If Size == -1, it is ignored.
+// If ResourceVersion is the empty string, it is ignored.
+// Otherwise they must equal the values in the replication controller for it to be valid.
+type ResizePrecondition struct {
+ Size int
+ ResourceVersion string
+}
+
+type PreconditionError struct {
+ Precondition string
+ ExpectedValue string
+ ActualValue string
+}
+
+func (pe *PreconditionError) Error() string {
+ return fmt.Sprintf("Expected %s to be %s, was %s", pe.Precondition, pe.ExpectedValue, pe.ActualValue)
+}
+
+// Validate ensures that the preconditions match. Returns nil if they are valid, an error otherwise
+func (precondition *ResizePrecondition) Validate(controller *api.ReplicationController) error {
+ if precondition.Size != -1 && controller.Spec.Replicas != precondition.Size {
+ return &PreconditionError{"replicas", strconv.Itoa(precondition.Size), strconv.Itoa(controller.Spec.Replicas)}
+ }
+ if precondition.ResourceVersion != "" && controller.ResourceVersion != precondition.ResourceVersion {
+ return &PreconditionError{"resource version", precondition.ResourceVersion, controller.ResourceVersion}
+ }
+ return nil
+}
+
+type Resizer interface {
+ Resize(namespace, name string, preconditions *ResizePrecondition, newSize uint) (string, error)
+}
+
+func ResizerFor(kind string, c client.Interface) (Resizer, error) {
+ switch kind {
+ case "ReplicationController":
+ return &ReplicationControllerResizer{c}, nil
+ }
+ return nil, fmt.Errorf("no resizer has been implemented for %q", kind)
+}
+
+type ReplicationControllerResizer struct {
+ client.Interface
+}
+
+func (resize *ReplicationControllerResizer) Resize(namespace, name string, preconditions *ResizePrecondition, newSize uint) (string, error) {
+ rc := resize.ReplicationControllers(namespace)
+ controller, err := rc.Get(name)
+ if err != nil {
+ return "", err
+ }
+
+ if preconditions != nil {
+ if err := preconditions.Validate(controller); err != nil {
+ return "", err
+ }
+ }
+
+ controller.Spec.Replicas = int(newSize)
+ // TODO: do retry on 409 errors here?
+ if _, err := rc.Update(controller); err != nil {
+ return "", err
+ }
+ // TODO: do a better job of printing objects here.
+ return "resized", nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize_test.go
new file mode 100644
index 000000000000..4e1da6c59a51
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resize_test.go
@@ -0,0 +1,182 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ // "strings"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ // "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+func TestReplicationControllerResize(t *testing.T) {
+ fake := &client.Fake{}
+ resizer := ReplicationControllerResizer{fake}
+ preconditions := ResizePrecondition{-1, ""}
+ count := uint(3)
+ name := "foo"
+ resizer.Resize("default", name, &preconditions, count)
+
+ if len(fake.Actions) != 2 {
+ t.Errorf("unexpected actions: %v, expected 2 actions (get, update)", fake.Actions)
+ }
+ if fake.Actions[0].Action != "get-controller" || fake.Actions[0].Value != name {
+ t.Errorf("unexpected action: %v, expected get-controller %s", fake.Actions[0], name)
+ }
+ if fake.Actions[1].Action != "update-controller" || fake.Actions[1].Value.(*api.ReplicationController).Spec.Replicas != int(count) {
+ t.Errorf("unexpected action %v, expected update-controller with replicas = %d", fake.Actions[1], count)
+ }
+}
+
+func TestReplicationControllerResizeFailsPreconditions(t *testing.T) {
+ fake := &client.Fake{
+ Ctrl: api.ReplicationController{
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 10,
+ },
+ },
+ }
+ resizer := ReplicationControllerResizer{fake}
+ preconditions := ResizePrecondition{2, ""}
+ count := uint(3)
+ name := "foo"
+ resizer.Resize("default", name, &preconditions, count)
+
+ if len(fake.Actions) != 1 {
+ t.Errorf("unexpected actions: %v, expected 1 action (get)", fake.Actions)
+ }
+ if fake.Actions[0].Action != "get-controller" || fake.Actions[0].Value != name {
+ t.Errorf("unexpected action: %v, expected get-controller %s", fake.Actions[0], name)
+ }
+}
+
+func TestPreconditionValidate(t *testing.T) {
+ tests := []struct {
+ preconditions ResizePrecondition
+ controller api.ReplicationController
+ expectError bool
+ test string
+ }{
+ {
+ preconditions: ResizePrecondition{-1, ""},
+ expectError: false,
+ test: "defaults",
+ },
+ {
+ preconditions: ResizePrecondition{-1, ""},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "foo",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 10,
+ },
+ },
+ expectError: false,
+ test: "defaults 2",
+ },
+ {
+ preconditions: ResizePrecondition{0, ""},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "foo",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 0,
+ },
+ },
+ expectError: false,
+ test: "size matches",
+ },
+ {
+ preconditions: ResizePrecondition{-1, "foo"},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "foo",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 10,
+ },
+ },
+ expectError: false,
+ test: "resource version matches",
+ },
+ {
+ preconditions: ResizePrecondition{10, "foo"},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "foo",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 10,
+ },
+ },
+ expectError: false,
+ test: "both match",
+ },
+ {
+ preconditions: ResizePrecondition{10, "foo"},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "foo",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 20,
+ },
+ },
+ expectError: true,
+ test: "size different",
+ },
+ {
+ preconditions: ResizePrecondition{10, "foo"},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "bar",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 10,
+ },
+ },
+ expectError: true,
+ test: "version different",
+ },
+ {
+ preconditions: ResizePrecondition{10, "foo"},
+ controller: api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ ResourceVersion: "bar",
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 20,
+ },
+ },
+ expectError: true,
+ test: "both different",
+ },
+ }
+ for _, test := range tests {
+ err := test.preconditions.Validate(&test.controller)
+ if err != nil && !test.expectError {
+ t.Errorf("unexpected error: %v (%s)", err, test.test)
+ }
+ if err == nil && test.expectError {
+ t.Errorf("unexpected non-error: %v (%s)", err, test.test)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer.go
index c1a47a701d20..79321044a55b 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer.go
@@ -215,12 +215,12 @@ func (h *HumanReadablePrinter) validatePrintHandlerFunc(printFunc reflect.Value)
return nil
}
-var podColumns = []string{"POD", "CONTAINER(S)", "IMAGE(S)", "HOST", "LABELS", "STATUS"}
+var podColumns = []string{"POD", "IP", "CONTAINER(S)", "IMAGE(S)", "HOST", "LABELS", "STATUS"}
var replicationControllerColumns = []string{"CONTROLLER", "CONTAINER(S)", "IMAGE(S)", "SELECTOR", "REPLICAS"}
var serviceColumns = []string{"NAME", "LABELS", "SELECTOR", "IP", "PORT"}
-var minionColumns = []string{"NAME", "LABELS"}
+var minionColumns = []string{"NAME", "LABELS", "STATUS"}
var statusColumns = []string{"STATUS"}
-var eventColumns = []string{"TIME", "NAME", "KIND", "SUBOBJECT", "CONDITION", "REASON", "SOURCE", "MESSAGE"}
+var eventColumns = []string{"TIME", "NAME", "KIND", "SUBOBJECT", "REASON", "SOURCE", "MESSAGE"}
// addDefaultHandlers adds print handlers for default Kubernetes types.
func (h *HumanReadablePrinter) addDefaultHandlers() {
@@ -267,7 +267,7 @@ func printPod(pod *api.Pod, w io.Writer) error {
if len(containers) > 0 {
firstContainer, containers = containers[0], containers[1:]
}
- _, err := fmt.Fprintf(w, "%s/%s\t%s\t%s\t%s\t%s\t%s\n",
+ _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
pod.Name,
pod.Status.PodIP,
firstContainer.Name,
@@ -280,7 +280,7 @@ func printPod(pod *api.Pod, w io.Writer) error {
}
// Lay out all the other containers on separate lines.
for _, container := range containers {
- _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\n", "", container.Name, container.Image, "", "", "")
+ _, err := fmt.Fprintf(w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n", "", "", container.Name, container.Image, "", "", "")
if err != nil {
return err
}
@@ -347,7 +347,26 @@ func printServiceList(list *api.ServiceList, w io.Writer) error {
}
func printMinion(minion *api.Node, w io.Writer) error {
- _, err := fmt.Fprintf(w, "%s\t%s\n", minion.Name, formatLabels(minion.Labels))
+ conditionMap := make(map[api.NodeConditionKind]*api.NodeCondition)
+ NodeAllConditions := []api.NodeConditionKind{api.NodeReady, api.NodeReachable}
+ for i := range minion.Status.Conditions {
+ cond := minion.Status.Conditions[i]
+ conditionMap[cond.Kind] = &cond
+ }
+ var status []string
+ for _, validCondition := range NodeAllConditions {
+ if condition, ok := conditionMap[validCondition]; ok {
+ if condition.Status == api.ConditionFull {
+ status = append(status, string(condition.Kind))
+ } else {
+ status = append(status, "Not"+string(condition.Kind))
+ }
+ }
+ }
+ if len(status) == 0 {
+ status = append(status, "Unknown")
+ }
+ _, err := fmt.Fprintf(w, "%s\t%s\t%s\n", minion.Name, formatLabels(minion.Labels), strings.Join(status, ","))
return err
}
@@ -367,12 +386,11 @@ func printStatus(status *api.Status, w io.Writer) error {
func printEvent(event *api.Event, w io.Writer) error {
_, err := fmt.Fprintf(
- w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
+ w, "%s\t%s\t%s\t%s\t%s\t%s\t%s\n",
event.Timestamp.Time.Format(time.RFC1123Z),
event.InvolvedObject.Name,
event.InvolvedObject.Kind,
event.InvolvedObject.FieldPath,
- event.Condition,
event.Reason,
event.Source,
event.Message,
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer_test.go
index 8603cedb0c28..b33e5d60bfe6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/resource_printer_test.go
@@ -22,6 +22,7 @@ import (
"fmt"
"io"
"reflect"
+ "strings"
"testing"
"time"
@@ -506,3 +507,78 @@ func TestPrintEventsResultSorted(t *testing.T) {
out := buffer.String()
VerifyDatesInOrder(out, "\n" /* rowDelimiter */, " " /* columnDelimiter */, t)
}
+
+func TestPrintMinionStatus(t *testing.T) {
+ printer := NewHumanReadablePrinter(false)
+ table := []struct {
+ minion api.Node
+ status string
+ }{
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo1"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{{Kind: api.NodeReady, Status: api.ConditionFull}}},
+ },
+ status: "Ready",
+ },
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo2"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{
+ {Kind: api.NodeReady, Status: api.ConditionFull},
+ {Kind: api.NodeReachable, Status: api.ConditionFull}}},
+ },
+ status: "Ready,Reachable",
+ },
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo3"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{
+ {Kind: api.NodeReady, Status: api.ConditionFull},
+ {Kind: api.NodeReady, Status: api.ConditionFull}}},
+ },
+ status: "Ready",
+ },
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo4"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{{Kind: api.NodeReady, Status: api.ConditionNone}}},
+ },
+ status: "NotReady",
+ },
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo5"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{{Kind: "InvalidValue", Status: api.ConditionFull}}},
+ },
+ status: "Unknown",
+ },
+ {
+ minion: api.Node{
+ ObjectMeta: api.ObjectMeta{Name: "foo6"},
+ Status: api.NodeStatus{Conditions: []api.NodeCondition{{}}},
+ },
+ status: "Unknown",
+ },
+ }
+
+ for _, test := range table {
+ buffer := &bytes.Buffer{}
+ err := printer.PrintObj(&test.minion, buffer)
+ if err != nil {
+ t.Fatalf("An error occurred printing Minion: %#v", err)
+ }
+ if !contains(strings.Fields(buffer.String()), test.status) {
+ t.Fatalf("Expect printing minion %s with status %#v, got: %#v", test.minion.Name, test.status, buffer.String())
+ }
+ }
+}
+
+func contains(fields []string, field string) bool {
+ for _, v := range fields {
+ if v == field {
+ return true
+ }
+ }
+ return false
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/rolling_updater_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/rolling_updater_test.go
index d1b56cb95510..aad03195f11c 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/rolling_updater_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/rolling_updater_test.go
@@ -26,18 +26,18 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
)
-type customFake struct {
+type updaterFake struct {
*client.Fake
ctrl client.ReplicationControllerInterface
}
-func (c *customFake) ReplicationControllers(namespace string) client.ReplicationControllerInterface {
+func (c *updaterFake) ReplicationControllers(namespace string) client.ReplicationControllerInterface {
return c.ctrl
}
func fakeClientFor(namespace string, responses []fakeResponse) client.Interface {
fake := client.Fake{}
- return &customFake{
+ return &updaterFake{
&fake,
&fakeRc{
&client.FakeReplicationControllers{
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/run_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/run_test.go
new file mode 100644
index 000000000000..07bd523a1601
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/run_test.go
@@ -0,0 +1,104 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+)
+
+func TestGenerate(t *testing.T) {
+ tests := []struct {
+ params map[string]string
+ expected *api.ReplicationController
+ expectErr bool
+ }{
+ {
+ params: map[string]string{
+ "name": "foo",
+ "image": "someimage",
+ "replicas": "1",
+ },
+ expected: &api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ Labels: map[string]string{"run-container": "foo"},
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 1,
+ Selector: map[string]string{"run-container": "foo"},
+ Template: &api.PodTemplateSpec{
+ ObjectMeta: api.ObjectMeta{
+ Labels: map[string]string{"run-container": "foo"},
+ },
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ Image: "someimage",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ {
+ params: map[string]string{
+ "name": "foo",
+ "image": "someimage",
+ "replicas": "1",
+ "labels": "foo=bar,baz=blah",
+ },
+ expected: &api.ReplicationController{
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ Labels: map[string]string{"foo": "bar", "baz": "blah"},
+ },
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 1,
+ Selector: map[string]string{"foo": "bar", "baz": "blah"},
+ Template: &api.PodTemplateSpec{
+ ObjectMeta: api.ObjectMeta{
+ Labels: map[string]string{"foo": "bar", "baz": "blah"},
+ },
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ Image: "someimage",
+ },
+ },
+ },
+ },
+ },
+ },
+ },
+ }
+ generator := BasicReplicationController{}
+ for _, test := range tests {
+ obj, err := generator.Generate(test.params)
+ if !test.expectErr && err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if !reflect.DeepEqual(obj, test.expected) {
+ t.Errorf("\nexpected:\n%v\nsaw:\n%v", test.expected, obj)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop.go
new file mode 100644
index 000000000000..9f3537c91741
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop.go
@@ -0,0 +1,110 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "fmt"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/meta"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/wait"
+)
+
+const (
+ interval = time.Second * 3
+ timeout = time.Minute * 5
+)
+
+// A Reaper handles terminating an object as gracefully as possible.
+type Reaper interface {
+ Stop(namespace, name string) (string, error)
+}
+
+func ReaperFor(kind string, c client.Interface) (Reaper, error) {
+ switch kind {
+ case "ReplicationController":
+ return &ReplicationControllerReaper{c, interval, timeout}, nil
+ case "Pod":
+ return &PodReaper{c}, nil
+ case "Service":
+ return &ServiceReaper{c}, nil
+ }
+ return nil, fmt.Errorf("no reaper has been implemented for %q", kind)
+}
+
+type ReplicationControllerReaper struct {
+ client.Interface
+ pollInterval, timeout time.Duration
+}
+type PodReaper struct {
+ client.Interface
+}
+type ServiceReaper struct {
+ client.Interface
+}
+
+type objInterface interface {
+ Delete(name string) error
+ Get(name string) (meta.Interface, error)
+}
+
+func (reaper *ReplicationControllerReaper) Stop(namespace, name string) (string, error) {
+ rc := reaper.ReplicationControllers(namespace)
+ controller, err := rc.Get(name)
+ if err != nil {
+ return "", err
+ }
+
+ controller.Spec.Replicas = 0
+ // TODO: do retry on 409 errors here?
+ if _, err := rc.Update(controller); err != nil {
+ return "", err
+ }
+ if err := wait.Poll(reaper.pollInterval, reaper.timeout,
+ client.ControllerHasDesiredReplicas(reaper, controller)); err != nil {
+ return "", err
+ }
+ if err := rc.Delete(name); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s stopped", name), nil
+}
+
+func (reaper *PodReaper) Stop(namespace, name string) (string, error) {
+ pods := reaper.Pods(namespace)
+ _, err := pods.Get(name)
+ if err != nil {
+ return "", err
+ }
+ if err := pods.Delete(name); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s stopped", name), nil
+}
+
+func (reaper *ServiceReaper) Stop(namespace, name string) (string, error) {
+ services := reaper.Services(namespace)
+ _, err := services.Get(name)
+ if err != nil {
+ return "", err
+ }
+ if err := services.Delete(name); err != nil {
+ return "", err
+ }
+ return fmt.Sprintf("%s stopped", name), nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop_test.go
new file mode 100644
index 000000000000..0d7c3f3a22b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/stop_test.go
@@ -0,0 +1,168 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubectl
+
+import (
+ "fmt"
+ "testing"
+ "time"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+)
+
+func TestReplicationControllerStop(t *testing.T) {
+ fake := &client.Fake{
+ Ctrl: api.ReplicationController{
+ Spec: api.ReplicationControllerSpec{
+ Replicas: 0,
+ },
+ },
+ }
+ reaper := ReplicationControllerReaper{fake, time.Millisecond, time.Millisecond}
+ name := "foo"
+ s, err := reaper.Stop("default", name)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ expected := "foo stopped"
+ if s != expected {
+ t.Errorf("expected %s, got %s", expected, s)
+ }
+ if len(fake.Actions) != 4 {
+ t.Errorf("unexpected actions: %v, expected 4 actions (get, update, get, delete)", fake.Actions)
+ }
+ for i, action := range []string{"get", "update", "get", "delete"} {
+ if fake.Actions[i].Action != action+"-controller" {
+ t.Errorf("unexpected action: %v, expected %s-controller", fake.Actions[i], action)
+ }
+ }
+}
+
+type noSuchPod struct {
+ *client.FakePods
+}
+
+func (c *noSuchPod) Get(name string) (*api.Pod, error) {
+ return nil, fmt.Errorf("%s does not exist", name)
+}
+
+type noDeleteService struct {
+ *client.FakeServices
+}
+
+func (c *noDeleteService) Delete(service string) error {
+ return fmt.Errorf("I'm afraid I can't do that, Dave")
+}
+
+type reaperFake struct {
+ *client.Fake
+ noSuchPod, noDeleteService bool
+}
+
+func (c *reaperFake) Pods(namespace string) client.PodInterface {
+ pods := &client.FakePods{c.Fake, namespace}
+ if c.noSuchPod {
+ return &noSuchPod{pods}
+ }
+ return pods
+}
+
+func (c *reaperFake) Services(namespace string) client.ServiceInterface {
+ services := &client.FakeServices{c.Fake, namespace}
+ if c.noDeleteService {
+ return &noDeleteService{services}
+ }
+ return services
+}
+
+func TestSimpleStop(t *testing.T) {
+ tests := []struct {
+ fake *reaperFake
+ kind string
+ actions []string
+ expectError bool
+ test string
+ }{
+ {
+ fake: &reaperFake{
+ Fake: &client.Fake{},
+ },
+ kind: "Pod",
+ actions: []string{"get-pod", "delete-pod"},
+ expectError: false,
+ test: "stop pod succeeds",
+ },
+ {
+ fake: &reaperFake{
+ Fake: &client.Fake{},
+ },
+ kind: "Service",
+ actions: []string{"get-service", "delete-service"},
+ expectError: false,
+ test: "stop service succeeds",
+ },
+ {
+ fake: &reaperFake{
+ Fake: &client.Fake{},
+ noSuchPod: true,
+ },
+ kind: "Pod",
+ actions: []string{},
+ expectError: true,
+ test: "stop pod fails, no pod",
+ },
+ {
+ fake: &reaperFake{
+ Fake: &client.Fake{},
+ noDeleteService: true,
+ },
+ kind: "Service",
+ actions: []string{"get-service"},
+ expectError: true,
+ test: "stop service fails, can't delete",
+ },
+ }
+ for _, test := range tests {
+ fake := test.fake
+ reaper, err := ReaperFor(test.kind, fake)
+ if err != nil {
+ t.Errorf("unexpected error: %v (%s)", err, test.test)
+ }
+ s, err := reaper.Stop("default", "foo")
+ if err != nil && !test.expectError {
+ t.Errorf("unexpected error: %v (%s)", err, test.test)
+ }
+ if err == nil {
+ if test.expectError {
+ t.Errorf("unexpected non-error: %v (%s)", err, test.test)
+ }
+ if s != "foo stopped" {
+ t.Errorf("unexpected return: %s (%s)", s, test.test)
+ }
+ }
+ if len(test.actions) != len(fake.Actions) {
+ t.Errorf("unexpected actions: %v; expected %v (%s)", fake.Actions, test.actions, test.test)
+ }
+ for i := 0; i < len(fake.Actions) && i < len(test.actions); i++ {
+ action, testAction := fake.Actions[i], test.actions[i]
+ if action.Action != testAction {
+ t.Errorf("unexpected action: %v; expected %v (%s)", action, testAction, test.test)
+ }
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/version.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/version.go
index 5c4c8e45ceef..4bb6b516bc50 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/version.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/version.go
@@ -33,7 +33,7 @@ func GetVersion(w io.Writer, kubeClient client.Interface) {
}
GetClientVersion(w)
- fmt.Fprintf(w, "Server Version: %#v\n", serverVersion)
+ fmt.Fprintf(w, "Server Version: %#v\n", *serverVersion)
}
func GetClientVersion(w io.Writer) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor.go
index fdabb7f3c0d0..e784be697730 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/cadvisor.go
@@ -20,6 +20,7 @@ import (
"fmt"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
cadvisor "github.com/google/cadvisor/info"
)
@@ -53,7 +54,7 @@ func (kl *Kubelet) statsFromDockerContainer(cc cadvisorInterface, containerId st
}
// GetContainerInfo returns stats (from Cadvisor) for a container.
-func (kl *Kubelet) GetContainerInfo(podFullName, uuid, containerName string, req *cadvisor.ContainerInfoRequest) (*cadvisor.ContainerInfo, error) {
+func (kl *Kubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *cadvisor.ContainerInfoRequest) (*cadvisor.ContainerInfo, error) {
cc := kl.GetCadvisorClient()
if cc == nil {
return nil, nil
@@ -62,7 +63,7 @@ func (kl *Kubelet) GetContainerInfo(podFullName, uuid, containerName string, req
if err != nil {
return nil, err
}
- dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, containerName)
+ dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, containerName)
if !found {
return nil, fmt.Errorf("couldn't find container")
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/apiserver_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/apiserver_test.go
index c57167b00473..936ff22aaecb 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/apiserver_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/apiserver_test.go
@@ -43,25 +43,31 @@ func (lw fakePodLW) Watch(resourceVersion string) (watch.Interface, error) {
var _ cache.ListerWatcher = fakePodLW{}
-func TestNewSourceApiserver(t *testing.T) {
- podv1 := api.Pod{
+func TestNewSourceApiserver_UpdatesAndMultiplePods(t *testing.T) {
+ pod1v1 := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
- podv2 := api.Pod{
+ pod1v2 := api.Pod{
ObjectMeta: api.ObjectMeta{Name: "p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}}
+ pod2 := api.Pod{
+ ObjectMeta: api.ObjectMeta{Name: "q"},
+ Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}
- expectedBoundPodv1 := api.BoundPod{
+ expectedBoundPod1v1 := api.BoundPod{
ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/boundPods/p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/one"}}}}
- expectedBoundPodv2 := api.BoundPod{
+ expectedBoundPod1v2 := api.BoundPod{
ObjectMeta: api.ObjectMeta{Name: "p", SelfLink: "/api/v1beta1/boundPods/p"},
Spec: api.PodSpec{Containers: []api.Container{{Image: "image/two"}}}}
+ expectedBoundPod2 := api.BoundPod{
+ ObjectMeta: api.ObjectMeta{Name: "q", SelfLink: "/api/v1beta1/boundPods/q"},
+ Spec: api.PodSpec{Containers: []api.Container{{Image: "image/blah"}}}}
// Setup fake api client.
fakeWatch := watch.NewFake()
lw := fakePodLW{
- listResp: &api.PodList{Items: []api.Pod{podv1}},
+ listResp: &api.PodList{Items: []api.Pod{pod1v1}},
watchResp: fakeWatch,
}
@@ -74,23 +80,54 @@ func TestNewSourceApiserver(t *testing.T) {
t.Errorf("Unable to read from channel when expected")
}
update := got.(kubelet.PodUpdate)
- expected := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPodv1)
+ expected := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v1)
if !api.Semantic.DeepEqual(expected, update) {
t.Errorf("Expected %#v; Got %#v", expected, update)
}
- fakeWatch.Modify(&podv2)
+ // Add another pod
+ fakeWatch.Add(&pod2)
got, ok = <-ch
if !ok {
t.Errorf("Unable to read from channel when expected")
}
update = got.(kubelet.PodUpdate)
- expected = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPodv2)
+ // Could be sorted either of these two ways:
+ expectedA := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v1, expectedBoundPod2)
+ expectedB := CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2, expectedBoundPod1v1)
+
+ if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
+ t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
+ }
+
+ // Modify pod1
+ fakeWatch.Modify(&pod1v2)
+ got, ok = <-ch
+ if !ok {
+ t.Errorf("Unable to read from channel when expected")
+ }
+ update = got.(kubelet.PodUpdate)
+ expectedA = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod1v2, expectedBoundPod2)
+ expectedB = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2, expectedBoundPod1v2)
+
+ if !api.Semantic.DeepEqual(expectedA, update) && !api.Semantic.DeepEqual(expectedB, update) {
+ t.Errorf("Expected %#v or %#v, Got %#v", expectedA, expectedB, update)
+ }
+
+ // Delete pod1
+ fakeWatch.Delete(&pod1v2)
+ got, ok = <-ch
+ if !ok {
+ t.Errorf("Unable to read from channel when expected")
+ }
+ update = got.(kubelet.PodUpdate)
+ expected = CreatePodUpdate(kubelet.SET, kubelet.ApiserverSource, expectedBoundPod2)
if !api.Semantic.DeepEqual(expected, update) {
- t.Fatalf("Expected %#v, Got %#v", expected, update)
+ t.Errorf("Expected %#v, Got %#v", expected, update)
}
- fakeWatch.Delete(&podv2)
+ // Delete pod2
+ fakeWatch.Delete(&pod2)
got, ok = <-ch
if !ok {
t.Errorf("Unable to read from channel when expected")
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config.go
index 0e8f7cd5d681..ae2701cd3be7 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config.go
@@ -305,7 +305,7 @@ func filterInvalidPods(pods []api.BoundPod, source string) (filtered []*api.Boun
name := bestPodIdentString(pod)
err := utilerrors.NewAggregate(errlist)
glog.Warningf("Pod[%d] (%s) from %s failed validation, ignoring: %v", i+1, name, source, err)
- record.Eventf(pod, "", "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
+ record.Eventf(pod, "failedValidation", "Error validating pod %s from %s, ignoring: %v", name, source, err)
continue
}
filtered = append(filtered, pod)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config_test.go
index 29aee090ecac..0b9d0450c467 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/config_test.go
@@ -22,6 +22,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
const (
@@ -52,7 +53,7 @@ func (s sortedPods) Less(i, j int) bool {
func CreateValidPod(name, namespace, source string) api.BoundPod {
return api.BoundPod{
ObjectMeta: api.ObjectMeta{
- UID: name, // for the purpose of testing, this is unique enough
+ UID: types.UID(name), // for the purpose of testing, this is unique enough
Name: name,
Namespace: namespace,
Annotations: map[string]string{kubelet.ConfigSourceAnnotationKey: source},
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd.go
index ea6276bea3e0..0f091669f993 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd.go
@@ -59,7 +59,8 @@ func NewSourceEtcd(key string, client tools.EtcdClient, updates chan<- interface
func (s *sourceEtcd) run() {
boundPods := api.BoundPods{}
- if err := s.helper.ExtractObj(s.key, &boundPods, true); err != nil {
+ err := s.helper.ExtractObj(s.key, &boundPods, false)
+ if err != nil {
glog.Errorf("etcd failed to retrieve the value for the key %q. Error: %v", s.key, err)
return
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd_test.go
index 35f2c57226c5..8debafe23f00 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/etcd_test.go
@@ -19,11 +19,63 @@ package config
import (
"reflect"
"testing"
+ "time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
)
+func TestEtcdSourceExistingBoundPods(t *testing.T) {
+ // Arrange
+ key := "/registry/nodes/machine/boundpods"
+ fakeEtcdClient := tools.NewFakeEtcdClient(t)
+ updates := make(chan interface{})
+
+ fakeEtcdClient.Set(
+ key,
+ runtime.EncodeOrDie(latest.Codec, &api.BoundPods{
+ Items: []api.BoundPod{
+ {
+ ObjectMeta: api.ObjectMeta{
+ Name: "foo",
+ Namespace: "default"},
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Image: "foo:v1",
+ }}}},
+ {
+ ObjectMeta: api.ObjectMeta{
+ Name: "bar",
+ Namespace: "default"},
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Image: "foo:v1",
+ }}}}}}),
+ 0)
+
+ // Act
+ NewSourceEtcd(key, fakeEtcdClient, updates)
+
+ // Assert
+ select {
+ case got := <-updates:
+ update := got.(kubelet.PodUpdate)
+ if len(update.Pods) != 2 ||
+ update.Pods[0].ObjectMeta.Name != "foo" ||
+ update.Pods[1].ObjectMeta.Name != "bar" {
+ t.Errorf("Unexpected update response: %#v", update)
+ }
+ case <-time.After(200 * time.Millisecond):
+ t.Errorf("Expected update, timeout instead")
+ }
+}
+
func TestEventToPods(t *testing.T) {
tests := []struct {
input watch.Event
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file.go
index f1300d9d6806..a53b826235e0 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file.go
@@ -29,7 +29,9 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/ghodss/yaml"
@@ -144,13 +146,26 @@ func extractFromFile(filename string) (api.BoundPod, error) {
return pod, err
}
- manifest := &api.ContainerManifest{}
// TODO: use api.Scheme.DecodeInto
- if err := yaml.Unmarshal(data, manifest); err != nil {
+ // This is awful. DecodeInto() expects to find an APIObject, which
+ // Manifest is not. We keep reading manifest for now for compat, but
+ // we will eventually change it to read Pod (at which point this all
+ // becomes nicer). Until then, we assert that the ContainerManifest
+ // structure on disk is always v1beta1. Read that, convert it to a
+ // "current" ContainerManifest (should be ~identical), then convert
+ // that to a BoundPod (which is a well-understood conversion). This
+ // avoids writing a v1beta1.ContainerManifest -> api.BoundPod
+ // conversion which would be identical to the api.ContainerManifest ->
+ // api.BoundPod conversion.
+ oldManifest := &v1beta1.ContainerManifest{}
+ if err := yaml.Unmarshal(data, oldManifest); err != nil {
return pod, fmt.Errorf("can't unmarshal file %q: %v", filename, err)
}
-
- if err := api.Scheme.Convert(manifest, &pod); err != nil {
+ newManifest := &api.ContainerManifest{}
+ if err := api.Scheme.Convert(oldManifest, newManifest); err != nil {
+ return pod, fmt.Errorf("can't convert pod from file %q: %v", filename, err)
+ }
+ if err := api.Scheme.Convert(newManifest, &pod); err != nil {
return pod, fmt.Errorf("can't convert pod from file %q: %v", filename, err)
}
@@ -164,9 +179,15 @@ func extractFromFile(filename string) (api.BoundPod, error) {
fmt.Fprintf(hasher, "host:%s", hostname)
fmt.Fprintf(hasher, "file:%s", filename)
util.DeepHashObject(hasher, pod)
- pod.UID = hex.EncodeToString(hasher.Sum(nil)[0:])
+ pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
glog.V(5).Infof("Generated UID %q for pod %q from file %s", pod.UID, pod.Name, filename)
}
+ // This is required for backward compatibility, and should be removed once we
+ // completely deprecate ContainerManifest.
+ if len(pod.Name) == 0 {
+ pod.Name = string(pod.UID)
+ glog.V(5).Infof("Generated Name %q for UID %q from file %s", pod.Name, pod.UID, filename)
+ }
if len(pod.Namespace) == 0 {
hasher := adler32.New()
fmt.Fprint(hasher, filename)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file_test.go
index 0c8874b938c8..ade512aa7aad 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/file_test.go
@@ -26,26 +26,28 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
-func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
- manifest := api.ContainerManifest{
+func ExampleManifestAndPod(id string) (v1beta1.ContainerManifest, api.BoundPod) {
+ manifest := v1beta1.ContainerManifest{
ID: id,
- UUID: id,
- Containers: []api.Container{
+ UUID: types.UID(id),
+ Containers: []v1beta1.Container{
{
Name: "c" + id,
Image: "foo",
TerminationMessagePath: "/somepath",
},
},
- Volumes: []api.Volume{
+ Volumes: []v1beta1.Volume{
{
Name: "host-dir",
- Source: &api.VolumeSource{
- HostDir: &api.HostDir{"/dir/path"},
+ Source: v1beta1.VolumeSource{
+ HostDir: &v1beta1.HostPath{"/dir/path"},
},
},
},
@@ -53,7 +55,7 @@ func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
expectedPod := api.BoundPod{
ObjectMeta: api.ObjectMeta{
Name: id,
- UID: id,
+ UID: types.UID(id),
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -66,8 +68,8 @@ func ExampleManifestAndPod(id string) (api.ContainerManifest, api.BoundPod) {
Volumes: []api.Volume{
{
Name: "host-dir",
- Source: &api.VolumeSource{
- HostDir: &api.HostDir{"/dir/path"},
+ Source: api.VolumeSource{
+ HostPath: &api.HostPath{"/dir/path"},
},
},
},
@@ -119,7 +121,7 @@ func TestReadFromFile(t *testing.T) {
"version": "v1beta1",
"uuid": "12345",
"id": "test",
- "containers": [{ "image": "test/image" }]
+ "containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
}`)
defer os.Remove(file.Name())
@@ -136,7 +138,13 @@ func TestReadFromFile(t *testing.T) {
SelfLink: "",
},
Spec: api.PodSpec{
- Containers: []api.Container{{Image: "test/image", TerminationMessagePath: "/dev/termination-log"}},
+ Containers: []api.Container{
+ {
+ Image: "test/image",
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: api.PullAlways,
+ },
+ },
},
})
@@ -162,6 +170,99 @@ func TestReadFromFile(t *testing.T) {
}
}
+func TestReadFromFileWithoutID(t *testing.T) {
+ file := writeTestFile(t, os.TempDir(), "test_pod_config",
+ `{
+ "version": "v1beta1",
+ "uuid": "12345",
+ "containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
+ }`)
+ defer os.Remove(file.Name())
+
+ ch := make(chan interface{})
+ NewSourceFile(file.Name(), time.Millisecond, ch)
+ select {
+ case got := <-ch:
+ update := got.(kubelet.PodUpdate)
+ expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "",
+ UID: "12345",
+ Namespace: "",
+ SelfLink: "",
+ },
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Image: "test/image",
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: api.PullAlways,
+ },
+ },
+ },
+ })
+
+ if len(update.Pods[0].ObjectMeta.Name) == 0 {
+ t.Errorf("Name did not get defaulted")
+ }
+ update.Pods[0].ObjectMeta.Name = ""
+ update.Pods[0].ObjectMeta.Namespace = ""
+ update.Pods[0].ObjectMeta.SelfLink = ""
+
+ if !api.Semantic.DeepEqual(expected, update) {
+ t.Fatalf("Expected %#v, Got %#v", expected, update)
+ }
+
+ case <-time.After(2 * time.Millisecond):
+ t.Errorf("Expected update, timeout instead")
+ }
+}
+
+func TestReadV1Beta2FromFile(t *testing.T) {
+ file := writeTestFile(t, os.TempDir(), "test_pod_config",
+ `{
+ "version": "v1beta2",
+ "uuid": "12345",
+ "id": "test",
+ "containers": [{ "image": "test/image", imagePullPolicy: "PullAlways"}]
+ }`)
+ defer os.Remove(file.Name())
+
+ ch := make(chan interface{})
+ NewSourceFile(file.Name(), time.Millisecond, ch)
+ select {
+ case got := <-ch:
+ update := got.(kubelet.PodUpdate)
+ expected := CreatePodUpdate(kubelet.SET, kubelet.FileSource, api.BoundPod{
+ ObjectMeta: api.ObjectMeta{
+ Name: "test",
+ UID: "12345",
+ Namespace: "",
+ SelfLink: "",
+ },
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Image: "test/image",
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: api.PullAlways,
+ },
+ },
+ },
+ })
+
+ update.Pods[0].ObjectMeta.Namespace = ""
+ update.Pods[0].ObjectMeta.SelfLink = ""
+
+ if !api.Semantic.DeepEqual(expected, update) {
+ t.Fatalf("Expected %#v, Got %#v", expected, update)
+ }
+
+ case <-time.After(2 * time.Millisecond):
+ t.Errorf("Expected update, timeout instead")
+ }
+}
+
func TestReadFromFileWithDefaults(t *testing.T) {
file := writeTestFile(t, os.TempDir(), "test_pod_config",
`{
@@ -223,7 +324,7 @@ func TestExtractFromDir(t *testing.T) {
manifest, expectedPod := ExampleManifestAndPod("1")
manifest2, expectedPod2 := ExampleManifestAndPod("2")
- manifests := []api.ContainerManifest{manifest, manifest2}
+ manifests := []v1beta1.ContainerManifest{manifest, manifest2}
pods := []api.BoundPod{expectedPod, expectedPod2}
files := make([]*os.File, len(manifests))
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http.go
index 9703aa22d409..fb30489e8675 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http.go
@@ -28,8 +28,10 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/ghodss/yaml"
@@ -83,40 +85,25 @@ func (s *sourceURL) extractFromURL() error {
s.data = data
// First try as if it's a single manifest
- var manifest api.ContainerManifest
- // TODO: should be api.Scheme.Decode
- singleErr := yaml.Unmarshal(data, &manifest)
- if singleErr == nil {
- if errs := validation.ValidateManifest(&manifest); len(errs) > 0 {
- singleErr = fmt.Errorf("invalid manifest: %v", errs)
- }
- }
- if singleErr == nil {
- pod := api.BoundPod{}
- if err := api.Scheme.Convert(&manifest, &pod); err != nil {
- return err
+ parsed, manifest, pod, singleErr := tryDecodeSingle(data)
+ if parsed {
+ if singleErr != nil {
+ // It parsed but could not be used.
+ return singleErr
}
+ // It parsed!
applyDefaults(&pod, s.url)
s.updates <- kubelet.PodUpdate{[]api.BoundPod{pod}, kubelet.SET, kubelet.HTTPSource}
return nil
}
// That didn't work, so try an array of manifests.
- var manifests []api.ContainerManifest
- // TODO: should be api.Scheme.Decode
- multiErr := yaml.Unmarshal(data, &manifests)
- // We're not sure if the person reading the logs is going to care about the single or
- // multiple manifest unmarshalling attempt, so we need to put both in the logs, as is
- // done at the end. Hence not returning early here.
- if multiErr == nil {
- for _, manifest := range manifests {
- if errs := validation.ValidateManifest(&manifest); len(errs) > 0 {
- multiErr = fmt.Errorf("invalid manifest: %v", errs)
- break
- }
+ parsed, manifests, pods, multiErr := tryDecodeList(data)
+ if parsed {
+ if multiErr != nil {
+ // It parsed but could not be used.
+ return multiErr
}
- }
- if multiErr == nil {
// A single manifest that did not pass semantic validation will yield an empty
// array of manifests (and no error) when unmarshaled as such. In that case,
// if the single manifest at least had a Version, we return the single-manifest
@@ -124,16 +111,12 @@ func (s *sourceURL) extractFromURL() error {
if len(manifests) == 0 && len(manifest.Version) != 0 {
return singleErr
}
- list := api.ContainerManifestList{Items: manifests}
- boundPods := &api.BoundPods{}
- if err := api.Scheme.Convert(&list, boundPods); err != nil {
- return err
- }
- for i := range boundPods.Items {
- pod := &boundPods.Items[i]
+ // Assume it parsed.
+ for i := range pods.Items {
+ pod := &pods.Items[i]
applyDefaults(pod, s.url)
}
- s.updates <- kubelet.PodUpdate{boundPods.Items, kubelet.SET, kubelet.HTTPSource}
+ s.updates <- kubelet.PodUpdate{pods.Items, kubelet.SET, kubelet.HTTPSource}
return nil
}
@@ -142,14 +125,75 @@ func (s *sourceURL) extractFromURL() error {
s.url, string(data), singleErr, manifest, multiErr, manifests)
}
+func tryDecodeSingle(data []byte) (parsed bool, manifest v1beta1.ContainerManifest, pod api.BoundPod, err error) {
+ // TODO: should be api.Scheme.Decode
+ // This is awful. DecodeInto() expects to find an APIObject, which
+ // Manifest is not. We keep reading manifest for now for compat, but
+ // we will eventually change it to read Pod (at which point this all
+ // becomes nicer). Until then, we assert that the ContainerManifest
+ // structure on disk is always v1beta1. Read that, convert it to a
+ // "current" ContainerManifest (should be ~identical), then convert
+ // that to a BoundPod (which is a well-understood conversion). This
+ // avoids writing a v1beta1.ContainerManifest -> api.BoundPod
+ // conversion which would be identical to the api.ContainerManifest ->
+ // api.BoundPod conversion.
+ if err = yaml.Unmarshal(data, &manifest); err != nil {
+ return false, manifest, pod, err
+ }
+ newManifest := api.ContainerManifest{}
+ if err = api.Scheme.Convert(&manifest, &newManifest); err != nil {
+ return false, manifest, pod, err
+ }
+ if errs := validation.ValidateManifest(&newManifest); len(errs) > 0 {
+ err = fmt.Errorf("invalid manifest: %v", errs)
+ return false, manifest, pod, err
+ }
+ if err = api.Scheme.Convert(&newManifest, &pod); err != nil {
+ return true, manifest, pod, err
+ }
+ // Success.
+ return true, manifest, pod, nil
+}
+
+func tryDecodeList(data []byte) (parsed bool, manifests []v1beta1.ContainerManifest, pods api.BoundPods, err error) {
+ // TODO: should be api.Scheme.Decode
+ // See the comment in tryDecodeSingle().
+ if err = yaml.Unmarshal(data, &manifests); err != nil {
+ return false, manifests, pods, err
+ }
+ newManifests := []api.ContainerManifest{}
+ if err = api.Scheme.Convert(&manifests, &newManifests); err != nil {
+ return false, manifests, pods, err
+ }
+ for i := range newManifests {
+ manifest := &newManifests[i]
+ if errs := validation.ValidateManifest(manifest); len(errs) > 0 {
+ err = fmt.Errorf("invalid manifest: %v", errs)
+ return false, manifests, pods, err
+ }
+ }
+ list := api.ContainerManifestList{Items: newManifests}
+ if err = api.Scheme.Convert(&list, &pods); err != nil {
+ return true, manifests, pods, err
+ }
+ // Success.
+ return true, manifests, pods, nil
+}
+
func applyDefaults(pod *api.BoundPod, url string) {
if len(pod.UID) == 0 {
hasher := md5.New()
fmt.Fprintf(hasher, "url:%s", url)
util.DeepHashObject(hasher, pod)
- pod.UID = hex.EncodeToString(hasher.Sum(nil)[0:])
+ pod.UID = types.UID(hex.EncodeToString(hasher.Sum(nil)[0:]))
glog.V(5).Infof("Generated UID %q for pod %q from URL %s", pod.UID, pod.Name, url)
}
+ // This is required for backward compatibility, and should be removed once we
+ // completely deprecate ContainerManifest.
+ if len(pod.Name) == 0 {
+ pod.Name = string(pod.UID)
+ glog.V(5).Infof("Generate Name %q from UID %q from URL %s", pod.Name, pod.UID, url)
+ }
if len(pod.Namespace) == 0 {
hasher := adler32.New()
fmt.Fprint(hasher, url)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http_test.go
index f0e0f08b71fe..5036ab55db8d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config/http_test.go
@@ -24,6 +24,7 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api/v1beta1"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
@@ -122,8 +123,9 @@ func TestExtractFromHTTP(t *testing.T) {
expected kubelet.PodUpdate
}{
{
- desc: "Single manifest",
- manifests: api.ContainerManifest{Version: "v1beta1", ID: "foo", UUID: "111"},
+ desc: "Single manifest",
+ manifests: v1beta1.ContainerManifest{Version: "v1beta1", ID: "foo", UUID: "111",
+ Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
expected: CreatePodUpdate(kubelet.SET,
kubelet.HTTPSource,
api.BoundPod{
@@ -135,14 +137,61 @@ func TestExtractFromHTTP(t *testing.T) {
Spec: api.PodSpec{
RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
DNSPolicy: api.DNSClusterFirst,
+ Containers: []api.Container{{
+ Name: "1",
+ Image: "foo",
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: "Always"}},
+ },
+ }),
+ },
+ {
+ desc: "Single manifest without ID",
+ manifests: api.ContainerManifest{Version: "v1beta1", UUID: "111"},
+ expected: CreatePodUpdate(kubelet.SET,
+ kubelet.HTTPSource,
+ api.BoundPod{
+ ObjectMeta: api.ObjectMeta{
+ UID: "111",
+ Name: "111",
+ Namespace: "foobar",
+ },
+ Spec: api.PodSpec{
+ RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
+ DNSPolicy: api.DNSClusterFirst,
+ },
+ }),
+ },
+ {
+ desc: "Single manifest with v1beta2",
+ manifests: v1beta1.ContainerManifest{Version: "v1beta2", ID: "foo", UUID: "111",
+ Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
+ expected: CreatePodUpdate(kubelet.SET,
+ kubelet.HTTPSource,
+ api.BoundPod{
+ ObjectMeta: api.ObjectMeta{
+ UID: "111",
+ Name: "foo",
+ Namespace: "foobar",
+ },
+ Spec: api.PodSpec{
+ RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
+ DNSPolicy: api.DNSClusterFirst,
+ Containers: []api.Container{{
+ Name: "1",
+ Image: "foo",
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: "Always"}},
},
}),
},
{
desc: "Multiple manifests",
- manifests: []api.ContainerManifest{
- {Version: "v1beta1", ID: "foo", UUID: "111", Containers: []api.Container{{Name: "1", Image: "foo"}}},
- {Version: "v1beta1", ID: "bar", UUID: "222", Containers: []api.Container{{Name: "1", Image: "foo"}}},
+ manifests: []v1beta1.ContainerManifest{
+ {Version: "v1beta1", ID: "foo", UUID: "111",
+ Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: v1beta1.PullAlways}}},
+ {Version: "v1beta1", ID: "bar", UUID: "222",
+ Containers: []v1beta1.Container{{Name: "1", Image: "foo", ImagePullPolicy: ""}}},
},
expected: CreatePodUpdate(kubelet.SET,
kubelet.HTTPSource,
@@ -153,10 +202,13 @@ func TestExtractFromHTTP(t *testing.T) {
Namespace: "foobar",
},
Spec: api.PodSpec{
+ RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
+ DNSPolicy: api.DNSClusterFirst,
Containers: []api.Container{{
Name: "1",
Image: "foo",
- TerminationMessagePath: "/dev/termination-log"}},
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: "Always"}},
},
},
api.BoundPod{
@@ -166,16 +218,19 @@ func TestExtractFromHTTP(t *testing.T) {
Namespace: "foobar",
},
Spec: api.PodSpec{
+ RestartPolicy: api.RestartPolicy{Always: &api.RestartPolicyAlways{}},
+ DNSPolicy: api.DNSClusterFirst,
Containers: []api.Container{{
Name: "1",
Image: "foo",
- TerminationMessagePath: "/dev/termination-log"}},
+ TerminationMessagePath: "/dev/termination-log",
+ ImagePullPolicy: "IfNotPresent"}},
},
}),
},
{
desc: "Empty Array",
- manifests: []api.ContainerManifest{},
+ manifests: []v1beta1.ContainerManifest{},
expected: CreatePodUpdate(kubelet.SET, kubelet.HTTPSource),
},
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker.go
index 6d348a09ad90..b893845a9c56 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker.go
@@ -31,6 +31,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
docker "github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
@@ -239,7 +240,11 @@ func (p dockerPuller) IsImagePresent(image string) (bool, error) {
// RequireLatestImage returns if the user wants the latest image
func RequireLatestImage(name string) bool {
- // REVERTED: Change behavior from upstream
+ _, tag := parseImageName(name)
+
+ if tag == "latest" {
+ return true
+ }
return false
}
@@ -250,7 +255,7 @@ func (p throttledDockerPuller) IsImagePresent(name string) (bool, error) {
// DockerContainers is a map of containers
type DockerContainers map[DockerID]*docker.APIContainers
-func (c DockerContainers) FindPodContainer(podFullName, uuid, containerName string) (*docker.APIContainers, bool, uint64) {
+func (c DockerContainers) FindPodContainer(podFullName string, uid types.UID, containerName string) (*docker.APIContainers, bool, uint64) {
for _, dockerContainer := range c {
if len(dockerContainer.Names) == 0 {
continue
@@ -258,7 +263,7 @@ func (c DockerContainers) FindPodContainer(podFullName, uuid, containerName stri
// TODO(proppy): build the docker container name and do a map lookup instead?
dockerManifestID, dockerUUID, dockerContainerName, hash := ParseDockerName(dockerContainer.Names[0])
if dockerManifestID == podFullName &&
- (uuid == "" || dockerUUID == uuid) &&
+ (uid == "" || dockerUUID == uid) &&
dockerContainerName == containerName {
return dockerContainer, true, hash
}
@@ -309,8 +314,8 @@ func GetKubeletDockerContainers(client DockerInterface, allContainers bool) (Doc
}
// GetRecentDockerContainersWithNameAndUUID returns a list of dead docker containers which matches the name
-// and uuid given.
-func GetRecentDockerContainersWithNameAndUUID(client DockerInterface, podFullName, uuid, containerName string) ([]*docker.Container, error) {
+// and uid given.
+func GetRecentDockerContainersWithNameAndUUID(client DockerInterface, podFullName string, uid types.UID, containerName string) ([]*docker.Container, error) {
var result []*docker.Container
containers, err := client.ListContainers(docker.ListContainersOptions{All: true})
if err != nil {
@@ -324,7 +329,7 @@ func GetRecentDockerContainersWithNameAndUUID(client DockerInterface, podFullNam
if dockerPodName != podFullName {
continue
}
- if uuid != "" && dockerUUID != uuid {
+ if uid != "" && dockerUUID != uid {
continue
}
if dockerContainerName != containerName {
@@ -443,7 +448,7 @@ func inspectContainer(client DockerInterface, dockerID, containerName, tPath str
}
// GetDockerPodInfo returns docker info for all containers in the pod/manifest.
-func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName, uuid string) (api.PodInfo, error) {
+func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName string, uid types.UID) (api.PodInfo, error) {
info := api.PodInfo{}
expectedContainers := make(map[string]api.Container)
for _, container := range manifest.Containers {
@@ -464,7 +469,7 @@ func GetDockerPodInfo(client DockerInterface, manifest api.PodSpec, podFullName,
if dockerManifestID != podFullName {
continue
}
- if uuid != "" && dockerUUID != uuid {
+ if uid != "" && dockerUUID != uid {
continue
}
c, found := expectedContainers[dockerContainerName]
@@ -541,7 +546,7 @@ func HashContainer(container *api.Container) uint64 {
}
// Creates a name which can be reversed to identify both full pod name and container name.
-func BuildDockerName(podUID, podFullName string, container *api.Container) string {
+func BuildDockerName(podUID types.UID, podFullName string, container *api.Container) string {
containerName := container.Name + "." + strconv.FormatUint(HashContainer(container), 16)
return fmt.Sprintf("%s_%s_%s_%s_%08x",
containerNamePrefix,
@@ -553,7 +558,7 @@ func BuildDockerName(podUID, podFullName string, container *api.Container) strin
// Unpacks a container name, returning the pod full name and container name we would have used to
// construct the docker name. If the docker name isn't the one we created, we may return empty strings.
-func ParseDockerName(name string) (podFullName, podUID, containerName string, hash uint64) {
+func ParseDockerName(name string) (podFullName string, podUID types.UID, containerName string, hash uint64) {
// For some reason docker appears to be appending '/' to names.
// If it's there, strip it.
if name[0] == '/' {
@@ -586,7 +591,7 @@ func ParseDockerName(name string) (podFullName, podUID, containerName string, ha
podFullName = parts[2]
// Pod UID.
- podUID = parts[3]
+ podUID = types.UID(parts[3])
return
}
@@ -617,21 +622,3 @@ func parseImageName(image string) (string, string) {
type ContainerCommandRunner interface {
RunInContainer(containerID string, cmd []string) ([]byte, error)
}
-
-func GetUnusedImages(client DockerInterface) ([]string, error) {
- // IMPORTANT: this is _unsafe_ to do while there are active pulls
- // See https://github.com/docker/docker/issues/8926 for details
- images, err := client.ListImages(docker.ListImagesOptions{
- Filters: map[string][]string{
- "dangling": {"true"},
- },
- })
- if err != nil {
- return nil, err
- }
- result := make([]string, len(images))
- for ix := range images {
- result[ix] = images[ix].ID
- }
- return result, nil
-}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker_test.go
index 5a47ec3083ca..df1cf58c5392 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools/docker_test.go
@@ -24,6 +24,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
docker "github.com/fsouza/go-dockerclient"
)
@@ -91,9 +92,9 @@ func verifyPackUnpack(t *testing.T, podNamespace, podUID, podName, containerName
util.DeepHashObject(hasher, *container)
computedHash := uint64(hasher.Sum32())
podFullName := fmt.Sprintf("%s.%s", podName, podNamespace)
- name := BuildDockerName(podUID, podFullName, container)
+ name := BuildDockerName(types.UID(podUID), podFullName, container)
returnedPodFullName, returnedUID, returnedContainerName, hash := ParseDockerName(name)
- if podFullName != returnedPodFullName || podUID != returnedUID || containerName != returnedContainerName || computedHash != hash {
+ if podFullName != returnedPodFullName || podUID != string(returnedUID) || containerName != returnedContainerName || computedHash != hash {
t.Errorf("For (%s, %s, %s, %d), unpacked (%s, %s, %s, %d)", podFullName, podUID, containerName, computedHash, returnedPodFullName, returnedUID, returnedContainerName, hash)
}
}
@@ -114,7 +115,7 @@ func TestContainerManifestNaming(t *testing.T) {
podFullName := fmt.Sprintf("%s.%s", podName, podNamespace)
returnedPodFullName, returnedPodUID, returnedContainerName, hash := ParseDockerName(name)
- if returnedPodFullName != podFullName || returnedPodUID != podUID || returnedContainerName != container.Name || hash != 0 {
+ if returnedPodFullName != podFullName || string(returnedPodUID) != podUID || returnedContainerName != container.Name || hash != 0 {
t.Errorf("unexpected parse: %s %s %s %d", returnedPodFullName, returnedPodUID, returnedContainerName, hash)
}
}
@@ -168,7 +169,7 @@ func TestDockerContainerCommand(t *testing.T) {
t.Errorf("unexpected command CWD: %s", cmd.Dir)
}
if !reflect.DeepEqual(cmd.Args, []string{"/usr/sbin/nsinit", "exec", "ls"}) {
- t.Errorf("unexpectd command args: %s", cmd.Args)
+ t.Errorf("unexpected command args: %s", cmd.Args)
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/handlers.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/handlers.go
index 2b02e5960be8..0e285660564f 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/handlers.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/handlers.go
@@ -24,6 +24,7 @@ import (
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/golang/glog"
)
@@ -32,8 +33,8 @@ type execActionHandler struct {
kubelet *Kubelet
}
-func (e *execActionHandler) Run(podFullName, uuid string, container *api.Container, handler *api.Handler) error {
- _, err := e.kubelet.RunInContainer(podFullName, uuid, container.Name, handler.Exec.Command)
+func (e *execActionHandler) Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error {
+ _, err := e.kubelet.RunInContainer(podFullName, uid, container.Name, handler.Exec.Command)
return err
}
@@ -67,20 +68,20 @@ func ResolvePort(portReference util.IntOrString, container *api.Container) (int,
return -1, fmt.Errorf("couldn't find port: %v in %v", portReference, container)
}
-func (h *httpActionHandler) Run(podFullName, uuid string, container *api.Container, handler *api.Handler) error {
+func (h *httpActionHandler) Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error {
host := handler.HTTPGet.Host
if len(host) == 0 {
- var info api.PodInfo
- info, err := h.kubelet.GetPodInfo(podFullName, uuid)
+ var status api.PodStatus
+ status, err := h.kubelet.GetPodStatus(podFullName, uid)
if err != nil {
glog.Errorf("unable to get pod info, event handlers may be invalid.")
return err
}
- netInfo, found := info[networkContainerName]
+ netInfo, found := status.Info[networkContainerName]
if found {
host = netInfo.PodIP
} else {
- return fmt.Errorf("failed to find networking container: %v", info)
+ return fmt.Errorf("failed to find networking container: %v", status)
}
}
var port int
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet.go
index a65fb55045c6..c2be5fe94b8f 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet.go
@@ -34,13 +34,17 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/validation"
"github.com/GoogleCloudPlatform/kubernetes/pkg/capabilities"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client/cache"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/health"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
)
@@ -76,7 +80,9 @@ func NewMainKubelet(
maxContainerCount int,
sourceReady SourceReadyFn,
clusterDomain string,
- clusterDNS net.IP) (*Kubelet, error) {
+ clusterDNS net.IP,
+ masterServiceNamespace string,
+ volumePlugins []volume.Plugin) (*Kubelet, error) {
if rootDirectory == "" {
return nil, fmt.Errorf("invalid root directory %q", rootDirectory)
}
@@ -86,29 +92,42 @@ func NewMainKubelet(
if minimumGCAge <= 0 {
return nil, fmt.Errorf("invalid minimum GC age %d", minimumGCAge)
}
+
+ serviceStore := cache.NewStore()
+ if kubeClient != nil {
+ cache.NewReflector(&cache.ListWatch{kubeClient, labels.Everything(), "services", api.NamespaceAll}, &api.Service{}, serviceStore).Run()
+ }
+ serviceLister := &cache.StoreToServiceLister{serviceStore}
+
klet := &Kubelet{
- hostname: hostname,
- dockerClient: dockerClient,
- etcdClient: etcdClient,
- rootDirectory: rootDirectory,
- resyncInterval: resyncInterval,
- networkContainerImage: networkContainerImage,
- podWorkers: newPodWorkers(),
- dockerIDToRef: map[dockertools.DockerID]*api.ObjectReference{},
- runner: dockertools.NewDockerContainerCommandRunner(dockerClient),
- httpClient: &http.Client{},
- pullQPS: pullQPS,
- pullBurst: pullBurst,
- minimumGCAge: minimumGCAge,
- maxContainerCount: maxContainerCount,
- sourceReady: sourceReady,
- clusterDomain: clusterDomain,
- clusterDNS: clusterDNS,
+ hostname: hostname,
+ dockerClient: dockerClient,
+ etcdClient: etcdClient,
+ kubeClient: kubeClient,
+ rootDirectory: rootDirectory,
+ resyncInterval: resyncInterval,
+ networkContainerImage: networkContainerImage,
+ podWorkers: newPodWorkers(),
+ dockerIDToRef: map[dockertools.DockerID]*api.ObjectReference{},
+ runner: dockertools.NewDockerContainerCommandRunner(dockerClient),
+ httpClient: &http.Client{},
+ pullQPS: pullQPS,
+ pullBurst: pullBurst,
+ minimumGCAge: minimumGCAge,
+ maxContainerCount: maxContainerCount,
+ sourceReady: sourceReady,
+ clusterDomain: clusterDomain,
+ clusterDNS: clusterDNS,
+ serviceLister: serviceLister,
+ masterServiceNamespace: masterServiceNamespace,
}
if err := klet.setupDataDirs(); err != nil {
return nil, err
}
+ if err := klet.volumePluginMgr.InitPlugins(volumePlugins, &volumeHost{klet}); err != nil {
+ return nil, err
+ }
return klet, nil
}
@@ -117,10 +136,15 @@ type httpGetter interface {
Get(url string) (*http.Response, error)
}
+type serviceLister interface {
+ List() (api.ServiceList, error)
+}
+
// Kubelet is the main kubelet implementation.
type Kubelet struct {
hostname string
dockerClient dockertools.DockerInterface
+ kubeClient *client.Client
rootDirectory string
networkContainerImage string
podWorkers *podWorkers
@@ -168,34 +192,55 @@ type Kubelet struct {
// If non-nil, use this for container DNS server.
clusterDNS net.IP
+
+ masterServiceNamespace string
+ serviceLister serviceLister
+
+ // Volume plugins.
+ volumePluginMgr volume.PluginMgr
}
-// GetRootDir returns the full path to the directory under which kubelet can
+// getRootDir returns the full path to the directory under which kubelet can
// store data. These functions are useful to pass interfaces to other modules
// that may need to know where to write data without getting a whole kubelet
// instance.
-func (kl *Kubelet) GetRootDir() string {
+func (kl *Kubelet) getRootDir() string {
return kl.rootDirectory
}
-// GetPodsDir returns the full path to the directory under which pod
+// getPodsDir returns the full path to the directory under which pod
// directories are created.
-func (kl *Kubelet) GetPodsDir() string {
- return path.Join(kl.GetRootDir(), "pods")
+func (kl *Kubelet) getPodsDir() string {
+ return path.Join(kl.getRootDir(), "pods")
+}
+
+// getPluginsDir returns the full path to the directory under which plugin
+// directories are created. Plugins can use these directories for data that
+// they need to persist. Plugins should create subdirectories under this named
+// after their own names.
+func (kl *Kubelet) getPluginsDir() string {
+ return path.Join(kl.getRootDir(), "plugins")
+}
+
+// getPluginDir returns a data directory name for a given plugin name.
+// Plugins can use these directories to store data that they need to persist.
+// For per-pod plugin data, see getPodPluginDir.
+func (kl *Kubelet) getPluginDir(pluginName string) string {
+ return path.Join(kl.getPluginsDir(), pluginName)
}
-// GetPodDir returns the full path to the per-pod data directory for the
+// getPodDir returns the full path to the per-pod data directory for the
// specified pod. This directory may not exist if the pod does not exist.
-func (kl *Kubelet) GetPodDir(podUID string) string {
+func (kl *Kubelet) getPodDir(podUID types.UID) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
- oldPath := path.Join(kl.GetRootDir(), podUID)
+ oldPath := path.Join(kl.getRootDir(), string(podUID))
oldExists := dirExists(oldPath)
- newPath := path.Join(kl.GetPodsDir(), podUID)
+ newPath := path.Join(kl.getPodsDir(), string(podUID))
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
@@ -206,26 +251,47 @@ func (kl *Kubelet) GetPodDir(podUID string) string {
return newPath
}
-// GetPodVolumesDir returns the full path to the per-pod data directory under
+// getPodVolumesDir returns the full path to the per-pod data directory under
// which volumes are created for the specified pod. This directory may not
// exist if the pod does not exist.
-func (kl *Kubelet) GetPodVolumesDir(podUID string) string {
- return path.Join(kl.GetPodDir(podUID), "volumes")
+func (kl *Kubelet) getPodVolumesDir(podUID types.UID) string {
+ return path.Join(kl.getPodDir(podUID), "volumes")
+}
+
+// getPodVolumeDir returns the full path to the directory which represents the
+// named volume under the named plugin for specified pod. This directory may not
+// exist if the pod does not exist.
+func (kl *Kubelet) getPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
+ return path.Join(kl.getPodVolumesDir(podUID), pluginName, volumeName)
+}
+
+// getPodPluginsDir returns the full path to the per-pod data directory under
+// which plugins may store data for the specified pod. This directory may not
+// exist if the pod does not exist.
+func (kl *Kubelet) getPodPluginsDir(podUID types.UID) string {
+ return path.Join(kl.getPodDir(podUID), "plugins")
+}
+
+// getPodPluginDir returns a data directory name for a given plugin name for a
+// given pod UID. Plugins can use these directories to store data that they
+// need to persist. For non-per-pod plugin data, see getPluginDir.
+func (kl *Kubelet) getPodPluginDir(podUID types.UID, pluginName string) string {
+ return path.Join(kl.getPodPluginsDir(podUID), pluginName)
}
-// GetPodContainerDir returns the full path to the per-pod data directory under
+// getPodContainerDir returns the full path to the per-pod data directory under
// which container data is held for the specified pod. This directory may not
// exist if the pod or container does not exist.
-func (kl *Kubelet) GetPodContainerDir(podUID, ctrName string) string {
+func (kl *Kubelet) getPodContainerDir(podUID types.UID, ctrName string) string {
// Backwards compat. The "old" stuff should be removed before 1.0
// release. The thinking here is this:
// !old && !new = use new
// !old && new = use new
// old && !new = use old
// old && new = use new (but warn)
- oldPath := path.Join(kl.GetPodDir(podUID), ctrName)
+ oldPath := path.Join(kl.getPodDir(podUID), ctrName)
oldExists := dirExists(oldPath)
- newPath := path.Join(kl.GetPodDir(podUID), "containers", ctrName)
+ newPath := path.Join(kl.getPodDir(podUID), "containers", ctrName)
newExists := dirExists(newPath)
if oldExists && !newExists {
return oldPath
@@ -246,25 +312,28 @@ func dirExists(path string) bool {
func (kl *Kubelet) setupDataDirs() error {
kl.rootDirectory = path.Clean(kl.rootDirectory)
- if err := os.MkdirAll(kl.GetRootDir(), 0750); err != nil {
+ if err := os.MkdirAll(kl.getRootDir(), 0750); err != nil {
return fmt.Errorf("error creating root directory: %v", err)
}
- if err := os.MkdirAll(kl.GetPodsDir(), 0750); err != nil {
+ if err := os.MkdirAll(kl.getPodsDir(), 0750); err != nil {
return fmt.Errorf("error creating pods directory: %v", err)
}
+ if err := os.MkdirAll(kl.getPluginsDir(), 0750); err != nil {
+ return fmt.Errorf("error creating plugins directory: %v", err)
+ }
return nil
}
// Get a list of pods that have data directories.
-func (kl *Kubelet) listPodsFromDisk() ([]string, error) {
- podInfos, err := ioutil.ReadDir(kl.GetPodsDir())
+func (kl *Kubelet) listPodsFromDisk() ([]types.UID, error) {
+ podInfos, err := ioutil.ReadDir(kl.getPodsDir())
if err != nil {
return nil, err
}
- pods := []string{}
+ pods := []types.UID{}
for i := range podInfos {
if podInfos[i].IsDir() {
- pods = append(pods, podInfos[i].Name())
+ pods = append(pods, types.UID(podInfos[i].Name()))
}
}
return pods, nil
@@ -307,31 +376,9 @@ func (kl *Kubelet) GarbageCollectLoop() {
if err := kl.GarbageCollectContainers(); err != nil {
glog.Errorf("Garbage collect failed: %v", err)
}
- if err := kl.GarbageCollectImages(); err != nil {
- glog.Errorf("Garbage collect images failed: %v", err)
- }
}, time.Minute*1)
}
-func (kl *Kubelet) getUnusedImages() ([]string, error) {
- kl.pullLock.Lock()
- defer kl.pullLock.Unlock()
- return dockertools.GetUnusedImages(kl.dockerClient)
-}
-
-func (kl *Kubelet) GarbageCollectImages() error {
- images, err := kl.getUnusedImages()
- if err != nil {
- return err
- }
- for ix := range images {
- if err := kl.dockerClient.RemoveImage(images[ix]); err != nil {
- glog.Errorf("Failed to remove image: %q (%v)", images[ix], err)
- }
- }
- return nil
-}
-
// TODO: Also enforce a maximum total number of containers.
func (kl *Kubelet) GarbageCollectContainers() error {
if kl.maxContainerCount == 0 {
@@ -341,13 +388,13 @@ func (kl *Kubelet) GarbageCollectContainers() error {
if err != nil {
return err
}
- uuidToIDMap := map[string][]string{}
+ uidToIDMap := map[string][]string{}
for _, container := range containers {
- _, uuid, name, _ := dockertools.ParseDockerName(container.ID)
- uuidName := uuid + "." + name
- uuidToIDMap[uuidName] = append(uuidToIDMap[uuidName], container.ID)
+ _, uid, name, _ := dockertools.ParseDockerName(container.ID)
+ uidName := string(uid) + "." + name
+ uidToIDMap[uidName] = append(uidToIDMap[uidName], container.ID)
}
- for _, list := range uuidToIDMap {
+ for _, list := range uidToIDMap {
if len(list) <= kl.maxContainerCount {
continue
}
@@ -423,14 +470,6 @@ func (self *podWorkers) Run(podFullName string, action func()) {
}()
}
-func makeEnvironmentVariables(container *api.Container) []string {
- var result []string
- for _, value := range container.Env {
- result = append(result, fmt.Sprintf("%s=%s", value.Name, value.Value))
- }
- return result
-}
-
func makeBinds(pod *api.BoundPod, container *api.Container, podVolumes volumeMap) []string {
binds := []string{}
for _, mount := range container.VolumeMounts {
@@ -493,30 +532,9 @@ func milliCPUToShares(milliCPU int64) int64 {
return shares
}
-func (kl *Kubelet) mountExternalVolumes(pod *api.BoundPod) (volumeMap, error) {
- podVolumes := make(volumeMap)
- for _, vol := range pod.Spec.Volumes {
- extVolume, err := volume.CreateVolumeBuilder(&vol, pod.Name, kl.rootDirectory)
- if err != nil {
- return nil, err
- }
- // TODO(jonesdl) When the default volume behavior is no longer supported, this case
- // should never occur and an error should be thrown instead.
- if extVolume == nil {
- continue
- }
- podVolumes[vol.Name] = extVolume
- err = extVolume.SetUp()
- if err != nil {
- return nil, err
- }
- }
- return podVolumes, nil
-}
-
// A basic interface that knows how to execute handlers
type actionHandler interface {
- Run(podFullName, uuid string, container *api.Container, handler *api.Handler) error
+ Run(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error
}
func (kl *Kubelet) newActionHandler(handler *api.Handler) actionHandler {
@@ -531,12 +549,12 @@ func (kl *Kubelet) newActionHandler(handler *api.Handler) actionHandler {
}
}
-func (kl *Kubelet) runHandler(podFullName, uuid string, container *api.Container, handler *api.Handler) error {
+func (kl *Kubelet) runHandler(podFullName string, uid types.UID, container *api.Container, handler *api.Handler) error {
actionHandler := kl.newActionHandler(handler)
if actionHandler == nil {
return fmt.Errorf("invalid handler")
}
- return actionHandler.Run(podFullName, uuid, container, handler)
+ return actionHandler.Run(podFullName, uid, container, handler)
}
// fieldPath returns a fieldPath locating container within pod.
@@ -607,7 +625,10 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
}
- envVariables := makeEnvironmentVariables(container)
+ envVariables, err := kl.makeEnvironmentVariables(pod.Namespace, container)
+ if err != nil {
+ return "", err
+ }
binds := makeBinds(pod, container, podVolumes)
exposedPorts, portBindings := makePortsAndBindings(container)
@@ -627,7 +648,7 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
dockerContainer, err := kl.dockerClient.CreateContainer(opts)
if err != nil {
if ref != nil {
- record.Eventf(ref, "failed", "failed",
+ record.Eventf(ref, "failed",
"Failed to create docker container with error: %v", err)
}
return "", err
@@ -635,11 +656,11 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
// Remember this reference so we can report events about this container
if ref != nil {
kl.setRef(dockertools.DockerID(dockerContainer.ID), ref)
- record.Eventf(ref, "waiting", "created", "Created with docker id %v", dockerContainer.ID)
+ record.Eventf(ref, "created", "Created with docker id %v", dockerContainer.ID)
}
if len(container.TerminationMessagePath) != 0 {
- p := kl.GetPodContainerDir(pod.UID, container.Name)
+ p := kl.getPodContainerDir(pod.UID, container.Name)
if err := os.MkdirAll(p, 0750); err != nil {
glog.Errorf("Error on creating %q: %v", p, err)
} else {
@@ -673,13 +694,13 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
err = kl.dockerClient.StartContainer(dockerContainer.ID, hc)
if err != nil {
if ref != nil {
- record.Eventf(ref, "failed", "failed",
+ record.Eventf(ref, "failed",
"Failed to start with docker id %v with error: %v", dockerContainer.ID, err)
}
return "", err
}
if ref != nil {
- record.Eventf(ref, "running", "started", "Started with docker id %v", dockerContainer.ID)
+ record.Eventf(ref, "started", "Started with docker id %v", dockerContainer.ID)
}
if container.Lifecycle != nil && container.Lifecycle.PostStart != nil {
@@ -692,6 +713,91 @@ func (kl *Kubelet) runContainer(pod *api.BoundPod, container *api.Container, pod
return dockertools.DockerID(dockerContainer.ID), err
}
+var masterServices = util.NewStringSet("kubernetes", "kubernetes-ro")
+
+// getServiceEnvVarMap makes a map[string]string of env vars for services a pod in namespace ns should see
+func (kl *Kubelet) getServiceEnvVarMap(ns string) (map[string]string, error) {
+ var (
+ serviceMap = make(map[string]api.Service)
+ m = make(map[string]string)
+ )
+
+ // Get all service resources from the master (via a cache),
+	// and populate them into service environment variables.
+ if kl.serviceLister == nil {
+ // Kubelets without masters (e.g. plain GCE ContainerVM) don't set env vars.
+ return m, nil
+ }
+ services, err := kl.serviceLister.List()
+ if err != nil {
+ return m, fmt.Errorf("Failed to list services when setting up env vars.")
+ }
+
+ // project the services in namespace ns onto the master services
+ for _, service := range services.Items {
+ serviceName := service.Name
+
+ switch service.Namespace {
+	// for the case where the master service namespace is the namespace the pod
+	// is in, the pod should receive all the services in the namespace.
+ //
+ // ordering of the case clauses below enforces this
+ case ns:
+ serviceMap[serviceName] = service
+ case kl.masterServiceNamespace:
+ if masterServices.Has(serviceName) {
+ _, exists := serviceMap[serviceName]
+ if !exists {
+ serviceMap[serviceName] = service
+ }
+ }
+ }
+ }
+ services.Items = []api.Service{}
+ for _, service := range serviceMap {
+ services.Items = append(services.Items, service)
+ }
+
+ for _, e := range envvars.FromServices(&services) {
+ m[e.Name] = e.Value
+ }
+ return m, nil
+}
+
+// Make the service environment variables for a pod in the given namespace.
+func (kl *Kubelet) makeEnvironmentVariables(ns string, container *api.Container) ([]string, error) {
+ var result []string
+ // Note: These are added to the docker.Config, but are not included in the checksum computed
+ // by dockertools.BuildDockerName(...). That way, we can still determine whether an
+ // api.Container is already running by its hash. (We don't want to restart a container just
+ // because some service changed.)
+ //
+ // Note that there is a race between Kubelet seeing the pod and kubelet seeing the service.
+ // To avoid this users can: (1) wait between starting a service and starting; or (2) detect
+ // missing service env var and exit and be restarted; or (3) use DNS instead of env vars
+ // and keep trying to resolve the DNS name of the service (recommended).
+ serviceEnv, err := kl.getServiceEnvVarMap(ns)
+ if err != nil {
+ return result, err
+ }
+
+ for _, value := range container.Env {
+ // The code is in transition from using etcd+BoundPods to apiserver+Pods.
+ // So, the master may set service env vars, or kubelet may. In case both are doing
+ // it, we delete the key from the kubelet-generated ones so we don't have duplicate
+ // env vars.
+		// TODO: remove this next line once all platforms use apiserver+Pods.
+ delete(serviceEnv, value.Name)
+ result = append(result, fmt.Sprintf("%s=%s", value.Name, value.Value))
+ }
+
+ // Append remaining service env vars.
+ for k, v := range serviceEnv {
+ result = append(result, fmt.Sprintf("%s=%s", k, v))
+ }
+ return result, nil
+}
+
func (kl *Kubelet) applyClusterDNS(hc *docker.HostConfig, pod *api.BoundPod) error {
// Get host DNS settings and append them to cluster DNS settings.
f, err := os.Open("/etc/resolv.conf")
@@ -765,7 +871,7 @@ func (kl *Kubelet) killContainerByID(ID, name string) error {
glog.Warningf("No ref for pod '%v' - '%v'", ID, name)
} else {
// TODO: pass reason down here, and state, or move this call up the stack.
- record.Eventf(ref, "terminated", "killing", "Killing %v - %v", ID, name)
+ record.Eventf(ref, "killing", "Killing %v - %v", ID, name)
}
return err
@@ -797,7 +903,7 @@ func (kl *Kubelet) createNetworkContainer(pod *api.BoundPod) (dockertools.Docker
ok, err := kl.dockerPuller.IsImagePresent(container.Image)
if err != nil {
if ref != nil {
- record.Eventf(ref, "failed", "failed", "Failed to inspect image %q", container.Image)
+ record.Eventf(ref, "failed", "Failed to inspect image %q", container.Image)
}
return "", err
}
@@ -807,7 +913,7 @@ func (kl *Kubelet) createNetworkContainer(pod *api.BoundPod) (dockertools.Docker
}
}
if ref != nil {
- record.Eventf(ref, "waiting", "pulled", "Successfully pulled image %q", container.Image)
+ record.Eventf(ref, "pulled", "Successfully pulled image %q", container.Image)
}
return kl.runContainer(pod, container, nil, "")
}
@@ -817,12 +923,12 @@ func (kl *Kubelet) pullImage(img string, ref *api.ObjectReference) error {
defer kl.pullLock.RUnlock()
if err := kl.dockerPuller.Pull(img); err != nil {
if ref != nil {
- record.Eventf(ref, "failed", "failed", "Failed to pull image %q", img)
+ record.Eventf(ref, "failed", "Failed to pull image %q", img)
}
return err
}
if ref != nil {
- record.Eventf(ref, "waiting", "pulled", "Successfully pulled image %q", img)
+ record.Eventf(ref, "pulled", "Successfully pulled image %q", img)
}
return nil
}
@@ -865,22 +971,25 @@ type empty struct{}
func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.DockerContainers) error {
podFullName := GetPodFullName(pod)
- uuid := pod.UID
+ uid := pod.UID
containersToKeep := make(map[dockertools.DockerID]empty)
killedContainers := make(map[dockertools.DockerID]empty)
- glog.V(4).Infof("Syncing Pod, podFullName: %q, uuid: %q", podFullName, uuid)
+ glog.V(4).Infof("Syncing Pod, podFullName: %q, uid: %q", podFullName, uid)
// Make data dirs.
- if err := os.Mkdir(kl.GetPodDir(uuid), 0750); err != nil && !os.IsExist(err) {
+ if err := os.Mkdir(kl.getPodDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
- if err := os.Mkdir(kl.GetPodVolumesDir(uuid), 0750); err != nil && !os.IsExist(err) {
+ if err := os.Mkdir(kl.getPodVolumesDir(uid), 0750); err != nil && !os.IsExist(err) {
+ return err
+ }
+ if err := os.Mkdir(kl.getPodPluginsDir(uid), 0750); err != nil && !os.IsExist(err) {
return err
}
// Make sure we have a network container
var netID dockertools.DockerID
- if netDockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
+ if netDockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, networkContainerName); found {
netID = dockertools.DockerID(netDockerContainer.ID)
} else {
glog.V(2).Infof("Network container doesn't exist for pod %q, killing and re-creating the pod", podFullName)
@@ -910,26 +1019,26 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
return err
}
- podStatus := api.PodStatus{}
- info, err := kl.GetPodInfo(podFullName, uuid)
+ podStatus, err := kl.GetPodStatus(podFullName, uid)
if err != nil {
- glog.Errorf("Unable to get pod with name %q and uuid %q info, health checks may be invalid", podFullName, uuid)
+ glog.Errorf("Unable to get pod with name %q and uid %q info, health checks may be invalid", podFullName, uid)
}
- netInfo, found := info[networkContainerName]
+ netInfo, found := podStatus.Info[networkContainerName]
if found {
podStatus.PodIP = netInfo.PodIP
}
for _, container := range pod.Spec.Containers {
expectedHash := dockertools.HashContainer(&container)
- if dockerContainer, found, hash := dockerContainers.FindPodContainer(podFullName, uuid, container.Name); found {
+ dockerContainerName := dockertools.BuildDockerName(uid, podFullName, &container)
+ if dockerContainer, found, hash := dockerContainers.FindPodContainer(podFullName, uid, container.Name); found {
containerID := dockertools.DockerID(dockerContainer.ID)
glog.V(3).Infof("pod %q container %q exists as %v", podFullName, container.Name, containerID)
// look for changes in the container.
if hash == 0 || hash == expectedHash {
// TODO: This should probably be separated out into a separate goroutine.
- healthy, err := kl.healthy(podFullName, uuid, podStatus, container, dockerContainer)
+ healthy, err := kl.healthy(podFullName, uid, podStatus, container, dockerContainer)
if err != nil {
glog.V(1).Infof("health check errored: %v", err)
containersToKeep[containerID] = empty{}
@@ -950,7 +1059,7 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
killedContainers[containerID] = empty{}
// Also kill associated network container
- if netContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, networkContainerName); found {
+ if netContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, networkContainerName); found {
if err := kl.killContainer(netContainer); err != nil {
glog.V(1).Infof("Failed to kill network container %q: %v", netContainer.ID, err)
continue
@@ -959,45 +1068,44 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
}
// Check RestartPolicy for container
- recentContainers, err := dockertools.GetRecentDockerContainersWithNameAndUUID(kl.dockerClient, podFullName, uuid, container.Name)
+ recentContainers, err := dockertools.GetRecentDockerContainersWithNameAndUUID(kl.dockerClient, podFullName, uid, container.Name)
if err != nil {
- glog.Errorf("Error listing recent containers with name and uuid:%s--%s--%s", podFullName, uuid, container.Name)
+ glog.Errorf("Error listing recent containers:%s", dockerContainerName)
// TODO(dawnchen): error handling here?
}
if len(recentContainers) > 0 && pod.Spec.RestartPolicy.Always == nil {
if pod.Spec.RestartPolicy.Never != nil {
- glog.V(3).Infof("Already ran container with name %s--%s--%s, do nothing",
- podFullName, uuid, container.Name)
+ glog.V(3).Infof("Already ran container with name %s, do nothing",
+ dockerContainerName)
continue
}
if pod.Spec.RestartPolicy.OnFailure != nil {
// Check the exit code of last run
if recentContainers[0].State.ExitCode == 0 {
- glog.V(3).Infof("Already successfully ran container with name %s--%s--%s, do nothing",
- podFullName, uuid, container.Name)
+ glog.V(3).Infof("Already successfully ran container with name %s, do nothing",
+ dockerContainerName)
continue
}
}
}
- glog.V(3).Infof("Container with name %s--%s--%s doesn't exist, creating %#v", podFullName, uuid, container.Name, container)
+ glog.V(3).Infof("Container with name %s doesn't exist, creating %#v", dockerContainerName, container)
ref, err := containerRef(pod, &container)
if err != nil {
glog.Errorf("Couldn't make a ref to pod %v, container %v: '%v'", pod.Name, container.Name, err)
}
- if !api.IsPullNever(container.ImagePullPolicy) {
+ if container.ImagePullPolicy != api.PullNever {
present, err := kl.dockerPuller.IsImagePresent(container.Image)
- latest := dockertools.RequireLatestImage(container.Image)
if err != nil {
if ref != nil {
- record.Eventf(ref, "failed", "failed", "Failed to inspect image %q", container.Image)
+ record.Eventf(ref, "failed", "Failed to inspect image %q", container.Image)
}
glog.Errorf("Failed to inspect image %q: %v; skipping pod %q container %q", container.Image, err, podFullName, container.Name)
continue
}
- if api.IsPullAlways(container.ImagePullPolicy) ||
- (api.IsPullIfNotPresent(container.ImagePullPolicy) && (!present || latest)) {
+ if container.ImagePullPolicy == api.PullAlways ||
+ (container.ImagePullPolicy == api.PullIfNotPresent && (!present)) {
if err := kl.pullImage(container.Image, ref); err != nil {
continue
}
@@ -1016,7 +1124,7 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
// Kill any containers in this pod which were not identified above (guards against duplicates).
for id, container := range dockerContainers {
curPodFullName, curUUID, _, _ := dockertools.ParseDockerName(container.Names[0])
- if curPodFullName == podFullName && curUUID == uuid {
+ if curPodFullName == podFullName && curUUID == uid {
// Don't kill containers we want to keep or those we already killed.
_, keep := containersToKeep[id]
_, killed := killedContainers[id]
@@ -1035,7 +1143,7 @@ func (kl *Kubelet) syncPod(pod *api.BoundPod, dockerContainers dockertools.Docke
type podContainer struct {
podFullName string
- uuid string
+ uid types.UID
containerName string
}
@@ -1045,7 +1153,7 @@ func getDesiredVolumes(pods []api.BoundPod) map[string]api.Volume {
desiredVolumes := make(map[string]api.Volume)
for _, pod := range pods {
for _, volume := range pod.Spec.Volumes {
- identifier := path.Join(pod.Name, volume.Name)
+ identifier := path.Join(string(pod.UID), volume.Name)
desiredVolumes[identifier] = volume
}
}
@@ -1055,7 +1163,7 @@ func getDesiredVolumes(pods []api.BoundPod) map[string]api.Volume {
func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {
desired := util.NewStringSet()
for i := range pods {
- desired.Insert(pods[i].UID)
+ desired.Insert(string(pods[i].UID))
}
found, err := kl.listPodsFromDisk()
if err != nil {
@@ -1063,9 +1171,9 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {
}
errlist := []error{}
for i := range found {
- if !desired.Has(found[i]) {
+ if !desired.Has(string(found[i])) {
glog.V(3).Infof("Orphaned pod %q found, removing", found[i])
- if err := os.RemoveAll(kl.GetPodDir(found[i])); err != nil {
+ if err := os.RemoveAll(kl.getPodDir(found[i])); err != nil {
errlist = append(errlist, err)
}
}
@@ -1077,7 +1185,7 @@ func (kl *Kubelet) cleanupOrphanedPods(pods []api.BoundPod) error {
// If an active volume does not have a respective desired volume, clean it up.
func (kl *Kubelet) cleanupOrphanedVolumes(pods []api.BoundPod) error {
desiredVolumes := getDesiredVolumes(pods)
- currentVolumes := volume.GetCurrentVolumes(kl.rootDirectory)
+ currentVolumes := kl.getPodVolumesFromDisk()
for name, vol := range currentVolumes {
if _, ok := desiredVolumes[name]; !ok {
//TODO (jonesdl) We should somehow differentiate between volumes that are supposed
@@ -1098,7 +1206,7 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
glog.V(4).Infof("Desired: %#v", pods)
var err error
desiredContainers := make(map[podContainer]empty)
- desiredPods := make(map[string]empty)
+ desiredPods := make(map[types.UID]empty)
dockerContainers, err := dockertools.GetKubeletDockerContainers(kl.dockerClient, false)
if err != nil {
@@ -1110,28 +1218,28 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
for ix := range pods {
pod := &pods[ix]
podFullName := GetPodFullName(pod)
- uuid := pod.UID
- desiredPods[uuid] = empty{}
+ uid := pod.UID
+ desiredPods[uid] = empty{}
// Add all containers (including net) to the map.
- desiredContainers[podContainer{podFullName, uuid, networkContainerName}] = empty{}
+ desiredContainers[podContainer{podFullName, uid, networkContainerName}] = empty{}
for _, cont := range pod.Spec.Containers {
- desiredContainers[podContainer{podFullName, uuid, cont.Name}] = empty{}
+ desiredContainers[podContainer{podFullName, uid, cont.Name}] = empty{}
}
// Run the sync in an async manifest worker.
kl.podWorkers.Run(podFullName, func() {
if err := kl.syncPod(pod, dockerContainers); err != nil {
glog.Errorf("Error syncing pod, skipping: %v", err)
- record.Eventf(pod, "", "failedSync", "Error syncing pod, skipping: %v", err)
+ record.Eventf(pod, "failedSync", "Error syncing pod, skipping: %v", err)
}
})
}
// Kill any containers we don't need.
for _, container := range dockerContainers {
// Don't kill containers that are in the desired pods.
- podFullName, uuid, containerName, _ := dockertools.ParseDockerName(container.Names[0])
- if _, found := desiredPods[uuid]; found {
+ podFullName, uid, containerName, _ := dockertools.ParseDockerName(container.Names[0])
+ if _, found := desiredPods[uid]; found {
// syncPod() will handle this one.
continue
}
@@ -1142,7 +1250,7 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
glog.V(4).Infof("Skipping delete of container (%q), source (%s) aren't ready yet.", podFullName, source)
continue
}
- pc := podContainer{podFullName, uuid, containerName}
+ pc := podContainer{podFullName, uid, containerName}
if _, ok := desiredContainers[pc]; !ok {
glog.V(1).Infof("Killing unwanted container %+v", pc)
err = kl.killContainer(container)
@@ -1169,7 +1277,7 @@ func (kl *Kubelet) SyncPods(pods []api.BoundPod) error {
func updateBoundPods(changed []api.BoundPod, current []api.BoundPod) []api.BoundPod {
updated := []api.BoundPod{}
- m := map[string]*api.BoundPod{}
+ m := map[types.UID]*api.BoundPod{}
for i := range changed {
pod := &changed[i]
m[pod.UID] = pod
@@ -1240,11 +1348,11 @@ func (kl *Kubelet) syncLoop(updates <-chan PodUpdate, handler SyncHandler) {
}
// GetKubeletContainerLogs returns logs from the container
-// The second parameter of GetPodInfo and FindPodContainer methods represents pod UUID, which is allowed to be blank
+// The second parameter of GetPodStatus and FindPodContainer methods represents pod UUID, which is allowed to be blank
// TODO: this method is returning logs of random container attempts, when it should be returning the most recent attempt
// or all of them.
func (kl *Kubelet) GetKubeletContainerLogs(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error {
- _, err := kl.GetPodInfo(podFullName, "")
+ _, err := kl.GetPodStatus(podFullName, "")
if err == dockertools.ErrNoContainersInPod {
return fmt.Errorf("pod not found (%q)\n", podFullName)
}
@@ -1276,8 +1384,8 @@ func (kl *Kubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool) {
return nil, false
}
-// GetPodInfo returns information from Docker about the containers in a pod
-func (kl *Kubelet) GetPodInfo(podFullName, uuid string) (api.PodInfo, error) {
+// GetPodStatus returns information from Docker about the containers in a pod
+func (kl *Kubelet) GetPodStatus(podFullName string, uid types.UID) (api.PodStatus, error) {
var manifest api.PodSpec
for _, pod := range kl.pods {
if GetPodFullName(&pod) == podFullName {
@@ -1285,10 +1393,17 @@ func (kl *Kubelet) GetPodInfo(podFullName, uuid string) (api.PodInfo, error) {
break
}
}
- return dockertools.GetDockerPodInfo(kl.dockerClient, manifest, podFullName, uuid)
+
+ info, err := dockertools.GetDockerPodInfo(kl.dockerClient, manifest, podFullName, uid)
+
+ // TODO(dchen1107): Determine PodPhase here
+ var podStatus api.PodStatus
+ podStatus.Info = info
+
+ return podStatus, err
}
-func (kl *Kubelet) healthy(podFullName, podUUID string, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (health.Status, error) {
+func (kl *Kubelet) healthy(podFullName string, podUID types.UID, status api.PodStatus, container api.Container, dockerContainer *docker.APIContainers) (health.Status, error) {
// Give the container 60 seconds to start up.
if container.LivenessProbe == nil {
return health.Healthy, nil
@@ -1299,7 +1414,7 @@ func (kl *Kubelet) healthy(podFullName, podUUID string, status api.PodStatus, co
if kl.healthChecker == nil {
return health.Healthy, nil
}
- return kl.healthChecker.HealthCheck(podFullName, podUUID, status, container)
+ return kl.healthChecker.HealthCheck(podFullName, podUID, status, container)
}
// Returns logs of current machine.
@@ -1309,7 +1424,7 @@ func (kl *Kubelet) ServeLogs(w http.ResponseWriter, req *http.Request) {
}
// Run a command in a container, returns the combined stdout, stderr as an array of bytes
-func (kl *Kubelet) RunInContainer(podFullName, uuid, container string, cmd []string) ([]byte, error) {
+func (kl *Kubelet) RunInContainer(podFullName string, uid types.UID, container string, cmd []string) ([]byte, error) {
if kl.runner == nil {
return nil, fmt.Errorf("no runner specified.")
}
@@ -1317,7 +1432,7 @@ func (kl *Kubelet) RunInContainer(podFullName, uuid, container string, cmd []str
if err != nil {
return nil, err
}
- dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uuid, container)
+ dockerContainer, found, _ := dockerContainers.FindPodContainer(podFullName, uid, container)
if !found {
return nil, fmt.Errorf("container not found (%q)", container)
}
@@ -1332,8 +1447,8 @@ func (kl *Kubelet) BirthCry() {
ref := &api.ObjectReference{
Kind: "Minion",
Name: kl.hostname,
- UID: kl.hostname,
+ UID: types.UID(kl.hostname),
Namespace: api.NamespaceDefault,
}
- record.Eventf(ref, "", "starting", "Starting kubelet.")
+ record.Eventf(ref, "starting", "Starting kubelet.")
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet_test.go
index e7babf7fd3a2..3c1811359896 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/kubelet_test.go
@@ -18,6 +18,7 @@ package kubelet
import (
"fmt"
+ "io/ioutil"
"net/http"
"os"
"path"
@@ -32,9 +33,10 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/health"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/dockertools"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ _ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/volume"
"github.com/fsouza/go-dockerclient"
"github.com/google/cadvisor/info"
"github.com/stretchr/testify/mock"
@@ -45,8 +47,7 @@ func init() {
util.ReallyCrash = true
}
-func newTestKubelet(t *testing.T) (*Kubelet, *tools.FakeEtcdClient, *dockertools.FakeDockerClient) {
- fakeEtcdClient := tools.NewFakeEtcdClient(t)
+func newTestKubelet(t *testing.T) (*Kubelet, *dockertools.FakeDockerClient) {
fakeDocker := &dockertools.FakeDockerClient{
RemovedImages: util.StringSet{},
}
@@ -54,11 +55,23 @@ func newTestKubelet(t *testing.T) (*Kubelet, *tools.FakeEtcdClient, *dockertools
kubelet := &Kubelet{}
kubelet.dockerClient = fakeDocker
kubelet.dockerPuller = &dockertools.FakeDockerPuller{}
- kubelet.etcdClient = fakeEtcdClient
- kubelet.rootDirectory = "/tmp/kubelet"
+ if tempDir, err := ioutil.TempDir("/tmp", "kubelet_test."); err != nil {
+ t.Fatalf("can't make a temp rootdir: %v", err)
+ } else {
+ kubelet.rootDirectory = tempDir
+ }
+ if err := os.MkdirAll(kubelet.rootDirectory, 0750); err != nil {
+ t.Fatalf("can't mkdir(%q): %v", kubelet.rootDirectory, err)
+ }
kubelet.podWorkers = newPodWorkers()
kubelet.sourceReady = func(source string) bool { return true }
- return kubelet, fakeEtcdClient, fakeDocker
+ kubelet.masterServiceNamespace = api.NamespaceDefault
+ kubelet.serviceLister = testServiceLister{}
+ if err := kubelet.setupDataDirs(); err != nil {
+ t.Fatalf("can't initialize kubelet data dirs: %v", err)
+ }
+
+ return kubelet, fakeDocker
}
func verifyCalls(t *testing.T, fakeDocker *dockertools.FakeDockerClient, calls []string) {
@@ -89,33 +102,60 @@ func verifyBoolean(t *testing.T, expected, value bool) {
}
func TestKubeletDirs(t *testing.T) {
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
root := kubelet.rootDirectory
- if err := os.MkdirAll(root, 0750); err != nil {
- t.Fatalf("can't mkdir(%q): %s", root, err)
- }
var exp, got string
- got = kubelet.GetPodsDir()
+ got = kubelet.getPodsDir()
exp = path.Join(root, "pods")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodDir("abc123")
+ got = kubelet.getPluginsDir()
+ exp = path.Join(root, "plugins")
+ if got != exp {
+ t.Errorf("expected %q', got %q", exp, got)
+ }
+
+ got = kubelet.getPluginDir("foobar")
+ exp = path.Join(root, "plugins/foobar")
+ if got != exp {
+ t.Errorf("expected %q', got %q", exp, got)
+ }
+
+ got = kubelet.getPodDir("abc123")
exp = path.Join(root, "pods/abc123")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodVolumesDir("abc123")
+ got = kubelet.getPodVolumesDir("abc123")
exp = path.Join(root, "pods/abc123/volumes")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodContainerDir("abc123", "def456")
+ got = kubelet.getPodVolumeDir("abc123", "plugin", "foobar")
+ exp = path.Join(root, "pods/abc123/volumes/plugin/foobar")
+ if got != exp {
+ t.Errorf("expected %q', got %q", exp, got)
+ }
+
+ got = kubelet.getPodPluginsDir("abc123")
+ exp = path.Join(root, "pods/abc123/plugins")
+ if got != exp {
+ t.Errorf("expected %q', got %q", exp, got)
+ }
+
+ got = kubelet.getPodPluginDir("abc123", "foobar")
+ exp = path.Join(root, "pods/abc123/plugins/foobar")
+ if got != exp {
+ t.Errorf("expected %q', got %q", exp, got)
+ }
+
+ got = kubelet.getPodContainerDir("abc123", "def456")
exp = path.Join(root, "pods/abc123/containers/def456")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
@@ -123,7 +163,7 @@ func TestKubeletDirs(t *testing.T) {
}
func TestKubeletDirsCompat(t *testing.T) {
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
root := kubelet.rootDirectory
if err := os.MkdirAll(root, 0750); err != nil {
t.Fatalf("can't mkdir(%q): %s", root, err)
@@ -147,31 +187,31 @@ func TestKubeletDirsCompat(t *testing.T) {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
- got = kubelet.GetPodDir("oldpod")
+ got = kubelet.getPodDir("oldpod")
exp = path.Join(root, "oldpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodDir("newpod")
+ got = kubelet.getPodDir("newpod")
exp = path.Join(root, "pods/newpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodDir("bothpod")
+ got = kubelet.getPodDir("bothpod")
exp = path.Join(root, "pods/bothpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodDir("neitherpod")
+ got = kubelet.getPodDir("neitherpod")
exp = path.Join(root, "pods/neitherpod")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- root = kubelet.GetPodDir("newpod")
+ root = kubelet.getPodDir("newpod")
// Old-style container dir.
if err := os.MkdirAll(fmt.Sprintf("%s/oldctr", root), 0750); err != nil {
@@ -189,25 +229,25 @@ func TestKubeletDirsCompat(t *testing.T) {
t.Fatalf("can't mkdir(%q): %s", root, err)
}
- got = kubelet.GetPodContainerDir("newpod", "oldctr")
+ got = kubelet.getPodContainerDir("newpod", "oldctr")
exp = path.Join(root, "oldctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodContainerDir("newpod", "newctr")
+ got = kubelet.getPodContainerDir("newpod", "newctr")
exp = path.Join(root, "containers/newctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodContainerDir("newpod", "bothctr")
+ got = kubelet.getPodContainerDir("newpod", "bothctr")
exp = path.Join(root, "containers/bothctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
}
- got = kubelet.GetPodContainerDir("newpod", "neitherctr")
+ got = kubelet.getPodContainerDir("newpod", "neitherctr")
exp = path.Join(root, "containers/neitherctr")
if got != exp {
t.Errorf("expected %q', got %q", exp, got)
@@ -228,7 +268,7 @@ func TestKillContainerWithError(t *testing.T) {
},
},
}
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
kubelet.dockerClient = fakeDocker
err := kubelet.killContainer(&fakeDocker.ContainerList[0])
if err == nil {
@@ -238,7 +278,7 @@ func TestKillContainerWithError(t *testing.T) {
}
func TestKillContainer(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{
{
ID: "1234",
@@ -287,7 +327,7 @@ func (cr *channelReader) GetList() [][]api.BoundPod {
}
func TestSyncPodsDoesNothing(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
container := api.Container{Name: "bar"}
fakeDocker.ContainerList = []docker.APIContainers{
{
@@ -324,7 +364,7 @@ func TestSyncPodsDoesNothing(t *testing.T) {
}
func TestSyncPodsWithTerminationLog(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
container := api.Container{
Name: "bar",
TerminationMessagePath: "/dev/somepath",
@@ -354,7 +394,7 @@ func TestSyncPodsWithTerminationLog(t *testing.T) {
fakeDocker.Lock()
parts := strings.Split(fakeDocker.Container.HostConfig.Binds[0], ":")
- if !matchString(t, kubelet.GetPodContainerDir("12345678", "bar")+"/k8s_bar\\.[a-f0-9]", parts[0]) {
+ if !matchString(t, kubelet.getPodContainerDir("12345678", "bar")+"/k8s_bar\\.[a-f0-9]", parts[0]) {
t.Errorf("Unexpected host path: %s", parts[0])
}
if parts[1] != "/dev/somepath" {
@@ -385,7 +425,7 @@ func matchString(t *testing.T, pattern, str string) bool {
}
func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.networkContainerImage = "custom_image_name"
fakeDocker.ContainerList = []docker.APIContainers{}
err := kubelet.SyncPods([]api.BoundPod{
@@ -432,7 +472,7 @@ func TestSyncPodsCreatesNetAndContainer(t *testing.T) {
}
func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
puller := kubelet.dockerPuller.(*dockertools.FakeDockerPuller)
puller.HasImages = []string{}
kubelet.networkContainerImage = "custom_image_name"
@@ -447,7 +487,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
},
Spec: api.PodSpec{
Containers: []api.Container{
- {Name: "bar"},
+ {Name: "bar", Image: "something", ImagePullPolicy: "IfNotPresent"},
},
},
},
@@ -462,7 +502,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
fakeDocker.Lock()
- if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", ""}) {
+ if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", "something"}) {
t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled)
}
@@ -475,7 +515,7 @@ func TestSyncPodsCreatesNetAndContainerPullsImage(t *testing.T) {
}
func TestSyncPodsWithNetCreatesContainer(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{
{
// network container
@@ -515,7 +555,7 @@ func TestSyncPodsWithNetCreatesContainer(t *testing.T) {
}
func TestSyncPodsWithNetCreatesContainerCallsHandler(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeHttp := fakeHTTP{}
kubelet.httpClient = &fakeHttp
fakeDocker.ContainerList = []docker.APIContainers{
@@ -571,7 +611,7 @@ func TestSyncPodsWithNetCreatesContainerCallsHandler(t *testing.T) {
}
func TestSyncPodsDeletesWithNoNetContainer(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{
{
// format is // k8s___
@@ -616,7 +656,7 @@ func TestSyncPodsDeletesWithNoNetContainer(t *testing.T) {
func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
ready := false
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.sourceReady = func(source string) bool { return ready }
fakeDocker.ContainerList = []docker.APIContainers{
@@ -659,7 +699,7 @@ func TestSyncPodsDeletesWhenSourcesAreReady(t *testing.T) {
func TestSyncPodsDeletesWhenContainerSourceReady(t *testing.T) {
ready := false
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.sourceReady = func(source string) bool {
if source == "testSource" {
return ready
@@ -719,7 +759,7 @@ func TestSyncPodsDeletesWhenContainerSourceReady(t *testing.T) {
}
func TestSyncPodsDeletes(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{
{
// the k8s prefix is required for the kubelet to manage the container
@@ -757,7 +797,7 @@ func TestSyncPodsDeletes(t *testing.T) {
}
func TestSyncPodDeletesDuplicate(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
dockerContainers := dockertools.DockerContainers{
"1234": &docker.APIContainers{
// the k8s prefix is required for the kubelet to manage the container
@@ -807,7 +847,7 @@ func TestSyncPodDeletesDuplicate(t *testing.T) {
type FalseHealthChecker struct{}
-func (f *FalseHealthChecker) HealthCheck(podFullName, podUUID string, status api.PodStatus, container api.Container) (health.Status, error) {
+func (f *FalseHealthChecker) HealthCheck(podFullName string, podUID types.UID, status api.PodStatus, container api.Container) (health.Status, error) {
return health.Unhealthy, nil
}
@@ -816,7 +856,7 @@ func (f *FalseHealthChecker) CanCheck(probe *api.LivenessProbe) bool {
}
func TestSyncPodBadHash(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.healthChecker = &FalseHealthChecker{}
dockerContainers := dockertools.DockerContainers{
"1234": &docker.APIContainers{
@@ -863,7 +903,7 @@ func TestSyncPodBadHash(t *testing.T) {
}
func TestSyncPodUnhealthy(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.healthChecker = &FalseHealthChecker{}
dockerContainers := dockertools.DockerContainers{
"1234": &docker.APIContainers{
@@ -913,33 +953,10 @@ func TestSyncPodUnhealthy(t *testing.T) {
}
}
-func TestMakeEnvVariables(t *testing.T) {
- container := api.Container{
- Env: []api.EnvVar{
- {
- Name: "foo",
- Value: "bar",
- },
- {
- Name: "baz",
- Value: "blah",
- },
- },
- }
- vars := makeEnvironmentVariables(&container)
- if len(vars) != len(container.Env) {
- t.Errorf("Vars don't match. Expected: %#v Found: %#v", container.Env, vars)
- }
- for ix, env := range container.Env {
- value := fmt.Sprintf("%s=%s", env.Name, env.Value)
- if value != vars[ix] {
- t.Errorf("Unexpected value: %s. Expected: %s", vars[ix], value)
- }
- }
-}
-
func TestMountExternalVolumes(t *testing.T) {
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
+ kubelet.volumePluginMgr.InitPlugins([]volume.Plugin{&volume.FakePlugin{"fake", nil}}, &volumeHost{kubelet})
+
pod := api.BoundPod{
ObjectMeta: api.ObjectMeta{
UID: "12345678",
@@ -949,27 +966,74 @@ func TestMountExternalVolumes(t *testing.T) {
Spec: api.PodSpec{
Volumes: []api.Volume{
{
- Name: "host-dir",
- Source: &api.VolumeSource{
- HostDir: &api.HostDir{"/dir/path"},
- },
+ Name: "vol1",
+ Source: api.VolumeSource{},
},
},
},
}
- podVolumes, _ := kubelet.mountExternalVolumes(&pod)
- expectedPodVolumes := make(volumeMap)
- expectedPodVolumes["host-dir"] = &volume.HostDir{"/dir/path"}
+ podVolumes, err := kubelet.mountExternalVolumes(&pod)
+ if err != nil {
+ t.Errorf("Expected success: %v", err)
+ }
+ expectedPodVolumes := []string{"vol1"}
if len(expectedPodVolumes) != len(podVolumes) {
t.Errorf("Unexpected volumes. Expected %#v got %#v. Manifest was: %#v", expectedPodVolumes, podVolumes, pod)
}
- for name, expectedVolume := range expectedPodVolumes {
+ for _, name := range expectedPodVolumes {
if _, ok := podVolumes[name]; !ok {
- t.Errorf("api.BoundPod volumes map is missing key: %s. %#v", expectedVolume, podVolumes)
+ t.Errorf("api.BoundPod volumes map is missing key: %s. %#v", name, podVolumes)
+ }
+ }
+}
+
+func TestGetPodVolumesFromDisk(t *testing.T) {
+ kubelet, _ := newTestKubelet(t)
+ plug := &volume.FakePlugin{"fake", nil}
+ kubelet.volumePluginMgr.InitPlugins([]volume.Plugin{plug}, &volumeHost{kubelet})
+
+ volsOnDisk := []struct {
+ podUID types.UID
+ volName string
+ }{
+ {"pod1", "vol1"},
+ {"pod1", "vol2"},
+ {"pod2", "vol1"},
+ }
+
+ expectedPaths := []string{}
+ for i := range volsOnDisk {
+ fv := volume.FakeVolume{volsOnDisk[i].podUID, volsOnDisk[i].volName, plug}
+ fv.SetUp()
+ expectedPaths = append(expectedPaths, fv.GetPath())
+ }
+
+ volumesFound := kubelet.getPodVolumesFromDisk()
+ if len(volumesFound) != len(expectedPaths) {
+ t.Errorf("Expected to find %d cleaners, got %d", len(expectedPaths), len(volumesFound))
+ }
+ for _, ep := range expectedPaths {
+ found := false
+ for _, cl := range volumesFound {
+ if ep == cl.GetPath() {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("Could not find a volume with path %s", ep)
}
}
}
+type stubVolume struct {
+ path string
+}
+
+func (f *stubVolume) GetPath() string {
+ return f.path
+}
+
func TestMakeVolumesAndBinds(t *testing.T) {
container := api.Container{
VolumeMounts: []api.VolumeMount{
@@ -1005,9 +1069,9 @@ func TestMakeVolumesAndBinds(t *testing.T) {
}
podVolumes := volumeMap{
- "disk": &volume.HostDir{"/mnt/disk"},
- "disk4": &volume.HostDir{"/mnt/host"},
- "disk5": &volume.EmptyDir{"disk5", "podID", "/var/lib/kubelet"},
+ "disk": &stubVolume{"/mnt/disk"},
+ "disk4": &stubVolume{"/mnt/host"},
+ "disk5": &stubVolume{"/var/lib/kubelet/podID/volumes/empty/disk5"},
}
binds := makeBinds(&pod, &container, podVolumes)
@@ -1189,7 +1253,7 @@ func TestGetContainerInfo(t *testing.T) {
cadvisorReq := &info.ContainerInfoRequest{}
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, nil)
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.cadvisorClient = mockCadvisor
fakeDocker.ContainerList = []docker.APIContainers{
{
@@ -1239,7 +1303,7 @@ func TestGetRootInfo(t *testing.T) {
}
func TestGetContainerInfoWithoutCadvisor(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{
{
ID: "foobar",
@@ -1265,7 +1329,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
expectedErr := fmt.Errorf("some error")
mockCadvisor.On("DockerContainer", containerID, cadvisorReq).Return(containerInfo, expectedErr)
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.cadvisorClient = mockCadvisor
fakeDocker.ContainerList = []docker.APIContainers{
{
@@ -1293,7 +1357,7 @@ func TestGetContainerInfoWhenCadvisorFailed(t *testing.T) {
func TestGetContainerInfoOnNonExistContainer(t *testing.T) {
mockCadvisor := &mockCadvisorClient{}
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.cadvisorClient = mockCadvisor
fakeDocker.ContainerList = []docker.APIContainers{}
@@ -1318,7 +1382,7 @@ func (f *fakeContainerCommandRunner) RunInContainer(id string, cmd []string) ([]
func TestRunInContainerNoSuchPod(t *testing.T) {
fakeCommandRunner := fakeContainerCommandRunner{}
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
fakeDocker.ContainerList = []docker.APIContainers{}
kubelet.runner = &fakeCommandRunner
@@ -1340,7 +1404,7 @@ func TestRunInContainerNoSuchPod(t *testing.T) {
func TestRunInContainer(t *testing.T) {
fakeCommandRunner := fakeContainerCommandRunner{}
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.runner = &fakeCommandRunner
containerID := "abc1234"
@@ -1369,10 +1433,10 @@ func TestRunInContainer(t *testing.T) {
containerName,
cmd)
if fakeCommandRunner.ID != containerID {
- t.Errorf("unexected Name: %s", fakeCommandRunner.ID)
+ t.Errorf("unexpected Name: %s", fakeCommandRunner.ID)
}
if !reflect.DeepEqual(fakeCommandRunner.Cmd, cmd) {
- t.Errorf("unexpected commnd: %s", fakeCommandRunner.Cmd)
+ t.Errorf("unexpected command: %s", fakeCommandRunner.Cmd)
}
if err != nil {
t.Errorf("unexpected error: %v", err)
@@ -1381,7 +1445,7 @@ func TestRunInContainer(t *testing.T) {
func TestRunHandlerExec(t *testing.T) {
fakeCommandRunner := fakeContainerCommandRunner{}
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.runner = &fakeCommandRunner
containerID := "abc1234"
@@ -1429,7 +1493,7 @@ func (f *fakeHTTP) Get(url string) (*http.Response, error) {
func TestRunHandlerHttp(t *testing.T) {
fakeHttp := fakeHTTP{}
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
kubelet.httpClient = &fakeHttp
podName := "podFoo"
@@ -1458,7 +1522,7 @@ func TestRunHandlerHttp(t *testing.T) {
}
func TestNewHandler(t *testing.T) {
- kubelet, _, _ := newTestKubelet(t)
+ kubelet, _ := newTestKubelet(t)
handler := &api.Handler{
HTTPGet: &api.HTTPGetAction{
Host: "foo",
@@ -1489,7 +1553,7 @@ func TestNewHandler(t *testing.T) {
}
func TestSyncPodEventHandlerFails(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.httpClient = &fakeHTTP{
err: fmt.Errorf("test error"),
}
@@ -1651,7 +1715,7 @@ func TestKubeletGarbageCollection(t *testing.T) {
},
}
for _, test := range tests {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.maxContainerCount = 5
fakeDocker.ContainerList = test.containers
fakeDocker.ContainerMap = test.containerDetails
@@ -1818,7 +1882,7 @@ func TestPurgeOldest(t *testing.T) {
},
}
for _, test := range tests {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
kubelet.maxContainerCount = 5
fakeDocker.ContainerMap = test.containerDetails
kubelet.purgeOldest(test.ids)
@@ -1829,7 +1893,7 @@ func TestPurgeOldest(t *testing.T) {
}
func TestSyncPodsWithPullPolicy(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
+ kubelet, fakeDocker := newTestKubelet(t)
puller := kubelet.dockerPuller.(*dockertools.FakeDockerPuller)
puller.HasImages = []string{"existing_one", "want:latest"}
kubelet.networkContainerImage = "custom_image_name"
@@ -1860,7 +1924,7 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
fakeDocker.Lock()
- if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", "pull_always_image", "pull_if_not_present_image", "want:latest"}) {
+ if !reflect.DeepEqual(puller.ImagesPulled, []string{"custom_image_name", "pull_always_image", "pull_if_not_present_image"}) {
t.Errorf("Unexpected pulled containers: %v", puller.ImagesPulled)
}
@@ -1870,29 +1934,6 @@ func TestSyncPodsWithPullPolicy(t *testing.T) {
fakeDocker.Unlock()
}
-func TestGarbageCollectImages(t *testing.T) {
- kubelet, _, fakeDocker := newTestKubelet(t)
-
- fakeDocker.Images = []docker.APIImages{
- {
- ID: "foo",
- },
- {
- ID: "bar",
- },
- }
-
- if err := kubelet.GarbageCollectImages(); err != nil {
- t.Errorf("unexpected error: %v", err)
- }
-
- if len(fakeDocker.RemovedImages) != 2 ||
- !fakeDocker.RemovedImages.Has("foo") ||
- !fakeDocker.RemovedImages.Has("bar") {
- t.Errorf("unexpected images removed: %v", fakeDocker.RemovedImages)
- }
-}
-
func TestParseResolvConf(t *testing.T) {
testCases := []struct {
data string
@@ -1936,3 +1977,275 @@ func TestParseResolvConf(t *testing.T) {
}
}
}
+
+type testServiceLister struct {
+ services []api.Service
+}
+
+func (ls testServiceLister) List() (api.ServiceList, error) {
+ return api.ServiceList{
+ Items: ls.services,
+ }, nil
+}
+
+func TestMakeEnvironmentVariables(t *testing.T) {
+ services := []api.Service{
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{
+ Port: 8081,
+ PortalIP: "1.2.3.1",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes-ro", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{
+ Port: 8082,
+ PortalIP: "1.2.3.2",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test1"},
+ Spec: api.ServiceSpec{
+ Port: 8083,
+ PortalIP: "1.2.3.3",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "test2"},
+ Spec: api.ServiceSpec{
+ Port: 8084,
+ PortalIP: "1.2.3.4",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test2"},
+ Spec: api.ServiceSpec{
+ Port: 8085,
+ PortalIP: "1.2.3.5",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "kubernetes"},
+ Spec: api.ServiceSpec{
+ Port: 8086,
+ PortalIP: "1.2.3.6",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes-ro", Namespace: "kubernetes"},
+ Spec: api.ServiceSpec{
+ Port: 8087,
+ PortalIP: "1.2.3.7",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "not-special", Namespace: "kubernetes"},
+ Spec: api.ServiceSpec{
+ Port: 8088,
+ PortalIP: "1.2.3.8",
+ },
+ },
+ }
+
+ testCases := []struct {
+ name string // the name of the test case
+ ns string // the namespace to generate environment for
+ container *api.Container // the container to use
+ masterServiceNamespace string // the namespace to read master service info from
+ nilLister bool // whether the lister should be nil
+ expectedEnvs util.StringSet // a set of expected environment vars
+ expectedEnvSize int // total number of expected env vars
+ }{
+ {
+ "api server = Y, kubelet = Y",
+ "test1",
+ &api.Container{
+ Env: []api.EnvVar{
+ {Name: "FOO", Value: "BAR"},
+ {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
+ {Name: "TEST_SERVICE_PORT", Value: "8083"},
+ {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
+ {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
+ {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
+ {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
+ {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
+ },
+ },
+ api.NamespaceDefault,
+ false,
+ util.NewStringSet("FOO=BAR",
+ "TEST_SERVICE_HOST=1.2.3.3",
+ "TEST_SERVICE_PORT=8083",
+ "TEST_PORT=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP_PROTO=tcp",
+ "TEST_PORT_8083_TCP_PORT=8083",
+ "TEST_PORT_8083_TCP_ADDR=1.2.3.3",
+ "KUBERNETES_SERVICE_HOST=1.2.3.1",
+ "KUBERNETES_SERVICE_PORT=8081",
+ "KUBERNETES_PORT=tcp://1.2.3.1:8081",
+ "KUBERNETES_PORT_8081_TCP=tcp://1.2.3.1:8081",
+ "KUBERNETES_PORT_8081_TCP_PROTO=tcp",
+ "KUBERNETES_PORT_8081_TCP_PORT=8081",
+ "KUBERNETES_PORT_8081_TCP_ADDR=1.2.3.1",
+ "KUBERNETES_RO_SERVICE_HOST=1.2.3.2",
+ "KUBERNETES_RO_SERVICE_PORT=8082",
+ "KUBERNETES_RO_PORT=tcp://1.2.3.2:8082",
+ "KUBERNETES_RO_PORT_8082_TCP=tcp://1.2.3.2:8082",
+ "KUBERNETES_RO_PORT_8082_TCP_PROTO=tcp",
+ "KUBERNETES_RO_PORT_8082_TCP_PORT=8082",
+ "KUBERNETES_RO_PORT_8082_TCP_ADDR=1.2.3.2"),
+ 22,
+ },
+ {
+ "api server = Y, kubelet = N",
+ "test1",
+ &api.Container{
+ Env: []api.EnvVar{
+ {Name: "FOO", Value: "BAR"},
+ {Name: "TEST_SERVICE_HOST", Value: "1.2.3.3"},
+ {Name: "TEST_SERVICE_PORT", Value: "8083"},
+ {Name: "TEST_PORT", Value: "tcp://1.2.3.3:8083"},
+ {Name: "TEST_PORT_8083_TCP", Value: "tcp://1.2.3.3:8083"},
+ {Name: "TEST_PORT_8083_TCP_PROTO", Value: "tcp"},
+ {Name: "TEST_PORT_8083_TCP_PORT", Value: "8083"},
+ {Name: "TEST_PORT_8083_TCP_ADDR", Value: "1.2.3.3"},
+ },
+ },
+ api.NamespaceDefault,
+ true,
+ util.NewStringSet("FOO=BAR",
+ "TEST_SERVICE_HOST=1.2.3.3",
+ "TEST_SERVICE_PORT=8083",
+ "TEST_PORT=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP_PROTO=tcp",
+ "TEST_PORT_8083_TCP_PORT=8083",
+ "TEST_PORT_8083_TCP_ADDR=1.2.3.3"),
+ 8,
+ },
+ {
+ "api server = N; kubelet = Y",
+ "test1",
+ &api.Container{
+ Env: []api.EnvVar{
+ {Name: "FOO", Value: "BAZ"},
+ },
+ },
+ api.NamespaceDefault,
+ false,
+ util.NewStringSet("FOO=BAZ",
+ "TEST_SERVICE_HOST=1.2.3.3",
+ "TEST_SERVICE_PORT=8083",
+ "TEST_PORT=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP=tcp://1.2.3.3:8083",
+ "TEST_PORT_8083_TCP_PROTO=tcp",
+ "TEST_PORT_8083_TCP_PORT=8083",
+ "TEST_PORT_8083_TCP_ADDR=1.2.3.3",
+ "KUBERNETES_SERVICE_HOST=1.2.3.1",
+ "KUBERNETES_SERVICE_PORT=8081",
+ "KUBERNETES_PORT=tcp://1.2.3.1:8081",
+ "KUBERNETES_PORT_8081_TCP=tcp://1.2.3.1:8081",
+ "KUBERNETES_PORT_8081_TCP_PROTO=tcp",
+ "KUBERNETES_PORT_8081_TCP_PORT=8081",
+ "KUBERNETES_PORT_8081_TCP_ADDR=1.2.3.1",
+ "KUBERNETES_RO_SERVICE_HOST=1.2.3.2",
+ "KUBERNETES_RO_SERVICE_PORT=8082",
+ "KUBERNETES_RO_PORT=tcp://1.2.3.2:8082",
+ "KUBERNETES_RO_PORT_8082_TCP=tcp://1.2.3.2:8082",
+ "KUBERNETES_RO_PORT_8082_TCP_PROTO=tcp",
+ "KUBERNETES_RO_PORT_8082_TCP_PORT=8082",
+ "KUBERNETES_RO_PORT_8082_TCP_ADDR=1.2.3.2"),
+ 22,
+ },
+ {
+ "master service in pod ns",
+ "test2",
+ &api.Container{
+ Env: []api.EnvVar{
+ {Name: "FOO", Value: "ZAP"},
+ },
+ },
+ "kubernetes",
+ false,
+ util.NewStringSet("FOO=ZAP",
+ "TEST_SERVICE_HOST=1.2.3.5",
+ "TEST_SERVICE_PORT=8085",
+ "TEST_PORT=tcp://1.2.3.5:8085",
+ "TEST_PORT_8085_TCP=tcp://1.2.3.5:8085",
+ "TEST_PORT_8085_TCP_PROTO=tcp",
+ "TEST_PORT_8085_TCP_PORT=8085",
+ "TEST_PORT_8085_TCP_ADDR=1.2.3.5",
+ "KUBERNETES_SERVICE_HOST=1.2.3.4",
+ "KUBERNETES_SERVICE_PORT=8084",
+ "KUBERNETES_PORT=tcp://1.2.3.4:8084",
+ "KUBERNETES_PORT_8084_TCP=tcp://1.2.3.4:8084",
+ "KUBERNETES_PORT_8084_TCP_PROTO=tcp",
+ "KUBERNETES_PORT_8084_TCP_PORT=8084",
+ "KUBERNETES_PORT_8084_TCP_ADDR=1.2.3.4",
+ "KUBERNETES_RO_SERVICE_HOST=1.2.3.7",
+ "KUBERNETES_RO_SERVICE_PORT=8087",
+ "KUBERNETES_RO_PORT=tcp://1.2.3.7:8087",
+ "KUBERNETES_RO_PORT_8087_TCP=tcp://1.2.3.7:8087",
+ "KUBERNETES_RO_PORT_8087_TCP_PROTO=tcp",
+ "KUBERNETES_RO_PORT_8087_TCP_PORT=8087",
+ "KUBERNETES_RO_PORT_8087_TCP_ADDR=1.2.3.7"),
+ 22,
+ },
+ {
+ "pod in master service ns",
+ "kubernetes",
+ &api.Container{},
+ "kubernetes",
+ false,
+ util.NewStringSet(
+ "NOT_SPECIAL_SERVICE_HOST=1.2.3.8",
+ "NOT_SPECIAL_SERVICE_PORT=8088",
+ "NOT_SPECIAL_PORT=tcp://1.2.3.8:8088",
+ "NOT_SPECIAL_PORT_8088_TCP=tcp://1.2.3.8:8088",
+ "NOT_SPECIAL_PORT_8088_TCP_PROTO=tcp",
+ "NOT_SPECIAL_PORT_8088_TCP_PORT=8088",
+ "NOT_SPECIAL_PORT_8088_TCP_ADDR=1.2.3.8",
+ "KUBERNETES_SERVICE_HOST=1.2.3.6",
+ "KUBERNETES_SERVICE_PORT=8086",
+ "KUBERNETES_PORT=tcp://1.2.3.6:8086",
+ "KUBERNETES_PORT_8086_TCP=tcp://1.2.3.6:8086",
+ "KUBERNETES_PORT_8086_TCP_PROTO=tcp",
+ "KUBERNETES_PORT_8086_TCP_PORT=8086",
+ "KUBERNETES_PORT_8086_TCP_ADDR=1.2.3.6",
+ "KUBERNETES_RO_SERVICE_HOST=1.2.3.7",
+ "KUBERNETES_RO_SERVICE_PORT=8087",
+ "KUBERNETES_RO_PORT=tcp://1.2.3.7:8087",
+ "KUBERNETES_RO_PORT_8087_TCP=tcp://1.2.3.7:8087",
+ "KUBERNETES_RO_PORT_8087_TCP_PROTO=tcp",
+ "KUBERNETES_RO_PORT_8087_TCP_PORT=8087",
+ "KUBERNETES_RO_PORT_8087_TCP_ADDR=1.2.3.7"),
+ 21,
+ },
+ }
+
+ for _, tc := range testCases {
+ kl, _ := newTestKubelet(t)
+ kl.masterServiceNamespace = tc.masterServiceNamespace
+ if tc.nilLister {
+ kl.serviceLister = nil
+ } else {
+ kl.serviceLister = testServiceLister{services}
+ }
+
+ result, err := kl.makeEnvironmentVariables(tc.ns, tc.container)
+ if err != nil {
+ t.Errorf("[%v] Unexpected error: %v", tc.name, err)
+ }
+
+ resultSet := util.NewStringSet(result...)
+ if !resultSet.IsSuperset(tc.expectedEnvs) {
+ t.Errorf("[%v] Unexpected env entries; expected {%v}, got {%v}", tc.name, tc.expectedEnvs, resultSet)
+ }
+
+ if a := len(resultSet); a != tc.expectedEnvSize {
+ t.Errorf("[%v] Unexpected number of env vars; expected %v, got %v", tc.name, tc.expectedEnvSize, a)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go
index 60ee5e0d0b95..222cedb52e27 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server.go
@@ -34,6 +34,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/healthz"
"github.com/GoogleCloudPlatform/kubernetes/pkg/httplog"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/golang/glog"
"github.com/google/cadvisor/info"
)
@@ -61,13 +62,13 @@ func ListenAndServeKubeletServer(host HostInterface, address net.IP, port uint,
// HostInterface contains all the kubelet methods required by the server.
// For testablitiy.
type HostInterface interface {
- GetContainerInfo(podFullName, uuid, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error)
+ GetContainerInfo(podFullName string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error)
GetRootInfo(req *info.ContainerInfoRequest) (*info.ContainerInfo, error)
GetMachineInfo() (*info.MachineInfo, error)
GetBoundPods() ([]api.BoundPod, error)
GetPodByName(namespace, name string) (*api.BoundPod, bool)
- GetPodInfo(name, uuid string) (api.PodInfo, error)
- RunInContainer(name, uuid, container string, cmd []string) ([]byte, error)
+ GetPodStatus(name string, uid types.UID) (api.PodStatus, error)
+ RunInContainer(name string, uid types.UID, container string, cmd []string) ([]byte, error)
GetKubeletContainerLogs(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error
ServeLogs(w http.ResponseWriter, req *http.Request)
}
@@ -188,22 +189,22 @@ func (s *Server) handleBoundPods(w http.ResponseWriter, req *http.Request) {
}
func (s *Server) handlePodInfoOld(w http.ResponseWriter, req *http.Request) {
- s.handlePodInfo(w, req, false)
+ s.handlePodStatus(w, req, false)
}
func (s *Server) handlePodInfoVersioned(w http.ResponseWriter, req *http.Request) {
- s.handlePodInfo(w, req, true)
+ s.handlePodStatus(w, req, true)
}
-// handlePodInfo handles podInfo requests against the Kubelet
-func (s *Server) handlePodInfo(w http.ResponseWriter, req *http.Request, versioned bool) {
+// handlePodStatus handles podInfo requests against the Kubelet
+func (s *Server) handlePodStatus(w http.ResponseWriter, req *http.Request, versioned bool) {
u, err := url.ParseRequestURI(req.RequestURI)
if err != nil {
s.error(w, err)
return
}
podID := u.Query().Get("podID")
- podUUID := u.Query().Get("UUID")
+ podUID := types.UID(u.Query().Get("UUID"))
podNamespace := u.Query().Get("podNamespace")
if len(podID) == 0 {
w.WriteHeader(http.StatusBadRequest)
@@ -220,12 +221,12 @@ func (s *Server) handlePodInfo(w http.ResponseWriter, req *http.Request, version
http.Error(w, "Pod does not exist", http.StatusNotFound)
return
}
- info, err := s.host.GetPodInfo(GetPodFullName(pod), podUUID)
+ status, err := s.host.GetPodStatus(GetPodFullName(pod), podUID)
if err != nil {
s.error(w, err)
return
}
- data, err := exportPodInfo(info, versioned)
+ data, err := exportPodStatus(status, versioned)
if err != nil {
s.error(w, err)
return
@@ -270,7 +271,8 @@ func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) {
return
}
parts := strings.Split(u.Path, "/")
- var podNamespace, podID, uuid, container string
+ var podNamespace, podID, container string
+ var uid types.UID
if len(parts) == 5 {
podNamespace = parts[2]
podID = parts[3]
@@ -278,7 +280,7 @@ func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) {
} else if len(parts) == 6 {
podNamespace = parts[2]
podID = parts[3]
- uuid = parts[4]
+ uid = types.UID(parts[4])
container = parts[5]
} else {
http.Error(w, "Unexpected path for command running", http.StatusBadRequest)
@@ -290,7 +292,7 @@ func (s *Server) handleRun(w http.ResponseWriter, req *http.Request) {
return
}
command := strings.Split(u.Query().Get("cmd"), " ")
- data, err := s.host.RunInContainer(GetPodFullName(pod), uuid, container, command)
+ data, err := s.host.RunInContainer(GetPodFullName(pod), uid, container, command)
if err != nil {
s.error(w, err)
return
@@ -314,7 +316,7 @@ func (s *Server) ServeHTTP(w http.ResponseWriter, req *http.Request) {
// serveStats implements stats logic.
func (s *Server) serveStats(w http.ResponseWriter, req *http.Request) {
- // /stats/<podfullname>/<containerName> or /stats/<namespace>/<podfullname>/<uuid>/<containerName>
+ // /stats/<podfullname>/<containerName> or /stats/<namespace>/<podfullname>/<uid>/<containerName>
components := strings.Split(strings.TrimPrefix(path.Clean(req.URL.Path), "/"), "/")
var stats *info.ContainerInfo
var err error
@@ -333,7 +335,7 @@ func (s *Server) serveStats(w http.ResponseWriter, req *http.Request) {
// TODO(monnand) Implement this
errors.New("pod level status currently unimplemented")
case 3:
- // Backward compatibility without uuid information, does not support namespace
+ // Backward compatibility without uid information, does not support namespace
pod, ok := s.host.GetPodByName(api.NamespaceDefault, components[1])
if !ok {
http.Error(w, "Pod does not exist", http.StatusNotFound)
@@ -346,7 +348,7 @@ func (s *Server) serveStats(w http.ResponseWriter, req *http.Request) {
http.Error(w, "Pod does not exist", http.StatusNotFound)
return
}
- stats, err = s.host.GetContainerInfo(GetPodFullName(pod), components[3], components[4], &query)
+ stats, err = s.host.GetContainerInfo(GetPodFullName(pod), types.UID(components[3]), components[4], &query)
default:
http.Error(w, "unknown resource.", http.StatusNotFound)
return
@@ -371,16 +373,16 @@ func (s *Server) serveStats(w http.ResponseWriter, req *http.Request) {
return
}
-func exportPodInfo(info api.PodInfo, versioned bool) ([]byte, error) {
+func exportPodStatus(status api.PodStatus, versioned bool) ([]byte, error) {
if versioned {
// TODO: support arbitrary versions here
codec, err := findCodec("v1beta1")
if err != nil {
return nil, err
}
- return codec.Encode(&api.PodContainerInfo{ContainerInfo: info})
+ return codec.Encode(&api.PodStatusResult{Status: status})
}
- return json.Marshal(info)
+ return json.Marshal(status)
}
func findCodec(version string) (runtime.Codec, error) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server_test.go
index 2f04fc5ab69a..69dd77eeb129 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/server_test.go
@@ -29,18 +29,19 @@ import (
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/google/cadvisor/info"
)
type fakeKubelet struct {
podByNameFunc func(namespace, name string) (*api.BoundPod, bool)
- infoFunc func(name string) (api.PodInfo, error)
- containerInfoFunc func(podFullName, uid, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error)
+ statusFunc func(name string) (api.PodStatus, error)
+ containerInfoFunc func(podFullName string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error)
rootInfoFunc func(query *info.ContainerInfoRequest) (*info.ContainerInfo, error)
machineInfoFunc func() (*info.MachineInfo, error)
boundPodsFunc func() ([]api.BoundPod, error)
logFunc func(w http.ResponseWriter, req *http.Request)
- runFunc func(podFullName, uuid, containerName string, cmd []string) ([]byte, error)
+ runFunc func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error)
containerLogsFunc func(podFullName, containerName, tail string, follow bool, stdout, stderr io.Writer) error
}
@@ -48,12 +49,12 @@ func (fk *fakeKubelet) GetPodByName(namespace, name string) (*api.BoundPod, bool
return fk.podByNameFunc(namespace, name)
}
-func (fk *fakeKubelet) GetPodInfo(name, uuid string) (api.PodInfo, error) {
- return fk.infoFunc(name)
+func (fk *fakeKubelet) GetPodStatus(name string, uid types.UID) (api.PodStatus, error) {
+ return fk.statusFunc(name)
}
-func (fk *fakeKubelet) GetContainerInfo(podFullName, uuid, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
- return fk.containerInfoFunc(podFullName, uuid, containerName, req)
+func (fk *fakeKubelet) GetContainerInfo(podFullName string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
+ return fk.containerInfoFunc(podFullName, uid, containerName, req)
}
func (fk *fakeKubelet) GetRootInfo(req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
@@ -76,8 +77,8 @@ func (fk *fakeKubelet) GetKubeletContainerLogs(podFullName, containerName, tail
return fk.containerLogsFunc(podFullName, containerName, tail, follow, stdout, stderr)
}
-func (fk *fakeKubelet) RunInContainer(podFullName, uuid, containerName string, cmd []string) ([]byte, error) {
- return fk.runFunc(podFullName, uuid, containerName, cmd)
+func (fk *fakeKubelet) RunInContainer(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
+ return fk.runFunc(podFullName, uid, containerName, cmd)
}
type serverTestFramework struct {
@@ -127,16 +128,18 @@ func readResp(resp *http.Response) (string, error) {
return string(body), err
}
-func TestPodInfo(t *testing.T) {
+func TestPodStatus(t *testing.T) {
fw := newServerTest()
- expected := api.PodInfo{
- "goodpod": api.ContainerStatus{},
+ expected := api.PodStatus{
+ Info: map[string]api.ContainerStatus{
+ "goodpod": {},
+ },
}
- fw.fakeKubelet.infoFunc = func(name string) (api.PodInfo, error) {
+ fw.fakeKubelet.statusFunc = func(name string) (api.PodStatus, error) {
if name == "goodpod.default.etcd" {
return expected, nil
}
- return nil, fmt.Errorf("bad pod %s", name)
+ return api.PodStatus{}, fmt.Errorf("bad pod %s", name)
}
resp, err := http.Get(fw.testHTTPServer.URL + "/podInfo?podID=goodpod&podNamespace=default")
if err != nil {
@@ -161,7 +164,7 @@ func TestContainerInfo(t *testing.T) {
podID := "somepod"
expectedPodID := "somepod" + ".default.etcd"
expectedContainerName := "goodcontainer"
- fw.fakeKubelet.containerInfoFunc = func(podID, uid, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
+ fw.fakeKubelet.containerInfoFunc = func(podID string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
if podID != expectedPodID || containerName != expectedContainerName {
return nil, fmt.Errorf("bad podID or containerName: podID=%v; containerName=%v", podID, containerName)
}
@@ -191,8 +194,8 @@ func TestContainerInfoWithUidNamespace(t *testing.T) {
expectedPodID := "somepod" + "." + expectedNamespace + ".etcd"
expectedContainerName := "goodcontainer"
expectedUid := "9b01b80f-8fb4-11e4-95ab-4200af06647"
- fw.fakeKubelet.containerInfoFunc = func(podID, uid, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
- if podID != expectedPodID || uid != expectedUid || containerName != expectedContainerName {
+ fw.fakeKubelet.containerInfoFunc = func(podID string, uid types.UID, containerName string, req *info.ContainerInfoRequest) (*info.ContainerInfo, error) {
+ if podID != expectedPodID || string(uid) != expectedUid || containerName != expectedContainerName {
return nil, fmt.Errorf("bad podID or uid or containerName: podID=%v; uid=%v; containerName=%v", podID, uid, containerName)
}
return expectedInfo, nil
@@ -296,7 +299,7 @@ func TestServeRunInContainer(t *testing.T) {
expectedPodName := podName + "." + podNamespace + ".etcd"
expectedContainerName := "baz"
expectedCommand := "ls -a"
- fw.fakeKubelet.runFunc = func(podFullName, uuid, containerName string, cmd []string) ([]byte, error) {
+ fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
if podFullName != expectedPodName {
t.Errorf("expected %s, got %s", expectedPodName, podFullName)
}
@@ -328,21 +331,21 @@ func TestServeRunInContainer(t *testing.T) {
}
}
-func TestServeRunInContainerWithUUID(t *testing.T) {
+func TestServeRunInContainerWithUID(t *testing.T) {
fw := newServerTest()
output := "foo bar"
podNamespace := "other"
podName := "foo"
expectedPodName := podName + "." + podNamespace + ".etcd"
- expectedUuid := "7e00838d_-_3523_-_11e4_-_8421_-_42010af0a720"
+ expectedUID := "7e00838d_-_3523_-_11e4_-_8421_-_42010af0a720"
expectedContainerName := "baz"
expectedCommand := "ls -a"
- fw.fakeKubelet.runFunc = func(podFullName, uuid, containerName string, cmd []string) ([]byte, error) {
+ fw.fakeKubelet.runFunc = func(podFullName string, uid types.UID, containerName string, cmd []string) ([]byte, error) {
if podFullName != expectedPodName {
t.Errorf("expected %s, got %s", expectedPodName, podFullName)
}
- if uuid != expectedUuid {
- t.Errorf("expected %s, got %s", expectedUuid, uuid)
+ if string(uid) != expectedUID {
+ t.Errorf("expected %s, got %s", expectedUID, uid)
}
if containerName != expectedContainerName {
t.Errorf("expected %s, got %s", expectedContainerName, containerName)
@@ -354,7 +357,7 @@ func TestServeRunInContainerWithUUID(t *testing.T) {
return []byte(output), nil
}
- resp, err := http.Get(fw.testHTTPServer.URL + "/run/" + podNamespace + "/" + podName + "/" + expectedUuid + "/" + expectedContainerName + "?cmd=ls%20-a")
+ resp, err := http.Get(fw.testHTTPServer.URL + "/run/" + podNamespace + "/" + podName + "/" + expectedUID + "/" + expectedContainerName + "?cmd=ls%20-a")
if err != nil {
t.Fatalf("Got error GETing: %v", err)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/util.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/util.go
index 767108c9bf5e..079acc046586 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/util.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/util.go
@@ -25,6 +25,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/health"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
@@ -55,7 +56,7 @@ func InitHealthChecking(k *Kubelet) {
}
// TODO: move this into a pkg/tools/etcd_tools
-func EtcdClientOrDie(etcdServerList util.StringList, etcdConfigFile string) *etcd.Client {
+func EtcdClientOrDie(etcdServerList util.StringList, etcdConfigFile string) tools.EtcdClient {
if len(etcdServerList) > 0 {
return etcd.NewClient(etcdServerList)
} else if etcdConfigFile != "" {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/doc.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/doc.go
similarity index 100%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/doc.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/doc.go
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir.go
new file mode 100644
index 000000000000..2df9540f054c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir.go
@@ -0,0 +1,125 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package empty_dir
+
+import (
+ "fmt"
+ "os"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+)
+
+// This is the primary entrypoint for volume plugins.
+func ProbeVolumePlugins() []volume.Plugin {
+ return []volume.Plugin{&emptyDirPlugin{nil, false}, &emptyDirPlugin{nil, true}}
+}
+
+type emptyDirPlugin struct {
+ host volume.Host
+ legacyMode bool // if set, plugin answers to the legacy name
+}
+
+var _ volume.Plugin = &emptyDirPlugin{}
+
+const (
+ emptyDirPluginName = "kubernetes.io/empty-dir"
+ emptyDirPluginLegacyName = "empty"
+)
+
+func (plugin *emptyDirPlugin) Init(host volume.Host) {
+ plugin.host = host
+}
+
+func (plugin *emptyDirPlugin) Name() string {
+ if plugin.legacyMode {
+ return emptyDirPluginLegacyName
+ }
+ return emptyDirPluginName
+}
+
+func (plugin *emptyDirPlugin) CanSupport(spec *api.Volume) bool {
+ if plugin.legacyMode {
+ // Legacy mode instances can be cleaned up but not created anew.
+ return false
+ }
+
+ if util.AllPtrFieldsNil(&spec.Source) {
+ return true
+ }
+ if spec.Source.EmptyDir != nil {
+ return true
+ }
+ return false
+}
+
+func (plugin *emptyDirPlugin) NewBuilder(spec *api.Volume, podUID types.UID) (volume.Builder, error) {
+ if plugin.legacyMode {
+ // Legacy mode instances can be cleaned up but not created anew.
+ return nil, fmt.Errorf("legacy mode: can not create new instances")
+ }
+ return &emptyDir{podUID, spec.Name, plugin, false}, nil
+}
+
+func (plugin *emptyDirPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+ legacy := false
+ if plugin.legacyMode {
+ legacy = true
+ }
+ return &emptyDir{podUID, volName, plugin, legacy}, nil
+}
+
+// EmptyDir volumes are temporary directories exposed to the pod.
+// These do not persist beyond the lifetime of a pod.
+type emptyDir struct {
+ podUID types.UID
+ volName string
+ plugin *emptyDirPlugin
+ legacyMode bool
+}
+
+// SetUp creates new directory.
+func (ed *emptyDir) SetUp() error {
+ if ed.legacyMode {
+ return fmt.Errorf("legacy mode: can not create new instances")
+ }
+ path := ed.GetPath()
+ return os.MkdirAll(path, 0750)
+}
+
+func (ed *emptyDir) GetPath() string {
+ name := emptyDirPluginName
+ if ed.legacyMode {
+ name = emptyDirPluginLegacyName
+ }
+ return ed.plugin.host.GetPodVolumeDir(ed.podUID, volume.EscapePluginName(name), ed.volName)
+}
+
+// TearDown simply deletes everything in the directory.
+func (ed *emptyDir) TearDown() error {
+ tmpDir, err := volume.RenameDirectory(ed.GetPath(), ed.volName+".deleting~")
+ if err != nil {
+ return err
+ }
+ err = os.RemoveAll(tmpDir)
+ if err != nil {
+ return err
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir_test.go
new file mode 100644
index 000000000000..addeb013a35a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir/empty_dir_test.go
@@ -0,0 +1,152 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package empty_dir
+
+import (
+ "os"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+)
+
+// TestCanSupport verifies plugin lookup by canonical name and its claimed
+// volume-source support.
+func TestCanSupport(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/empty-dir")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if plug.Name() != "kubernetes.io/empty-dir" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if !plug.CanSupport(&api.Volume{Source: api.VolumeSource{EmptyDir: &api.EmptyDir{}}}) {
+		t.Errorf("Expected true")
+	}
+	// NOTE(review): this expects an empty VolumeSource to also be supported,
+	// presumably because EmptyDir is the default volume type — confirm against
+	// the plugin's CanSupport implementation (its head is outside this hunk).
+	if !plug.CanSupport(&api.Volume{Source: api.VolumeSource{}}) {
+		t.Errorf("Expected true")
+	}
+}
+
+// TestPlugin exercises the full Builder/Cleaner lifecycle of an emptyDir
+// volume: build, SetUp, verify the path, then TearDown and verify removal.
+func TestPlugin(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/empty-dir")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	spec := &api.Volume{
+		Name:   "vol1",
+		Source: api.VolumeSource{EmptyDir: &api.EmptyDir{}},
+	}
+	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	if builder == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Builder")
+	}
+
+	path := builder.GetPath()
+	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~empty-dir/vol1" {
+		t.Errorf("Got unexpected path: %s", path)
+	}
+
+	if err := builder.SetUp(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			t.Errorf("SetUp() failed, volume path not created: %s", path)
+		} else {
+			t.Errorf("SetUp() failed: %v", err)
+		}
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	if cleaner == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Cleaner")
+	}
+
+	if err := cleaner.TearDown(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(path); err == nil {
+		t.Errorf("TearDown() failed, volume path still exists: %s", path)
+	} else if !os.IsNotExist(err) {
+		// Fixed: this branch previously reported "SetUp() failed" for a
+		// TearDown error.
+		t.Errorf("TearDown() failed: %v", err)
+	}
+}
+
+// TestPluginBackCompat verifies that a volume spec with no explicit Source
+// still builds and resolves to the canonical emptyDir path.
+func TestPluginBackCompat(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/empty-dir")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	spec := &api.Volume{
+		Name: "vol1",
+	}
+	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	if builder == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Builder")
+	}
+
+	path := builder.GetPath()
+	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~empty-dir/vol1" {
+		t.Errorf("Got unexpected path: %s", path)
+	}
+}
+
+// TestPluginLegacy verifies that the legacy-named plugin refuses to build new
+// volumes but still hands out Cleaners for old ones.
+func TestPluginLegacy(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("empty")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if plug.Name() != "empty" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if plug.CanSupport(&api.Volume{Source: api.VolumeSource{EmptyDir: &api.EmptyDir{}}}) {
+		t.Errorf("Expected false")
+	}
+
+	if _, err := plug.NewBuilder(&api.Volume{Source: api.VolumeSource{EmptyDir: &api.EmptyDir{}}}, types.UID("poduid")); err == nil {
+		// Fixed typo: "failiure" -> "failure".
+		t.Errorf("Expected failure")
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	if cleaner == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Cleaner")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd.go
new file mode 100644
index 000000000000..d93b9ff72918
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd.go
@@ -0,0 +1,237 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gce_pd
+
+import (
+ "fmt"
+ "os"
+ "path"
+ "strconv"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
+ "github.com/golang/glog"
+)
+
+// This is the primary entrypoint for volume plugins. It returns the
+// canonically-named plugin plus a legacy-named instance that exists only to
+// clean up volumes created under the old name.
+func ProbeVolumePlugins() []volume.Plugin {
+	return []volume.Plugin{&gcePersistentDiskPlugin{nil, false}, &gcePersistentDiskPlugin{nil, true}}
+}
+
+// gcePersistentDiskPlugin is the volume.Plugin implementation for GCE PDs.
+type gcePersistentDiskPlugin struct {
+	// host provides kubelet-side services such as path lookup; set by Init.
+	host       volume.Host
+	legacyMode bool // if set, plugin answers to the legacy name
+}
+
+// Compile-time assertion that the plugin satisfies volume.Plugin.
+var _ volume.Plugin = &gcePersistentDiskPlugin{}
+
+const (
+	gcePersistentDiskPluginName       = "kubernetes.io/gce-pd"
+	gcePersistentDiskPluginLegacyName = "gce-pd"
+)
+
+// Init stores the kubelet host interface for later path lookups.
+func (plugin *gcePersistentDiskPlugin) Init(host volume.Host) {
+	plugin.host = host
+}
+
+// Name returns the plugin name this instance answers to
+// (legacy or canonical).
+func (plugin *gcePersistentDiskPlugin) Name() string {
+	if !plugin.legacyMode {
+		return gcePersistentDiskPluginName
+	}
+	return gcePersistentDiskPluginLegacyName
+}
+
+// CanSupport reports whether this plugin handles the given volume spec.
+func (plugin *gcePersistentDiskPlugin) CanSupport(spec *api.Volume) bool {
+	if plugin.legacyMode {
+		// Legacy mode instances can be cleaned up but not created anew.
+		return false
+	}
+	return spec.Source.GCEPersistentDisk != nil
+}
+
+// NewBuilder returns a Builder backed by the real GCE disk utility and
+// system mounter.
+func (plugin *gcePersistentDiskPlugin) NewBuilder(spec *api.Volume, podUID types.UID) (volume.Builder, error) {
+	// Inject real implementations here, test through the internal function.
+	return plugin.newBuilderInternal(spec, podUID, &GCEDiskUtil{}, mount.New())
+}
+
+// newBuilderInternal constructs the builder with injectable manager/mounter
+// so tests can substitute fakes.
+func (plugin *gcePersistentDiskPlugin) newBuilderInternal(spec *api.Volume, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Builder, error) {
+	if plugin.legacyMode {
+		// Legacy mode instances can be cleaned up but not created anew.
+		return nil, fmt.Errorf("legacy mode: can not create new instances")
+	}
+
+	src := spec.Source.GCEPersistentDisk
+	// An unset Partition (0) means "whole disk": keep the empty string.
+	partition := ""
+	if src.Partition != 0 {
+		partition = strconv.Itoa(src.Partition)
+	}
+
+	return &gcePersistentDisk{
+		podUID:     podUID,
+		volName:    spec.Name,
+		pdName:     src.PDName,
+		fsType:     src.FSType,
+		partition:  partition,
+		readOnly:   src.ReadOnly,
+		manager:    manager,
+		mounter:    mounter,
+		plugin:     plugin,
+		legacyMode: false,
+	}, nil
+}
+
+// NewCleaner returns a Cleaner backed by the real GCE disk utility and
+// system mounter.
+func (plugin *gcePersistentDiskPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+	// Inject real implementations here, test through the internal function.
+	return plugin.newCleanerInternal(volName, podUID, &GCEDiskUtil{}, mount.New())
+}
+
+// newCleanerInternal constructs the cleaner with injectable manager/mounter
+// so tests can substitute fakes. Cleaners are produced even in legacy mode
+// so that volumes created under the legacy name can still be torn down.
+func (plugin *gcePersistentDiskPlugin) newCleanerInternal(volName string, podUID types.UID, manager pdManager, mounter mount.Interface) (volume.Cleaner, error) {
+	return &gcePersistentDisk{
+		podUID:     podUID,
+		volName:    volName,
+		manager:    manager,
+		mounter:    mounter,
+		plugin:     plugin,
+		legacyMode: plugin.legacyMode,
+	}, nil
+}
+
+// Abstract interface to PD operations. Injectable so tests can avoid real
+// cloud-provider and mount calls.
+type pdManager interface {
+	// Attaches the disk to the kubelet's host machine.
+	AttachDisk(pd *gcePersistentDisk) error
+	// Detaches the disk from the kubelet's host machine.
+	DetachDisk(pd *gcePersistentDisk, devicePath string) error
+}
+
+// gcePersistentDisk volumes are disk resources provided by Google Compute Engine
+// that are attached to the kubelet's host machine and exposed to the pod.
+type gcePersistentDisk struct {
+	volName string
+	podUID  types.UID
+	// Unique identifier of the PD, used to find the disk resource in the provider.
+	pdName string
+	// Filesystem type, optional.
+	fsType string
+	// Specifies the partition to mount (empty string means the whole disk).
+	partition string
+	// Specifies whether the disk will be attached as read-only.
+	readOnly bool
+	// Utility interface that provides API calls to the provider to attach/detach disks.
+	manager pdManager
+	// Mounter interface that provides system calls to mount the disks.
+	mounter mount.Interface
+	// plugin points back to the owning plugin; used to reach the host.
+	plugin *gcePersistentDiskPlugin
+	// legacyMode selects the legacy plugin name when computing paths.
+	legacyMode bool
+}
+
+// SetUp attaches the disk and bind mounts to the volume path.
+// It is idempotent: if the volume path is already a mount point it returns nil.
+func (pd *gcePersistentDisk) SetUp() error {
+	if pd.legacyMode {
+		return fmt.Errorf("legacy mode: can not create new instances")
+	}
+
+	// TODO: handle failed mounts here.
+	mountpoint, err := isMountPoint(pd.GetPath())
+	glog.V(4).Infof("PersistentDisk set up: %s %v %v", pd.GetPath(), mountpoint, err)
+	// A missing directory is expected on first SetUp; any other error is fatal.
+	if err != nil && !os.IsNotExist(err) {
+		return err
+	}
+	if mountpoint {
+		return nil
+	}
+
+	// Attach the disk to this host and globally mount it (via the manager).
+	if err := pd.manager.AttachDisk(pd); err != nil {
+		return err
+	}
+
+	flags := uintptr(0)
+	if pd.readOnly {
+		flags = mount.FlagReadOnly
+	}
+
+	volPath := pd.GetPath()
+	if err := os.MkdirAll(volPath, 0750); err != nil {
+		return err
+	}
+
+	// Perform a bind mount to the full path to allow duplicate mounts of the same PD.
+	globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName, pd.readOnly)
+	err = pd.mounter.Mount(globalPDPath, pd.GetPath(), "", mount.FlagBind|flags, "")
+	if err != nil {
+		// Roll back the per-pod directory so a failed mount doesn't look live.
+		os.RemoveAll(pd.GetPath())
+		return err
+	}
+
+	return nil
+}
+
+// makeGlobalPDName returns the shared (per-device, not per-pod) mount path
+// for a PD. NOTE(review): the readOnly parameter is currently unused in the
+// path — confirm whether read-only and read-write mounts of the same device
+// are intended to share one global mount point.
+func makeGlobalPDName(host volume.Host, devName string, readOnly bool) string {
+	return path.Join(host.GetPluginDir(gcePersistentDiskPluginName), "mounts", devName)
+}
+
+// GetPath returns the per-pod directory backing this volume, using the legacy
+// plugin name for legacy-mode instances.
+func (pd *gcePersistentDisk) GetPath() string {
+	pluginName := gcePersistentDiskPluginName
+	if pd.legacyMode {
+		pluginName = gcePersistentDiskPluginLegacyName
+	}
+	return pd.plugin.host.GetPodVolumeDir(pd.podUID, volume.EscapePluginName(pluginName), pd.volName)
+}
+
+// Unmounts the bind mount, and detaches the disk only if the PD
+// resource was the last reference to that disk on the kubelet.
+func (pd *gcePersistentDisk) TearDown() error {
+	mountpoint, err := isMountPoint(pd.GetPath())
+	if err != nil {
+		return err
+	}
+	// Not mounted: just remove the (empty) per-pod directory.
+	if !mountpoint {
+		return os.RemoveAll(pd.GetPath())
+	}
+
+	// Find the backing device and how many mount points still reference it.
+	devicePath, refCount, err := getMountRefCount(pd.mounter, pd.GetPath())
+	if err != nil {
+		return err
+	}
+	if err := pd.mounter.Unmount(pd.GetPath(), 0); err != nil {
+		return err
+	}
+	// Account for the bind mount we just removed.
+	refCount--
+	if err := os.RemoveAll(pd.GetPath()); err != nil {
+		return err
+	}
+	// If refCount is 1, then all bind mounts have been removed, and the
+	// remaining reference is the global mount. It is safe to detach.
+	if refCount == 1 {
+		if err := pd.manager.DetachDisk(pd, devicePath); err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd_test.go
new file mode 100644
index 000000000000..01129bd5fab9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_pd_test.go
@@ -0,0 +1,173 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gce_pd
+
+import (
+ "os"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
+)
+
+// TestCanSupport verifies lookup by canonical name and PD-source support.
+func TestCanSupport(t *testing.T) {
+	mgr := volume.PluginMgr{}
+	mgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plugin, err := mgr.FindPluginByName("kubernetes.io/gce-pd")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if name := plugin.Name(); name != "kubernetes.io/gce-pd" {
+		t.Errorf("Wrong name: %s", name)
+	}
+	vol := &api.Volume{Source: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{}}}
+	if !plugin.CanSupport(vol) {
+		t.Errorf("Expected true")
+	}
+}
+
+// fakePDManager simulates attach/detach by creating and removing the global
+// mount directory, with no cloud-provider or mount syscalls.
+type fakePDManager struct{}
+
+// TODO(jonesdl) To fully test this, we could create a loopback device
+// and mount that instead.
+func (fake *fakePDManager) AttachDisk(pd *gcePersistentDisk) error {
+	return os.MkdirAll(makeGlobalPDName(pd.plugin.host, pd.pdName, pd.readOnly), 0750)
+}
+
+func (fake *fakePDManager) DetachDisk(pd *gcePersistentDisk, devicePath string) error {
+	return os.RemoveAll(makeGlobalPDName(pd.plugin.host, pd.pdName, pd.readOnly))
+}
+
+// fakeMounter is a no-op mount.Interface: Mount/Unmount succeed without
+// touching the system, and List reports no mount points.
+type fakeMounter struct{}
+
+func (fake *fakeMounter) Mount(source string, target string, fstype string, flags uintptr, data string) error {
+	return nil
+}
+
+func (fake *fakeMounter) Unmount(target string, flags int) error {
+	return nil
+}
+
+func (fake *fakeMounter) List() ([]mount.MountPoint, error) {
+	return []mount.MountPoint{}, nil
+}
+
+// TestPlugin exercises the PD Builder/Cleaner lifecycle with fake manager and
+// mounter: build, SetUp, verify path, TearDown, verify removal.
+func TestPlugin(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/gce-pd")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	spec := &api.Volume{
+		Name: "vol1",
+		Source: api.VolumeSource{
+			GCEPersistentDisk: &api.GCEPersistentDisk{
+				PDName: "pd",
+				FSType: "ext4",
+			},
+		},
+	}
+	builder, err := plug.(*gcePersistentDiskPlugin).newBuilderInternal(spec, types.UID("poduid"), &fakePDManager{}, &fakeMounter{})
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	if builder == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Builder")
+	}
+
+	path := builder.GetPath()
+	if path != "/tmp/fake/pods/poduid/volumes/kubernetes.io~gce-pd/vol1" {
+		t.Errorf("Got unexpected path: %s", path)
+	}
+
+	if err := builder.SetUp(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	// Fixed: this os.Stat check was duplicated verbatim; one copy removed.
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			t.Errorf("SetUp() failed, volume path not created: %s", path)
+		} else {
+			t.Errorf("SetUp() failed: %v", err)
+		}
+	}
+
+	cleaner, err := plug.(*gcePersistentDiskPlugin).newCleanerInternal("vol1", types.UID("poduid"), &fakePDManager{}, &fakeMounter{})
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	if cleaner == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Cleaner")
+	}
+
+	if err := cleaner.TearDown(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(path); err == nil {
+		t.Errorf("TearDown() failed, volume path still exists: %s", path)
+	} else if !os.IsNotExist(err) {
+		// Fixed: this branch previously reported "SetUp() failed" for a
+		// TearDown error.
+		t.Errorf("TearDown() failed: %v", err)
+	}
+}
+
+// TestPluginLegacy verifies the legacy-named PD plugin refuses new builds but
+// still provides Cleaners.
+func TestPluginLegacy(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"/tmp/fake"})
+
+	plug, err := plugMgr.FindPluginByName("gce-pd")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if plug.Name() != "gce-pd" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if plug.CanSupport(&api.Volume{Source: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{}}}) {
+		t.Errorf("Expected false")
+	}
+
+	if _, err := plug.NewBuilder(&api.Volume{Source: api.VolumeSource{GCEPersistentDisk: &api.GCEPersistentDisk{}}}, types.UID("poduid")); err == nil {
+		// Fixed typo: "failiure" -> "failure".
+		t.Errorf("Expected failure")
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	if cleaner == nil {
+		// Fixed: format string had a %v verb with no argument (go vet).
+		t.Errorf("Got a nil Cleaner")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util.go
similarity index 78%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util.go
index 9682579ad3ca..cec0212faf66 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package volume
+package gce_pd
import (
"errors"
@@ -27,7 +27,8 @@ import (
"time"
"github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider"
- gce_cloud "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/cloudprovider/gce"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
)
const partitionRegex = "[a-z][a-z]*(?P[0-9][0-9]*)?"
@@ -38,21 +39,21 @@ type GCEDiskUtil struct{}
// Attaches a disk specified by a volume.GCEPersistentDisk to the current kubelet.
// Mounts the disk to it's global path.
-func (util *GCEDiskUtil) AttachDisk(GCEPD *GCEPersistentDisk) error {
+func (util *GCEDiskUtil) AttachDisk(pd *gcePersistentDisk) error {
gce, err := cloudprovider.GetCloudProvider("gce", nil)
if err != nil {
return err
}
flags := uintptr(0)
- if GCEPD.ReadOnly {
- flags = MOUNT_MS_RDONLY
+ if pd.readOnly {
+ flags = mount.FlagReadOnly
}
- if err := gce.(*gce_cloud.GCECloud).AttachDisk(GCEPD.PDName, GCEPD.ReadOnly); err != nil {
+ if err := gce.(*gce_cloud.GCECloud).AttachDisk(pd.pdName, pd.readOnly); err != nil {
return err
}
- devicePath := path.Join("/dev/disk/by-id/", "google-"+GCEPD.PDName)
- if GCEPD.Partition != "" {
- devicePath = devicePath + "-part" + GCEPD.Partition
+ devicePath := path.Join("/dev/disk/by-id/", "google-"+pd.pdName)
+ if pd.partition != "" {
+ devicePath = devicePath + "-part" + pd.partition
}
//TODO(jonesdl) There should probably be better method than busy-waiting here.
numTries := 0
@@ -70,7 +71,7 @@ func (util *GCEDiskUtil) AttachDisk(GCEPD *GCEPersistentDisk) error {
}
time.Sleep(time.Second)
}
- globalPDPath := makeGlobalPDName(GCEPD.RootDir, GCEPD.PDName, GCEPD.ReadOnly)
+ globalPDPath := makeGlobalPDName(pd.plugin.host, pd.pdName, pd.readOnly)
// Only mount the PD globally once.
mountpoint, err := isMountPoint(globalPDPath)
if err != nil {
@@ -84,7 +85,7 @@ func (util *GCEDiskUtil) AttachDisk(GCEPD *GCEPersistentDisk) error {
}
}
if !mountpoint {
- err = GCEPD.mounter.Mount(devicePath, globalPDPath, GCEPD.FSType, flags, "")
+ err = pd.mounter.Mount(devicePath, globalPDPath, pd.fsType, flags, "")
if err != nil {
os.RemoveAll(globalPDPath)
return err
@@ -112,7 +113,7 @@ func getDeviceName(devicePath, canonicalDevicePath string) (string, error) {
// Unmounts the device and detaches the disk from the kubelet's host machine.
// Expects a GCE device path symlink. Ex: /dev/disk/by-id/google-mydisk-part1
-func (util *GCEDiskUtil) DetachDisk(GCEPD *GCEPersistentDisk, devicePath string) error {
+func (util *GCEDiskUtil) DetachDisk(pd *gcePersistentDisk, devicePath string) error {
// Follow the symlink to the actual device path.
canonicalDevicePath, err := filepath.EvalSymlinks(devicePath)
if err != nil {
@@ -122,8 +123,8 @@ func (util *GCEDiskUtil) DetachDisk(GCEPD *GCEPersistentDisk, devicePath string)
if err != nil {
return err
}
- globalPDPath := makeGlobalPDName(GCEPD.RootDir, deviceName, GCEPD.ReadOnly)
- if err := GCEPD.mounter.Unmount(globalPDPath, 0); err != nil {
+ globalPDPath := makeGlobalPDName(pd.plugin.host, deviceName, pd.readOnly)
+ if err := pd.mounter.Unmount(globalPDPath, 0); err != nil {
return err
}
if err := os.RemoveAll(globalPDPath); err != nil {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util_test.go
similarity index 98%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util_test.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util_test.go
index 089e87fb561b..1770e76308a7 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/gce_util_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/gce_util_test.go
@@ -14,7 +14,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package volume
+package gce_pd
import (
"testing"
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util.go
new file mode 100644
index 000000000000..38189e04eca9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util.go
@@ -0,0 +1,53 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package gce_pd
+
+import (
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/mount"
+)
+
+// Examines the mount table to find the source device of the PD resource and
+// the number of mount points referencing that device. Returns both the full
+// device path under the /dev tree and the reference count.
+func getMountRefCount(mounter mount.Interface, mountPath string) (string, int, error) {
+	// TODO(jonesdl) This can be split up into two procedures, finding the device path
+	// and finding the number of references. The parsing could also be separated and another
+	// utility could determine if a path is an active mount point.
+	mps, err := mounter.List()
+	if err != nil {
+		return "", -1, err
+	}
+
+	// Locate the device mounted at mountPath.
+	device := ""
+	for _, mp := range mps {
+		if mp.Path == mountPath {
+			device = mp.Device
+			break
+		}
+	}
+
+	// Count every mount point backed by that same device.
+	refCount := 0
+	for _, mp := range mps {
+		if mp.Device == device {
+			refCount++
+		}
+	}
+	return device, refCount, nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_linux.go
similarity index 98%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_linux.go
index 3a0cdc587f23..e7570e99c759 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_linux.go
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package volume
+package gce_pd
import (
"os"
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils_windows.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_unsupported.go
similarity index 95%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils_windows.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_unsupported.go
index 3250b08ee277..1d6e7edb18ec 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mount_utils_windows.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd/mount_util_unsupported.go
@@ -1,4 +1,4 @@
-// +build windows
+// +build !linux
/*
Copyright 2014 Google Inc. All rights reserved.
@@ -16,7 +16,7 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package volume
+package gce_pd
import (
"fmt"
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo.go
new file mode 100644
index 000000000000..1409444ba224
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo.go
@@ -0,0 +1,214 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package git_repo
+
+import (
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
+ "github.com/golang/glog"
+)
+
+// This is the primary entrypoint for volume plugins. It returns the
+// canonically-named git-repo plugin plus a legacy-named instance that exists
+// only to clean up volumes created under the old name.
+func ProbeVolumePlugins() []volume.Plugin {
+	return []volume.Plugin{&gitRepoPlugin{nil, false}, &gitRepoPlugin{nil, true}}
+}
+
+// gitRepoPlugin is the volume.Plugin implementation for git-repo volumes.
+type gitRepoPlugin struct {
+	// host provides kubelet-side services such as path lookup; set by Init.
+	host       volume.Host
+	legacyMode bool // if set, plugin answers to the legacy name
+}
+
+// Compile-time assertion that the plugin satisfies volume.Plugin.
+var _ volume.Plugin = &gitRepoPlugin{}
+
+const (
+	gitRepoPluginName       = "kubernetes.io/git-repo"
+	gitRepoPluginLegacyName = "git"
+)
+
+// Init stores the kubelet host interface for later path lookups.
+func (plugin *gitRepoPlugin) Init(host volume.Host) {
+	plugin.host = host
+}
+
+// Name returns the plugin name this instance answers to
+// (legacy or canonical).
+func (plugin *gitRepoPlugin) Name() string {
+	if !plugin.legacyMode {
+		return gitRepoPluginName
+	}
+	return gitRepoPluginLegacyName
+}
+
+// CanSupport reports whether this plugin handles the given volume spec.
+func (plugin *gitRepoPlugin) CanSupport(spec *api.Volume) bool {
+	if plugin.legacyMode {
+		// Legacy mode instances can be cleaned up but not created anew.
+		return false
+	}
+	return spec.Source.GitRepo != nil
+}
+
+// NewBuilder returns a Builder that clones the configured repository.
+func (plugin *gitRepoPlugin) NewBuilder(spec *api.Volume, podUID types.UID) (volume.Builder, error) {
+	if plugin.legacyMode {
+		// Legacy mode instances can be cleaned up but not created anew.
+		return nil, fmt.Errorf("legacy mode: can not create new instances")
+	}
+	src := spec.Source.GitRepo
+	return &gitRepo{
+		podUID:     podUID,
+		volName:    spec.Name,
+		source:     src.Repository,
+		revision:   src.Revision,
+		exec:       exec.New(),
+		plugin:     plugin,
+		legacyMode: false,
+	}, nil
+}
+
+// NewCleaner returns a Cleaner for an existing volume, even in legacy mode,
+// so that volumes created under the old plugin name can still be removed.
+func (plugin *gitRepoPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+	return &gitRepo{
+		podUID:     podUID,
+		volName:    volName,
+		plugin:     plugin,
+		legacyMode: plugin.legacyMode,
+	}, nil
+}
+
+// gitRepo volumes are directories which are pre-filled from a git repository.
+// These do not persist beyond the lifetime of a pod.
+type gitRepo struct {
+	volName string
+	podUID  types.UID
+	// source is the repository URL passed to `git clone`.
+	source string
+	// revision, if non-empty, is checked out after cloning.
+	revision string
+	// exec runs the git commands; injectable for tests.
+	exec exec.Interface
+	// plugin points back to the owning plugin; used to reach the host.
+	plugin *gitRepoPlugin
+	// legacyMode selects the legacy plugin name when computing paths.
+	legacyMode bool
+}
+
+// SetUp creates new directory and clones a git repo, then optionally checks
+// out a specific revision. A "ready" marker makes the operation idempotent.
+func (gr *gitRepo) SetUp() error {
+	// NOTE(review): the ready check runs before the legacy-mode guard, so a
+	// legacy instance whose meta dir happens to be marked ready returns nil
+	// here — confirm this ordering is intended.
+	if gr.isReady() {
+		return nil
+	}
+	if gr.legacyMode {
+		return fmt.Errorf("legacy mode: can not create new instances")
+	}
+
+	volPath := gr.GetPath()
+	if err := os.MkdirAll(volPath, 0750); err != nil {
+		return err
+	}
+
+	if output, err := gr.execCommand("git", []string{"clone", gr.source}, gr.GetPath()); err != nil {
+		return fmt.Errorf("failed to exec 'git clone %s': %s: %v", gr.source, output, err)
+	}
+
+	// The clone must have produced exactly one entry: the repo directory.
+	files, err := ioutil.ReadDir(gr.GetPath())
+	if err != nil {
+		return err
+	}
+	if len(files) != 1 {
+		return fmt.Errorf("unexpected directory contents: %v", files)
+	}
+	if len(gr.revision) == 0 {
+		// Done!
+		gr.setReady()
+		return nil
+	}
+
+	// Pin the working tree to the requested revision.
+	dir := path.Join(gr.GetPath(), files[0].Name())
+	if output, err := gr.execCommand("git", []string{"checkout", gr.revision}, dir); err != nil {
+		return fmt.Errorf("failed to exec 'git checkout %s': %s: %v", gr.revision, output, err)
+	}
+	if output, err := gr.execCommand("git", []string{"reset", "--hard"}, dir); err != nil {
+		return fmt.Errorf("failed to exec 'git reset --hard': %s: %v", output, err)
+	}
+
+	gr.setReady()
+	return nil
+}
+
+// getMetaDir returns the per-volume metadata directory that holds the "ready"
+// marker. NOTE(review): it always uses the canonical plugin name, even for
+// legacyMode instances — confirm legacy volumes keep their metadata here too.
+func (gr *gitRepo) getMetaDir() string {
+	return path.Join(gr.plugin.host.GetPodPluginDir(gr.podUID, volume.EscapePluginName(gitRepoPluginName)), gr.volName)
+}
+
+// isReady reports whether a completed SetUp left a regular "ready" marker file.
+func (gr *gitRepo) isReady() bool {
+	readyFile := path.Join(gr.getMetaDir(), "ready")
+	info, err := os.Stat(readyFile)
+	if err != nil {
+		return false
+	}
+	if info.Mode().IsRegular() {
+		return true
+	}
+	glog.Errorf("GitRepo ready-file is not a file: %s", readyFile)
+	return false
+}
+
+// setReady drops the "ready" marker file; failures are logged, not returned,
+// so a missing marker just means SetUp will re-run next time.
+func (gr *gitRepo) setReady() {
+	metaDir := gr.getMetaDir()
+	if err := os.MkdirAll(metaDir, 0750); err != nil && !os.IsExist(err) {
+		glog.Errorf("Can't mkdir %s: %v", metaDir, err)
+		return
+	}
+	readyFile := path.Join(metaDir, "ready")
+	file, err := os.Create(readyFile)
+	if err != nil {
+		glog.Errorf("Can't touch %s: %v", readyFile, err)
+		return
+	}
+	file.Close()
+}
+
+// execCommand runs command with args in dir via the injectable exec interface
+// and returns its combined stdout+stderr.
+func (gr *gitRepo) execCommand(command string, args []string, dir string) ([]byte, error) {
+	cmd := gr.exec.Command(command, args...)
+	cmd.SetDir(dir)
+	return cmd.CombinedOutput()
+}
+
+// GetPath returns the host directory backing this volume, using the legacy
+// plugin name for legacy-mode instances.
+func (gr *gitRepo) GetPath() string {
+	pluginName := gitRepoPluginName
+	if gr.legacyMode {
+		pluginName = gitRepoPluginLegacyName
+	}
+	return gr.plugin.host.GetPodVolumeDir(gr.podUID, volume.EscapePluginName(pluginName), gr.volName)
+}
+
+// TearDown renames the directory out of the way before deleting it, so a
+// partially-deleted volume never looks like a live one.
+func (gr *gitRepo) TearDown() error {
+	tmpDir, err := volume.RenameDirectory(gr.GetPath(), gr.volName+".deleting~")
+	if err != nil {
+		return err
+	}
+	return os.RemoveAll(tmpDir)
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo_test.go
new file mode 100644
index 000000000000..f4443bcf9cef
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo/git_repo_test.go
@@ -0,0 +1,186 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package git_repo
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
+)
+
+// newTestHost returns a volume.Host rooted in a fresh temp directory, so
+// each test works against an isolated on-disk layout.
+func newTestHost(t *testing.T) volume.Host {
+	tempDir, err := ioutil.TempDir("/tmp", "git_repo_test.")
+	if err != nil {
+		t.Fatalf("can't make a temp rootdir: %v", err)
+	}
+	return &volume.FakeHost{tempDir}
+}
+
+// TestCanSupport verifies the git-repo plugin registers under its
+// canonical name and claims volumes with a GitRepo source.
+func TestCanSupport(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if plug.Name() != "kubernetes.io/git-repo" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if !plug.CanSupport(&api.Volume{Source: api.VolumeSource{GitRepo: &api.GitRepo{}}}) {
+		t.Errorf("Expected true")
+	}
+}
+
+// testSetUp drives builder.SetUp() against a scripted fake exec and
+// verifies the exact git command sequence (clone, checkout, reset) and
+// the working directory each command ran in.
+func testSetUp(plug volume.Plugin, builder volume.Builder, t *testing.T) {
+	var fcmd exec.FakeCmd
+	fcmd = exec.FakeCmd{
+		CombinedOutputScript: []exec.FakeCombinedOutputAction{
+			// git clone
+			// The clone script simulates git by creating the checkout
+			// directory; later commands are expected to run inside it.
+			func() ([]byte, error) {
+				os.MkdirAll(path.Join(fcmd.Dirs[0], "kubernetes"), 0750)
+				return []byte{}, nil
+			},
+			// git checkout
+			func() ([]byte, error) { return []byte{}, nil },
+			// git reset
+			func() ([]byte, error) { return []byte{}, nil },
+		},
+	}
+	fake := exec.FakeExec{
+		CommandScript: []exec.FakeCommandAction{
+			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
+			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
+			func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
+		},
+	}
+	// Inject the fake exec into the concrete builder.
+	g := builder.(*gitRepo)
+	g.exec = &fake
+
+	err := g.SetUp()
+	if err != nil {
+		t.Errorf("unexpected error: %v", err)
+	}
+	expectedCmds := [][]string{
+		{"git", "clone", g.source},
+		{"git", "checkout", g.revision},
+		{"git", "reset", "--hard"},
+	}
+	if fake.CommandCalls != len(expectedCmds) {
+		t.Errorf("unexpected command calls: expected 3, saw: %d", fake.CommandCalls)
+	}
+	if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
+		t.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds)
+	}
+	// clone runs in the volume dir; checkout and reset run in the clone.
+	expectedDirs := []string{g.GetPath(), g.GetPath() + "/kubernetes", g.GetPath() + "/kubernetes"}
+	if len(fcmd.Dirs) != 3 || !reflect.DeepEqual(expectedDirs, fcmd.Dirs) {
+		t.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedDirs)
+	}
+}
+
+// TestPlugin exercises the full builder/cleaner lifecycle of the
+// git-repo plugin: lookup, path layout, SetUp (via testSetUp), and
+// TearDown removing the volume directory.
+func TestPlugin(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))
+
+	// Fatalf: plug is dereferenced below, so continuing on error would panic.
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/git-repo")
+	if err != nil {
+		t.Fatalf("Can't find the plugin by name: %v", err)
+	}
+	spec := &api.Volume{
+		Name: "vol1",
+		Source: api.VolumeSource{
+			GitRepo: &api.GitRepo{
+				Repository: "https://github.com/GoogleCloudPlatform/kubernetes.git",
+				Revision:   "2a30ce65c5ab586b98916d83385c5983edd353a1",
+			},
+		},
+	}
+	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	// Fatalf: builder.GetPath() below would panic on a nil builder.
+	// (The original message used a %v verb with no argument.)
+	if builder == nil {
+		t.Fatalf("Got a nil Builder")
+	}
+
+	path := builder.GetPath()
+	if !strings.HasSuffix(path, "pods/poduid/volumes/kubernetes.io~git-repo/vol1") {
+		t.Errorf("Got unexpected path: %s", path)
+	}
+
+	testSetUp(plug, builder, t)
+	if _, err := os.Stat(path); err != nil {
+		if os.IsNotExist(err) {
+			t.Errorf("SetUp() failed, volume path not created: %s", path)
+		} else {
+			t.Errorf("SetUp() failed: %v", err)
+		}
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	// Fatalf: cleaner.TearDown() below would panic on a nil cleaner.
+	if cleaner == nil {
+		t.Fatalf("Got a nil Cleaner")
+	}
+
+	if err := cleaner.TearDown(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+	if _, err := os.Stat(path); err == nil {
+		t.Errorf("TearDown() failed, volume path still exists: %s", path)
+	} else if !os.IsNotExist(err) {
+		// This checks the TearDown result, not SetUp (message was wrong).
+		t.Errorf("TearDown() failed: %v", err)
+	}
+}
+
+// TestPluginLegacy verifies the legacy "git" plugin name still resolves,
+// that it does not claim new GitRepo volumes (no new volumes may be
+// created in legacy mode), and that cleaners can still be built for
+// pre-existing volumes.
+func TestPluginLegacy(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), newTestHost(t))
+
+	// Fatalf: plug is dereferenced below, so continuing on error would panic.
+	plug, err := plugMgr.FindPluginByName("git")
+	if err != nil {
+		t.Fatalf("Can't find the plugin by name: %v", err)
+	}
+	if plug.Name() != "git" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if plug.CanSupport(&api.Volume{Source: api.VolumeSource{GitRepo: &api.GitRepo{}}}) {
+		t.Errorf("Expected false")
+	}
+
+	if _, err := plug.NewBuilder(&api.Volume{Source: api.VolumeSource{GitRepo: &api.GitRepo{}}}, types.UID("poduid")); err == nil {
+		// Fixed typo: "failiure" -> "failure".
+		t.Errorf("Expected failure")
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	// The original message used a %v verb with no argument.
+	if cleaner == nil {
+		t.Errorf("Got a nil Cleaner")
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path.go
new file mode 100644
index 000000000000..9e34e3e75b35
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path.go
@@ -0,0 +1,81 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package host_path
+
+import (
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+)
+
+// This is the primary entrypoint for volume plugins.
+// It returns the single host-path plugin, uninitialized (Init is called
+// by the PluginMgr).
+func ProbeVolumePlugins() []volume.Plugin {
+	return []volume.Plugin{&hostPathPlugin{nil}}
+}
+
+// hostPathPlugin exposes bare host directories as volumes; it holds the
+// kubelet host interface injected via Init.
+type hostPathPlugin struct {
+	host volume.Host
+}
+
+// Compile-time check that hostPathPlugin implements volume.Plugin.
+var _ volume.Plugin = &hostPathPlugin{}
+
+const (
+	hostPathPluginName = "kubernetes.io/host-path"
+)
+
+// Init stores the host interface; called exactly once before any New* call.
+func (plugin *hostPathPlugin) Init(host volume.Host) {
+	plugin.host = host
+}
+
+// Name returns the plugin's namespaced registration name.
+func (plugin *hostPathPlugin) Name() string {
+	return hostPathPluginName
+}
+
+// CanSupport reports whether this plugin can handle the given volume
+// spec; only volumes with a HostPath source are supported.
+func (plugin *hostPathPlugin) CanSupport(spec *api.Volume) bool {
+	// Idiomatic form of `if cond { return true }; return false`.
+	return spec.Source.HostPath != nil
+}
+
+// NewBuilder returns a Builder pointing at the host path from the spec.
+// No per-pod state is created; the podUID is unused for host paths.
+func (plugin *hostPathPlugin) NewBuilder(spec *api.Volume, podUID types.UID) (volume.Builder, error) {
+	return &hostPath{spec.Source.HostPath.Path}, nil
+}
+
+// NewCleaner returns a no-op Cleaner: host paths are never deleted by
+// the kubelet, so the path is intentionally left empty.
+func (plugin *hostPathPlugin) NewCleaner(volName string, podUID types.UID) (volume.Cleaner, error) {
+	return &hostPath{""}, nil
+}
+
+// HostPath volumes represent a bare host file or directory mount.
+// The directory at the specified path will be directly exposed to the container.
+type hostPath struct {
+	path string
+}
+
+// SetUp does nothing.
+// The host path is assumed to already exist; nothing is created.
+func (hp *hostPath) SetUp() error {
+	return nil
+}
+
+// GetPath returns the raw host path this volume exposes.
+func (hp *hostPath) GetPath() string {
+	return hp.path
+}
+
+// TearDown does nothing.
+// Host paths are never removed by the kubelet.
+func (hp *hostPath) TearDown() error {
+	return nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path_test.go
new file mode 100644
index 000000000000..2914fd46cf52
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path/host_path_test.go
@@ -0,0 +1,86 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package host_path
+
+import (
+ "testing"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+)
+
+// TestCanSupport verifies the host-path plugin registers under its
+// canonical name, claims HostPath volumes, and rejects everything else.
+func TestCanSupport(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"fake"})
+
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
+	if err != nil {
+		t.Errorf("Can't find the plugin by name")
+	}
+	if plug.Name() != "kubernetes.io/host-path" {
+		t.Errorf("Wrong name: %s", plug.Name())
+	}
+	if !plug.CanSupport(&api.Volume{Source: api.VolumeSource{HostPath: &api.HostPath{}}}) {
+		t.Errorf("Expected true")
+	}
+	if plug.CanSupport(&api.Volume{Source: api.VolumeSource{}}) {
+		t.Errorf("Expected false")
+	}
+}
+
+// TestPlugin exercises the host-path builder/cleaner lifecycle: the
+// builder reports the raw host path and SetUp/TearDown are no-ops.
+func TestPlugin(t *testing.T) {
+	plugMgr := volume.PluginMgr{}
+	plugMgr.InitPlugins(ProbeVolumePlugins(), &volume.FakeHost{"fake"})
+
+	// Fatalf: plug is dereferenced below, so continuing on error would panic.
+	plug, err := plugMgr.FindPluginByName("kubernetes.io/host-path")
+	if err != nil {
+		t.Fatalf("Can't find the plugin by name: %v", err)
+	}
+	spec := &api.Volume{
+		Name:   "vol1",
+		Source: api.VolumeSource{HostPath: &api.HostPath{"/vol1"}},
+	}
+	builder, err := plug.NewBuilder(spec, types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Builder: %v", err)
+	}
+	// Fatalf: builder is dereferenced below; the original message also
+	// used a %v verb with no argument.
+	if builder == nil {
+		t.Fatalf("Got a nil Builder")
+	}
+
+	path := builder.GetPath()
+	if path != "/vol1" {
+		t.Errorf("Got unexpected path: %s", path)
+	}
+
+	if err := builder.SetUp(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+
+	cleaner, err := plug.NewCleaner("vol1", types.UID("poduid"))
+	if err != nil {
+		t.Errorf("Failed to make a new Cleaner: %v", err)
+	}
+	// Fatalf: cleaner.TearDown() below would panic on a nil cleaner.
+	if cleaner == nil {
+		t.Fatalf("Got a nil Cleaner")
+	}
+
+	if err := cleaner.TearDown(); err != nil {
+		t.Errorf("Expected success, got: %v", err)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/plugins.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/plugins.go
new file mode 100644
index 000000000000..5180846698a0
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/plugins.go
@@ -0,0 +1,174 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "fmt"
+ "strings"
+ "sync"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors"
+ "github.com/golang/glog"
+)
+
+// Plugin is an interface to volume plugins.
+type Plugin interface {
+	// Init initializes the plugin. This will be called exactly once
+	// before any New* calls are made - implementations of plugins may
+	// depend on this.
+	Init(host Host)
+
+	// Name returns the plugin's name. Plugins should use namespaced names
+	// such as "example.com/volume". The "kubernetes.io" namespace is
+	// reserved for plugins which are bundled with kubernetes.
+	Name() string
+
+	// CanSupport tests whether the Plugin supports a given volume
+	// specification from the API. The spec pointer should be considered
+	// const.
+	CanSupport(spec *api.Volume) bool
+
+	// NewBuilder creates a new volume.Builder from an API specification.
+	// Ownership of the spec pointer is *not* transferred.
+	// - spec: The api.Volume spec
+	// - podUID: The UID of the enclosing pod
+	NewBuilder(spec *api.Volume, podUID types.UID) (Builder, error)
+
+	// NewCleaner creates a new volume.Cleaner from recoverable state.
+	// - name: The volume name, as per the api.Volume spec.
+	// - podUID: The UID of the enclosing pod
+	NewCleaner(name string, podUID types.UID) (Cleaner, error)
+}
+
+// Host is an interface that plugins can use to access the kubelet.
+type Host interface {
+	// GetPluginDir returns the absolute path to a directory under which
+	// a given plugin may store data. This directory might not actually
+	// exist on disk yet. For plugin data that is per-pod, see
+	// GetPodPluginDir().
+	GetPluginDir(pluginName string) string
+
+	// GetPodVolumeDir returns the absolute path to a directory which
+	// represents the named volume under the named plugin for the given
+	// pod. If the specified pod does not exist, the result of this call
+	// might not exist.
+	GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string
+
+	// GetPodPluginDir returns the absolute path to a directory under which
+	// a given plugin may store data for a given pod. If the specified pod
+	// does not exist, the result of this call might not exist. This
+	// directory might not actually exist on disk yet.
+	GetPodPluginDir(podUID types.UID, pluginName string) string
+}
+
+// PluginMgr tracks registered plugins.
+// The mutex guards the plugins map; PluginMgr must not be copied after
+// first use.
+type PluginMgr struct {
+	mutex   sync.Mutex
+	plugins map[string]Plugin
+}
+
+// InitPlugins initializes each plugin. All plugins must have unique names.
+// This must be called exactly once before any New* methods are called on any
+// plugins.
+// Invalid or duplicate plugins are skipped; the collected errors are
+// returned as a single aggregate while valid plugins still register.
+func (pm *PluginMgr) InitPlugins(plugins []Plugin, host Host) error {
+	pm.mutex.Lock()
+	defer pm.mutex.Unlock()
+
+	if pm.plugins == nil {
+		pm.plugins = map[string]Plugin{}
+	}
+
+	allErrs := []error{}
+	for _, plugin := range plugins {
+		name := plugin.Name()
+		// Names must be qualified (e.g. "kubernetes.io/host-path") so they
+		// can be escaped into on-disk directory names.
+		if !util.IsQualifiedName(name) {
+			allErrs = append(allErrs, fmt.Errorf("volume plugin has invalid name: %#v", plugin))
+			continue
+		}
+
+		if _, found := pm.plugins[name]; found {
+			allErrs = append(allErrs, fmt.Errorf("volume plugin %q was registered more than once", name))
+			continue
+		}
+		plugin.Init(host)
+		pm.plugins[name] = plugin
+		glog.V(1).Infof("Loaded volume plugin %q", name)
+	}
+	return errors.NewAggregate(allErrs)
+}
+
+// FindPluginBySpec looks for a plugin that can support a given volume
+// specification. If no plugins can support or more than one plugin can
+// support it, return error.
+func (pm *PluginMgr) FindPluginBySpec(spec *api.Volume) (Plugin, error) {
+	pm.mutex.Lock()
+	defer pm.mutex.Unlock()
+
+	matches := []string{}
+	for k, v := range pm.plugins {
+		if v.CanSupport(spec) {
+			matches = append(matches, k)
+		}
+	}
+	if len(matches) == 0 {
+		return nil, fmt.Errorf("no volume plugin matched")
+	}
+	// Ambiguity is an error: exactly one plugin must own each volume type.
+	if len(matches) > 1 {
+		return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+	}
+	return pm.plugins[matches[0]], nil
+}
+
+// FindPluginByName fetches a plugin by name or by legacy name. If no plugin
+// is found, returns error.
+func (pm *PluginMgr) FindPluginByName(name string) (Plugin, error) {
+	pm.mutex.Lock()
+	defer pm.mutex.Unlock()
+
+	// Once we can get rid of legacy names we can reduce this to a map lookup.
+	matches := []string{}
+	for k, v := range pm.plugins {
+		if v.Name() == name {
+			matches = append(matches, k)
+		}
+	}
+	if len(matches) == 0 {
+		return nil, fmt.Errorf("no volume plugin matched")
+	}
+	if len(matches) > 1 {
+		return nil, fmt.Errorf("multiple volume plugins matched: %s", strings.Join(matches, ","))
+	}
+	return pm.plugins[matches[0]], nil
+}
+
+// EscapePluginName converts a plugin name, which might contain a / into a
+// string that is safe to use on-disk. This assumes that the input has already
+// been validated as a qualified name. We use "~" rather than ":" here in case
+// we ever use a filesystem that doesn't allow ":".
+func EscapePluginName(in string) string {
+	return strings.Replace(in, "/", "~", -1)
+}
+
+// UnescapePluginName converts an escaped plugin name (as per EscapePluginName)
+// back to its normal form. This assumes that the input has already been
+// validated as a qualified name.
+func UnescapePluginName(in string) string {
+	return strings.Replace(in, "~", "/", -1)
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/testing.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/testing.go
new file mode 100644
index 000000000000..68dee5dffbb4
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/testing.go
@@ -0,0 +1,92 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "os"
+ "path"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+)
+
+// FakeHost is useful for testing volume plugins.
+// All paths are rooted at RootDir, typically a per-test temp directory.
+type FakeHost struct {
+	RootDir string
+}
+
+// GetPluginDir returns the plugin data directory under RootDir.
+// NOTE(review): the parameter is a plugin name despite being named
+// podUID (cf. the Host interface) — confirm and rename.
+func (f *FakeHost) GetPluginDir(podUID string) string {
+	return path.Join(f.RootDir, "plugins", podUID)
+}
+
+// GetPodVolumeDir mirrors the kubelet's pods/<uid>/volumes/<plugin>/<vol> layout.
+func (f *FakeHost) GetPodVolumeDir(podUID types.UID, pluginName, volumeName string) string {
+	return path.Join(f.RootDir, "pods", string(podUID), "volumes", pluginName, volumeName)
+}
+
+// GetPodPluginDir mirrors the kubelet's pods/<uid>/plugins/<plugin> layout.
+func (f *FakeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
+	return path.Join(f.RootDir, "pods", string(podUID), "plugins", pluginName)
+}
+
+// FakePlugin is useful for testing. It tries to be a fully compliant
+// plugin, but all it does is make empty directories.
+// Use as:
+//   volume.RegisterPlugin(&FakePlugin{"fake-name"})
+type FakePlugin struct {
+	PluginName string
+	Host       Host
+}
+
+// Compile-time check that FakePlugin implements Plugin.
+var _ Plugin = &FakePlugin{}
+
+// Init stores the host interface for later path construction.
+func (plugin *FakePlugin) Init(host Host) {
+	plugin.Host = host
+}
+
+// Name returns the configured fake plugin name.
+func (plugin *FakePlugin) Name() string {
+	return plugin.PluginName
+}
+
+// CanSupport claims every volume spec.
+func (plugin *FakePlugin) CanSupport(spec *api.Volume) bool {
+	// TODO: maybe pattern-match on spec.Name to decide?
+	return true
+}
+
+// NewBuilder returns a FakeVolume builder for the spec's volume name.
+func (plugin *FakePlugin) NewBuilder(spec *api.Volume, podUID types.UID) (Builder, error) {
+	return &FakeVolume{podUID, spec.Name, plugin}, nil
+}
+
+// NewCleaner returns a FakeVolume cleaner for the given volume name.
+func (plugin *FakePlugin) NewCleaner(volName string, podUID types.UID) (Cleaner, error) {
+	return &FakeVolume{podUID, volName, plugin}, nil
+}
+
+// FakeVolume is a test double implementing both Builder and Cleaner;
+// it only creates and removes an empty per-pod directory.
+type FakeVolume struct {
+	PodUID  types.UID
+	VolName string
+	Plugin  *FakePlugin
+}
+
+// SetUp creates the volume directory (idempotent).
+func (fv *FakeVolume) SetUp() error {
+	return os.MkdirAll(fv.GetPath(), 0750)
+}
+
+// GetPath returns the per-pod directory for this volume.
+func (fv *FakeVolume) GetPath() string {
+	// The original wrapped this in a single-argument path.Join, which is
+	// a no-op; call the host directly.
+	return fv.Plugin.Host.GetPodVolumeDir(fv.PodUID, EscapePluginName(fv.Plugin.PluginName), fv.VolName)
+}
+
+// TearDown removes the volume directory and its contents.
+func (fv *FakeVolume) TearDown() error {
+	return os.RemoveAll(fv.GetPath())
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/volume.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/volume.go
new file mode 100644
index 000000000000..cfa7faaf6ea7
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/volume.go
@@ -0,0 +1,59 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package volume
+
+import (
+ "io/ioutil"
+ "os"
+ "path"
+)
+
+// Interface is a directory used by pods or hosts.
+// All method implementations of methods in the volume interface must be idempotent.
+type Interface interface {
+	// GetPath returns the directory path the volume is mounted to.
+	GetPath() string
+}
+
+// Builder interface provides method to set up/mount the volume.
+type Builder interface {
+	// Uses Interface to provide the path for Docker binds.
+	Interface
+	// SetUp prepares and mounts/unpacks the volume to a directory path.
+	// This may be called more than once, so implementations must be
+	// idempotent.
+	SetUp() error
+}
+
+// Cleaner interface provides method to cleanup/unmount the volumes.
+type Cleaner interface {
+	Interface
+	// TearDown unmounts the volume and removes traces of the SetUp procedure.
+	TearDown() error
+}
+
+// RenameDirectory moves oldPath into a freshly created, uniquely named
+// sibling directory (newName is the name prefix) and returns the new
+// path. If the rename fails, the just-created temporary directory is
+// removed so it is not leaked.
+func RenameDirectory(oldPath, newName string) (string, error) {
+	newPath, err := ioutil.TempDir(path.Dir(oldPath), newName)
+	if err != nil {
+		return "", err
+	}
+	if err := os.Rename(oldPath, newPath); err != nil {
+		// Clean up the empty temp dir; the original left it behind.
+		os.RemoveAll(newPath)
+		return "", err
+	}
+	return newPath, nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volumes.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volumes.go
new file mode 100644
index 000000000000..2f49c007cd81
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volumes.go
@@ -0,0 +1,154 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package kubelet
+
+import (
+ "fmt"
+ "io/ioutil"
+ "path"
+
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
+ "github.com/davecgh/go-spew/spew"
+ "github.com/golang/glog"
+)
+
+// errUnsupportedVolumeType is returned when no plugin claims a volume spec.
+var errUnsupportedVolumeType = fmt.Errorf("unsupported volume type")
+
+// This just exports required functions from kubelet proper, for use by volume
+// plugins.
+type volumeHost struct {
+	kubelet *Kubelet
+}
+
+// GetPluginDir delegates to the kubelet's plugin directory layout.
+func (vh *volumeHost) GetPluginDir(pluginName string) string {
+	return vh.kubelet.getPluginDir(pluginName)
+}
+
+// GetPodVolumeDir delegates to the kubelet's per-pod volume layout.
+func (vh *volumeHost) GetPodVolumeDir(podUID types.UID, pluginName string, volumeName string) string {
+	return vh.kubelet.getPodVolumeDir(podUID, pluginName, volumeName)
+}
+
+// GetPodPluginDir delegates to the kubelet's per-pod plugin data layout.
+func (vh *volumeHost) GetPodPluginDir(podUID types.UID, pluginName string) string {
+	return vh.kubelet.getPodPluginDir(podUID, pluginName)
+}
+
+// newVolumeBuilderFromPlugins resolves the plugin for spec and creates a
+// Builder for it. Returns nil (after logging) on any failure; callers
+// treat nil as an unsupported volume type.
+func (kl *Kubelet) newVolumeBuilderFromPlugins(spec *api.Volume, podUID types.UID) volume.Builder {
+	plugin, err := kl.volumePluginMgr.FindPluginBySpec(spec)
+	if err != nil {
+		glog.Warningf("Can't use volume plugins for %s: %v", spew.Sprintf("%#v", *spec), err)
+		return nil
+	}
+	if plugin == nil {
+		glog.Errorf("No error, but nil volume plugin for %s", spew.Sprintf("%#v", *spec))
+		return nil
+	}
+	builder, err := plugin.NewBuilder(spec, podUID)
+	if err != nil {
+		glog.Warningf("Error instantiating volume plugin for %s: %v", spew.Sprintf("%#v", *spec), err)
+		return nil
+	}
+	glog.V(3).Infof("Used volume plugin %q for %s", plugin.Name(), spew.Sprintf("%#v", *spec))
+	return builder
+}
+
+// mountExternalVolumes sets up every volume declared by the pod and
+// returns a map of volume name -> Builder. The first failure aborts the
+// whole pod's volume setup.
+func (kl *Kubelet) mountExternalVolumes(pod *api.BoundPod) (volumeMap, error) {
+	podVolumes := make(volumeMap)
+	for i := range pod.Spec.Volumes {
+		volSpec := &pod.Spec.Volumes[i]
+
+		// Try to use a plugin for this volume.
+		builder := kl.newVolumeBuilderFromPlugins(volSpec, pod.UID)
+		if builder == nil {
+			return nil, errUnsupportedVolumeType
+		}
+		err := builder.SetUp()
+		if err != nil {
+			return nil, err
+		}
+		podVolumes[volSpec.Name] = builder
+	}
+	return podVolumes, nil
+}
+
+// getPodVolumesFromDisk examines directory structure to determine volumes that
+// are presently active and mounted. Returns a map of volume.Cleaner types.
+// Keys are "<podUID>/<volumeName>". Directory-read errors are logged and
+// skipped (ranging a nil slice is a no-op), so one bad pod dir does not
+// abort discovery of the rest.
+func (kl *Kubelet) getPodVolumesFromDisk() map[string]volume.Cleaner {
+	currentVolumes := make(map[string]volume.Cleaner)
+
+	podUIDs, err := kl.listPodsFromDisk()
+	if err != nil {
+		glog.Errorf("Could not get pods from disk: %v", err)
+		return map[string]volume.Cleaner{}
+	}
+
+	// Find the volumes for each on-disk pod.
+	for _, podUID := range podUIDs {
+		podVolDir := kl.getPodVolumesDir(podUID)
+		// Layout: <podVolDir>/<escaped-plugin-name>/<volume-name>
+		volumeKindDirs, err := ioutil.ReadDir(podVolDir)
+		if err != nil {
+			glog.Errorf("Could not read directory %s: %v", podVolDir, err)
+		}
+		for _, volumeKindDir := range volumeKindDirs {
+			volumeKind := volumeKindDir.Name()
+			volumeKindPath := path.Join(podVolDir, volumeKind)
+			volumeNameDirs, err := ioutil.ReadDir(volumeKindPath)
+			if err != nil {
+				glog.Errorf("Could not read directory %s: %v", volumeKindPath, err)
+			}
+			for _, volumeNameDir := range volumeNameDirs {
+				volumeName := volumeNameDir.Name()
+				identifier := fmt.Sprintf("%s/%s", podUID, volumeName)
+				glog.V(4).Infof("Making a volume.Cleaner for %s", volumeKindPath)
+				// TODO(thockin) This should instead return a reference to an extant
+				// volume object, except that we don't actually hold on to pod specs
+				// or volume objects.
+
+				// Try to use a plugin for this volume.
+				cleaner := kl.newVolumeCleanerFromPlugins(volumeKind, volumeName, podUID)
+				if cleaner == nil {
+					glog.Errorf("Could not create volume cleaner for %s: %v", volumeNameDir.Name(), errUnsupportedVolumeType)
+					continue
+				}
+				currentVolumes[identifier] = cleaner
+			}
+		}
+	}
+	return currentVolumes
+}
+
+// newVolumeCleanerFromPlugins resolves the plugin for an on-disk volume
+// directory (kind is the escaped plugin dir name) and creates a Cleaner.
+// Returns nil (after logging) on any failure.
+func (kl *Kubelet) newVolumeCleanerFromPlugins(kind string, name string, podUID types.UID) volume.Cleaner {
+	// The on-disk dir name uses "~" in place of "/"; undo that to get the
+	// registered plugin name.
+	plugName := volume.UnescapePluginName(kind)
+	plugin, err := kl.volumePluginMgr.FindPluginByName(plugName)
+	if err != nil {
+		// TODO: Maybe we should launch a cleanup of this dir?
+		glog.Warningf("Can't use volume plugins for %s/%s: %v", podUID, kind, err)
+		return nil
+	}
+	if plugin == nil {
+		glog.Errorf("No error, but nil volume plugin for %s/%s", podUID, kind)
+		return nil
+	}
+	cleaner, err := plugin.NewCleaner(name, podUID)
+	if err != nil {
+		glog.Warningf("Error instantiating volume plugin for %s/%s: %v", podUID, kind, err)
+		return nil
+	}
+	glog.V(3).Infof("Used volume plugin %q for %s/%s", plugin.Name(), podUID, kind)
+	return cleaner
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
index a7d43ada8525..be5ad949b412 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
@@ -52,7 +52,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
- //"github.com/GoogleCloudPlatform/kubernetes/pkg/ui"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/ui"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/emicklei/go-restful"
@@ -62,23 +62,24 @@ import (
// Config is a structure used to configure a Master.
type Config struct {
- Client *client.Client
- Cloud cloudprovider.Interface
- EtcdHelper tools.EtcdHelper
- HealthCheckMinions bool
- EventTTL time.Duration
- MinionRegexp string
- KubeletClient client.KubeletClient
- PortalNet *net.IPNet
- EnableLogsSupport bool
- EnableUISupport bool
- EnableSwaggerSupport bool
- EnableV1Beta3 bool
- APIPrefix string
- CorsAllowedOriginList util.StringList
- Authenticator authenticator.Request
- Authorizer authorizer.Authorizer
- AdmissionControl admission.Interface
+ Client *client.Client
+ Cloud cloudprovider.Interface
+ EtcdHelper tools.EtcdHelper
+ HealthCheckMinions bool
+ EventTTL time.Duration
+ MinionRegexp string
+ KubeletClient client.KubeletClient
+ PortalNet *net.IPNet
+ EnableLogsSupport bool
+ EnableUISupport bool
+ EnableSwaggerSupport bool
+ EnableV1Beta3 bool
+ APIPrefix string
+ CorsAllowedOriginList util.StringList
+ Authenticator authenticator.Request
+ Authorizer authorizer.Authorizer
+ AdmissionControl admission.Interface
+ MasterServiceNamespace string
// If specified, all web services will be registered into this container
RestfulContainer *restful.Container
@@ -113,6 +114,7 @@ type Master struct {
portalNet *net.IPNet
mux apiserver.Mux
+ muxHelper *apiserver.MuxHelper
handlerContainer *restful.Container
rootWebService *restful.WebService
enableLogsSupport bool
@@ -195,7 +197,7 @@ func setDefaults(c *Config) {
break
}
if !found {
- glog.Errorf("Unable to find suitible network address in list: '%v'\n"+
+ glog.Errorf("Unable to find suitable network address in list: '%v'\n"+
"Will try again in 5 seconds. Set the public address directly to avoid this wait.", addrs)
time.Sleep(5 * time.Second)
}
@@ -231,7 +233,8 @@ func New(c *Config) *Master {
minionRegistry := makeMinionRegistry(c)
serviceRegistry := etcd.NewRegistry(c.EtcdHelper, nil)
boundPodFactory := &pod.BasicBoundPodFactory{
- ServiceRegistry: serviceRegistry,
+ ServiceRegistry: serviceRegistry,
+ MasterServiceNamespace: c.MasterServiceNamespace,
}
if c.KubeletClient == nil {
glog.Fatalf("master.New() called with config.KubeletClient == nil")
@@ -272,6 +275,7 @@ func New(c *Config) *Master {
m.mux = mux
m.handlerContainer = NewHandlerContainer(mux)
}
+ m.muxHelper = &apiserver.MuxHelper{m.mux, []string{}}
m.masterServices = util.NewRunner(m.serviceWriterLoop, m.roServiceWriterLoop)
m.init(c)
@@ -287,7 +291,7 @@ func (m *Master) HandleWithAuth(pattern string, handler http.Handler) {
// sensible policy defaults for plugged-in endpoints. This will be different
// for generic endpoints versus REST object endpoints.
// TODO: convert to go-restful
- m.mux.Handle(pattern, handler)
+ m.muxHelper.Handle(pattern, handler)
}
// HandleFuncWithAuth adds an http.Handler for pattern to an http.ServeMux
@@ -295,7 +299,7 @@ func (m *Master) HandleWithAuth(pattern string, handler http.Handler) {
// to the request is used for the master's built-in endpoints.
func (m *Master) HandleFuncWithAuth(pattern string, handler func(http.ResponseWriter, *http.Request)) {
// TODO: convert to go-restful
- m.mux.HandleFunc(pattern, handler)
+ m.muxHelper.HandleFunc(pattern, handler)
}
func NewHandlerContainer(mux *http.ServeMux) *restful.Container {
@@ -360,28 +364,34 @@ func (m *Master) init(c *Config) {
}
apiVersions := []string{"v1beta1", "v1beta2"}
- apiserver.NewAPIGroupVersion(m.API_v1beta1()).InstallREST(m.handlerContainer, c.APIPrefix, "v1beta1")
- apiserver.NewAPIGroupVersion(m.API_v1beta2()).InstallREST(m.handlerContainer, c.APIPrefix, "v1beta2")
+ if err := apiserver.NewAPIGroupVersion(m.api_v1beta1()).InstallREST(m.handlerContainer, m.muxHelper, c.APIPrefix, "v1beta1"); err != nil {
+ glog.Fatalf("Unable to setup API v1beta1: %v", err)
+ }
+ if err := apiserver.NewAPIGroupVersion(m.api_v1beta2()).InstallREST(m.handlerContainer, m.muxHelper, c.APIPrefix, "v1beta2"); err != nil {
+ glog.Fatalf("Unable to setup API v1beta2: %v", err)
+ }
if c.EnableV1Beta3 {
- apiserver.NewAPIGroupVersion(m.API_v1beta3()).InstallREST(m.handlerContainer, c.APIPrefix, "v1beta3")
+ if err := apiserver.NewAPIGroupVersion(m.api_v1beta3()).InstallREST(m.handlerContainer, m.muxHelper, c.APIPrefix, "v1beta3"); err != nil {
+ glog.Fatalf("Unable to setup API v1beta3: %v", err)
+ }
apiVersions = []string{"v1beta1", "v1beta2", "v1beta3"}
}
- apiserver.InstallSupport(m.handlerContainer, m.rootWebService)
+ apiserver.InstallSupport(m.muxHelper, m.rootWebService)
apiserver.AddApiWebService(m.handlerContainer, c.APIPrefix, apiVersions)
// Register root handler.
// We do not register this using restful Webservice since we do not want to surface this in api docs.
- //m.mux.HandleFunc("/", apiserver.HandleIndex)
+ m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper))
// TODO: use go-restful
- apiserver.InstallValidator(m.mux, func() map[string]apiserver.Server { return m.getServersToValidate(c) })
+ apiserver.InstallValidator(m.muxHelper, func() map[string]apiserver.Server { return m.getServersToValidate(c) })
if c.EnableLogsSupport {
- apiserver.InstallLogsSupport(m.mux)
+ apiserver.InstallLogsSupport(m.muxHelper)
+ }
+ if c.EnableUISupport {
+ ui.InstallSupport(m.muxHelper, m.enableSwaggerSupport)
}
- /*if c.EnableUISupport {
- ui.InstallSupport(m.mux)
- }*/
// TODO: install runtime/pprof handler
// See github.com/emicklei/go-restful/blob/master/examples/restful-cpuprofiler-service.go
@@ -478,8 +488,8 @@ func (m *Master) getServersToValidate(c *Config) map[string]apiserver.Server {
return serversToValidate
}
-// API_v1beta1 returns the resources and codec for API version v1beta1.
-func (m *Master) API_v1beta1() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
+// api_v1beta1 returns the resources and codec for API version v1beta1.
+func (m *Master) api_v1beta1() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
storage := make(map[string]apiserver.RESTStorage)
for k, v := range m.storage {
storage[k] = v
@@ -487,8 +497,8 @@ func (m *Master) API_v1beta1() (map[string]apiserver.RESTStorage, runtime.Codec,
return storage, v1beta1.Codec, "/api/v1beta1", latest.SelfLinker, m.admissionControl
}
-// API_v1beta2 returns the resources and codec for API version v1beta2.
-func (m *Master) API_v1beta2() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
+// api_v1beta2 returns the resources and codec for API version v1beta2.
+func (m *Master) api_v1beta2() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
storage := make(map[string]apiserver.RESTStorage)
for k, v := range m.storage {
storage[k] = v
@@ -496,8 +506,8 @@ func (m *Master) API_v1beta2() (map[string]apiserver.RESTStorage, runtime.Codec,
return storage, v1beta2.Codec, "/api/v1beta2", latest.SelfLinker, m.admissionControl
}
-// API_v1beta3 returns the resources and codec for API version v1beta3.
-func (m *Master) API_v1beta3() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
+// api_v1beta3 returns the resources and codec for API version v1beta3.
+func (m *Master) api_v1beta3() (map[string]apiserver.RESTStorage, runtime.Codec, string, runtime.SelfLinker, admission.Interface) {
storage := make(map[string]apiserver.RESTStorage)
for k, v := range m.storage {
if k == "minions" {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache.go
index 4da894698130..7b2ba52a0ba1 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache.go
@@ -144,13 +144,13 @@ func (p *PodCache) computePodStatus(pod *api.Pod) (api.PodStatus, error) {
return newStatus, nil
}
- info, err := p.containerInfo.GetPodInfo(pod.Status.Host, pod.Namespace, pod.Name)
+ result, err := p.containerInfo.GetPodStatus(pod.Status.Host, pod.Namespace, pod.Name)
newStatus.HostIP = p.ipCache.GetInstanceIP(pod.Status.Host)
if err != nil {
newStatus.Phase = api.PodUnknown
} else {
- newStatus.Info = info.ContainerInfo
+ newStatus.Info = result.Status.Info
newStatus.Phase = getPhase(&pod.Spec, newStatus.Info)
if netContainerInfo, ok := newStatus.Info["net"]; ok {
if netContainerInfo.PodIP != "" {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache_test.go
index 09c09a9e3970..37f7257ad9f2 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/pod_cache_test.go
@@ -36,7 +36,7 @@ type podInfoCall struct {
type podInfoResponse struct {
useCount int
- data api.PodContainerInfo
+ data api.PodStatusResult
err error
}
@@ -48,11 +48,11 @@ type FakePodInfoGetter struct {
// default data/error to return, or you can add
// responses to specific calls-- that will take precedence.
- data api.PodContainerInfo
+ data api.PodStatusResult
err error
}
-func (f *FakePodInfoGetter) GetPodInfo(host, namespace, name string) (api.PodContainerInfo, error) {
+func (f *FakePodInfoGetter) GetPodStatus(host, namespace, name string) (api.PodStatusResult, error) {
f.lock.Lock()
defer f.lock.Unlock()
@@ -147,7 +147,7 @@ type podCacheTestConfig struct {
ipFunc func(string) string // Construct will set a default if nil
nodes []api.Node
pods []api.Pod
- kubeletContainerInfo api.PodInfo
+ kubeletContainerInfo api.PodStatus
// Construct will fill in these fields
fakePodInfo *FakePodInfoGetter
@@ -162,8 +162,8 @@ func (c *podCacheTestConfig) Construct() *PodCache {
}
}
c.fakePodInfo = &FakePodInfoGetter{
- data: api.PodContainerInfo{
- ContainerInfo: c.kubeletContainerInfo,
+ data: api.PodStatusResult{
+ Status: c.kubeletContainerInfo,
},
}
c.fakeNodes = &client.Fake{
@@ -209,9 +209,10 @@ func TestPodUpdateAllContainers(t *testing.T) {
}
return ""
},
- kubeletContainerInfo: api.PodInfo{"bar": api.ContainerStatus{}},
- nodes: []api.Node{*makeNode("machine")},
- pods: []api.Pod{*pod, *pod2},
+ kubeletContainerInfo: api.PodStatus{
+ Info: api.PodInfo{"bar": api.ContainerStatus{}}},
+ nodes: []api.Node{*makeNode("machine")},
+ pods: []api.Pod{*pod, *pod2},
}
cache := config.Construct()
@@ -233,7 +234,7 @@ func TestPodUpdateAllContainers(t *testing.T) {
if err != nil {
t.Fatalf("Unexpected error: %+v", err)
}
- if e, a := config.kubeletContainerInfo, status.Info; !reflect.DeepEqual(e, a) {
+ if e, a := config.kubeletContainerInfo.Info, status.Info; !reflect.DeepEqual(e, a) {
t.Errorf("Unexpected mismatch. Expected: %+v, Got: %+v", e, a)
}
if e, a := "1.2.3.5", status.HostIP; e != a {
@@ -252,7 +253,7 @@ func TestPodUpdateAllContainers(t *testing.T) {
func TestFillPodStatusNoHost(t *testing.T) {
pod := makePod(api.NamespaceDefault, "foo", "", "bar")
config := podCacheTestConfig{
- kubeletContainerInfo: api.PodInfo{},
+ kubeletContainerInfo: api.PodStatus{},
nodes: []api.Node{*makeNode("machine")},
pods: []api.Pod{*pod},
}
@@ -271,7 +272,7 @@ func TestFillPodStatusNoHost(t *testing.T) {
func TestFillPodStatusMissingMachine(t *testing.T) {
pod := makePod(api.NamespaceDefault, "foo", "machine", "bar")
config := podCacheTestConfig{
- kubeletContainerInfo: api.PodInfo{},
+ kubeletContainerInfo: api.PodStatus{},
nodes: []api.Node{},
pods: []api.Pod{*pod},
}
@@ -292,15 +293,21 @@ func TestFillPodStatus(t *testing.T) {
expectedIP := "1.2.3.4"
expectedTime, _ := time.Parse("2013-Feb-03", "2013-Feb-03")
config := podCacheTestConfig{
- kubeletContainerInfo: api.PodInfo{
- "net": {
- State: api.ContainerState{
- Running: &api.ContainerStateRunning{
- StartedAt: util.NewTime(expectedTime),
+ kubeletContainerInfo: api.PodStatus{
+ Phase: api.PodPending,
+ Host: "machine",
+ HostIP: "ip of machine",
+ PodIP: expectedIP,
+ Info: api.PodInfo{
+ "net": {
+ State: api.ContainerState{
+ Running: &api.ContainerStateRunning{
+ StartedAt: util.NewTime(expectedTime),
+ },
},
+ RestartCount: 1,
+ PodIP: expectedIP,
},
- RestartCount: 1,
- PodIP: expectedIP,
},
},
nodes: []api.Node{*makeNode("machine")},
@@ -313,20 +320,22 @@ func TestFillPodStatus(t *testing.T) {
}
status, err := cache.GetPodStatus(pod.Namespace, pod.Name)
- if e, a := config.kubeletContainerInfo, status.Info; !reflect.DeepEqual(e, a) {
+ if e, a := &config.kubeletContainerInfo, status; !reflect.DeepEqual(e, a) {
t.Errorf("Expected: %+v, Got %+v", e, a)
}
- if status.PodIP != expectedIP {
- t.Errorf("Expected %s, Got %s\n%+v", expectedIP, status.PodIP, status)
- }
}
func TestFillPodInfoNoData(t *testing.T) {
pod := makePod(api.NamespaceDefault, "foo", "machine", "bar")
expectedIP := ""
config := podCacheTestConfig{
- kubeletContainerInfo: api.PodInfo{
- "net": {},
+ kubeletContainerInfo: api.PodStatus{
+ Phase: api.PodPending,
+ Host: "machine",
+ HostIP: "ip of machine",
+ Info: api.PodInfo{
+ "net": {},
+ },
},
nodes: []api.Node{*makeNode("machine")},
pods: []api.Pod{*pod},
@@ -338,7 +347,7 @@ func TestFillPodInfoNoData(t *testing.T) {
}
status, err := cache.GetPodStatus(pod.Namespace, pod.Name)
- if e, a := config.kubeletContainerInfo, status.Info; !reflect.DeepEqual(e, a) {
+ if e, a := &config.kubeletContainerInfo, status; !reflect.DeepEqual(e, a) {
t.Errorf("Expected: %+v, Got %+v", e, a)
}
if status.PodIP != expectedIP {
@@ -411,7 +420,7 @@ func TestPodPhaseWithBadNode(t *testing.T) {
}
for _, test := range tests {
config := podCacheTestConfig{
- kubeletContainerInfo: test.pod.Status.Info,
+ kubeletContainerInfo: test.pod.Status,
nodes: []api.Node{},
pods: []api.Pod{*test.pod},
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config/config.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config/config.go
index 269a672ec4d8..fc275a08bc65 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config/config.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/proxy/config/config.go
@@ -106,8 +106,8 @@ func (c *EndpointsConfig) Channel(source string) chan EndpointsUpdate {
return endpointsCh
}
-func (c *EndpointsConfig) Config() map[string]map[string]api.Endpoints {
- return c.store.MergedState().(map[string]map[string]api.Endpoints)
+func (c *EndpointsConfig) Config() []api.Endpoints {
+ return c.store.MergedState().([]api.Endpoints)
}
type endpointsStore struct {
@@ -201,8 +201,8 @@ func (c *ServiceConfig) Channel(source string) chan ServiceUpdate {
return serviceCh
}
-func (c *ServiceConfig) Config() map[string]map[string]api.Service {
- return c.store.MergedState().(map[string]map[string]api.Service)
+func (c *ServiceConfig) Config() []api.Service {
+ return c.store.MergedState().([]api.Service)
}
type serviceStore struct {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller/rest.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller/rest.go
index 0a38fef53d01..350985f1f2a0 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller/rest.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/controller/rest.go
@@ -63,7 +63,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RE
}
if len(controller.Name) == 0 {
- controller.Name = util.NewUUID().String()
+ controller.Name = string(util.NewUUID())
}
if errs := validation.ValidateReplicationController(controller); len(errs) > 0 {
return nil, errors.NewInvalid("replicationController", controller.Name, errs)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd/etcd_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd/etcd_test.go
index ef6647eb6b15..8a30c7c5d45c 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd/etcd_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/etcd/etcd_test.go
@@ -130,7 +130,8 @@ func TestEtcdCreatePod(t *testing.T) {
registry := NewTestEtcdRegistry(fakeClient)
err := registry.CreatePod(ctx, &api.Pod{
ObjectMeta: api.ObjectMeta{
- Name: "foo",
+ Name: "foo",
+ Namespace: api.NamespaceDefault,
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -240,7 +241,8 @@ func TestEtcdCreatePodWithContainersError(t *testing.T) {
registry := NewTestEtcdRegistry(fakeClient)
err := registry.CreatePod(ctx, &api.Pod{
ObjectMeta: api.ObjectMeta{
- Name: "foo",
+ Name: "foo",
+ Namespace: api.NamespaceDefault,
},
})
if err != nil {
@@ -282,7 +284,8 @@ func TestEtcdCreatePodWithContainersNotFound(t *testing.T) {
registry := NewTestEtcdRegistry(fakeClient)
err := registry.CreatePod(ctx, &api.Pod{
ObjectMeta: api.ObjectMeta{
- Name: "foo",
+ Name: "foo",
+ Namespace: api.NamespaceDefault,
},
Spec: api.PodSpec{
Containers: []api.Container{
@@ -346,7 +349,8 @@ func TestEtcdCreatePodWithExistingContainers(t *testing.T) {
registry := NewTestEtcdRegistry(fakeClient)
err := registry.CreatePod(ctx, &api.Pod{
ObjectMeta: api.ObjectMeta{
- Name: "foo",
+ Name: "foo",
+ Namespace: api.NamespaceDefault,
},
Spec: api.PodSpec{
Containers: []api.Container{
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest.go
index 8432c09c3d08..b36511638431 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest.go
@@ -102,12 +102,10 @@ func (rs *REST) getAttrs(obj runtime.Object) (objLabels, objFields labels.Set, e
"involvedObject.kind": event.InvolvedObject.Kind,
"involvedObject.namespace": event.InvolvedObject.Namespace,
"involvedObject.name": event.InvolvedObject.Name,
- "involvedObject.uid": event.InvolvedObject.UID,
+ "involvedObject.uid": string(event.InvolvedObject.UID),
"involvedObject.apiVersion": event.InvolvedObject.APIVersion,
"involvedObject.resourceVersion": fmt.Sprintf("%s", event.InvolvedObject.ResourceVersion),
"involvedObject.fieldPath": event.InvolvedObject.FieldPath,
- "condition": event.Condition,
- "status": event.Condition, // TODO: remove me when we version fields
"reason": event.Reason,
"source": event.Source.Component,
}, nil
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest_test.go
index f52e388c2590..c2b60b499df3 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/event/rest_test.go
@@ -143,9 +143,8 @@ func TestRESTgetAttrs(t *testing.T) {
ResourceVersion: "0",
FieldPath: "",
},
- Condition: "Tested",
- Reason: "ForTesting",
- Source: api.EventSource{Component: "test"},
+ Reason: "ForTesting",
+ Source: api.EventSource{Component: "test"},
}
label, field, err := rest.getAttrs(eventA)
if err != nil {
@@ -162,8 +161,6 @@ func TestRESTgetAttrs(t *testing.T) {
"involvedObject.apiVersion": testapi.Version(),
"involvedObject.resourceVersion": "0",
"involvedObject.fieldPath": "",
- "condition": "Tested",
- "status": "Tested",
"reason": "ForTesting",
"source": "test",
}
@@ -183,8 +180,8 @@ func TestRESTList(t *testing.T) {
ResourceVersion: "0",
FieldPath: "",
},
- Condition: "Tested",
- Reason: "ForTesting",
+ Reason: "ForTesting",
+ Source: api.EventSource{Component: "GoodSource"},
}
eventB := &api.Event{
InvolvedObject: api.ObjectReference{
@@ -195,8 +192,8 @@ func TestRESTList(t *testing.T) {
ResourceVersion: "0",
FieldPath: "",
},
- Condition: "Tested",
- Reason: "ForTesting",
+ Reason: "ForTesting",
+ Source: api.EventSource{Component: "GoodSource"},
}
eventC := &api.Event{
InvolvedObject: api.ObjectReference{
@@ -207,13 +204,13 @@ func TestRESTList(t *testing.T) {
ResourceVersion: "0",
FieldPath: "",
},
- Condition: "Untested",
- Reason: "ForTesting",
+ Reason: "ForTesting",
+ Source: api.EventSource{Component: "OtherSource"},
}
reg.ObjectList = &api.EventList{
Items: []api.Event{*eventA, *eventB, *eventC},
}
- got, err := rest.List(api.NewContext(), labels.Everything(), labels.Set{"status": "Tested"}.AsSelector())
+ got, err := rest.List(api.NewContext(), labels.Everything(), labels.Set{"source": "GoodSource"}.AsSelector())
if err != nil {
t.Fatalf("Unexpected error %v", err)
}
@@ -235,8 +232,7 @@ func TestRESTWatch(t *testing.T) {
ResourceVersion: "0",
FieldPath: "",
},
- Condition: "Tested",
- Reason: "ForTesting",
+ Reason: "ForTesting",
}
reg, rest := NewTestREST()
wi, err := rest.Watch(api.NewContext(), labels.Everything(), labels.Everything(), "0")
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion/healthy_registry.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion/healthy_registry.go
index 3524413ee3ca..a6d32607e086 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion/healthy_registry.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/minion/healthy_registry.go
@@ -26,6 +26,8 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/watch"
+
+ "github.com/golang/glog"
)
type HealthyRegistry struct {
@@ -108,12 +110,16 @@ func (r *HealthyRegistry) checkMinion(node *api.Node) *api.Node {
// This is called to fill the cache.
func (r *HealthyRegistry) doCheck(key string) util.T {
+ var nodeStatus api.NodeConditionStatus
switch status, err := r.client.HealthCheck(key); {
case err != nil:
- return api.ConditionUnknown
+ glog.V(2).Infof("HealthyRegistry: node %q health check error: %v", key, err)
+ nodeStatus = api.ConditionUnknown
case status == health.Unhealthy:
- return api.ConditionNone
+ nodeStatus = api.ConditionNone
default:
- return api.ConditionFull
+ nodeStatus = api.ConditionFull
}
+ glog.V(3).Infof("HealthyRegistry: node %q status was %q", key, nodeStatus)
+ return nodeStatus
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory.go
index 5cd915874d6e..2664aaa44380 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory.go
@@ -20,6 +20,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/envvars"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
type BoundPodFactory interface {
@@ -29,25 +30,49 @@ type BoundPodFactory interface {
type BasicBoundPodFactory struct {
// TODO: this should really point at the API rather than a registry
- ServiceRegistry service.Registry
+ ServiceRegistry service.Registry
+ MasterServiceNamespace string
}
-// getServiceEnvironmentVariables populates a list of environment variables that are use
+var masterServiceNames = util.NewStringSet("kubernetes", "kubernetes-ro")
+
+// getServiceEnvironmentVariables populates a list of environment variables that are used
// in the container environment to get access to services.
-func getServiceEnvironmentVariables(ctx api.Context, registry service.Registry, machine string) ([]api.EnvVar, error) {
+func (b *BasicBoundPodFactory) getServiceEnvironmentVariables(ctx api.Context, registry service.Registry, machine string) ([]api.EnvVar, error) {
var result []api.EnvVar
- services, err := registry.ListServices(ctx)
+ servicesInNs, err := registry.ListServices(ctx)
+ if err != nil {
+ return result, err
+ }
+
+ masterServices, err := registry.ListServices(api.WithNamespace(api.NewContext(), b.MasterServiceNamespace))
if err != nil {
return result, err
}
- return envvars.FromServices(services), nil
+
+ projection := map[string]api.Service{}
+ services := []api.Service{}
+ for _, service := range masterServices.Items {
+ if masterServiceNames.Has(service.Name) {
+ projection[service.Name] = service
+ }
+ }
+ for _, service := range servicesInNs.Items {
+ projection[service.Name] = service
+ }
+ for _, service := range projection {
+ services = append(services, service)
+ }
+
+ return envvars.FromServices(&api.ServiceList{Items: services}), nil
}
func (b *BasicBoundPodFactory) MakeBoundPod(machine string, pod *api.Pod) (*api.BoundPod, error) {
- envVars, err := getServiceEnvironmentVariables(api.NewContext(), b.ServiceRegistry, machine)
+ envVars, err := b.getServiceEnvironmentVariables(api.WithNamespace(api.NewContext(), pod.Namespace), b.ServiceRegistry, machine)
if err != nil {
return nil, err
}
+
boundPod := &api.BoundPod{}
if err := api.Scheme.Convert(pod, boundPod); err != nil {
return nil, err
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory_test.go
index c0ec30008227..84323bd16a23 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/bound_pod_factory_test.go
@@ -22,13 +22,13 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/registry/registrytest"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
)
func TestMakeBoundPodNoServices(t *testing.T) {
registry := registrytest.ServiceRegistry{}
factory := &BasicBoundPodFactory{
- ServiceRegistry: &registry,
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
}
pod, err := factory.MakeBoundPod("machine", &api.Pod{
@@ -63,13 +63,9 @@ func TestMakeBoundPodServices(t *testing.T) {
List: api.ServiceList{
Items: []api.Service{
{
- ObjectMeta: api.ObjectMeta{Name: "test"},
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test"},
Spec: api.ServiceSpec{
- Port: 8080,
- ContainerPort: util.IntOrString{
- Kind: util.IntstrInt,
- IntVal: 900,
- },
+ Port: 8080,
PortalIP: "1.2.3.4",
},
},
@@ -77,11 +73,12 @@ func TestMakeBoundPodServices(t *testing.T) {
},
}
factory := &BasicBoundPodFactory{
- ServiceRegistry: &registry,
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
}
pod, err := factory.MakeBoundPod("machine", &api.Pod{
- ObjectMeta: api.ObjectMeta{Name: "foobar"},
+ ObjectMeta: api.ObjectMeta{Name: "foobar", Namespace: "test"},
Spec: api.PodSpec{
Containers: []api.Container{
{
@@ -140,13 +137,9 @@ func TestMakeBoundPodServicesExistingEnvVar(t *testing.T) {
List: api.ServiceList{
Items: []api.Service{
{
- ObjectMeta: api.ObjectMeta{Name: "test"},
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test"},
Spec: api.ServiceSpec{
- Port: 8080,
- ContainerPort: util.IntOrString{
- Kind: util.IntstrInt,
- IntVal: 900,
- },
+ Port: 8080,
PortalIP: "1.2.3.4",
},
},
@@ -154,10 +147,12 @@ func TestMakeBoundPodServicesExistingEnvVar(t *testing.T) {
},
}
factory := &BasicBoundPodFactory{
- ServiceRegistry: &registry,
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
}
pod, err := factory.MakeBoundPod("machine", &api.Pod{
+ ObjectMeta: api.ObjectMeta{Name: "foobar", Namespace: "test"},
Spec: api.PodSpec{
Containers: []api.Container{
{
@@ -220,3 +215,238 @@ func TestMakeBoundPodServicesExistingEnvVar(t *testing.T) {
}
}
}
+
+func TestMakeBoundPodOnlyVisibleServices(t *testing.T) {
+ registry := registrytest.ServiceRegistry{
+ List: api.ServiceList{
+ Items: []api.Service{
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{
+ Port: 8080,
+ PortalIP: "1.2.3.4",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8081,
+ PortalIP: "1.2.3.5",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test3", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8083,
+ PortalIP: "1.2.3.7",
+ },
+ },
+ },
+ },
+ }
+ factory := &BasicBoundPodFactory{
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
+ }
+
+ pod, err := factory.MakeBoundPod("machine", &api.Pod{
+ ObjectMeta: api.ObjectMeta{Name: "foobar", Namespace: "test"},
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ },
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ container := pod.Spec.Containers[0]
+ envs := map[string]string{
+ "TEST_SERVICE_HOST": "1.2.3.5",
+ "TEST_SERVICE_PORT": "8081",
+ "TEST_PORT": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP_PROTO": "tcp",
+ "TEST_PORT_8081_TCP_PORT": "8081",
+ "TEST_PORT_8081_TCP_ADDR": "1.2.3.5",
+ "TEST3_SERVICE_HOST": "1.2.3.7",
+ "TEST3_SERVICE_PORT": "8083",
+ "TEST3_PORT": "tcp://1.2.3.7:8083",
+ "TEST3_PORT_8083_TCP": "tcp://1.2.3.7:8083",
+ "TEST3_PORT_8083_TCP_PROTO": "tcp",
+ "TEST3_PORT_8083_TCP_PORT": "8083",
+ "TEST3_PORT_8083_TCP_ADDR": "1.2.3.7",
+ }
+
+ if len(container.Env) != len(envs) {
+ t.Fatalf("Expected %d env vars, got %d: %#v", len(envs), len(container.Env), pod)
+ }
+ for _, env := range container.Env {
+ expectedValue := envs[env.Name]
+ if expectedValue != env.Value {
+ t.Errorf("expected env %v value %v, got %v", env.Name, expectedValue, env.Value)
+ }
+ }
+}
+
+func TestMakeBoundPodMasterServices(t *testing.T) {
+ registry := registrytest.ServiceRegistry{
+ List: api.ServiceList{
+ Items: []api.Service{
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{
+ Port: 8080,
+ PortalIP: "1.2.3.4",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8081,
+ PortalIP: "1.2.3.5",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test3", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8083,
+ PortalIP: "1.2.3.7",
+ },
+ },
+ },
+ },
+ }
+ factory := &BasicBoundPodFactory{
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
+ }
+
+ pod, err := factory.MakeBoundPod("machine", &api.Pod{
+ ObjectMeta: api.ObjectMeta{Name: "foobar", Namespace: "test"},
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ },
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ container := pod.Spec.Containers[0]
+ envs := map[string]string{
+ "TEST_SERVICE_HOST": "1.2.3.5",
+ "TEST_SERVICE_PORT": "8081",
+ "TEST_PORT": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP_PROTO": "tcp",
+ "TEST_PORT_8081_TCP_PORT": "8081",
+ "TEST_PORT_8081_TCP_ADDR": "1.2.3.5",
+ "TEST3_SERVICE_HOST": "1.2.3.7",
+ "TEST3_SERVICE_PORT": "8083",
+ "TEST3_PORT": "tcp://1.2.3.7:8083",
+ "TEST3_PORT_8083_TCP": "tcp://1.2.3.7:8083",
+ "TEST3_PORT_8083_TCP_PROTO": "tcp",
+ "TEST3_PORT_8083_TCP_PORT": "8083",
+ "TEST3_PORT_8083_TCP_ADDR": "1.2.3.7",
+ "KUBERNETES_SERVICE_HOST": "1.2.3.4",
+ "KUBERNETES_SERVICE_PORT": "8080",
+ "KUBERNETES_PORT": "tcp://1.2.3.4:8080",
+ "KUBERNETES_PORT_8080_TCP": "tcp://1.2.3.4:8080",
+ "KUBERNETES_PORT_8080_TCP_PROTO": "tcp",
+ "KUBERNETES_PORT_8080_TCP_PORT": "8080",
+ "KUBERNETES_PORT_8080_TCP_ADDR": "1.2.3.4",
+ }
+
+ if len(container.Env) != len(envs) {
+ t.Fatalf("Expected %d env vars, got %d: %#v", len(envs), len(container.Env), pod)
+ }
+ for _, env := range container.Env {
+ expectedValue := envs[env.Name]
+ if expectedValue != env.Value {
+ t.Errorf("expected env %v value %v, got %v", env.Name, expectedValue, env.Value)
+ }
+ }
+}
+
+func TestMakeBoundPodMasterServiceInNs(t *testing.T) {
+ registry := registrytest.ServiceRegistry{
+ List: api.ServiceList{
+ Items: []api.Service{
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: api.NamespaceDefault},
+ Spec: api.ServiceSpec{
+ Port: 8080,
+ PortalIP: "1.2.3.4",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "test", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8081,
+ PortalIP: "1.2.3.5",
+ },
+ },
+ {
+ ObjectMeta: api.ObjectMeta{Name: "kubernetes", Namespace: "test"},
+ Spec: api.ServiceSpec{
+ Port: 8083,
+ PortalIP: "1.2.3.7",
+ },
+ },
+ },
+ },
+ }
+ factory := &BasicBoundPodFactory{
+ ServiceRegistry: &registry,
+ MasterServiceNamespace: api.NamespaceDefault,
+ }
+
+ pod, err := factory.MakeBoundPod("machine", &api.Pod{
+ ObjectMeta: api.ObjectMeta{Name: "foobar", Namespace: "test"},
+ Spec: api.PodSpec{
+ Containers: []api.Container{
+ {
+ Name: "foo",
+ },
+ },
+ },
+ })
+ if err != nil {
+ t.Fatalf("unexpected error: %v", err)
+ }
+
+ container := pod.Spec.Containers[0]
+ envs := map[string]string{
+ "TEST_SERVICE_HOST": "1.2.3.5",
+ "TEST_SERVICE_PORT": "8081",
+ "TEST_PORT": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP": "tcp://1.2.3.5:8081",
+ "TEST_PORT_8081_TCP_PROTO": "tcp",
+ "TEST_PORT_8081_TCP_PORT": "8081",
+ "TEST_PORT_8081_TCP_ADDR": "1.2.3.5",
+ "KUBERNETES_SERVICE_HOST": "1.2.3.7",
+ "KUBERNETES_SERVICE_PORT": "8083",
+ "KUBERNETES_PORT": "tcp://1.2.3.7:8083",
+ "KUBERNETES_PORT_8083_TCP": "tcp://1.2.3.7:8083",
+ "KUBERNETES_PORT_8083_TCP_PROTO": "tcp",
+ "KUBERNETES_PORT_8083_TCP_PORT": "8083",
+ "KUBERNETES_PORT_8083_TCP_ADDR": "1.2.3.7",
+ }
+
+ if len(container.Env) != len(envs) {
+ t.Fatalf("Expected %d env vars, got %d: %#v", len(envs), len(container.Env), pod)
+ }
+ for _, env := range container.Env {
+ expectedValue := envs[env.Name]
+ if expectedValue != env.Value {
+ t.Errorf("expected env %v value %v, got %v", env.Name, expectedValue, env.Value)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/rest.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/rest.go
index 99bc58ff4b0c..d6a3f47145ef 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/rest.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/pod/rest.go
@@ -62,7 +62,7 @@ func (rs *REST) Create(ctx api.Context, obj runtime.Object) (<-chan apiserver.RE
if len(pod.Name) == 0 {
// TODO properly handle auto-generated names.
// See https://github.com/GoogleCloudPlatform/kubernetes/issues/148 170 & 1135
- pod.Name = pod.UID
+ pod.Name = string(pod.UID)
}
if errs := validation.ValidatePod(pod); len(errs) > 0 {
return nil, errors.NewInvalid("pod", pod.Name, errs)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/rest_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/rest_test.go
index 36f16eeb9836..ab288b0ae91a 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/rest_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/registry/service/rest_test.go
@@ -347,13 +347,13 @@ func TestServiceRegistryList(t *testing.T) {
machines := []string{"foo", "bar", "baz"}
storage := NewREST(registry, fakeCloud, registrytest.NewMinionRegistry(machines, api.NodeResources{}), makeIPNet(t))
registry.CreateService(ctx, &api.Service{
- ObjectMeta: api.ObjectMeta{Name: "foo"},
+ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
},
})
registry.CreateService(ctx, &api.Service{
- ObjectMeta: api.ObjectMeta{Name: "foo2"},
+ ObjectMeta: api.ObjectMeta{Name: "foo2", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar2": "baz2"},
},
@@ -585,7 +585,7 @@ func TestServiceRegistryIPReloadFromStorage(t *testing.T) {
rest1.portalMgr.randomAttempts = 0
svc := &api.Service{
- ObjectMeta: api.ObjectMeta{Name: "foo"},
+ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
Port: 6502,
@@ -595,7 +595,7 @@ func TestServiceRegistryIPReloadFromStorage(t *testing.T) {
c, _ := rest1.Create(ctx, svc)
<-c
svc = &api.Service{
- ObjectMeta: api.ObjectMeta{Name: "foo"},
+ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
Port: 6502,
@@ -609,7 +609,7 @@ func TestServiceRegistryIPReloadFromStorage(t *testing.T) {
rest2.portalMgr.randomAttempts = 0
svc = &api.Service{
- ObjectMeta: api.ObjectMeta{Name: "foo"},
+ ObjectMeta: api.ObjectMeta{Name: "foo", Namespace: api.NamespaceDefault},
Spec: api.ServiceSpec{
Selector: map[string]string{"bar": "baz"},
Port: 6502,
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/runtime/scheme.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/runtime/scheme.go
index 0a3f8d040afa..447ad523f6c4 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/runtime/scheme.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/runtime/scheme.go
@@ -360,6 +360,9 @@ func (s *Scheme) Decode(data []byte) (Object, error) {
// pointer to an api type.
// If obj's APIVersion doesn't match that in data, an attempt will be made to convert
// data into obj's version.
+// TODO: allow Decode/DecodeInto to take a default apiVersion and a default kind, to
+// be applied if the provided object does not have either field (integrate external
+// apis into the decoding scheme).
func (s *Scheme) DecodeInto(data []byte, obj Object) error {
return s.raw.DecodeInto(data, obj)
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/listers.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/listers.go
index 6c893dcce93f..10d887c94178 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/listers.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/listers.go
@@ -17,6 +17,8 @@ limitations under the License.
package scheduler
import (
+ "fmt"
+
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
)
@@ -52,3 +54,40 @@ func (f FakePodLister) List(s labels.Selector) (selected []api.Pod, err error) {
}
return selected, nil
}
+
+// ServiceLister interface represents anything that can produce a list of services; the list is consumed by a scheduler.
+type ServiceLister interface {
+ // Lists all the services
+ List() (api.ServiceList, error)
+ // Gets the services for the given pod
+ GetPodServices(api.Pod) ([]api.Service, error)
+}
+
+// FakeServiceLister implements ServiceLister on []api.Service for test purposes.
+type FakeServiceLister []api.Service
+
+// FakeServiceLister returns api.ServiceList, the list of all services.
+func (f FakeServiceLister) List() (api.ServiceList, error) {
+ return api.ServiceList{Items: f}, nil
+}
+
+// GetPodServices gets the services that have the selector that match the labels on the given pod
+func (f FakeServiceLister) GetPodServices(pod api.Pod) (services []api.Service, err error) {
+ var selector labels.Selector
+
+ for _, service := range f {
+ // consider only services that are in the same namespace as the pod
+ if service.Namespace != pod.Namespace {
+ continue
+ }
+ selector = labels.Set(service.Spec.Selector).AsSelector()
+ if selector.Matches(labels.Set(pod.Labels)) {
+ services = append(services, service)
+ }
+ }
+ if len(services) == 0 {
+ err = fmt.Errorf("Could not find service for pod %s in namespace %s with labels: %v", pod.Name, pod.Namespace, pod.Labels)
+ }
+
+ return
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates.go
index fefe23c9fe15..65698c6ced66 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates.go
@@ -125,7 +125,7 @@ func (r *ResourceFit) PodFitsResources(pod api.Pod, existingPods []api.Pod, node
fitsCPU := totalMilliCPU == 0 || (totalMilliCPU-milliCPURequested) >= podRequest.milliCPU
fitsMemory := totalMemory == 0 || (totalMemory-memoryRequested) >= podRequest.memory
- glog.V(3).Infof("Calculated fit: cpu: %s, memory %s", fitsCPU, fitsMemory)
+ glog.V(3).Infof("Calculated fit: cpu: %v, memory %v", fitsCPU, fitsMemory)
return fitsCPU && fitsMemory, nil
}
@@ -167,6 +167,137 @@ func PodFitsHost(pod api.Pod, existingPods []api.Pod, node string) (bool, error)
return pod.Spec.Host == node, nil
}
+type NodeLabelChecker struct {
+ info NodeInfo
+ labels []string
+ presence bool
+}
+
+func NewNodeLabelPredicate(info NodeInfo, labels []string, presence bool) FitPredicate {
+ labelChecker := &NodeLabelChecker{
+ info: info,
+ labels: labels,
+ presence: presence,
+ }
+ return labelChecker.CheckNodeLabelPresence
+}
+
+// CheckNodeLabelPresence checks whether all of the specified labels exists on a minion or not, regardless of their value
+// If "presence" is false, then returns false if any of the requested labels matches any of the minion's labels,
+// otherwise returns true.
+// If "presence" is true, then returns false if any of the requested labels does not match any of the minion's labels,
+// otherwise returns true.
+//
+// Consider the cases where the minions are placed in regions/zones/racks and these are identified by labels
+// In some cases, it is required that only minions that are part of ANY of the defined regions/zones/racks be selected
+//
+// Alternately, eliminating minions that have a certain label, regardless of value, is also useful
+// A minion may have a label with "retiring" as key and the date as the value
+// and it may be desirable to avoid scheduling new pods on this minion
+func (n *NodeLabelChecker) CheckNodeLabelPresence(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
+ var exists bool
+ minion, err := n.info.GetNodeInfo(node)
+ if err != nil {
+ return false, err
+ }
+ minionLabels := labels.Set(minion.Labels)
+ for _, label := range n.labels {
+ exists = minionLabels.Has(label)
+ if (exists && !n.presence) || (!exists && n.presence) {
+ return false, nil
+ }
+ }
+ return true, nil
+}
+
+type ServiceAffinity struct {
+ podLister PodLister
+ serviceLister ServiceLister
+ nodeInfo NodeInfo
+ labels []string
+}
+
+func NewServiceAffinityPredicate(podLister PodLister, serviceLister ServiceLister, nodeInfo NodeInfo, labels []string) FitPredicate {
+ affinity := &ServiceAffinity{
+ podLister: podLister,
+ serviceLister: serviceLister,
+ nodeInfo: nodeInfo,
+ labels: labels,
+ }
+ return affinity.CheckServiceAffinity
+}
+
+// CheckServiceAffinity ensures that only the minions that match the specified labels are considered for scheduling.
+// The set of labels to be considered are provided to the struct (ServiceAffinity).
+// The pod is checked for the labels and any missing labels are then checked in the minion
+// that hosts the service pods (peers) for the given pod.
+//
+// We add an implicit selector requiring some particular value V for label L to a pod, if:
+// - L is listed in the ServiceAffinity object that is passed into the function
+// - the pod does not have any NodeSelector for L
+// - some other pod from the same service is already scheduled onto a minion that has value V for label L
+func (s *ServiceAffinity) CheckServiceAffinity(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
+ var affinitySelector labels.Selector
+
+ // check if the pod being scheduled has the affinity labels specified in its NodeSelector
+ affinityLabels := map[string]string{}
+ nodeSelector := labels.Set(pod.Spec.NodeSelector)
+ labelsExist := true
+ for _, l := range s.labels {
+ if nodeSelector.Has(l) {
+ affinityLabels[l] = nodeSelector.Get(l)
+ } else {
+ // the current pod does not specify all the labels, look in the existing service pods
+ labelsExist = false
+ }
+ }
+
+ // skip looking at other pods in the service if the current pod defines all the required affinity labels
+ if !labelsExist {
+ services, err := s.serviceLister.GetPodServices(pod)
+ if err == nil {
+ // just use the first service and get the other pods within the service
+ // TODO: a separate predicate can be created that tries to handle all services for the pod
+ selector := labels.SelectorFromSet(services[0].Spec.Selector)
+ servicePods, err := s.podLister.List(selector)
+ if err != nil {
+ return false, err
+ }
+ if len(servicePods) > 0 {
+ // consider any service pod and fetch the minion its hosted on
+ otherMinion, err := s.nodeInfo.GetNodeInfo(servicePods[0].Status.Host)
+ if err != nil {
+ return false, err
+ }
+ for _, l := range s.labels {
+ // If the pod being scheduled has the label value specified, do not override it
+ if _, exists := affinityLabels[l]; exists {
+ continue
+ }
+ if labels.Set(otherMinion.Labels).Has(l) {
+ affinityLabels[l] = labels.Set(otherMinion.Labels).Get(l)
+ }
+ }
+ }
+ }
+ }
+
+ // if there are no existing pods in the service, consider all minions
+ if len(affinityLabels) == 0 {
+ affinitySelector = labels.Everything()
+ } else {
+ affinitySelector = labels.Set(affinityLabels).AsSelector()
+ }
+
+ minion, err := s.nodeInfo.GetNodeInfo(node)
+ if err != nil {
+ return false, err
+ }
+
+ // check if the minion matches the selector
+ return affinitySelector.Matches(labels.Set(minion.Labels)), nil
+}
+
func PodFitsPorts(pod api.Pod, existingPods []api.Pod, node string) (bool, error) {
existingPorts := getUsedPorts(existingPods...)
wantPorts := getUsedPorts(pod)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates_test.go
index 5fe556207f10..9ef6c11b5b81 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/predicates_test.go
@@ -17,6 +17,7 @@ limitations under the License.
package scheduler
import (
+ "fmt"
"reflect"
"testing"
@@ -31,6 +32,17 @@ func (n FakeNodeInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
return &node, nil
}
+type FakeNodeListInfo []api.Node
+
+func (nodes FakeNodeListInfo) GetNodeInfo(nodeName string) (*api.Node, error) {
+ for _, node := range nodes {
+ if node.Name == nodeName {
+ return &node, nil
+ }
+ }
+ return nil, fmt.Errorf("Unable to find node: %s", nodeName)
+}
+
func makeResources(milliCPU int64, memory int64) api.NodeResources {
return api.NodeResources{
Capacity: api.ResourceList{
@@ -260,7 +272,7 @@ func TestDiskConflicts(t *testing.T) {
volState := api.PodSpec{
Volumes: []api.Volume{
{
- Source: &api.VolumeSource{
+ Source: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDisk{
PDName: "foo",
},
@@ -271,7 +283,7 @@ func TestDiskConflicts(t *testing.T) {
volState2 := api.PodSpec{
Volumes: []api.Volume{
{
- Source: &api.VolumeSource{
+ Source: api.VolumeSource{
GCEPersistentDisk: &api.GCEPersistentDisk{
PDName: "bar",
},
@@ -386,3 +398,175 @@ func TestPodFitsSelector(t *testing.T) {
}
}
}
+
+func TestNodeLabelPresence(t *testing.T) {
+ label := map[string]string{"foo": "bar", "bar": "foo"}
+ tests := []struct {
+ pod api.Pod
+ existingPods []api.Pod
+ labels []string
+ presence bool
+ fits bool
+ test string
+ }{
+ {
+ labels: []string{"baz"},
+ presence: true,
+ fits: false,
+ test: "label does not match, presence true",
+ },
+ {
+ labels: []string{"baz"},
+ presence: false,
+ fits: true,
+ test: "label does not match, presence false",
+ },
+ {
+ labels: []string{"foo", "baz"},
+ presence: true,
+ fits: false,
+ test: "one label matches, presence true",
+ },
+ {
+ labels: []string{"foo", "baz"},
+ presence: false,
+ fits: false,
+ test: "one label matches, presence false",
+ },
+ {
+ labels: []string{"foo", "bar"},
+ presence: true,
+ fits: true,
+ test: "all labels match, presence true",
+ },
+ {
+ labels: []string{"foo", "bar"},
+ presence: false,
+ fits: false,
+ test: "all labels match, presence false",
+ },
+ }
+ for _, test := range tests {
+ node := api.Node{ObjectMeta: api.ObjectMeta{Labels: label}}
+ labelChecker := NodeLabelChecker{FakeNodeInfo(node), test.labels, test.presence}
+ fits, err := labelChecker.CheckNodeLabelPresence(test.pod, test.existingPods, "machine")
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if fits != test.fits {
+ t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
+ }
+ }
+}
+
+func TestServiceAffinity(t *testing.T) {
+ selector := map[string]string{"foo": "bar"}
+ labels1 := map[string]string{
+ "region": "r1",
+ "zone": "z11",
+ }
+ labels2 := map[string]string{
+ "region": "r1",
+ "zone": "z12",
+ }
+ labels3 := map[string]string{
+ "region": "r2",
+ "zone": "z21",
+ }
+ labels4 := map[string]string{
+ "region": "r2",
+ "zone": "z22",
+ }
+ node1 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: labels1}}
+ node2 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: labels2}}
+ node3 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: labels3}}
+ node4 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine4", Labels: labels4}}
+ node5 := api.Node{ObjectMeta: api.ObjectMeta{Name: "machine5", Labels: labels4}}
+ tests := []struct {
+ pod api.Pod
+ pods []api.Pod
+ services []api.Service
+ node string
+ labels []string
+ fits bool
+ test string
+ }{
+ {
+ node: "machine1",
+ fits: true,
+ labels: []string{"region"},
+ test: "nothing scheduled",
+ },
+ {
+ pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r1"}}},
+ node: "machine1",
+ fits: true,
+ labels: []string{"region"},
+ test: "pod with region label match",
+ },
+ {
+ pod: api.Pod{Spec: api.PodSpec{NodeSelector: map[string]string{"region": "r2"}}},
+ node: "machine1",
+ fits: false,
+ labels: []string{"region"},
+ test: "pod with region label mismatch",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
+ pods: []api.Pod{{Status: api.PodStatus{Host: "machine1"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
+ node: "machine1",
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
+ fits: true,
+ labels: []string{"region"},
+ test: "service pod on same minion",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
+ pods: []api.Pod{{Status: api.PodStatus{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
+ node: "machine1",
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
+ fits: true,
+ labels: []string{"region"},
+ test: "service pod on different minion, region match",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
+ pods: []api.Pod{{Status: api.PodStatus{Host: "machine3"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
+ node: "machine1",
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
+ fits: false,
+ labels: []string{"region"},
+ test: "service pod on different minion, region mismatch",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
+ pods: []api.Pod{{Status: api.PodStatus{Host: "machine2"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
+ node: "machine1",
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
+ fits: false,
+ labels: []string{"region", "zone"},
+ test: "service pod on different minion, multiple labels, not all match",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: selector}},
+ pods: []api.Pod{{Status: api.PodStatus{Host: "machine5"}, ObjectMeta: api.ObjectMeta{Labels: selector}}},
+ node: "machine4",
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: selector}}},
+ fits: true,
+ labels: []string{"region", "zone"},
+ test: "service pod on different minion, multiple labels, all match",
+ },
+ }
+
+ for _, test := range tests {
+ nodes := []api.Node{node1, node2, node3, node4, node5}
+ serviceAffinity := ServiceAffinity{FakePodLister(test.pods), FakeServiceLister(test.services), FakeNodeListInfo(nodes), test.labels}
+ fits, err := serviceAffinity.CheckServiceAffinity(test.pod, []api.Pod{}, test.node)
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ if fits != test.fits {
+ t.Errorf("%s: expected: %v got %v", test.test, test.fits, fits)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities.go
index dc3250cb8a2c..6685358c949d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities.go
@@ -18,6 +18,7 @@ package scheduler
import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/golang/glog"
)
@@ -88,3 +89,46 @@ func LeastRequestedPriority(pod api.Pod, podLister PodLister, minionLister Minio
}
return list, nil
}
+
+type NodeLabelPrioritizer struct {
+ label string
+ presence bool
+}
+
+func NewNodeLabelPriority(label string, presence bool) PriorityFunction {
+ labelPrioritizer := &NodeLabelPrioritizer{
+ label: label,
+ presence: presence,
+ }
+ return labelPrioritizer.CalculateNodeLabelPriority
+}
+
+// CalculateNodeLabelPriority checks whether a particular label exists on a minion or not, regardless of its value.
+// If presence is true, prioritizes minions that have the specified label, regardless of value.
+// If presence is false, prioritizes minions that do not have the specified label.
+func (n *NodeLabelPrioritizer) CalculateNodeLabelPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+ var score int
+ minions, err := minionLister.List()
+ if err != nil {
+ return nil, err
+ }
+
+ labeledMinions := map[string]bool{}
+ for _, minion := range minions.Items {
+ exists := labels.Set(minion.Labels).Has(n.label)
+ labeledMinions[minion.Name] = (exists && n.presence) || (!exists && !n.presence)
+ }
+
+ result := []HostPriority{}
+ //score int - scale of 0-10
+ // 0 being the lowest priority and 10 being the highest
+ for minionName, success := range labeledMinions {
+ if success {
+ score = 10
+ } else {
+ score = 0
+ }
+ result = append(result, HostPriority{host: minionName, score: score})
+ }
+ return result, nil
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities_test.go
index 23f932e0a0d8..8aa878586b4d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/priorities_test.go
@@ -18,6 +18,7 @@ package scheduler
import (
"reflect"
+ "sort"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -238,3 +239,102 @@ func TestLeastRequested(t *testing.T) {
}
}
}
+
+func TestNewNodeLabelPriority(t *testing.T) {
+ label1 := map[string]string{"foo": "bar"}
+ label2 := map[string]string{"bar": "foo"}
+ label3 := map[string]string{"bar": "baz"}
+ tests := []struct {
+ pod api.Pod
+ pods []api.Pod
+ nodes []api.Node
+ label string
+ presence bool
+ expectedList HostPriorityList
+ test string
+ }{
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}, {"machine3", 0}},
+ label: "baz",
+ presence: true,
+ test: "no match found, presence true",
+ },
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}, {"machine3", 10}},
+ label: "baz",
+ presence: false,
+ test: "no match found, presence false",
+ },
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+ label: "foo",
+ presence: true,
+ test: "one match found, presence true",
+ },
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+ label: "foo",
+ presence: false,
+ test: "one match found, presence false",
+ },
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 0}, {"machine2", 10}, {"machine3", 10}},
+ label: "bar",
+ presence: true,
+ test: "two matches found, presence true",
+ },
+ {
+ nodes: []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: "machine1", Labels: label1}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine2", Labels: label2}},
+ {ObjectMeta: api.ObjectMeta{Name: "machine3", Labels: label3}},
+ },
+ expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}, {"machine3", 0}},
+ label: "bar",
+ presence: false,
+ test: "two matches found, presence false",
+ },
+ }
+
+ for _, test := range tests {
+ prioritizer := NodeLabelPrioritizer{
+ label: test.label,
+ presence: test.presence,
+ }
+ list, err := prioritizer.CalculateNodeLabelPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(api.NodeList{Items: test.nodes}))
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ // sort the two lists to avoid failures on account of different ordering
+ sort.Sort(test.expectedList)
+ sort.Sort(list)
+ if !reflect.DeepEqual(test.expectedList, list) {
+ t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading.go
index 0c831484104d..f72f19c93ff6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading.go
@@ -17,28 +17,44 @@ limitations under the License.
package scheduler
import (
- "math/rand"
-
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
)
-// CalculateSpreadPriority spreads pods by minimizing the number of pods on the same machine with the same labels.
-// Importantly, if there are services in the system that span multiple heterogenous sets of pods, this spreading priority
-// may not provide optimal spreading for the members of that Service.
-// TODO: consider if we want to include Service label sets in the scheduling priority.
-func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
- pods, err := podLister.List(labels.SelectorFromSet(pod.Labels))
- if err != nil {
- return nil, err
+type ServiceSpread struct {
+ serviceLister ServiceLister
+}
+
+func NewServiceSpreadPriority(serviceLister ServiceLister) PriorityFunction {
+ serviceSpread := &ServiceSpread{
+ serviceLister: serviceLister,
+ }
+ return serviceSpread.CalculateSpreadPriority
+}
+
+// CalculateSpreadPriority spreads pods by minimizing the number of pods belonging to the same service
+// on the same machine.
+func (s *ServiceSpread) CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+ var maxCount int
+ var pods []api.Pod
+ var err error
+
+ services, err := s.serviceLister.GetPodServices(pod)
+ if err == nil {
+ // just use the first service and get the other pods within the service
+ // TODO: a separate predicate can be created that tries to handle all services for the pod
+ selector := labels.SelectorFromSet(services[0].Spec.Selector)
+ pods, err = podLister.List(selector)
+ if err != nil {
+ return nil, err
+ }
}
+
minions, err := minionLister.List()
if err != nil {
return nil, err
}
- var maxCount int
- var fScore float32 = 10.0
counts := map[string]int{}
if len(pods) > 0 {
for _, pod := range pods {
@@ -54,6 +70,8 @@ func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister Mini
//score int - scale of 0-10
// 0 being the lowest priority and 10 being the highest
for _, minion := range minions.Items {
+ // initializing to the default/max minion score of 10
+ fScore := float32(10)
if maxCount > 0 {
fScore = 10 * (float32(maxCount-counts[minion.Name]) / float32(maxCount))
}
@@ -62,6 +80,78 @@ func CalculateSpreadPriority(pod api.Pod, podLister PodLister, minionLister Mini
return result, nil
}
-func NewSpreadingScheduler(podLister PodLister, minionLister MinionLister, predicates []FitPredicate, random *rand.Rand) Scheduler {
- return NewGenericScheduler(predicates, []PriorityConfig{{Function: CalculateSpreadPriority, Weight: 1}}, podLister, random)
+type ServiceAntiAffinity struct {
+ serviceLister ServiceLister
+ label string
+}
+
+func NewServiceAntiAffinityPriority(serviceLister ServiceLister, label string) PriorityFunction {
+ antiAffinity := &ServiceAntiAffinity{
+ serviceLister: serviceLister,
+ label: label,
+ }
+ return antiAffinity.CalculateAntiAffinityPriority
+}
+
+// CalculateAntiAffinityPriority spreads pods by minimizing the number of pods belonging to the same service
+// on machines with the same value for a particular label.
+// The label to be considered is provided to the struct (ServiceAntiAffinity).
+func (s *ServiceAntiAffinity) CalculateAntiAffinityPriority(pod api.Pod, podLister PodLister, minionLister MinionLister) (HostPriorityList, error) {
+ var pods []api.Pod
+
+ services, err := s.serviceLister.GetPodServices(pod)
+ if err == nil {
+ // just use the first service and get the other pods within the service
+ // TODO: a separate predicate can be created that tries to handle all services for the pod
+ selector := labels.SelectorFromSet(services[0].Spec.Selector)
+ pods, err = podLister.List(selector)
+ if err != nil {
+ return nil, err
+ }
+ }
+
+ minions, err := minionLister.List()
+ if err != nil {
+ return nil, err
+ }
+
+ // separate out the minions that have the label from the ones that don't
+ otherMinions := []string{}
+ labeledMinions := map[string]string{}
+ for _, minion := range minions.Items {
+ if labels.Set(minion.Labels).Has(s.label) {
+ label := labels.Set(minion.Labels).Get(s.label)
+ labeledMinions[minion.Name] = label
+ } else {
+ otherMinions = append(otherMinions, minion.Name)
+ }
+ }
+
+ podCounts := map[string]int{}
+ for _, pod := range pods {
+ label, exists := labeledMinions[pod.Status.Host]
+ if !exists {
+ continue
+ }
+ podCounts[label]++
+ }
+
+ numServicePods := len(pods)
+ result := []HostPriority{}
+ //score int - scale of 0-10
+ // 0 being the lowest priority and 10 being the highest
+ for minion := range labeledMinions {
+ // initializing to the default/max minion score of 10
+ fScore := float32(10)
+ if numServicePods > 0 {
+ fScore = 10 * (float32(numServicePods-podCounts[labeledMinions[minion]]) / float32(numServicePods))
+ }
+ result = append(result, HostPriority{host: minion, score: int(fScore)})
+ }
+ // add the open minions with a score of 0
+ for _, minion := range otherMinions {
+ result = append(result, HostPriority{host: minion, score: 0})
+ }
+
+ return result, nil
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading_test.go
index 0e9c90934642..fe891cf25046 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler/spreading_test.go
@@ -18,12 +18,13 @@ package scheduler
import (
"reflect"
+ "sort"
"testing"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
)
-func TestSpreadPriority(t *testing.T) {
+func TestServiceSpreadPriority(t *testing.T) {
labels1 := map[string]string{
"foo": "bar",
"baz": "blah",
@@ -32,16 +33,17 @@ func TestSpreadPriority(t *testing.T) {
"bar": "foo",
"baz": "blah",
}
- machine1Status := api.PodStatus{
+ zone1Status := api.PodStatus{
Host: "machine1",
}
- machine2Status := api.PodStatus{
+ zone2Status := api.PodStatus{
Host: "machine2",
}
tests := []struct {
pod api.Pod
pods []api.Pod
nodes []string
+ services []api.Service
expectedList HostPriorityList
test string
}{
@@ -52,55 +54,72 @@ func TestSpreadPriority(t *testing.T) {
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
- pods: []api.Pod{{Status: machine1Status}},
+ pods: []api.Pod{{Status: zone1Status}},
nodes: []string{"machine1", "machine2"},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
- test: "no labels",
+ test: "no services",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
- pods: []api.Pod{{Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
+ pods: []api.Pod{{Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
nodes: []string{"machine1", "machine2"},
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 10}},
- test: "different labels",
+ test: "different services",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []api.Pod{
- {Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
- {Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 10}, {"machine2", 0}},
- test: "one label match",
+ test: "two pods, one service pod",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []api.Pod{
- {Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
- {Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
- {Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 0}, {"machine2", 0}},
- test: "two label matches on different machines",
+ test: "three pods, two service pods on different machines",
},
{
pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
pods: []api.Pod{
- {Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
- {Status: machine1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
- {Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
- {Status: machine2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
},
nodes: []string{"machine1", "machine2"},
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
expectedList: []HostPriority{{"machine1", 5}, {"machine2", 0}},
- test: "three label matches",
+ test: "four pods, three service pods",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: []string{"machine1", "machine2"},
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
+ expectedList: []HostPriority{{"machine1", 0}, {"machine2", 5}},
+ test: "service with partial pod label matches",
},
}
for _, test := range tests {
- list, err := CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeMinionList(test.nodes)))
+ serviceSpread := ServiceSpread{serviceLister: FakeServiceLister(test.services)}
+ list, err := serviceSpread.CalculateSpreadPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeMinionList(test.nodes)))
if err != nil {
t.Errorf("unexpected error: %v", err)
}
@@ -109,3 +128,166 @@ func TestSpreadPriority(t *testing.T) {
}
}
}
+
+func TestZoneSpreadPriority(t *testing.T) {
+ labels1 := map[string]string{
+ "foo": "bar",
+ "baz": "blah",
+ }
+ labels2 := map[string]string{
+ "bar": "foo",
+ "baz": "blah",
+ }
+ zone1 := map[string]string{
+ "zone": "zone1",
+ }
+ zone2 := map[string]string{
+ "zone": "zone2",
+ }
+ nozone := map[string]string{
+ "name": "value",
+ }
+ zone0Status := api.PodStatus{
+ Host: "machine01",
+ }
+ zone1Status := api.PodStatus{
+ Host: "machine11",
+ }
+ zone2Status := api.PodStatus{
+ Host: "machine21",
+ }
+ labeledNodes := map[string]map[string]string{
+ "machine01": nozone, "machine02": nozone,
+ "machine11": zone1, "machine12": zone1,
+ "machine21": zone2, "machine22": zone2,
+ }
+ tests := []struct {
+ pod api.Pod
+ pods []api.Pod
+ nodes map[string]map[string]string
+ services []api.Service
+ expectedList HostPriorityList
+ test string
+ }{
+ {
+ nodes: labeledNodes,
+ expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+ {"machine21", 10}, {"machine22", 10},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "nothing scheduled",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{{Status: zone1Status}},
+ nodes: labeledNodes,
+ expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+ {"machine21", 10}, {"machine22", 10},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "no services",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{{Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}}},
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"key": "value"}}}},
+ expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+ {"machine21", 10}, {"machine22", 10},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "different services",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone0Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
+ expectedList: []HostPriority{{"machine11", 10}, {"machine12", 10},
+ {"machine21", 0}, {"machine22", 0},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "three pods, one service pod",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
+ expectedList: []HostPriority{{"machine11", 5}, {"machine12", 5},
+ {"machine21", 5}, {"machine22", 5},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "three pods, two service pods on different machines",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
+ expectedList: []HostPriority{{"machine11", 6}, {"machine12", 6},
+ {"machine21", 3}, {"machine22", 3},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "four pods, three service pods",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels2}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: map[string]string{"baz": "blah"}}}},
+ expectedList: []HostPriority{{"machine11", 3}, {"machine12", 3},
+ {"machine21", 6}, {"machine22", 6},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "service with partial pod label matches",
+ },
+ {
+ pod: api.Pod{ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ pods: []api.Pod{
+ {Status: zone0Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone1Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ {Status: zone2Status, ObjectMeta: api.ObjectMeta{Labels: labels1}},
+ },
+ nodes: labeledNodes,
+ services: []api.Service{{Spec: api.ServiceSpec{Selector: labels1}}},
+ expectedList: []HostPriority{{"machine11", 7}, {"machine12", 7},
+ {"machine21", 5}, {"machine22", 5},
+ {"machine01", 0}, {"machine02", 0}},
+ test: "service pod on non-zoned minion",
+ },
+ }
+
+ for _, test := range tests {
+ zoneSpread := ServiceAntiAffinity{serviceLister: FakeServiceLister(test.services), label: "zone"}
+ list, err := zoneSpread.CalculateAntiAffinityPriority(test.pod, FakePodLister(test.pods), FakeMinionLister(makeLabeledMinionList(test.nodes)))
+ if err != nil {
+ t.Errorf("unexpected error: %v", err)
+ }
+ // sort the two lists to avoid failures on account of different ordering
+ sort.Sort(test.expectedList)
+ sort.Sort(list)
+ if !reflect.DeepEqual(test.expectedList, list) {
+ t.Errorf("%s: expected %#v, got %#v", test.test, test.expectedList, list)
+ }
+ }
+}
+
+func makeLabeledMinionList(nodeMap map[string]map[string]string) (result api.NodeList) {
+ nodes := []api.Node{}
+ for nodeName, labels := range nodeMap {
+ nodes = append(nodes, api.Node{ObjectMeta: api.ObjectMeta{Name: nodeName, Labels: labels}})
+ }
+ return api.NodeList{Items: nodes}
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/service/endpoints_controller.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/service/endpoints_controller.go
index 95fb15be4d5a..d205914a3cc8 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/service/endpoints_controller.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/service/endpoints_controller.go
@@ -56,10 +56,10 @@ func (e *EndpointController) SyncServiceEndpoints() error {
continue
}
- glog.V(3).Infof("About to update endpoints for service %v", service.Name)
+ glog.V(5).Infof("About to update endpoints for service %s/%s", service.Namespace, service.Name)
pods, err := e.client.Pods(service.Namespace).List(labels.Set(service.Spec.Selector).AsSelector())
if err != nil {
- glog.Errorf("Error syncing service: %#v, skipping.", service)
+ glog.Errorf("Error syncing service: %s/%s, skipping", service.Namespace, service.Name)
resultErr = err
continue
}
@@ -68,11 +68,11 @@ func (e *EndpointController) SyncServiceEndpoints() error {
for _, pod := range pods.Items {
port, err := findPort(&pod, service.Spec.ContainerPort)
if err != nil {
- glog.Errorf("Failed to find port for service: %v, %v", service, err)
+ glog.Errorf("Failed to find port for service %s/%s: %v", service.Namespace, service.Name, err)
continue
}
if len(pod.Status.PodIP) == 0 {
- glog.Errorf("Failed to find an IP for pod: %v", pod)
+ glog.Errorf("Failed to find an IP for pod %s/%s", pod.Namespace, pod.Name)
continue
}
endpoints = append(endpoints, net.JoinHostPort(pod.Status.PodIP, strconv.Itoa(port)))
@@ -100,7 +100,7 @@ func (e *EndpointController) SyncServiceEndpoints() error {
} else {
// Pre-existing
if endpointsEqual(currentEndpoints, endpoints) {
- glog.V(3).Infof("endpoints are equal for %s, skipping update", service.Name)
+ glog.V(5).Infof("endpoints are equal for %s/%s, skipping update", service.Namespace, service.Name)
continue
}
_, err = e.client.Endpoints(service.Namespace).Update(newEndpoints)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch.go
index eab0b1753c18..6e5027acbb87 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch.go
@@ -61,7 +61,7 @@ func ParseWatchResourceVersion(resourceVersion, kind string) (uint64, error) {
// watch.Interface. resourceVersion may be used to specify what version to begin
// watching (e.g., for reconnecting without missing any updates).
func (h *EtcdHelper) WatchList(key string, resourceVersion uint64, filter FilterFunc) (watch.Interface, error) {
- w := newEtcdWatcher(true, filter, h.Codec, h.ResourceVersioner, nil)
+ w := newEtcdWatcher(true, exceptKey(key), filter, h.Codec, h.ResourceVersioner, nil)
go w.etcdWatch(h.Client, key, resourceVersion)
return w, nil
}
@@ -90,7 +90,7 @@ func (h *EtcdHelper) Watch(key string, resourceVersion uint64) watch.Interface {
//
// Errors will be sent down the channel.
func (h *EtcdHelper) WatchAndTransform(key string, resourceVersion uint64, transform TransformFunc) watch.Interface {
- w := newEtcdWatcher(false, Everything, h.Codec, h.ResourceVersioner, transform)
+ w := newEtcdWatcher(false, nil, Everything, h.Codec, h.ResourceVersioner, transform)
go w.etcdWatch(h.Client, key, resourceVersion)
return w
}
@@ -98,14 +98,25 @@ func (h *EtcdHelper) WatchAndTransform(key string, resourceVersion uint64, trans
// TransformFunc attempts to convert an object to another object for use with a watcher.
type TransformFunc func(runtime.Object) (runtime.Object, error)
+// includeFunc returns true if the given key should be considered part of a watch
+type includeFunc func(key string) bool
+
+// exceptKey is an includeFunc that returns false when the provided key matches the watched key
+func exceptKey(except string) includeFunc {
+ return func(key string) bool {
+ return key != except
+ }
+}
+
// etcdWatcher converts a native etcd watch to a watch.Interface.
type etcdWatcher struct {
encoding runtime.Codec
versioner EtcdResourceVersioner
transform TransformFunc
- list bool // If we're doing a recursive watch, should be true.
- filter FilterFunc
+ list bool // If we're doing a recursive watch, should be true.
+ include includeFunc
+ filter FilterFunc
etcdIncoming chan *etcd.Response
etcdError chan error
@@ -126,12 +137,13 @@ const watchWaitDuration = 100 * time.Millisecond
// newEtcdWatcher returns a new etcdWatcher; if list is true, watch sub-nodes. If you provide a transform
// and a versioner, the versioner must be able to handle the objects that transform creates.
-func newEtcdWatcher(list bool, filter FilterFunc, encoding runtime.Codec, versioner EtcdResourceVersioner, transform TransformFunc) *etcdWatcher {
+func newEtcdWatcher(list bool, include includeFunc, filter FilterFunc, encoding runtime.Codec, versioner EtcdResourceVersioner, transform TransformFunc) *etcdWatcher {
w := &etcdWatcher{
encoding: encoding,
versioner: versioner,
transform: transform,
list: list,
+ include: include,
filter: filter,
etcdIncoming: make(chan *etcd.Response),
etcdError: make(chan error, 1),
@@ -258,6 +270,9 @@ func (w *etcdWatcher) sendAdd(res *etcd.Response) {
glog.Errorf("unexpected nil node: %#v", res)
return
}
+ if w.include != nil && !w.include(res.Node.Key) {
+ return
+ }
data := []byte(res.Node.Value)
obj, err := w.decodeObject(data, res.Node.ModifiedIndex)
if err != nil {
@@ -285,6 +300,9 @@ func (w *etcdWatcher) sendModify(res *etcd.Response) {
glog.Errorf("unexpected nil node: %#v", res)
return
}
+ if w.include != nil && !w.include(res.Node.Key) {
+ return
+ }
curData := []byte(res.Node.Value)
curObj, err := w.decodeObject(curData, res.Node.ModifiedIndex)
if err != nil {
@@ -331,6 +349,9 @@ func (w *etcdWatcher) sendDelete(res *etcd.Response) {
glog.Errorf("unexpected nil prev node: %#v", res)
return
}
+ if w.include != nil && !w.include(res.PrevNode.Key) {
+ return
+ }
data := []byte(res.PrevNode.Value)
index := res.PrevNode.ModifiedIndex
if res.Node != nil {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch_test.go
index ad5bd0716343..67e508c67736 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/tools/etcd_tools_watch_test.go
@@ -113,7 +113,7 @@ func TestWatchInterpretations(t *testing.T) {
for name, item := range table {
for _, action := range item.actions {
- w := newEtcdWatcher(true, firstLetterIsB, codec, versioner, nil)
+ w := newEtcdWatcher(true, nil, firstLetterIsB, codec, versioner, nil)
emitCalled := false
w.emit = func(event watch.Event) {
emitCalled = true
@@ -151,7 +151,7 @@ func TestWatchInterpretations(t *testing.T) {
}
func TestWatchInterpretation_ResponseNotSet(t *testing.T) {
- w := newEtcdWatcher(false, Everything, codec, versioner, nil)
+ w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil)
w.emit = func(e watch.Event) {
t.Errorf("Unexpected emit: %v", e)
}
@@ -165,7 +165,7 @@ func TestWatchInterpretation_ResponseNotSet(t *testing.T) {
func TestWatchInterpretation_ResponseNoNode(t *testing.T) {
actions := []string{"create", "set", "compareAndSwap", "delete"}
for _, action := range actions {
- w := newEtcdWatcher(false, Everything, codec, versioner, nil)
+ w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil)
w.emit = func(e watch.Event) {
t.Errorf("Unexpected emit: %v", e)
}
@@ -179,7 +179,7 @@ func TestWatchInterpretation_ResponseNoNode(t *testing.T) {
func TestWatchInterpretation_ResponseBadData(t *testing.T) {
actions := []string{"create", "set", "compareAndSwap", "delete"}
for _, action := range actions {
- w := newEtcdWatcher(false, Everything, codec, versioner, nil)
+ w := newEtcdWatcher(false, nil, Everything, codec, versioner, nil)
w.emit = func(e watch.Event) {
t.Errorf("Unexpected emit: %v", e)
}
@@ -524,6 +524,51 @@ func TestWatchListFromZeroIndex(t *testing.T) {
watching.Stop()
}
+func TestWatchListIgnoresRootKey(t *testing.T) {
+ codec := latest.Codec
+ pod := &api.Pod{ObjectMeta: api.ObjectMeta{Name: "foo"}}
+
+ fakeClient := NewFakeEtcdClient(t)
+ h := EtcdHelper{fakeClient, codec, versioner}
+
+ watching, err := h.WatchList("/some/key", 1, Everything)
+ if err != nil {
+ t.Fatalf("Unexpected error: %v", err)
+ }
+ fakeClient.WaitForWatchCompletion()
+
+ // This is the root directory of the watch, which happens to have a value encoded
+ fakeClient.WatchResponse <- &etcd.Response{
+ Action: "delete",
+ PrevNode: &etcd.Node{
+ Key: "/some/key",
+ Value: runtime.EncodeOrDie(codec, pod),
+ CreatedIndex: 1,
+ ModifiedIndex: 1,
+ },
+ }
+ // Delete of the parent directory of a key is an event that a list watch would receive,
+ // but will have no value so the decode will fail.
+ fakeClient.WatchResponse <- &etcd.Response{
+ Action: "delete",
+ PrevNode: &etcd.Node{
+ Key: "/some/key",
+ Value: "",
+ CreatedIndex: 1,
+ ModifiedIndex: 1,
+ },
+ }
+ close(fakeClient.WatchStop)
+
+ // the existing node is detected and the index set
+ _, open := <-watching.ResultChan()
+ if open {
+ t.Fatalf("unexpected channel open")
+ }
+
+ watching.Stop()
+}
+
func TestWatchFromNotFound(t *testing.T) {
fakeClient := NewFakeEtcdClient(t)
fakeClient.Data["/some/key"] = EtcdResponseWithError{
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/doc.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/doc.go
new file mode 100644
index 000000000000..8fc2e325e7a5
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/doc.go
@@ -0,0 +1,18 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package types implements various generic types used throughout kubernetes.
+package types
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/uid.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/uid.go
new file mode 100644
index 000000000000..63e9a92bcb44
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/types/uid.go
@@ -0,0 +1,22 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package types
+
+// UID is a type that holds unique ID values, including UUIDs. Because we
+// don't ONLY use UUIDs, this is an alias to string. Being a type captures
+// intent and helps make sure that UIDs and names do not get conflated.
+type UID string
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors/errors.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors/errors.go
index 69e4ed9c8239..e530176206f1 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors/errors.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/errors/errors.go
@@ -27,7 +27,7 @@ type Aggregate interface {
// NewAggregate converts a slice of errors into an Aggregate interface, which
// is itself an implementation of the error interface. If the slice is empty,
-// this returs nil.
+// this returns nil.
func NewAggregate(errlist []error) Aggregate {
if len(errlist) == 0 {
return nil
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_unsupported.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/flags.go
similarity index 58%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_unsupported.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/flags.go
index d861045fc8b4..80687267f92a 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_unsupported.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/flags.go
@@ -1,5 +1,3 @@
-// +build !linux
-
/*
Copyright 2014 Google Inc. All rights reserved.
@@ -16,21 +14,14 @@ See the License for the specific language governing permissions and
limitations under the License.
*/
-package volume
-
-const MOUNT_MS_BIND = 0
-const MOUNT_MS_RDONLY = 0
-
-type DiskMounter struct{}
+package util
-func (mounter *DiskMounter) Mount(source string, target string, fstype string, flags uintptr, data string) error {
- return nil
-}
-
-func (mounter *DiskMounter) Unmount(target string, flags int) error {
- return nil
-}
+import (
+ flag "github.com/spf13/pflag"
+)
-func (mounter *DiskMounter) RefCount(PD Interface) (string, int, error) {
- return "", 0, nil
+// InitFlags normalizes and parses the command line flags
+func InitFlags() {
+ AddAllFlagsToPFlags()
+ flag.Parse()
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/logs.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/logs.go
index 8049b6ece818..2f331dcb62a6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/logs.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/logs.go
@@ -22,9 +22,10 @@ import (
"time"
"github.com/golang/glog"
+ "github.com/spf13/pflag"
)
-var logFlushFreq = flag.Duration("log_flush_frequency", 5*time.Second, "Maximum number of seconds between log flushes")
+var logFlushFreq = pflag.Duration("log_flush_frequency", 5*time.Second, "Maximum number of seconds between log flushes")
// TODO(thockin): This is temporary until we agree on log dirs and put those into each cmd.
func init() {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/net_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/net_test.go
index 972770f1260c..afdb802684d6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/net_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/net_test.go
@@ -17,8 +17,9 @@ limitations under the License.
package util
import (
- "flag"
"testing"
+
+ flag "github.com/spf13/pflag"
)
func TestIP(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/plog_import.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/pflag_import.go
similarity index 74%
rename from Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/plog_import.go
rename to Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/pflag_import.go
index e0f6afc18a49..e45d88ef23c2 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/plog_import.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/pflag_import.go
@@ -42,7 +42,13 @@ func wrapFlagValue(v flag.Value) pflag.Value {
pv := &flagValueWrapper{
inner: v,
}
- pv.flagType = reflect.TypeOf(v).Elem().Name()
+
+ t := reflect.TypeOf(v)
+ if t.Kind() == reflect.Interface || t.Kind() == reflect.Ptr {
+ t = t.Elem()
+ }
+
+ pv.flagType = t.Name()
pv.flagType = strings.TrimSuffix(pv.flagType, "Value")
return pv
}
@@ -59,6 +65,18 @@ func (v *flagValueWrapper) Type() string {
return v.flagType
}
+type boolFlag interface {
+ flag.Value
+ IsBoolFlag() bool
+}
+
+func (v *flagValueWrapper) IsBoolFlag() bool {
+ if bv, ok := v.inner.(boolFlag); ok {
+ return bv.IsBoolFlag()
+ }
+ return false
+}
+
// Imports a 'flag.Flag' into a 'pflag.FlagSet'. The "short" option is unset
// and the type is inferred using reflection.
func AddFlagToPFlagSet(f *flag.Flag, fs *pflag.FlagSet) {
@@ -76,3 +94,17 @@ func AddFlagSetToPFlagSet(fsIn *flag.FlagSet, fsOut *pflag.FlagSet) {
func AddAllFlagsToPFlagSet(fs *pflag.FlagSet) {
AddFlagSetToPFlagSet(flag.CommandLine, fs)
}
+
+// Add all of the top level 'flag' package flags to the top level 'pflag' flags.
+func AddAllFlagsToPFlags() {
+ AddFlagSetToPFlagSet(flag.CommandLine, pflag.CommandLine)
+}
+
+// Merge all of the flags from fsFrom into fsTo.
+func AddPFlagSetToPFlagSet(fsFrom *pflag.FlagSet, fsTo *pflag.FlagSet) {
+ fsFrom.VisitAll(func(f *pflag.Flag) {
+ if fsTo.Lookup(f.Name) == nil {
+ fsTo.AddFlag(f)
+ }
+ })
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set.go
index bc50c86cba32..8445fb61044e 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set.go
@@ -39,9 +39,11 @@ func (s StringSet) Insert(items ...string) {
}
}
-// Delete removes item from the set.
-func (s StringSet) Delete(item string) {
- delete(s, item)
+// Delete removes the provided items from the set.
+func (s StringSet) Delete(items ...string) {
+ for _, item := range items {
+ delete(s, item)
+ }
}
// Has returns true iff item is contained in the set.
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set_test.go
index 9b35aedc3d8d..6fb02355700c 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/set_test.go
@@ -59,6 +59,29 @@ func TestStringSet(t *testing.T) {
}
}
+func TestStringSetDeleteMultiples(t *testing.T) {
+ s := StringSet{}
+ s.Insert("a", "b", "c")
+ if len(s) != 3 {
+ t.Errorf("Expected len=3: %d", len(s))
+ }
+
+ s.Delete("a", "c")
+ if len(s) != 1 {
+ t.Errorf("Expected len=1: %d", len(s))
+ }
+ if s.Has("a") {
+ t.Errorf("Unexpected contents: %#v", s)
+ }
+ if s.Has("c") {
+ t.Errorf("Unexpected contents: %#v", s)
+ }
+ if !s.Has("b") {
+ t.Errorf("Missing contents: %#v", s)
+ }
+
+}
+
func TestNewStringSet(t *testing.T) {
s := NewStringSet("a", "b", "c")
if len(s) != 3 {
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/uuid.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/uuid.go
index bfd01b22ba45..ffc6aaa046f6 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/uuid.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/util/uuid.go
@@ -21,12 +21,9 @@ import (
"time"
"code.google.com/p/go-uuid/uuid"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
)
-type UUID interface {
- String() string
-}
-
var uuidLock sync.Mutex
/**
@@ -35,12 +32,12 @@ var uuidLock sync.Mutex
* Blocks in a go routine, so that the caller doesn't have to wait.
* TODO: save old unused UUIDs so that no one has to block.
*/
-func NewUUID() UUID {
+func NewUUID() types.UID {
uuidLock.Lock()
result := uuid.NewUUID()
go func() {
time.Sleep(200 * time.Nanosecond)
uuidLock.Unlock()
}()
- return result
+ return types.UID(result.String())
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/base.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/base.go
index 66568d563874..6efc00e19b06 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/base.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/base.go
@@ -36,8 +36,8 @@ package version
var (
// TODO: Deprecate gitMajor and gitMinor, use only gitVersion instead.
gitMajor string = "0" // major version, always numeric
- gitMinor string = "8.0+" // minor version, numeric possibly followed by "+"
- gitVersion string = "v0.8.0-dev" // version from git, output of $(git describe)
+ gitMinor string = "9.0+" // minor version, numeric possibly followed by "+"
+ gitVersion string = "v0.9.0-dev" // version from git, output of $(git describe)
gitCommit string = "" // sha1 from git, output of $(git rev-parse HEAD)
gitTreeState string = "not a git tree" // state of git tree, either "clean" or "dirty"
)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag/verflag.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag/verflag.go
index 85c47bc52143..5231ea2d5bf5 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag/verflag.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/version/verflag/verflag.go
@@ -19,12 +19,12 @@ limitations under the License.
package verflag
import (
- "flag"
"fmt"
"os"
"strconv"
"github.com/GoogleCloudPlatform/kubernetes/pkg/version"
+ flag "github.com/spf13/pflag"
)
type versionValue int
@@ -66,6 +66,11 @@ func (v *versionValue) String() string {
return fmt.Sprintf("%v", bool(*v == VersionTrue))
}
+// The type of the flag as required by the pflag.Value interface
+func (v *versionValue) Type() string {
+ return "version"
+}
+
func VersionVar(p *versionValue, name string, value versionValue, usage string) {
*p = value
flag.Var(p, name, usage)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_linux.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_linux.go
deleted file mode 100644
index 9141bdeb6ca8..000000000000
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/mounter_linux.go
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "bufio"
- "io"
- "os"
- "regexp"
- "strings"
- "syscall"
-
- "github.com/golang/glog"
-)
-
-const MOUNT_MS_BIND = syscall.MS_BIND
-const MOUNT_MS_RDONLY = syscall.MS_RDONLY
-
-type DiskMounter struct{}
-
-// Wraps syscall.Mount()
-func (mounter *DiskMounter) Mount(source string, target string, fstype string, flags uintptr, data string) error {
- glog.V(5).Infof("Mounting %s %s %s %d %s", source, target, fstype, flags, data)
- return syscall.Mount(source, target, fstype, flags, data)
-}
-
-// Wraps syscall.Unmount()
-func (mounter *DiskMounter) Unmount(target string, flags int) error {
- return syscall.Unmount(target, flags)
-}
-
-// Examines /proc/mounts to find the source device of the PD resource and the
-// number of references to that device. Returns both the full device path under
-// the /dev tree and the number of references.
-func (mounter *DiskMounter) RefCount(mount Interface) (string, int, error) {
- // TODO(jonesdl) This can be split up into two procedures, finding the device path
- // and finding the number of references. The parsing could also be separated and another
- // utility could determine if a volume's path is an active mount point.
- file, err := os.Open("/proc/mounts")
- if err != nil {
- return "", -1, err
- }
- defer file.Close()
- scanner := bufio.NewReader(file)
- refCount := 0
- var deviceName string
- // Find the actual device path.
- for {
- line, err := scanner.ReadString('\n')
- if err == io.EOF {
- break
- }
- success, err := regexp.MatchString(mount.GetPath(), line)
- if err != nil {
- return "", -1, err
- }
- if success {
- deviceName = strings.Split(line, " ")[0]
- }
- }
- file.Close()
- file, err = os.Open("/proc/mounts")
- scanner.Reset(bufio.NewReader(file))
- // Find the number of references to the device.
- for {
- line, err := scanner.ReadString('\n')
- if err == io.EOF {
- break
- }
- if strings.Split(line, " ")[0] == deviceName {
- refCount++
- }
- }
- return deviceName, refCount, nil
-}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume.go
deleted file mode 100644
index 0007a10c47a9..000000000000
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume.go
+++ /dev/null
@@ -1,438 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "errors"
- "fmt"
- "io/ioutil"
- "os"
- "path"
- "strconv"
-
- "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
- "github.com/golang/glog"
-)
-
-var ErrUnsupportedVolumeType = errors.New("unsupported volume type")
-
-// Interface is a directory used by pods or hosts.
-// All method implementations of methods in the volume interface must be idempotent.
-type Interface interface {
- // GetPath returns the directory path the volume is mounted to.
- GetPath() string
-}
-
-// Builder interface provides method to set up/mount the volume.
-type Builder interface {
- // Uses Interface to provide the path for Docker binds.
- Interface
- // SetUp prepares and mounts/unpacks the volume to a directory path.
- SetUp() error
-}
-
-// Cleaner interface provides method to cleanup/unmount the volumes.
-type Cleaner interface {
- // TearDown unmounts the volume and removes traces of the SetUp procedure.
- TearDown() error
-}
-
-type gcePersistentDiskUtil interface {
- // Attaches the disk to the kubelet's host machine.
- AttachDisk(PD *GCEPersistentDisk) error
- // Detaches the disk from the kubelet's host machine.
- DetachDisk(PD *GCEPersistentDisk, devicePath string) error
-}
-
-// Mounters wrap os/system specific calls to perform mounts.
-type mounter interface {
- Mount(source string, target string, fstype string, flags uintptr, data string) error
- Unmount(target string, flags int) error
- // RefCount returns the device path for the source disk of a volume, and
- // the number of references to that target disk.
- RefCount(vol Interface) (string, int, error)
-}
-
-// HostDir volumes represent a bare host directory mount.
-// The directory in Path will be directly exposed to the container.
-type HostDir struct {
- Path string
-}
-
-// SetUp implements interface definitions, even though host directory
-// mounts don't require any setup or cleanup.
-func (hostVol *HostDir) SetUp() error {
- return nil
-}
-
-func (hostVol *HostDir) GetPath() string {
- return hostVol.Path
-}
-
-type execInterface interface {
- ExecCommand(cmd []string, dir string) ([]byte, error)
-}
-
-type GitDir struct {
- Source string
- Revision string
- PodID string
- RootDir string
- Name string
- exec exec.Interface
-}
-
-func newGitRepo(volume *api.Volume, podID, rootDir string) *GitDir {
- return &GitDir{
- Source: volume.Source.GitRepo.Repository,
- Revision: volume.Source.GitRepo.Revision,
- PodID: podID,
- RootDir: rootDir,
- Name: volume.Name,
- exec: exec.New(),
- }
-}
-
-func (g *GitDir) ExecCommand(command string, args []string, dir string) ([]byte, error) {
- cmd := g.exec.Command(command, args...)
- cmd.SetDir(dir)
- return cmd.CombinedOutput()
-}
-
-func (g *GitDir) SetUp() error {
- volumePath := g.GetPath()
- if err := os.MkdirAll(volumePath, 0750); err != nil {
- return err
- }
- if _, err := g.ExecCommand("git", []string{"clone", g.Source}, g.GetPath()); err != nil {
- return err
- }
- files, err := ioutil.ReadDir(g.GetPath())
- if err != nil {
- return err
- }
- if len(g.Revision) == 0 {
- return nil
- }
-
- if len(files) != 1 {
- return fmt.Errorf("unexpected directory contents: %v", files)
- }
- dir := path.Join(g.GetPath(), files[0].Name())
- if _, err := g.ExecCommand("git", []string{"checkout", g.Revision}, dir); err != nil {
- return err
- }
- if _, err := g.ExecCommand("git", []string{"reset", "--hard"}, dir); err != nil {
- return err
- }
- return nil
-}
-
-func (g *GitDir) GetPath() string {
- return path.Join(g.RootDir, g.PodID, "volumes", "git", g.Name)
-}
-
-// TearDown simply deletes everything in the directory.
-func (g *GitDir) TearDown() error {
- tmpDir, err := renameDirectory(g.GetPath(), g.Name+"~deleting")
- if err != nil {
- return err
- }
- err = os.RemoveAll(tmpDir)
- if err != nil {
- return err
- }
- return nil
-}
-
-// EmptyDir volumes are temporary directories exposed to the pod.
-// These do not persist beyond the lifetime of a pod.
-type EmptyDir struct {
- Name string
- PodID string
- RootDir string
-}
-
-// SetUp creates new directory.
-func (emptyDir *EmptyDir) SetUp() error {
- path := emptyDir.GetPath()
- return os.MkdirAll(path, 0750)
-}
-
-func (emptyDir *EmptyDir) GetPath() string {
- return path.Join(emptyDir.RootDir, emptyDir.PodID, "volumes", "empty", emptyDir.Name)
-}
-
-func renameDirectory(oldPath, newName string) (string, error) {
- newPath, err := ioutil.TempDir(path.Dir(oldPath), newName)
- if err != nil {
- return "", err
- }
- err = os.Rename(oldPath, newPath)
- if err != nil {
- return "", err
- }
- return newPath, nil
-}
-
-// TearDown simply deletes everything in the directory.
-func (emptyDir *EmptyDir) TearDown() error {
- tmpDir, err := renameDirectory(emptyDir.GetPath(), emptyDir.Name+".deleting~")
- if err != nil {
- return err
- }
- err = os.RemoveAll(tmpDir)
- if err != nil {
- return err
- }
- return nil
-}
-
-// createHostDir interprets API volume as a HostDir.
-func createHostDir(volume *api.Volume) *HostDir {
- return &HostDir{volume.Source.HostDir.Path}
-}
-
-// GCEPersistentDisk volumes are disk resources provided by Google Compute Engine
-// that are attached to the kubelet's host machine and exposed to the pod.
-type GCEPersistentDisk struct {
- Name string
- PodID string
- RootDir string
- // Unique identifier of the PD, used to find the disk resource in the provider.
- PDName string
- // Filesystem type, optional.
- FSType string
- // Specifies the partition to mount
- Partition string
- // Specifies whether the disk will be attached as ReadOnly.
- ReadOnly bool
- // Utility interface that provides API calls to the provider to attach/detach disks.
- util gcePersistentDiskUtil
- // Mounter interface that provides system calls to mount the disks.
- mounter mounter
-}
-
-func (PD *GCEPersistentDisk) GetPath() string {
- return path.Join(PD.RootDir, PD.PodID, "volumes", "gce-pd", PD.Name)
-}
-
-// Attaches the disk and bind mounts to the volume path.
-func (PD *GCEPersistentDisk) SetUp() error {
- // TODO: handle failed mounts here.
- mountpoint, err := isMountPoint(PD.GetPath())
- glog.V(4).Infof("PersistentDisk set up: %s %v %v", PD.GetPath(), mountpoint, err)
- if err != nil && !os.IsNotExist(err) {
- return err
- }
- if mountpoint {
- return nil
- }
- if err := PD.util.AttachDisk(PD); err != nil {
- return err
- }
- flags := uintptr(0)
- if PD.ReadOnly {
- flags = MOUNT_MS_RDONLY
- }
- //Perform a bind mount to the full path to allow duplicate mounts of the same PD.
- if _, err = os.Stat(PD.GetPath()); os.IsNotExist(err) {
- err = os.MkdirAll(PD.GetPath(), 0750)
- if err != nil {
- return err
- }
- globalPDPath := makeGlobalPDName(PD.RootDir, PD.PDName, PD.ReadOnly)
- err = PD.mounter.Mount(globalPDPath, PD.GetPath(), "", MOUNT_MS_BIND|flags, "")
- if err != nil {
- os.RemoveAll(PD.GetPath())
- return err
- }
- }
- return nil
-}
-
-// Unmounts the bind mount, and detaches the disk only if the PD
-// resource was the last reference to that disk on the kubelet.
-func (PD *GCEPersistentDisk) TearDown() error {
- mountpoint, err := isMountPoint(PD.GetPath())
- if err != nil {
- return err
- }
- if !mountpoint {
- return os.RemoveAll(PD.GetPath())
- }
- devicePath, refCount, err := PD.mounter.RefCount(PD)
- if err != nil {
- return err
- }
- if err := PD.mounter.Unmount(PD.GetPath(), 0); err != nil {
- return err
- }
- refCount--
- if err := os.RemoveAll(PD.GetPath()); err != nil {
- return err
- }
- // If refCount is 1, then all bind mounts have been removed, and the
- // remaining reference is the global mount. It is safe to detach.
- if refCount == 1 {
- if err := PD.util.DetachDisk(PD, devicePath); err != nil {
- return err
- }
- }
- return nil
-}
-
-//TODO(jonesdl) prevent name collisions by using designated pod space as well.
-// Ex. (ROOT_DIR)/pods/...
-func makeGlobalPDName(rootDir, devName string, readOnly bool) string {
- var mode string
- if readOnly {
- mode = "ro"
- } else {
- mode = "rw"
- }
- return path.Join(rootDir, "global", "pd", mode, devName)
-}
-
-// createEmptyDir interprets API volume as an EmptyDir.
-func createEmptyDir(volume *api.Volume, podID string, rootDir string) *EmptyDir {
- return &EmptyDir{volume.Name, podID, rootDir}
-}
-
-// Interprets API volume as a PersistentDisk
-func createGCEPersistentDisk(volume *api.Volume, podID string, rootDir string) (*GCEPersistentDisk, error) {
- PDName := volume.Source.GCEPersistentDisk.PDName
- FSType := volume.Source.GCEPersistentDisk.FSType
- partition := strconv.Itoa(volume.Source.GCEPersistentDisk.Partition)
- if partition == "0" {
- partition = ""
- }
- readOnly := volume.Source.GCEPersistentDisk.ReadOnly
- // TODO: move these up into the Kubelet.
- util := &GCEDiskUtil{}
- mounter := &DiskMounter{}
- return &GCEPersistentDisk{
- Name: volume.Name,
- PodID: podID,
- RootDir: rootDir,
- PDName: PDName,
- FSType: FSType,
- Partition: partition,
- ReadOnly: readOnly,
- util: util,
- mounter: mounter}, nil
-}
-
-// CreateVolumeBuilder returns a Builder capable of mounting a volume described by an
-// *api.Volume, or an error.
-func CreateVolumeBuilder(volume *api.Volume, podID string, rootDir string) (Builder, error) {
- source := volume.Source
- // TODO(jonesdl) We will want to throw an error here when we no longer
- // support the default behavior.
- if source == nil {
- return nil, nil
- }
- var vol Builder
- var err error
- // TODO(jonesdl) We should probably not check every pointer and directly
- // resolve these types instead.
- if source.HostDir != nil {
- vol = createHostDir(volume)
- } else if source.EmptyDir != nil {
- vol = createEmptyDir(volume, podID, rootDir)
- } else if source.GCEPersistentDisk != nil {
- vol, err = createGCEPersistentDisk(volume, podID, rootDir)
- if err != nil {
- return nil, err
- }
- } else if source.GitRepo != nil {
- vol = newGitRepo(volume, podID, rootDir)
- } else {
- return nil, ErrUnsupportedVolumeType
- }
- return vol, nil
-}
-
-// CreateVolumeCleaner returns a Cleaner capable of tearing down a volume.
-func CreateVolumeCleaner(kind string, name string, podID string, rootDir string) (Cleaner, error) {
- switch kind {
- case "empty":
- return &EmptyDir{name, podID, rootDir}, nil
- case "gce-pd":
- return &GCEPersistentDisk{
- Name: name,
- PodID: podID,
- RootDir: rootDir,
- util: &GCEDiskUtil{},
- mounter: &DiskMounter{}}, nil
- case "git":
- return &GitDir{
- Name: name,
- PodID: podID,
- RootDir: rootDir,
- }, nil
- default:
- return nil, ErrUnsupportedVolumeType
- }
-}
-
-// GetCurrentVolumes examines directory structure to determine volumes that are
-// presently active and mounted. Returns a map of Cleaner types.
-func GetCurrentVolumes(rootDirectory string) map[string]Cleaner {
- currentVolumes := make(map[string]Cleaner)
- podIDDirs, err := ioutil.ReadDir(rootDirectory)
- if err != nil {
- glog.Errorf("Could not read directory %s: %v", rootDirectory, err)
- }
- // Volume information is extracted from the directory structure:
- // (ROOT_DIR)/(POD_ID)/volumes/(VOLUME_KIND)/(VOLUME_NAME)
- for _, podIDDir := range podIDDirs {
- if !podIDDir.IsDir() {
- continue
- }
- podID := podIDDir.Name()
- podIDPath := path.Join(rootDirectory, podID, "volumes")
- if _, err := os.Stat(podIDPath); os.IsNotExist(err) {
- continue
- }
- volumeKindDirs, err := ioutil.ReadDir(podIDPath)
- if err != nil {
- glog.Errorf("Could not read directory %s: %v", podIDPath, err)
- }
- for _, volumeKindDir := range volumeKindDirs {
- volumeKind := volumeKindDir.Name()
- volumeKindPath := path.Join(podIDPath, volumeKind)
- volumeNameDirs, err := ioutil.ReadDir(volumeKindPath)
- if err != nil {
- glog.Errorf("Could not read directory %s: %v", volumeKindPath, err)
- }
- for _, volumeNameDir := range volumeNameDirs {
- volumeName := volumeNameDir.Name()
- identifier := path.Join(podID, volumeName)
- // TODO(thockin) This should instead return a reference to an extant volume object
- cleaner, err := CreateVolumeCleaner(volumeKind, volumeName, podID, rootDirectory)
- if err != nil {
- glog.Errorf("Could not create volume cleaner for %s: %v", volumeNameDir.Name(), err)
- continue
- }
- currentVolumes[identifier] = cleaner
- }
- }
- }
- return currentVolumes
-}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume_test.go
deleted file mode 100644
index 87bf22c65e4f..000000000000
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/volume/volume_test.go
+++ /dev/null
@@ -1,295 +0,0 @@
-/*
-Copyright 2014 Google Inc. All rights reserved.
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package volume
-
-import (
- "io/ioutil"
- "os"
- "path"
- "reflect"
- "testing"
-
- "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
-)
-
-type MockDiskUtil struct{}
-
-// TODO(jonesdl) To fully test this, we could create a loopback device
-// and mount that instead.
-func (util *MockDiskUtil) AttachDisk(PD *GCEPersistentDisk) error {
- err := os.MkdirAll(path.Join(PD.RootDir, "global", "pd", PD.PDName), 0750)
- if err != nil {
- return err
- }
- return nil
-}
-
-func (util *MockDiskUtil) DetachDisk(PD *GCEPersistentDisk, devicePath string) error {
- err := os.RemoveAll(path.Join(PD.RootDir, "global", "pd", PD.PDName))
- if err != nil {
- return err
- }
- return nil
-}
-
-type MockMounter struct{}
-
-func (mounter *MockMounter) Mount(source string, target string, fstype string, flags uintptr, data string) error {
- return nil
-}
-
-func (mounter *MockMounter) Unmount(target string, flags int) error {
- return nil
-}
-
-func (mounter *MockMounter) RefCount(vol Interface) (string, int, error) {
- return "", 0, nil
-}
-
-func TestCreateVolumeBuilders(t *testing.T) {
- tempDir := "CreateVolumes"
- createVolumesTests := []struct {
- volume api.Volume
- path string
- podID string
- }{
- {
- api.Volume{
- Name: "host-dir",
- Source: &api.VolumeSource{
- HostDir: &api.HostDir{"/dir/path"},
- },
- },
- "/dir/path",
- "",
- },
- {
- api.Volume{
- Name: "empty-dir",
- Source: &api.VolumeSource{
- EmptyDir: &api.EmptyDir{},
- },
- },
- path.Join(tempDir, "/my-id/volumes/empty/empty-dir"),
- "my-id",
- },
- {
- api.Volume{
- Name: "gce-pd",
- Source: &api.VolumeSource{
- GCEPersistentDisk: &api.GCEPersistentDisk{"my-disk", "ext4", 0, false},
- },
- },
- path.Join(tempDir, "/my-id/volumes/gce-pd/gce-pd"),
- "my-id",
- },
- {api.Volume{}, "", ""},
- {
- api.Volume{
- Name: "empty-dir",
- Source: &api.VolumeSource{},
- },
- "",
- "",
- },
- }
- for _, createVolumesTest := range createVolumesTests {
- tt := createVolumesTest
- vb, err := CreateVolumeBuilder(&tt.volume, tt.podID, tempDir)
- if tt.volume.Source == nil {
- if vb != nil {
- t.Errorf("Expected volume to be nil")
- }
- continue
- }
- if tt.volume.Source.HostDir == nil && tt.volume.Source.EmptyDir == nil && tt.volume.Source.GCEPersistentDisk == nil {
- if err != ErrUnsupportedVolumeType {
- t.Errorf("Unexpected error: %v", err)
- }
- continue
- }
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- path := vb.GetPath()
- if path != tt.path {
- t.Errorf("Unexpected bind path. Expected %v, got %v", tt.path, path)
- }
- }
-}
-
-func TestCreateVolumeCleaners(t *testing.T) {
- tempDir := "CreateVolumeCleaners"
- createVolumeCleanerTests := []struct {
- kind string
- name string
- podID string
- }{
- {"empty", "empty-vol", "my-id"},
- {"", "", ""},
- {"gce-pd", "gce-pd-vol", "my-id"},
- }
- for _, tt := range createVolumeCleanerTests {
- vol, err := CreateVolumeCleaner(tt.kind, tt.name, tt.podID, tempDir)
- if tt.kind == "" && err != nil && vol == nil {
- continue
- }
- if err != nil {
- t.Errorf("Unexpected error occured: %v", err)
- }
- actualKind := reflect.TypeOf(vol).Elem().Name()
- if tt.kind == "empty" && actualKind != "EmptyDir" {
- t.Errorf("CreateVolumeCleaner returned invalid type. Expected EmptyDirectory, got %v, %v", tt.kind, actualKind)
- }
- if tt.kind == "gce-pd" && actualKind != "GCEPersistentDisk" {
- t.Errorf("CreateVolumeCleaner returned invalid type. Expected PersistentDisk, got %v, %v", tt.kind, actualKind)
- }
- }
-}
-
-func TestSetUpAndTearDown(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "CreateVolumes")
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- defer os.RemoveAll(tempDir)
- fakeID := "my-id"
- type VolumeTester interface {
- Builder
- Cleaner
- }
- volumes := []VolumeTester{
- &EmptyDir{"empty", fakeID, tempDir},
- &GCEPersistentDisk{"pd", fakeID, tempDir, "pd-disk", "ext4", "", false, &MockDiskUtil{}, &MockMounter{}},
- }
-
- for _, vol := range volumes {
- err = vol.SetUp()
- path := vol.GetPath()
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if _, err := os.Stat(path); os.IsNotExist(err) {
- t.Errorf("SetUp() failed, volume path not created: %v", path)
- }
- err = vol.TearDown()
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- if _, err := os.Stat(path); !os.IsNotExist(err) {
- t.Errorf("TearDown() failed, original volume path not properly removed: %v", path)
- }
- }
-}
-
-func TestGetActiveVolumes(t *testing.T) {
- tempDir, err := ioutil.TempDir("", "CreateVolumes")
- if err != nil {
- t.Errorf("Unexpected error: %v", err)
- }
- defer os.RemoveAll(tempDir)
- getActiveVolumesTests := []struct {
- name string
- podID string
- kind string
- identifier string
- }{
- {"fakeName", "fakeID", "empty", "fakeID/fakeName"},
- {"fakeName2", "fakeID2", "empty", "fakeID2/fakeName2"},
- }
- expectedIdentifiers := []string{}
- for _, test := range getActiveVolumesTests {
- volumeDir := path.Join(tempDir, test.podID, "volumes", test.kind, test.name)
- os.MkdirAll(volumeDir, 0750)
- expectedIdentifiers = append(expectedIdentifiers, test.identifier)
- }
- volumeMap := GetCurrentVolumes(tempDir)
- for _, name := range expectedIdentifiers {
- if _, ok := volumeMap[name]; !ok {
- t.Errorf("Expected volume map entry not found: %v", name)
- }
- }
-}
-
-type fakeExec struct {
- cmds [][]string
- dirs []string
- data []byte
- err error
- action func([]string, string)
-}
-
-func (f *fakeExec) ExecCommand(cmd []string, dir string) ([]byte, error) {
- f.cmds = append(f.cmds, cmd)
- f.dirs = append(f.dirs, dir)
- f.action(cmd, dir)
- return f.data, f.err
-}
-
-func TestGitVolume(t *testing.T) {
- var fcmd exec.FakeCmd
- fcmd = exec.FakeCmd{
- CombinedOutputScript: []exec.FakeCombinedOutputAction{
- func() ([]byte, error) {
- os.MkdirAll(path.Join(fcmd.Dirs[0], "kubernetes"), 0750)
- return []byte{}, nil
- },
- func() ([]byte, error) { return []byte{}, nil },
- func() ([]byte, error) { return []byte{}, nil },
- },
- }
- fake := exec.FakeExec{
- CommandScript: []exec.FakeCommandAction{
- func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
- func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
- func(cmd string, args ...string) exec.Cmd { return exec.InitFakeCmd(&fcmd, cmd, args...) },
- },
- }
- dir := os.TempDir() + "/git"
- g := GitDir{
- Source: "https://github.com/GoogleCloudPlatform/kubernetes.git",
- Revision: "2a30ce65c5ab586b98916d83385c5983edd353a1",
- PodID: "foo",
- RootDir: dir,
- Name: "test-pod",
- exec: &fake,
- }
- err := g.SetUp()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
- expectedCmds := [][]string{
- {"git", "clone", g.Source},
- {"git", "checkout", g.Revision},
- {"git", "reset", "--hard"},
- }
- if fake.CommandCalls != len(expectedCmds) {
- t.Errorf("unexpected command calls: expected 3, saw: %d", fake.CommandCalls)
- }
- if !reflect.DeepEqual(expectedCmds, fcmd.CombinedOutputLog) {
- t.Errorf("unexpected commands: %v, expected: %v", fcmd.CombinedOutputLog, expectedCmds)
- }
- expectedDirs := []string{g.GetPath(), g.GetPath() + "/kubernetes", g.GetPath() + "/kubernetes"}
- if len(fcmd.Dirs) != 3 || !reflect.DeepEqual(expectedDirs, fcmd.Dirs) {
- t.Errorf("unexpected directories: %v, expected: %v", fcmd.Dirs, expectedDirs)
- }
- err = g.TearDown()
- if err != nil {
- t.Errorf("unexpected error: %v", err)
- }
-}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux.go
index e4d8bef16051..ed5c09e51290 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux.go
@@ -22,6 +22,20 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
)
+// FullChannelBehavior controls how the Broadcaster reacts if a watcher's watch
+// channel is full.
+type FullChannelBehavior int
+
+const (
+ WaitIfChannelFull FullChannelBehavior = iota
+ DropIfChannelFull
+)
+
+// Buffer the incoming queue a little bit even though it should rarely ever accumulate
+// anything, just in case a few events are received in such a short window that
+// Broadcaster can't move them onto the watchers' queues fast enough.
+const incomingQueueLength = 25
+
// Broadcaster distributes event notifications among any number of watchers. Every event
// is delivered to every watcher.
type Broadcaster struct {
@@ -31,17 +45,27 @@ type Broadcaster struct {
nextWatcher int64
incoming chan Event
+
+ // How large to make watcher's channel.
+ watchQueueLength int
+ // If one of the watch channels is full, don't wait for it to become empty.
+ // Instead just deliver it to the watchers that do have space in their
+ // channels and move on to the next event.
+ // It's more fair to do this on a per-watcher basis than to do it on the
+ // "incoming" channel, which would allow one slow watcher to prevent all
+ // other watchers from getting new events.
+ fullChannelBehavior FullChannelBehavior
}
-// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue.
-// When queueLength is 0, Action will block until any prior event has been
-// completely distributed. It is guaranteed that events will be distibuted in the
-// order in which they ocurr, but the order in which a single event is distributed
-// among all of the watchers is unspecified.
-func NewBroadcaster(queueLength int) *Broadcaster {
+// NewBroadcaster creates a new Broadcaster. queueLength is the maximum number of events to queue per watcher.
+// It is guaranteed that events will be distributed in the order in which they occur,
+// but the order in which a single event is distributed among all of the watchers is unspecified.
+func NewBroadcaster(queueLength int, fullChannelBehavior FullChannelBehavior) *Broadcaster {
m := &Broadcaster{
- watchers: map[int64]*broadcasterWatcher{},
- incoming: make(chan Event, queueLength),
+ watchers: map[int64]*broadcasterWatcher{},
+ incoming: make(chan Event, incomingQueueLength),
+ watchQueueLength: queueLength,
+ fullChannelBehavior: fullChannelBehavior,
}
go m.loop()
return m
@@ -56,7 +80,7 @@ func (m *Broadcaster) Watch() Interface {
id := m.nextWatcher
m.nextWatcher++
w := &broadcasterWatcher{
- result: make(chan Event),
+ result: make(chan Event, m.watchQueueLength),
stopped: make(chan struct{}),
id: id,
m: m,
@@ -119,10 +143,20 @@ func (m *Broadcaster) loop() {
func (m *Broadcaster) distribute(event Event) {
m.lock.Lock()
defer m.lock.Unlock()
- for _, w := range m.watchers {
- select {
- case w.result <- event:
- case <-w.stopped:
+ if m.fullChannelBehavior == DropIfChannelFull {
+ for _, w := range m.watchers {
+ select {
+ case w.result <- event:
+ case <-w.stopped:
+ default: // Don't block if the event can't be queued.
+ }
+ }
+ } else {
+ for _, w := range m.watchers {
+ select {
+ case w.result <- event:
+ case <-w.stopped:
+ }
}
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux_test.go
index 895846187178..a662605efbe4 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/watch/mux_test.go
@@ -39,7 +39,7 @@ func TestBroadcaster(t *testing.T) {
}
// The broadcaster we're testing
- m := NewBroadcaster(0)
+ m := NewBroadcaster(0, WaitIfChannelFull)
// Add a bunch of watchers
const testWatchers = 2
@@ -77,7 +77,7 @@ func TestBroadcaster(t *testing.T) {
}
func TestBroadcasterWatcherClose(t *testing.T) {
- m := NewBroadcaster(0)
+ m := NewBroadcaster(0, WaitIfChannelFull)
w := m.Watch()
w2 := m.Watch()
w.Stop()
@@ -95,7 +95,7 @@ func TestBroadcasterWatcherClose(t *testing.T) {
func TestBroadcasterWatcherStopDeadlock(t *testing.T) {
done := make(chan bool)
- m := NewBroadcaster(0)
+ m := NewBroadcaster(0, WaitIfChannelFull)
go func(w0, w1 Interface) {
// We know Broadcaster is in the distribute loop once one watcher receives
// an event. Stop the other watcher while distribute is trying to
@@ -116,3 +116,52 @@ func TestBroadcasterWatcherStopDeadlock(t *testing.T) {
}
m.Shutdown()
}
+
+func TestBroadcasterDropIfChannelFull(t *testing.T) {
+ m := NewBroadcaster(1, DropIfChannelFull)
+
+ event1 := Event{Added, &myType{"foo", "hello world 1"}}
+ event2 := Event{Added, &myType{"bar", "hello world 2"}}
+
+ // Add a couple watchers
+ const testWatchers = 2
+ watches := make([]Interface, testWatchers)
+ for i := 0; i < testWatchers; i++ {
+ watches[i] = m.Watch()
+ }
+
+ // Send a couple events before closing the broadcast channel.
+ t.Log("Sending event 1")
+ m.Action(event1.Type, event1.Object)
+ t.Log("Sending event 2")
+ m.Action(event2.Type, event2.Object)
+ m.Shutdown()
+
+ // Pull events from the queue.
+ wg := sync.WaitGroup{}
+ wg.Add(testWatchers)
+ for i := 0; i < testWatchers; i++ {
+ // Verify that each watcher only gets the first event because its watch
+ // queue of length one was full from the first one.
+ go func(watcher int, w Interface) {
+ defer wg.Done()
+ e1, ok := <-w.ResultChan()
+ if !ok {
+ t.Error("Watcher %v failed to retrieve first event.")
+ return
+ }
+ if e, a := event1, e1; !reflect.DeepEqual(e, a) {
+ t.Errorf("Watcher %v: Expected (%v, %#v), got (%v, %#v)",
+ watcher, e.Type, e.Object, a.Type, a.Object)
+ } else {
+ t.Logf("Got (%v, %#v)", e1.Type, e1.Object)
+ }
+ e2, ok := <-w.ResultChan()
+ if ok {
+ t.Error("Watcher %v received second event (%v, %#v) even though it shouldn't have.",
+ watcher, e2.Type, e2.Object)
+ }
+ }(i, watches[i])
+ }
+ wg.Wait()
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity/affinity.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity/affinity.go
new file mode 100644
index 000000000000..ee023355d473
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity/affinity.go
@@ -0,0 +1,55 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This algorithm provider has predicates and priorities related to affinity/anti-affinity for the scheduler.
+package affinity
+
+import (
+ algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/util"
+ "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
+)
+
+const AffinityProvider string = "AffinityProvider"
+
+func init() {
+ factory.RegisterAlgorithmProvider(AffinityProvider, affinityPredicates(), affinityPriorities())
+}
+
+func affinityPredicates() util.StringSet {
+ return util.NewStringSet(
+ "HostName",
+ "MatchNodeSelector",
+ "PodFitsPorts",
+ "PodFitsResources",
+ "NoDiskConflict",
+ // Ensures that all pods within the same service are hosted on minions within the same region as defined by the "region" label
+ factory.RegisterFitPredicate("ServiceAffinity", algorithm.NewServiceAffinityPredicate(factory.PodLister, factory.ServiceLister, factory.MinionLister, []string{"region"})),
+ // Fit is defined based on the presence of the "region" label on a minion, regardless of value.
+ factory.RegisterFitPredicate("NodeLabelPredicate", algorithm.NewNodeLabelPredicate(factory.MinionLister, []string{"region"}, true)),
+ )
+}
+
+func affinityPriorities() util.StringSet {
+ return util.NewStringSet(
+ "LeastRequestedPriority",
+ "ServiceSpreadingPriority",
+ // spreads pods belonging to the same service across minions in different zones
+ factory.RegisterPriorityFunction("ZoneSpreadingPriority", algorithm.NewServiceAntiAffinityPriority(factory.ServiceLister, "zone"), 2),
+ // Prioritize nodes based on the presence of the "zone" label on a minion, regardless of value.
+ factory.RegisterPriorityFunction("NodeLabelPriority", algorithm.NewNodeLabelPriority("zone", true), 1),
+ )
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
index e93af9a4e329..c97f933884b0 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults/defaults.go
@@ -31,11 +31,11 @@ func defaultPredicates() util.StringSet {
return util.NewStringSet(
// Fit is defined based on the absence of port conflicts.
factory.RegisterFitPredicate("PodFitsPorts", algorithm.PodFitsPorts),
- // Fit is determined by resource availability
+ // Fit is determined by resource availability.
factory.RegisterFitPredicate("PodFitsResources", algorithm.NewResourceFitPredicate(factory.MinionLister)),
- // Fit is determined by non-conflicting disk volumes
+ // Fit is determined by non-conflicting disk volumes.
factory.RegisterFitPredicate("NoDiskConflict", algorithm.NoDiskConflict),
- // Fit is determined by node selector query
+ // Fit is determined by node selector query.
factory.RegisterFitPredicate("MatchNodeSelector", algorithm.NewSelectorMatchPredicate(factory.MinionLister)),
// Fit is determined by the presence of the Host parameter and a string match
factory.RegisterFitPredicate("HostName", algorithm.PodFitsHost),
@@ -46,8 +46,8 @@ func defaultPriorities() util.StringSet {
return util.NewStringSet(
// Prioritize nodes by least requested utilization.
factory.RegisterPriorityFunction("LeastRequestedPriority", algorithm.LeastRequestedPriority, 1),
- // spreads pods by minimizing the number of pods on the same minion with the same labels.
- factory.RegisterPriorityFunction("SpreadingPriority", algorithm.CalculateSpreadPriority, 1),
+ // spreads pods by minimizing the number of pods (belonging to the same service) on the same minion.
+ factory.RegisterPriorityFunction("ServiceSpreadingPriority", algorithm.NewServiceSpreadPriority(factory.ServiceLister), 1),
// EqualPriority is a prioritizer function that gives an equal weight of one to all minions
factory.RegisterPriorityFunction("EqualPriority", algorithm.EqualPriority, 0),
)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go
index d534b05c0bf2..ac7123efe268 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins.go
@@ -18,5 +18,6 @@ limitations under the License.
package algorithmprovider
import (
+ _ "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity"
_ "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/defaults"
)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins_test.go
index 965635d8e7f0..b70d55fc7897 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins_test.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/plugins_test.go
@@ -19,19 +19,21 @@ package algorithmprovider
import (
"testing"
+ "github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/algorithmprovider/affinity"
"github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory"
)
var (
algorithmProviderNames = []string{
factory.DefaultProvider,
+ affinity.AffinityProvider,
}
)
func TestDefaultConfigExists(t *testing.T) {
p, err := factory.GetAlgorithmProvider(factory.DefaultProvider)
if err != nil {
- t.Errorf("error retrivieving default provider: %v", err)
+ t.Errorf("error retrieving default provider: %v", err)
}
if p == nil {
t.Error("algorithm provider config should not be nil")
@@ -45,7 +47,7 @@ func TestAlgorithmProviders(t *testing.T) {
for _, pn := range algorithmProviderNames {
p, err := factory.GetAlgorithmProvider(pn)
if err != nil {
- t.Errorf("error retrivieving '%s' provider: %v", pn, err)
+ t.Errorf("error retrieving '%s' provider: %v", pn, err)
break
}
if len(p.PriorityFunctionKeys) == 0 {
@@ -53,12 +55,12 @@ func TestAlgorithmProviders(t *testing.T) {
}
for _, pf := range p.PriorityFunctionKeys.List() {
if !factory.IsPriorityFunctionRegistered(pf) {
- t.Errorf("priority function %s is not registerd but is used in the %s algorithm provider", pf, pn)
+ t.Errorf("priority function %s is not registered but is used in the %s algorithm provider", pf, pn)
}
}
for _, fp := range p.FitPredicateKeys.List() {
if !factory.IsFitPredicateRegistered(fp) {
- t.Errorf("fit predicate %s is not registerd but is used in the %s algorithm provider", fp, pn)
+ t.Errorf("fit predicate %s is not registered but is used in the %s algorithm provider", fp, pn)
}
}
}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/factory.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/factory.go
index 14fcb5c22014..d8410c4a2f4a 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/factory.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/factory.go
@@ -35,8 +35,9 @@ import (
)
var (
- PodLister = &cache.StoreToPodLister{cache.NewStore()}
- MinionLister = &cache.StoreToNodeLister{cache.NewStore()}
+ PodLister = &cache.StoreToPodLister{cache.NewStore()}
+ MinionLister = &cache.StoreToNodeLister{cache.NewStore()}
+ ServiceLister = &cache.StoreToServiceLister{cache.NewStore()}
)
// ConfigFactory knows how to fill out a scheduler config with its support functions.
@@ -48,15 +49,18 @@ type ConfigFactory struct {
PodLister *cache.StoreToPodLister
// a means to list all minions
MinionLister *cache.StoreToNodeLister
+ // a means to list all services
+ ServiceLister *cache.StoreToServiceLister
}
// NewConfigFactory initializes the factory.
func NewConfigFactory(client *client.Client) *ConfigFactory {
return &ConfigFactory{
- Client: client,
- PodQueue: cache.NewFIFO(),
- PodLister: PodLister,
- MinionLister: MinionLister,
+ Client: client,
+ PodQueue: cache.NewFIFO(),
+ PodLister: PodLister,
+ MinionLister: MinionLister,
+ ServiceLister: ServiceLister,
}
}
@@ -106,6 +110,11 @@ func (f *ConfigFactory) CreateFromKeys(predicateKeys, priorityKeys util.StringSe
cache.NewPoller(f.pollMinions, 10*time.Second, f.MinionLister.Store).Run()
}
+ // Watch and cache all service objects. Scheduler needs to find all pods
+ // created by the same service, so that it can spread them correctly.
+ // Cache this locally.
+ cache.NewReflector(f.createServiceLW(), &api.Service{}, f.ServiceLister.Store).Run()
+
r := rand.New(rand.NewSource(time.Now().UnixNano()))
algo := algorithm.NewGenericScheduler(predicateFuncs, priorityConfigs, f.PodLister, r)
@@ -205,6 +214,15 @@ func (factory *ConfigFactory) pollMinions() (cache.Enumerator, error) {
return &nodeEnumerator{nodes}, nil
}
+// createServiceLW returns a cache.ListWatch that gets all changes to services.
+func (factory *ConfigFactory) createServiceLW() *cache.ListWatch {
+ return &cache.ListWatch{
+ Client: factory.Client,
+ FieldSelector: parseSelectorOrDie(""),
+ Resource: "services",
+ }
+}
+
func (factory *ConfigFactory) makeDefaultErrorFunc(backoff *podBackoff, podQueue *cache.FIFO) func(pod *api.Pod, err error) {
return func(pod *api.Pod, err error) {
glog.Errorf("Error scheduling %v %v: %v; retrying", pod.Namespace, pod.Name, err)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins.go
index af62439cc552..8cfcf3fce013 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins.go
@@ -18,6 +18,7 @@ package factory
import (
"fmt"
+ "regexp"
"sync"
algorithm "github.com/GoogleCloudPlatform/kubernetes/pkg/scheduler"
@@ -36,7 +37,7 @@ var (
)
const (
- DefaultProvider = "default"
+ DefaultProvider = "DefaultProvider"
)
type AlgorithmProviderConfig struct {
@@ -44,47 +45,49 @@ type AlgorithmProviderConfig struct {
PriorityFunctionKeys util.StringSet
}
-// RegisterFitPredicate registers a fit predicate with the algorithm registry. Returns the key,
+// RegisterFitPredicate registers a fit predicate with the algorithm registry. Returns the name,
// with which the predicate was registered.
-func RegisterFitPredicate(key string, predicate algorithm.FitPredicate) string {
+func RegisterFitPredicate(name string, predicate algorithm.FitPredicate) string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
- fitPredicateMap[key] = predicate
- return key
+ validateAlgorithmNameOrDie(name)
+ fitPredicateMap[name] = predicate
+ return name
}
// IsFitPredicateRegistered check is useful for testing providers.
-func IsFitPredicateRegistered(key string) bool {
+func IsFitPredicateRegistered(name string) bool {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
- _, ok := fitPredicateMap[key]
+ _, ok := fitPredicateMap[name]
return ok
}
-// RegisterFitPredicate registers a priority function with the algorithm registry. Returns the key,
+// RegisterFitPredicate registers a priority function with the algorithm registry. Returns the name,
// with which the function was registered.
-func RegisterPriorityFunction(key string, function algorithm.PriorityFunction, weight int) string {
+func RegisterPriorityFunction(name string, function algorithm.PriorityFunction, weight int) string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
- priorityFunctionMap[key] = algorithm.PriorityConfig{Function: function, Weight: weight}
- return key
+ validateAlgorithmNameOrDie(name)
+ priorityFunctionMap[name] = algorithm.PriorityConfig{Function: function, Weight: weight}
+ return name
}
// IsPriorityFunctionRegistered check is useful for testing providers.
-func IsPriorityFunctionRegistered(key string) bool {
+func IsPriorityFunctionRegistered(name string) bool {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
- _, ok := priorityFunctionMap[key]
+ _, ok := priorityFunctionMap[name]
return ok
}
// SetPriorityFunctionWeight sets the weight of an already registered priority function.
-func SetPriorityFunctionWeight(key string, weight int) {
+func SetPriorityFunctionWeight(name string, weight int) {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
- config, ok := priorityFunctionMap[key]
+ config, ok := priorityFunctionMap[name]
if !ok {
- glog.Errorf("Invalid priority key %s specified - no corresponding function found", key)
+ glog.Errorf("Invalid priority name %s specified - no corresponding function found", name)
return
}
config.Weight = weight
@@ -95,6 +98,7 @@ func SetPriorityFunctionWeight(key string, weight int) {
func RegisterAlgorithmProvider(name string, predicateKeys, priorityKeys util.StringSet) string {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
+ validateAlgorithmNameOrDie(name)
algorithmProviderMap[name] = AlgorithmProviderConfig{
FitPredicateKeys: predicateKeys,
PriorityFunctionKeys: priorityKeys,
@@ -116,32 +120,40 @@ func GetAlgorithmProvider(name string) (*AlgorithmProviderConfig, error) {
return &provider, nil
}
-func getFitPredicateFunctions(keys util.StringSet) ([]algorithm.FitPredicate, error) {
+func getFitPredicateFunctions(names util.StringSet) ([]algorithm.FitPredicate, error) {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
predicates := []algorithm.FitPredicate{}
- for _, key := range keys.List() {
- function, ok := fitPredicateMap[key]
+ for _, name := range names.List() {
+ function, ok := fitPredicateMap[name]
if !ok {
- return nil, fmt.Errorf("Invalid predicate key %q specified - no corresponding function found", key)
+ return nil, fmt.Errorf("Invalid predicate name %q specified - no corresponding function found", name)
}
predicates = append(predicates, function)
}
return predicates, nil
}
-func getPriorityFunctionConfigs(keys util.StringSet) ([]algorithm.PriorityConfig, error) {
+func getPriorityFunctionConfigs(names util.StringSet) ([]algorithm.PriorityConfig, error) {
schedulerFactoryMutex.Lock()
defer schedulerFactoryMutex.Unlock()
configs := []algorithm.PriorityConfig{}
- for _, key := range keys.List() {
- config, ok := priorityFunctionMap[key]
+ for _, name := range names.List() {
+ config, ok := priorityFunctionMap[name]
if !ok {
- return nil, fmt.Errorf("Invalid priority key %s specified - no corresponding function found", key)
+ return nil, fmt.Errorf("Invalid priority name %s specified - no corresponding function found", name)
}
configs = append(configs, config)
}
return configs, nil
}
+
+var validName = regexp.MustCompile("^[a-zA-Z0-9]([-a-zA-Z0-9]*[a-zA-Z0-9])$")
+
+func validateAlgorithmNameOrDie(name string) {
+ if !validName.MatchString(name) {
+ glog.Fatalf("algorithm name %v does not match the name validation regexp \"%v\".", name, validName)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins_test.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins_test.go
new file mode 100644
index 000000000000..3df2c6d28d95
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/factory/plugins_test.go
@@ -0,0 +1,41 @@
+/*
+Copyright 2015 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package factory
+
+import "testing"
+
+func TestAlgorithmNameValidation(t *testing.T) {
+ algorithmNamesShouldValidate := []string{
+ "1SomeAlgo1rithm",
+ "someAlgor-ithm1",
+ }
+ algorithmNamesShouldNotValidate := []string{
+ "-SomeAlgorithm",
+ "SomeAlgorithm-",
+ "Some,Alg:orithm",
+ }
+ for _, name := range algorithmNamesShouldValidate {
+ if !validName.MatchString(name) {
+ t.Errorf("%v should be a valid algorithm name but is not valid.", name)
+ }
+ }
+ for _, name := range algorithmNamesShouldNotValidate {
+ if validName.MatchString(name) {
+ t.Errorf("%v should be an invalid algorithm name but is valid.", name)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/scheduler.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/scheduler.go
index 724e6706cd3b..03a2cdacc0cd 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/scheduler.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/plugin/pkg/scheduler/scheduler.go
@@ -72,7 +72,7 @@ func (s *Scheduler) scheduleOne() {
dest, err := s.config.Algorithm.Schedule(*pod, s.config.MinionLister)
if err != nil {
glog.V(1).Infof("Failed to schedule: %v", pod)
- record.Eventf(pod, string(api.PodPending), "failedScheduling", "Error scheduling: %v", err)
+ record.Eventf(pod, "failedScheduling", "Error scheduling: %v", err)
s.config.Error(pod, err)
return
}
@@ -83,9 +83,9 @@ func (s *Scheduler) scheduleOne() {
}
if err := s.config.Binder.Bind(b); err != nil {
glog.V(1).Infof("Failed to bind pod: %v", err)
- record.Eventf(pod, string(api.PodPending), "failedScheduling", "Binding rejected: %v", err)
+ record.Eventf(pod, "failedScheduling", "Binding rejected: %v", err)
s.config.Error(pod, err)
return
}
- record.Eventf(pod, string(api.PodPending), "scheduled", "Successfully assigned %v to %v", pod.Name, dest)
+ record.Eventf(pod, "scheduled", "Successfully assigned %v to %v", pod.Name, dest)
}
diff --git a/Godeps/_workspace/src/github.com/coreos/etcd/mod/mod.go b/Godeps/_workspace/src/github.com/coreos/etcd/mod/mod.go
index c811539cc24d..59358a0c97de 100644
--- a/Godeps/_workspace/src/github.com/coreos/etcd/mod/mod.go
+++ b/Godeps/_workspace/src/github.com/coreos/etcd/mod/mod.go
@@ -5,7 +5,7 @@ import (
"net/http"
"path"
- //"github.com/coreos/etcd/mod/dashboard"
+ "github.com/coreos/etcd/mod/dashboard"
leader2 "github.com/coreos/etcd/mod/leader/v2"
lock2 "github.com/coreos/etcd/mod/lock/v2"
"github.com/coreos/etcd/third_party/github.com/gorilla/mux"
@@ -20,10 +20,10 @@ func addSlash(w http.ResponseWriter, req *http.Request) {
func HttpHandler(addr string) http.Handler {
r := mux.NewRouter()
- //r.HandleFunc("/dashboard", addSlash)
+ r.HandleFunc("/dashboard", addSlash)
- //r.PathPrefix("/dashboard/static/").Handler(http.StripPrefix("/dashboard/static/", dashboard.HttpHandler()))
- //r.HandleFunc("/dashboard{path:.*}", dashboard.IndexPage)
+ r.PathPrefix("/dashboard/static/").Handler(http.StripPrefix("/dashboard/static/", dashboard.HttpHandler()))
+ r.HandleFunc("/dashboard{path:.*}", dashboard.IndexPage)
r.PathPrefix("/v2/lock").Handler(http.StripPrefix("/v2/lock", lock2.NewHandler(addr)))
r.PathPrefix("/v2/leader").Handler(http.StripPrefix("/v2/leader", leader2.NewHandler(addr)))
diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
index 02d4c9d39966..983d23f0e421 100644
--- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
+++ b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump.go
@@ -367,6 +367,12 @@ func (d *dumpState) dump(v reflect.Value) {
// been handled above.
case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ d.w.Write(nilAngleBytes)
+ break
+ }
+
d.w.Write(openBraceNewlineBytes)
d.depth++
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
index f1a5644f358c..9e0e65f03929 100644
--- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
+++ b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/dump_test.go
@@ -547,6 +547,7 @@ func addMapDumpTests() {
klen := fmt.Sprintf("%d", len(k)) // not kLen to shut golint up
kkLen := fmt.Sprintf("%d", len(kk))
mLen := fmt.Sprintf("%d", len(m))
+ nilMap := map[string]int(nil)
nm := (*map[string]int)(nil)
pm := &m
mAddr := fmt.Sprintf("%p", pm)
@@ -566,6 +567,7 @@ func addMapDumpTests() {
addDumpTest(&pm, "(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms+")\n",
"(**"+mt+")("+pmAddr+"->"+mAddr+")("+ms2+")\n")
addDumpTest(nm, "(*"+mt+")(<nil>)\n")
+ addDumpTest(nilMap, "("+mt+") <nil>\n")
// Map with custom formatter type on pointer receiver only keys and vals.
k2 := pstringer("one")
@@ -574,6 +576,7 @@ func addMapDumpTests() {
k2Len := fmt.Sprintf("%d", len(k2))
v2Len := fmt.Sprintf("%d", len(v2))
m2Len := fmt.Sprintf("%d", len(m2))
+ nilMap2 := map[pstringer]pstringer(nil)
nm2 := (*map[pstringer]pstringer)(nil)
pm2 := &m2
m2Addr := fmt.Sprintf("%p", pm2)
@@ -587,12 +590,14 @@ func addMapDumpTests() {
addDumpTest(pm2, "(*"+m2t+")("+m2Addr+")("+m2s+")\n")
addDumpTest(&pm2, "(**"+m2t+")("+pm2Addr+"->"+m2Addr+")("+m2s+")\n")
addDumpTest(nm2, "(*"+m2t+")(<nil>)\n")
+ addDumpTest(nilMap2, "("+m2t+") <nil>\n")
// Map with interface keys and values.
k3 := "one"
k3Len := fmt.Sprintf("%d", len(k3))
m3 := map[interface{}]interface{}{k3: 1}
m3Len := fmt.Sprintf("%d", len(m3))
+ nilMap3 := map[interface{}]interface{}(nil)
nm3 := (*map[interface{}]interface{})(nil)
pm3 := &m3
m3Addr := fmt.Sprintf("%p", pm3)
@@ -606,12 +611,14 @@ func addMapDumpTests() {
addDumpTest(pm3, "(*"+m3t+")("+m3Addr+")("+m3s+")\n")
addDumpTest(&pm3, "(**"+m3t+")("+pm3Addr+"->"+m3Addr+")("+m3s+")\n")
addDumpTest(nm3, "(*"+m3t+")(<nil>)\n")
+ addDumpTest(nilMap3, "("+m3t+") <nil>\n")
// Map with nil interface value.
k4 := "nil"
k4Len := fmt.Sprintf("%d", len(k4))
m4 := map[string]interface{}{k4: nil}
m4Len := fmt.Sprintf("%d", len(m4))
+ nilMap4 := map[string]interface{}(nil)
nm4 := (*map[string]interface{})(nil)
pm4 := &m4
m4Addr := fmt.Sprintf("%p", pm4)
@@ -625,6 +632,7 @@ func addMapDumpTests() {
addDumpTest(pm4, "(*"+m4t+")("+m4Addr+")("+m4s+")\n")
addDumpTest(&pm4, "(**"+m4t+")("+pm4Addr+"->"+m4Addr+")("+m4s+")\n")
addDumpTest(nm4, "(*"+m4t+")(<nil>)\n")
+ addDumpTest(nilMap4, "("+m4t+") <nil>\n")
}
func addStructDumpTests() {
diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
index b6b1fb0d0fd5..cc152ae3456a 100644
--- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
+++ b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format.go
@@ -296,6 +296,12 @@ func (f *formatState) format(v reflect.Value) {
// been handled above.
case reflect.Map:
+ // nil maps should be indicated as different than empty maps
+ if v.IsNil() {
+ f.fs.Write(nilAngleBytes)
+ break
+ }
+
f.fs.Write(openMapBytes)
f.depth++
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
diff --git a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
index 80c5ef929528..4dd0ac2af64d 100644
--- a/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
+++ b/Godeps/_workspace/src/github.com/davecgh/go-spew/spew/format_test.go
@@ -762,6 +762,7 @@ func addInterfaceFormatterTests() {
func addMapFormatterTests() {
// Map with string keys and int vals.
v := map[string]int{"one": 1, "two": 2}
+ nilMap := map[string]int(nil)
nv := (*map[string]int)(nil)
pv := &v
vAddr := fmt.Sprintf("%p", pv)
@@ -772,21 +773,25 @@ func addMapFormatterTests() {
addFormatterTest("%v", v, vs, vs2)
addFormatterTest("%v", pv, "<*>"+vs, "<*>"+vs2)
addFormatterTest("%v", &pv, "<**>"+vs, "<**>"+vs2)
+ addFormatterTest("%+v", nilMap, "<nil>")
addFormatterTest("%+v", nv, "<nil>")
addFormatterTest("%+v", v, vs, vs2)
addFormatterTest("%+v", pv, "<*>("+vAddr+")"+vs, "<*>("+vAddr+")"+vs2)
addFormatterTest("%+v", &pv, "<**>("+pvAddr+"->"+vAddr+")"+vs,
"<**>("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%+v", nilMap, "<nil>")
addFormatterTest("%+v", nv, "<nil>")
addFormatterTest("%#v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
addFormatterTest("%#v", pv, "(*"+vt+")"+vs, "(*"+vt+")"+vs2)
addFormatterTest("%#v", &pv, "(**"+vt+")"+vs, "(**"+vt+")"+vs2)
+ addFormatterTest("%#v", nilMap, "("+vt+")"+"<nil>")
addFormatterTest("%#v", nv, "(*"+vt+")"+"<nil>")
addFormatterTest("%#+v", v, "("+vt+")"+vs, "("+vt+")"+vs2)
addFormatterTest("%#+v", pv, "(*"+vt+")("+vAddr+")"+vs,
"(*"+vt+")("+vAddr+")"+vs2)
addFormatterTest("%#+v", &pv, "(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs,
"(**"+vt+")("+pvAddr+"->"+vAddr+")"+vs2)
+ addFormatterTest("%#+v", nilMap, "("+vt+")"+"<nil>")
addFormatterTest("%#+v", nv, "(*"+vt+")"+"<nil>")
// Map with custom formatter type on pointer receiver only keys and vals.
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
index ec45d8546dcd..35566520b1cd 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive.go
@@ -30,11 +30,11 @@ type (
ArchiveReader io.Reader
Compression int
TarOptions struct {
- Includes []string
- Excludes []string
- Compression Compression
- NoLchown bool
- Name string
+ IncludeFiles []string
+ ExcludePatterns []string
+ Compression Compression
+ NoLchown bool
+ Name string
}
// Archiver allows the reuse of most utility functions of this package
@@ -378,7 +378,7 @@ func escapeName(name string) string {
}
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
-// paths are included in `options.Includes` (if non-nil) or not in `options.Excludes`.
+// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
pipeReader, pipeWriter := io.Pipe()
@@ -401,12 +401,14 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
// mutating the filesystem and we can see transient errors
// from this
- if options.Includes == nil {
- options.Includes = []string{"."}
+ if options.IncludeFiles == nil {
+ options.IncludeFiles = []string{"."}
}
+ seen := make(map[string]bool)
+
var renamedRelFilePath string // For when tar.Options.Name is set
- for _, include := range options.Includes {
+ for _, include := range options.IncludeFiles {
filepath.Walk(filepath.Join(srcPath, include), func(filePath string, f os.FileInfo, err error) error {
if err != nil {
log.Debugf("Tar: Can't stat file %s to tar: %s", srcPath, err)
@@ -420,10 +422,19 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return nil
}
- skip, err := fileutils.Matches(relFilePath, options.Excludes)
- if err != nil {
- log.Debugf("Error matching %s", relFilePath, err)
- return err
+ skip := false
+
+ // If "include" is an exact match for the current file
+ // then even if there's an "excludePatterns" pattern that
+ // matches it, don't skip it. IOW, assume an explicit 'include'
+ // is asking for that file no matter what - which is true
+ // for some files, like .dockerignore and Dockerfile (sometimes)
+ if include != relFilePath {
+ skip, err = fileutils.Matches(relFilePath, options.ExcludePatterns)
+ if err != nil {
+ log.Debugf("Error matching %s", relFilePath, err)
+ return err
+ }
}
if skip {
@@ -433,6 +444,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
return nil
}
+ if seen[relFilePath] {
+ return nil
+ }
+ seen[relFilePath] = true
+
// Rename the base resource
if options.Name != "" && filePath == srcPath+"/"+filepath.Base(relFilePath) {
renamedRelFilePath = relFilePath
@@ -487,7 +503,7 @@ loop:
// This keeps "../" as-is, but normalizes "/../" to "/"
hdr.Name = filepath.Clean(hdr.Name)
- for _, exclude := range options.Excludes {
+ for _, exclude := range options.ExcludePatterns {
if strings.HasPrefix(hdr.Name, exclude) {
continue loop
}
@@ -563,8 +579,8 @@ func Untar(archive io.Reader, dest string, options *TarOptions) error {
if options == nil {
options = &TarOptions{}
}
- if options.Excludes == nil {
- options.Excludes = []string{}
+ if options.ExcludePatterns == nil {
+ options.ExcludePatterns = []string{}
}
decompressedArchive, err := DecompressStream(archive)
if err != nil {
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
index fdba6fb87cbf..6cd95d5ad530 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/archive_test.go
@@ -165,8 +165,8 @@ func TestTarUntar(t *testing.T) {
Gzip,
} {
changes, err := tarUntar(t, origin, &TarOptions{
- Compression: c,
- Excludes: []string{"3"},
+ Compression: c,
+ ExcludePatterns: []string{"3"},
})
if err != nil {
@@ -196,8 +196,8 @@ func TestTarWithOptions(t *testing.T) {
opts *TarOptions
numChanges int
}{
- {&TarOptions{Includes: []string{"1"}}, 1},
- {&TarOptions{Excludes: []string{"2"}}, 1},
+ {&TarOptions{IncludeFiles: []string{"1"}}, 1},
+ {&TarOptions{ExcludePatterns: []string{"2"}}, 1},
}
for _, testCase := range cases {
changes, err := tarUntar(t, origin, testCase.opts)
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
index 34c0f0da646f..6b8f2354b8af 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/changes_test.go
@@ -286,7 +286,7 @@ func TestApplyLayer(t *testing.T) {
t.Fatal(err)
}
- if err := ApplyLayer(src, layerCopy); err != nil {
+ if _, err := ApplyLayer(src, layerCopy); err != nil {
t.Fatal(err)
}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
index ba22c41f3cd0..ca282071f5cd 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/diff.go
@@ -15,7 +15,7 @@ import (
"github.com/docker/docker/pkg/system"
)
-func UnpackLayer(dest string, layer ArchiveReader) error {
+func UnpackLayer(dest string, layer ArchiveReader) (size int64, err error) {
tr := tar.NewReader(layer)
trBuf := pools.BufioReader32KPool.Get(tr)
defer pools.BufioReader32KPool.Put(trBuf)
@@ -33,9 +33,11 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
break
}
if err != nil {
- return err
+ return 0, err
}
+ size += hdr.Size
+
// Normalize name, for safety and for a simple is-root check
hdr.Name = filepath.Clean(hdr.Name)
@@ -48,7 +50,7 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
err = os.MkdirAll(parentPath, 0600)
if err != nil {
- return err
+ return 0, err
}
}
}
@@ -63,12 +65,12 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
aufsHardlinks[basename] = hdr
if aufsTempdir == "" {
if aufsTempdir, err = ioutil.TempDir("", "dockerplnk"); err != nil {
- return err
+ return 0, err
}
defer os.RemoveAll(aufsTempdir)
}
if err := createTarFile(filepath.Join(aufsTempdir, basename), dest, hdr, tr, true); err != nil {
- return err
+ return 0, err
}
}
continue
@@ -77,10 +79,10 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
- return err
+ return 0, err
}
if strings.HasPrefix(rel, "..") {
- return breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
+ return 0, breakoutError(fmt.Errorf("%q is outside of %q", hdr.Name, dest))
}
base := filepath.Base(path)
@@ -88,7 +90,7 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
originalBase := base[len(".wh."):]
originalPath := filepath.Join(filepath.Dir(path), originalBase)
if err := os.RemoveAll(originalPath); err != nil {
- return err
+ return 0, err
}
} else {
// If path exits we almost always just want to remove and replace it.
@@ -98,7 +100,7 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
if fi, err := os.Lstat(path); err == nil {
if !(fi.IsDir() && hdr.Typeflag == tar.TypeDir) {
if err := os.RemoveAll(path); err != nil {
- return err
+ return 0, err
}
}
}
@@ -113,18 +115,18 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
linkBasename := filepath.Base(hdr.Linkname)
srcHdr = aufsHardlinks[linkBasename]
if srcHdr == nil {
- return fmt.Errorf("Invalid aufs hardlink")
+ return 0, fmt.Errorf("Invalid aufs hardlink")
}
tmpFile, err := os.Open(filepath.Join(aufsTempdir, linkBasename))
if err != nil {
- return err
+ return 0, err
}
defer tmpFile.Close()
srcData = tmpFile
}
if err := createTarFile(path, dest, srcHdr, srcData, true); err != nil {
- return err
+ return 0, err
}
// Directory mtimes must be handled at the end to avoid further
@@ -139,27 +141,29 @@ func UnpackLayer(dest string, layer ArchiveReader) error {
path := filepath.Join(dest, hdr.Name)
ts := []syscall.Timespec{timeToTimespec(hdr.AccessTime), timeToTimespec(hdr.ModTime)}
if err := syscall.UtimesNano(path, ts); err != nil {
- return err
+ return 0, err
}
}
- return nil
+
+ return size, nil
}
// ApplyLayer parses a diff in the standard layer format from `layer`, and
-// applies it to the directory `dest`.
-func ApplyLayer(dest string, layer ArchiveReader) error {
+// applies it to the directory `dest`. Returns the size in bytes of the
+// contents of the layer.
+func ApplyLayer(dest string, layer ArchiveReader) (int64, error) {
dest = filepath.Clean(dest)
// We need to be able to set any perms
oldmask, err := system.Umask(0)
if err != nil {
- return err
+ return 0, err
}
defer system.Umask(oldmask) // ignore err, ErrNotSupportedPlatform
layer, err = DecompressStream(layer)
if err != nil {
- return err
+ return 0, err
}
return UnpackLayer(dest, layer)
}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
index 3624fe5afa8b..9048027203a5 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/archive/utils_test.go
@@ -17,7 +17,8 @@ var testUntarFns = map[string]func(string, io.Reader) error{
return Untar(r, dest, nil)
},
"applylayer": func(dest string, r io.Reader) error {
- return ApplyLayer(dest, ArchiveReader(r))
+ _, err := ApplyLayer(dest, ArchiveReader(r))
+ return err
},
}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
index 264f388225ee..7cfb57ba51fe 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size.go
@@ -39,7 +39,7 @@ var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB",
// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
-func HumanSize(size int64) string {
+func HumanSize(size float64) string {
return intToString(float64(size), 1000.0, decimapAbbrs)
}
diff --git a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
index 3e410b0db85e..67c3b81e6b4e 100644
--- a/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
+++ b/Godeps/_workspace/src/github.com/docker/docker/pkg/units/size_test.go
@@ -23,9 +23,9 @@ func TestHumanSize(t *testing.T) {
assertEquals(t, "1 MB", HumanSize(1000000))
assertEquals(t, "1.049 MB", HumanSize(1048576))
assertEquals(t, "2 MB", HumanSize(2*MB))
- assertEquals(t, "3.42 GB", HumanSize(int64(float64(3.42*GB))))
- assertEquals(t, "5.372 TB", HumanSize(int64(float64(5.372*TB))))
- assertEquals(t, "2.22 PB", HumanSize(int64(float64(2.22*PB))))
+ assertEquals(t, "3.42 GB", HumanSize(float64(3.42*GB)))
+ assertEquals(t, "5.372 TB", HumanSize(float64(5.372*TB)))
+ assertEquals(t, "2.22 PB", HumanSize(float64(2.22*PB)))
}
func TestFromHumanSize(t *testing.T) {
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
index 5a19fae36fb0..592339865373 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/.travis.yml
@@ -2,6 +2,7 @@ language: go
go:
- 1.2.2
- 1.3.1
+ - 1.4
- tip
env:
- GOARCH=amd64
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
index 1470c64dfaea..698901d9f4c6 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/AUTHORS
@@ -3,6 +3,7 @@
Aldrin Leal
Andreas Jaekle
Andrews Medina
+Artem Sidorenko
Andy Goldstein
Ben McCann
Carlos Diaz-Padron
@@ -16,6 +17,7 @@ Dawn Chen
Ed
Eric Anderson
Fabio Rehm
+ Fatih Arslan
Flavia Missi
Francisco Souza
Jari Kolehmainen
@@ -25,11 +27,14 @@ Jean-Baptiste Dalido
Jeff Mitchell
Jeffrey Hulten
Johan Euphrosine
+Kamil Domanski
Karan Misra
Kim, Hirokuni
Lucas Clemente
+Martin Sweeney
Máximo Cuadros Ortiz
Mike Dillon
+Mrunal Patel
Omeid Matten
Paul Morie
Peter Jihoon Kim
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
index 0f95d1fe28bb..aa5d70ea6196 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/README.markdown
@@ -22,7 +22,7 @@ import (
func main() {
endpoint := "unix:///var/run/docker.sock"
client, _ := docker.NewClient(endpoint)
- imgs, _ := client.ListImages(true)
+ imgs, _ := client.ListImages(docker.ListImagesOptions{All: false})
for _, img := range imgs {
fmt.Println("ID: ", img.ID)
fmt.Println("RepoTags: ", img.RepoTags)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
index 3d86ff26fe1d..3b76a4a8239a 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/client.go
@@ -460,24 +460,34 @@ func (c *Client) hijack(method, path string, success chan struct{}, setRawTermin
protocol = "tcp"
address = c.endpointURL.Host
}
- dial, err := net.Dial(protocol, address)
- if err != nil {
- return err
+ var dial net.Conn
+ if c.TLSConfig != nil && protocol != "unix" {
+ dial, err = tlsDial(protocol, address, c.TLSConfig)
+ if err != nil {
+ return err
+ }
+ } else {
+ dial, err = net.Dial(protocol, address)
+ if err != nil {
+ return err
+ }
}
- defer dial.Close()
clientconn := httputil.NewClientConn(dial, nil)
+ defer clientconn.Close()
clientconn.Do(req)
if success != nil {
success <- struct{}{}
<-success
}
rwc, br := clientconn.Hijack()
+ defer rwc.Close()
errs := make(chan error, 2)
exit := make(chan bool)
go func() {
defer close(exit)
var err error
if setRawTerminal {
+ // When TTY is ON, use regular copy
_, err = io.Copy(stdout, br)
} else {
_, err = stdCopy(stdout, stderr, br)
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
index c600c84d7977..40d6cf3cf406 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/container.go
@@ -358,6 +358,7 @@ type HostConfig struct {
ExtraHosts []string `json:"ExtraHosts,omitempty" yaml:"ExtraHosts,omitempty"`
VolumesFrom []string `json:"VolumesFrom,omitempty" yaml:"VolumesFrom,omitempty"`
NetworkMode string `json:"NetworkMode,omitempty" yaml:"NetworkMode,omitempty"`
+ IpcMode string `json:"IpcMode,omitempty" yaml:"IpcMode,omitempty"`
RestartPolicy RestartPolicy `json:"RestartPolicy,omitempty" yaml:"RestartPolicy,omitempty"`
}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
index 9ce7b440d307..0659ebd0f82f 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec.go
@@ -56,6 +56,34 @@ type Exec struct {
ID string `json:"Id,omitempty" yaml:"Id,omitempty"`
}
+// ExecProcessConfig is a type describing the command associated to a Exec
+// instance. It's used in the ExecInspect type.
+//
+// See http://goo.gl/ypQULN for more details
+type ExecProcessConfig struct {
+ Privileged bool `json:"privileged,omitempty" yaml:"privileged,omitempty"`
+ User string `json:"user,omitempty" yaml:"user,omitempty"`
+ Tty bool `json:"tty,omitempty" yaml:"tty,omitempty"`
+ EntryPoint string `json:"entrypoint,omitempty" yaml:"entrypoint,omitempty"`
+ Arguments []string `json:"arguments,omitempty" yaml:"arguments,omitempty"`
+}
+
+// ExecInspect is a type with details about a exec instance, including the
+// exit code if the command has finished running. It's returned by a api
+// call to /exec/(id)/json
+//
+// See http://goo.gl/ypQULN for more details
+type ExecInspect struct {
+ ID string `json:"ID,omitempty" yaml:"ID,omitempty"`
+ Running bool `json:"Running,omitempty" yaml:"Running,omitempty"`
+ ExitCode int `json:"ExitCode,omitempty" yaml:"ExitCode,omitempty"`
+ OpenStdin bool `json:"OpenStdin,omitempty" yaml:"OpenStdin,omitempty"`
+ OpenStderr bool `json:"OpenStderr,omitempty" yaml:"OpenStderr,omitempty"`
+ OpenStdout bool `json:"OpenStdout,omitempty" yaml:"OpenStdout,omitempty"`
+ ProcessConfig ExecProcessConfig `json:"ProcessConfig,omitempty" yaml:"ProcessConfig,omitempty"`
+ Container Container `json:"Container,omitempty" yaml:"Container,omitempty"`
+}
+
// CreateExec sets up an exec instance in a running container `id`, returning the exec
// instance, or an error in case of failure.
//
@@ -119,6 +147,26 @@ func (c *Client) ResizeExecTTY(id string, height, width int) error {
return err
}
+// InspectExec returns low-level information about the exec command id.
+//
+// See http://goo.gl/ypQULN for more details
+func (c *Client) InspectExec(id string) (*ExecInspect, error) {
+ path := fmt.Sprintf("/exec/%s/json", id)
+ body, status, err := c.do("GET", path, nil)
+ if status == http.StatusNotFound {
+ return nil, &NoSuchExec{ID: id}
+ }
+ if err != nil {
+ return nil, err
+ }
+ var exec ExecInspect
+ err = json.Unmarshal(body, &exec)
+ if err != nil {
+ return nil, err
+ }
+ return &exec, nil
+}
+
// NoSuchExec is the error returned when a given exec instance does not exist.
type NoSuchExec struct {
ID string
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
index 31de1627c483..22cced52a0bc 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/exec_test.go
@@ -10,6 +10,7 @@ import (
"net/http"
"net/http/httptest"
"net/url"
+ "reflect"
"strings"
"testing"
)
@@ -126,3 +127,133 @@ func TestExecResize(t *testing.T) {
t.Errorf("ExecCreate: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
}
}
+
+func TestExecInspect(t *testing.T) {
+ jsonExec := `{
+ "ID": "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e",
+ "Running": true,
+ "ExitCode": 0,
+ "ProcessConfig": {
+ "privileged": false,
+ "user": "",
+ "tty": true,
+ "entrypoint": "bash",
+ "arguments": []
+ },
+ "OpenStdin": true,
+ "OpenStderr": true,
+ "OpenStdout": true,
+ "Container": {
+ "State": {
+ "Running": true,
+ "Paused": false,
+ "Restarting": false,
+ "OOMKilled": false,
+ "Pid": 29392,
+ "ExitCode": 0,
+ "Error": "",
+ "StartedAt": "2015-01-21T17:08:59.634662178Z",
+ "FinishedAt": "0001-01-01T00:00:00Z"
+ },
+ "ID": "922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521",
+ "Created": "2015-01-21T17:08:59.46407212Z",
+ "Path": "/bin/bash",
+ "Args": [
+ "-lc",
+ "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
+ ],
+ "Config": {
+ "Hostname": "922cd0568714",
+ "Domainname": "",
+ "User": "ubuntu",
+ "Memory": 0,
+ "MemorySwap": 0,
+ "CpuShares": 100,
+ "Cpuset": "",
+ "AttachStdin": false,
+ "AttachStdout": false,
+ "AttachStderr": false,
+ "PortSpecs": null,
+ "ExposedPorts": {
+ "8888/tcp": {}
+ },
+ "Tty": false,
+ "OpenStdin": false,
+ "StdinOnce": false,
+ "Env": [
+ "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
+ ],
+ "Cmd": [
+ "/bin/bash",
+ "-lc",
+ "tsuru_unit_agent http://192.168.50.4:8080 689b30e0ab3adce374346de2e72512138e0e8b75 gtest /var/lib/tsuru/start && tail -f /dev/null"
+ ],
+ "Image": "tsuru/app-gtest",
+ "Volumes": null,
+ "WorkingDir": "",
+ "Entrypoint": null,
+ "NetworkDisabled": false,
+ "MacAddress": "",
+ "OnBuild": null
+ },
+ "Image": "a88060b8b54fde0f7168c86742d0ce83b80f3f10925d85c98fdad9ed00bef544",
+ "NetworkSettings": {
+ "IPAddress": "172.17.0.8",
+ "IPPrefixLen": 16,
+ "MacAddress": "02:42:ac:11:00:08",
+ "LinkLocalIPv6Address": "fe80::42:acff:fe11:8",
+ "LinkLocalIPv6PrefixLen": 64,
+ "GlobalIPv6Address": "",
+ "GlobalIPv6PrefixLen": 0,
+ "Gateway": "172.17.42.1",
+ "IPv6Gateway": "",
+ "Bridge": "docker0",
+ "PortMapping": null,
+ "Ports": {
+ "8888/tcp": [
+ {
+ "HostIp": "0.0.0.0",
+ "HostPort": "49156"
+ }
+ ]
+ }
+ },
+ "ResolvConfPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/resolv.conf",
+ "HostnamePath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hostname",
+ "HostsPath": "/var/lib/docker/containers/922cd0568714763dc725b24b7c9801016b2a3de68e2a1dc989bf5abf07740521/hosts",
+ "Name": "/c7e43b72288ee9d0270a",
+ "Driver": "aufs",
+ "ExecDriver": "native-0.2",
+ "MountLabel": "",
+ "ProcessLabel": "",
+ "AppArmorProfile": "",
+ "RestartCount": 0,
+ "UpdateDns": false,
+ "Volumes": {},
+ "VolumesRW": {}
+ }
+ }`
+ var expected ExecInspect
+ err := json.Unmarshal([]byte(jsonExec), &expected)
+ if err != nil {
+ t.Fatal(err)
+ }
+ fakeRT := &FakeRoundTripper{message: jsonExec, status: http.StatusOK}
+ client := newTestClient(fakeRT)
+ expectedID := "32adfeeec34250f9530ce1dafd40c6233832315e065ea6b362d745e2f63cde0e"
+ execObj, err := client.InspectExec(expectedID)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !reflect.DeepEqual(*execObj, expected) {
+ t.Errorf("ExecInspect: Expected %#v. Got %#v.", expected, *execObj)
+ }
+ req := fakeRT.requests[0]
+ if req.Method != "GET" {
+ t.Errorf("ExecInspect: wrong HTTP method. Want %q. Got %q.", "GET", req.Method)
+ }
+ expectedURL, _ := url.Parse(client.getURL("/exec/" + expectedID + "/json"))
+ if gotPath := fakeRT.requests[0].URL.Path; gotPath != expectedURL.Path {
+ t.Errorf("ExecInspect: Wrong path in request. Want %q. Got %q.", expectedURL.Path, gotPath)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
index 7c4a2043d9e6..200519385262 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tar.go
@@ -27,9 +27,9 @@ func createTarStream(srcPath string) (io.ReadCloser, error) {
return nil, err
}
tarOpts := &archive.TarOptions{
- Excludes: excludes,
- Compression: archive.Uncompressed,
- NoLchown: true,
+ ExcludePatterns: excludes,
+ Compression: archive.Uncompressed,
+ NoLchown: true,
}
return archive.TarWithOptions(srcPath, tarOpts)
}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
index d13bd0c1888f..b0ea012c0dbe 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/bin/fmtpolice
@@ -31,7 +31,7 @@ _lint_verbose() {
_install_linter() {
if [[ ! -x "${GOPATH}/bin/golint" ]] ; then
- go get -u github.com/golang/lint/golint
+ go get -u -f github.com/golang/lint/golint
fi
}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
index da24fb246bf6..4f8c72b4bf7b 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server.go
@@ -34,7 +34,7 @@ import (
// For more details on the remote API, check http://goo.gl/G3plxW.
type DockerServer struct {
containers []*docker.Container
- execs []*docker.Exec
+ execs []*docker.ExecInspect
cMut sync.RWMutex
images []docker.Image
iMut sync.RWMutex
@@ -99,7 +99,9 @@ func (s *DockerServer) buildMuxer() {
s.mux.Path("/containers/{id:.*}/attach").Methods("POST").HandlerFunc(s.handlerWrapper(s.attachContainer))
s.mux.Path("/containers/{id:.*}").Methods("DELETE").HandlerFunc(s.handlerWrapper(s.removeContainer))
s.mux.Path("/containers/{id:.*}/exec").Methods("POST").HandlerFunc(s.handlerWrapper(s.createExecContainer))
+ s.mux.Path("/exec/{id:.*}/resize").Methods("POST").HandlerFunc(s.handlerWrapper(s.resizeExecContainer))
s.mux.Path("/exec/{id:.*}/start").Methods("POST").HandlerFunc(s.handlerWrapper(s.startExecContainer))
+ s.mux.Path("/exec/{id:.*}/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.inspectExecContainer))
s.mux.Path("/images/create").Methods("POST").HandlerFunc(s.handlerWrapper(s.pullImage))
s.mux.Path("/build").Methods("POST").HandlerFunc(s.handlerWrapper(s.buildImage))
s.mux.Path("/images/json").Methods("GET").HandlerFunc(s.handlerWrapper(s.listImages))
@@ -724,10 +726,31 @@ func (s *DockerServer) getImage(w http.ResponseWriter, r *http.Request) {
}
func (s *DockerServer) createExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ container, _, err := s.findContainer(id)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusNotFound)
+ return
+ }
+ exec := docker.ExecInspect{
+ ID: "id-exec-created-by-test",
+ Container: *container,
+ }
+ var params docker.CreateExecOptions
+ err = json.NewDecoder(r.Body).Decode(¶ms)
+ if err != nil {
+ http.Error(w, err.Error(), http.StatusInternalServerError)
+ return
+ }
+ if len(params.Cmd) > 0 {
+ exec.ProcessConfig.EntryPoint = params.Cmd[0]
+ if len(params.Cmd) > 1 {
+ exec.ProcessConfig.Arguments = params.Cmd[1:]
+ }
+ }
+ s.execs = append(s.execs, &exec)
w.WriteHeader(http.StatusOK)
w.Header().Set("Content-Type", "application/json")
- exec := docker.Exec{ID: "id-exec-created-by-test"}
- s.execs = append(s.execs, &exec)
json.NewEncoder(w).Encode(map[string]string{"Id": exec.ID})
}
@@ -742,3 +765,27 @@ func (s *DockerServer) startExecContainer(w http.ResponseWriter, r *http.Request
}
w.WriteHeader(http.StatusNotFound)
}
+
+func (s *DockerServer) resizeExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ for _, exec := range s.execs {
+ if exec.ID == id {
+ w.WriteHeader(http.StatusOK)
+ return
+ }
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
+
+func (s *DockerServer) inspectExecContainer(w http.ResponseWriter, r *http.Request) {
+ id := mux.Vars(r)["id"]
+ for _, exec := range s.execs {
+ if exec.ID == id {
+ w.WriteHeader(http.StatusOK)
+ w.Header().Set("Content-Type", "application/json")
+ json.NewEncoder(w).Encode(exec)
+ return
+ }
+ }
+ w.WriteHeader(http.StatusNotFound)
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
index d8763cbc1bfb..8217fb1d64fa 100644
--- a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/testing/server_test.go
@@ -1089,3 +1089,83 @@ func TestDefaultHandler(t *testing.T) {
t.Fatalf("DefaultHandler: Expected to return server.mux, got: %#v", server.DefaultHandler())
}
}
+
+func TestCreateExecContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 2)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ body := `{"Cmd": ["bash", "-c", "ls"]}`
+ path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, strings.NewReader(body))
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ serverExec := server.execs[0]
+ var got docker.Exec
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if got.ID != serverExec.ID {
+ t.Errorf("CreateExec: wrong value. Want %#v. Got %#v.", serverExec.ID, got.ID)
+ }
+ expected := docker.ExecInspect{
+ ID: got.ID,
+ ProcessConfig: docker.ExecProcessConfig{
+ EntryPoint: "bash",
+ Arguments: []string{"-c", "ls"},
+ },
+ Container: *server.containers[0],
+ }
+ if !reflect.DeepEqual(*serverExec, expected) {
+ t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, *serverExec)
+ }
+}
+
+func TestInspectExecContainer(t *testing.T) {
+ server := DockerServer{}
+ addContainers(&server, 1)
+ server.buildMuxer()
+ recorder := httptest.NewRecorder()
+ body := `{"Cmd": ["bash", "-c", "ls"]}`
+ path := fmt.Sprintf("/containers/%s/exec", server.containers[0].ID)
+ request, _ := http.NewRequest("POST", path, strings.NewReader(body))
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ var got docker.Exec
+ err := json.NewDecoder(recorder.Body).Decode(&got)
+ if err != nil {
+ t.Fatal(err)
+ }
+ path = fmt.Sprintf("/exec/%s/json", got.ID)
+ request, _ = http.NewRequest("GET", path, nil)
+ server.ServeHTTP(recorder, request)
+ if recorder.Code != http.StatusOK {
+ t.Fatalf("CreateExec: wrong status. Want %d. Got %d.", http.StatusOK, recorder.Code)
+ }
+ var got2 docker.ExecInspect
+ err = json.NewDecoder(recorder.Body).Decode(&got2)
+ if err != nil {
+ t.Fatal(err)
+ }
+ expected := docker.ExecInspect{
+ ID: got.ID,
+ ProcessConfig: docker.ExecProcessConfig{
+ EntryPoint: "bash",
+ Arguments: []string{"-c", "ls"},
+ },
+ Container: *server.containers[0],
+ }
+ got2.Container.State.StartedAt = expected.Container.State.StartedAt
+ got2.Container.State.FinishedAt = expected.Container.State.FinishedAt
+ got2.Container.Config = expected.Container.Config
+ got2.Container.Created = expected.Container.Created
+ got2.Container.NetworkSettings = expected.Container.NetworkSettings
+ if !reflect.DeepEqual(got2, expected) {
+ t.Errorf("InspectContainer: wrong value. Want:\n%#v\nGot:\n%#v\n", expected, got2)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go
new file mode 100644
index 000000000000..11d571761a8c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/fsouza/go-dockerclient/tls.go
@@ -0,0 +1,100 @@
+// Copyright 2014 go-dockerclient authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The content is borrowed from Docker's own source code to provide a simple
+// tls based dialer
+
+package docker
+
+import (
+ "crypto/tls"
+ "errors"
+ "net"
+ "strings"
+ "time"
+)
+
+type tlsClientCon struct {
+ *tls.Conn
+ rawConn net.Conn
+}
+
+func (c *tlsClientCon) CloseWrite() error {
+ // Go standard tls.Conn doesn't provide the CloseWrite() method so we do it
+ // on its underlying connection.
+ if cwc, ok := c.rawConn.(interface {
+ CloseWrite() error
+ }); ok {
+ return cwc.CloseWrite()
+ }
+ return nil
+}
+
+func tlsDialWithDialer(dialer *net.Dialer, network, addr string, config *tls.Config) (net.Conn, error) {
+ // We want the Timeout and Deadline values from dialer to cover the
+ // whole process: TCP connection and TLS handshake. This means that we
+ // also need to start our own timers now.
+ timeout := dialer.Timeout
+
+ if !dialer.Deadline.IsZero() {
+ deadlineTimeout := dialer.Deadline.Sub(time.Now())
+ if timeout == 0 || deadlineTimeout < timeout {
+ timeout = deadlineTimeout
+ }
+ }
+
+ var errChannel chan error
+
+ if timeout != 0 {
+ errChannel = make(chan error, 2)
+ time.AfterFunc(timeout, func() {
+ errChannel <- errors.New("")
+ })
+ }
+
+ rawConn, err := dialer.Dial(network, addr)
+ if err != nil {
+ return nil, err
+ }
+
+ colonPos := strings.LastIndex(addr, ":")
+ if colonPos == -1 {
+ colonPos = len(addr)
+ }
+ hostname := addr[:colonPos]
+
+ // If no ServerName is set, infer the ServerName
+ // from the hostname we're connecting to.
+ if config.ServerName == "" {
+ // Make a copy to avoid polluting argument or default.
+ c := *config
+ c.ServerName = hostname
+ config = &c
+ }
+
+ conn := tls.Client(rawConn, config)
+
+ if timeout == 0 {
+ err = conn.Handshake()
+ } else {
+ go func() {
+ errChannel <- conn.Handshake()
+ }()
+
+ err = <-errChannel
+ }
+
+ if err != nil {
+ rawConn.Close()
+ return nil, err
+ }
+
+ // This is Docker difference with standard's crypto/tls package: returned a
+ // wrapper which holds both the TLS and raw connections.
+ return &tlsClientCon{conn, rawConn}, nil
+}
+
+func tlsDial(network, addr string, config *tls.Config) (net.Conn, error) {
+ return tlsDialWithDialer(new(net.Dialer), network, addr, config)
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
new file mode 100644
index 000000000000..fb838ed2d756
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/Makefile
@@ -0,0 +1,43 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+install:
+ go install
+
+test: install generate-test-pbs
+ go test
+
+
+generate-test-pbs:
+ make install
+ make -C testdata
+ make -C proto3_proto
+ make
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
new file mode 100644
index 000000000000..3fade175f728
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/all_test.go
@@ -0,0 +1,2059 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "math"
+ "math/rand"
+ "reflect"
+ "runtime/debug"
+ "strings"
+ "testing"
+ "time"
+
+ . "./testdata"
+ . "github.com/golang/protobuf/proto"
+)
+
+var globalO *Buffer
+
+func old() *Buffer {
+ if globalO == nil {
+ globalO = NewBuffer(nil)
+ }
+ globalO.Reset()
+ return globalO
+}
+
+func equalbytes(b1, b2 []byte, t *testing.T) {
+ if len(b1) != len(b2) {
+ t.Errorf("wrong lengths: 2*%d != %d", len(b1), len(b2))
+ return
+ }
+ for i := 0; i < len(b1); i++ {
+ if b1[i] != b2[i] {
+ t.Errorf("bad byte[%d]:%x %x: %s %s", i, b1[i], b2[i], b1, b2)
+ }
+ }
+}
+
+func initGoTestField() *GoTestField {
+ f := new(GoTestField)
+ f.Label = String("label")
+ f.Type = String("type")
+ return f
+}
+
+// These are all structurally equivalent but the tag numbers differ.
+// (It's remarkable that required, optional, and repeated all have
+// 8 letters.)
+func initGoTest_RequiredGroup() *GoTest_RequiredGroup {
+ return &GoTest_RequiredGroup{
+ RequiredField: String("required"),
+ }
+}
+
+func initGoTest_OptionalGroup() *GoTest_OptionalGroup {
+ return &GoTest_OptionalGroup{
+ RequiredField: String("optional"),
+ }
+}
+
+func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup {
+ return &GoTest_RepeatedGroup{
+ RequiredField: String("repeated"),
+ }
+}
+
+func initGoTest(setdefaults bool) *GoTest {
+ pb := new(GoTest)
+ if setdefaults {
+ pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted)
+ pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted)
+ pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted)
+ pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted)
+ pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted)
+ pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted)
+ pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted)
+ pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted)
+ pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted)
+ pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted)
+ pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted
+ pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted)
+ pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted)
+ }
+
+ pb.Kind = GoTest_TIME.Enum()
+ pb.RequiredField = initGoTestField()
+ pb.F_BoolRequired = Bool(true)
+ pb.F_Int32Required = Int32(3)
+ pb.F_Int64Required = Int64(6)
+ pb.F_Fixed32Required = Uint32(32)
+ pb.F_Fixed64Required = Uint64(64)
+ pb.F_Uint32Required = Uint32(3232)
+ pb.F_Uint64Required = Uint64(6464)
+ pb.F_FloatRequired = Float32(3232)
+ pb.F_DoubleRequired = Float64(6464)
+ pb.F_StringRequired = String("string")
+ pb.F_BytesRequired = []byte("bytes")
+ pb.F_Sint32Required = Int32(-32)
+ pb.F_Sint64Required = Int64(-64)
+ pb.Requiredgroup = initGoTest_RequiredGroup()
+
+ return pb
+}
+
+func fail(msg string, b *bytes.Buffer, s string, t *testing.T) {
+ data := b.Bytes()
+ ld := len(data)
+ ls := len(s) / 2
+
+ fmt.Printf("fail %s ld=%d ls=%d\n", msg, ld, ls)
+
+ // find the interesting spot - n
+ n := ls
+ if ld < ls {
+ n = ld
+ }
+ j := 0
+ for i := 0; i < n; i++ {
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ if data[i] == bs {
+ continue
+ }
+ n = i
+ break
+ }
+ l := n - 10
+ if l < 0 {
+ l = 0
+ }
+ h := n + 10
+
+	// print the bytes around the interesting spot
+ fmt.Printf("is[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ld {
+ fmt.Printf(" --")
+ continue
+ }
+ fmt.Printf(" %.2x", data[i])
+ }
+ fmt.Printf("\n")
+
+ fmt.Printf("sb[%d]:", l)
+ for i := l; i < h; i++ {
+ if i >= ls {
+ fmt.Printf(" --")
+ continue
+ }
+ bs := hex(s[j])*16 + hex(s[j+1])
+ j += 2
+ fmt.Printf(" %.2x", bs)
+ }
+ fmt.Printf("\n")
+
+ t.Fail()
+
+ // t.Errorf("%s: \ngood: %s\nbad: %x", msg, s, b.Bytes())
+ // Print the output in a partially-decoded format; can
+ // be helpful when updating the test. It produces the output
+ // that is pasted, with minor edits, into the argument to verify().
+ // data := b.Bytes()
+ // nesting := 0
+ // for b.Len() > 0 {
+ // start := len(data) - b.Len()
+ // var u uint64
+ // u, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // wire := u & 0x7
+ // tag := u >> 3
+ // switch wire {
+ // case WireVarint:
+ // v, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on varint:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed32:
+ // v, err := DecodeFixed32(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed32:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireFixed64:
+ // v, err := DecodeFixed64(b)
+ // if err != nil {
+ // fmt.Printf("decode error on fixed64:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" // field %d, encoding %d, value %d\n",
+ // data[start:len(data)-b.Len()], tag, wire, v)
+ // case WireBytes:
+ // nb, err := DecodeVarint(b)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // after_tag := len(data) - b.Len()
+ // str := make([]byte, nb)
+ // _, err = b.Read(str)
+ // if err != nil {
+ // fmt.Printf("decode error on bytes:", err)
+ // return
+ // }
+ // fmt.Printf("\t\t\"%x\" \"%x\" // field %d, encoding %d (FIELD)\n",
+ // data[start:after_tag], str, tag, wire)
+ // case WireStartGroup:
+ // nesting++
+ // fmt.Printf("\t\t\"%x\"\t\t// start group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // case WireEndGroup:
+ // fmt.Printf("\t\t\"%x\"\t\t// end group field %d level %d\n",
+ // data[start:len(data)-b.Len()], tag, nesting)
+ // nesting--
+ // default:
+ // fmt.Printf("unrecognized wire type %d\n", wire)
+ // return
+ // }
+ // }
+}
+
+func hex(c uint8) uint8 {
+ if '0' <= c && c <= '9' {
+ return c - '0'
+ }
+ if 'a' <= c && c <= 'f' {
+ return 10 + c - 'a'
+ }
+ if 'A' <= c && c <= 'F' {
+ return 10 + c - 'A'
+ }
+ return 0
+}
+
+func equal(b []byte, s string, t *testing.T) bool {
+ if 2*len(b) != len(s) {
+ // fail(fmt.Sprintf("wrong lengths: 2*%d != %d", len(b), len(s)), b, s, t)
+ fmt.Printf("wrong lengths: 2*%d != %d\n", len(b), len(s))
+ return false
+ }
+ for i, j := 0, 0; i < len(b); i, j = i+1, j+2 {
+ x := hex(s[j])*16 + hex(s[j+1])
+ if b[i] != x {
+ // fail(fmt.Sprintf("bad byte[%d]:%x %x", i, b[i], x), b, s, t)
+ fmt.Printf("bad byte[%d]:%x %x", i, b[i], x)
+ return false
+ }
+ }
+ return true
+}
+
+func overify(t *testing.T, pb *GoTest, expected string) {
+ o := old()
+ err := o.Marshal(pb)
+ if err != nil {
+ fmt.Printf("overify marshal-1 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 1", o.Bytes())
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = o.Unmarshal(pbd)
+ if err != nil {
+ t.Fatalf("overify unmarshal err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ o.Reset()
+ err = o.Marshal(pbd)
+ if err != nil {
+ t.Errorf("overify marshal-2 err = %v", err)
+ o.DebugPrint("", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+ if !equal(o.Bytes(), expected, t) {
+ o.DebugPrint("overify neq 2", o.Bytes())
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+// Simple tests for numeric encode/decode primitives (varint, etc.)
+func TestNumericPrimitives(t *testing.T) {
+ for i := uint64(0); i < 1e6; i += 111 {
+ o := old()
+ if o.EncodeVarint(i) != nil {
+ t.Error("EncodeVarint")
+ break
+ }
+ x, e := o.DecodeVarint()
+ if e != nil {
+ t.Fatal("DecodeVarint")
+ }
+ if x != i {
+ t.Fatal("varint decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed32(i) != nil {
+ t.Fatal("encFixed32")
+ }
+ x, e = o.DecodeFixed32()
+ if e != nil {
+ t.Fatal("decFixed32")
+ }
+ if x != i {
+ t.Fatal("fixed32 decode fail:", i, x)
+ }
+
+ o = old()
+ if o.EncodeFixed64(i*1234567) != nil {
+ t.Error("encFixed64")
+ break
+ }
+ x, e = o.DecodeFixed64()
+ if e != nil {
+ t.Error("decFixed64")
+ break
+ }
+ if x != i*1234567 {
+ t.Error("fixed64 decode fail:", i*1234567, x)
+ break
+ }
+
+ o = old()
+ i32 := int32(i - 12345)
+ if o.EncodeZigzag32(uint64(i32)) != nil {
+ t.Fatal("EncodeZigzag32")
+ }
+ x, e = o.DecodeZigzag32()
+ if e != nil {
+ t.Fatal("DecodeZigzag32")
+ }
+ if x != uint64(uint32(i32)) {
+ t.Fatal("zigzag32 decode fail:", i32, x)
+ }
+
+ o = old()
+ i64 := int64(i - 12345)
+ if o.EncodeZigzag64(uint64(i64)) != nil {
+ t.Fatal("EncodeZigzag64")
+ }
+ x, e = o.DecodeZigzag64()
+ if e != nil {
+ t.Fatal("DecodeZigzag64")
+ }
+ if x != uint64(i64) {
+ t.Fatal("zigzag64 decode fail:", i64, x)
+ }
+ }
+}
+
+// fakeMarshaler is a simple struct implementing Marshaler and Message interfaces.
+type fakeMarshaler struct {
+ b []byte
+ err error
+}
+
+func (f fakeMarshaler) Marshal() ([]byte, error) {
+ return f.b, f.err
+}
+
+func (f fakeMarshaler) String() string {
+ return fmt.Sprintf("Bytes: %v Error: %v", f.b, f.err)
+}
+
+func (f fakeMarshaler) ProtoMessage() {}
+
+func (f fakeMarshaler) Reset() {}
+
+// Simple tests for proto messages that implement the Marshaler interface.
+func TestMarshalerEncoding(t *testing.T) {
+ tests := []struct {
+ name string
+ m Message
+ want []byte
+ wantErr error
+ }{
+ {
+ name: "Marshaler that fails",
+ m: fakeMarshaler{
+ err: errors.New("some marshal err"),
+ b: []byte{5, 6, 7},
+ },
+ // Since there's an error, nothing should be written to buffer.
+ want: nil,
+ wantErr: errors.New("some marshal err"),
+ },
+ {
+ name: "Marshaler that succeeds",
+ m: fakeMarshaler{
+ b: []byte{0, 1, 2, 3, 4, 127, 255},
+ },
+ want: []byte{0, 1, 2, 3, 4, 127, 255},
+ wantErr: nil,
+ },
+ }
+ for _, test := range tests {
+ b := NewBuffer(nil)
+ err := b.Marshal(test.m)
+ if !reflect.DeepEqual(test.wantErr, err) {
+ t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr)
+ }
+ if !reflect.DeepEqual(test.want, b.Bytes()) {
+ t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want)
+ }
+ }
+}
+
+// Simple tests for bytes
+func TestBytesPrimitives(t *testing.T) {
+ o := old()
+ bytes := []byte{'n', 'o', 'w', ' ', 'i', 's', ' ', 't', 'h', 'e', ' ', 't', 'i', 'm', 'e'}
+ if o.EncodeRawBytes(bytes) != nil {
+ t.Error("EncodeRawBytes")
+ }
+ decb, e := o.DecodeRawBytes(false)
+ if e != nil {
+ t.Error("DecodeRawBytes")
+ }
+ equalbytes(bytes, decb, t)
+}
+
+// Simple tests for strings
+func TestStringPrimitives(t *testing.T) {
+ o := old()
+ s := "now is the time"
+ if o.EncodeStringBytes(s) != nil {
+ t.Error("enc_string")
+ }
+ decs, e := o.DecodeStringBytes()
+ if e != nil {
+ t.Error("dec_string")
+ }
+ if s != decs {
+ t.Error("string encode/decode fail:", s, decs)
+ }
+}
+
+// Do we catch the "required bit not set" case?
+func TestRequiredBit(t *testing.T) {
+ o := old()
+ pb := new(GoTest)
+ err := o.Marshal(pb)
+ if err == nil {
+ t.Error("did not catch missing required fields")
+ } else if strings.Index(err.Error(), "Kind") < 0 {
+ t.Error("wrong error type:", err)
+ }
+}
+
+// Check that all fields are nil.
+// Clearly silly, and a residue from a more interesting test with an earlier,
+// different initialization property, but it once caught a compiler bug so
+// it lives.
+func checkInitialized(pb *GoTest, t *testing.T) {
+ if pb.F_BoolDefaulted != nil {
+ t.Error("New or Reset did not set boolean:", *pb.F_BoolDefaulted)
+ }
+ if pb.F_Int32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Int32Defaulted)
+ }
+ if pb.F_Int64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Int64Defaulted)
+ }
+ if pb.F_Fixed32Defaulted != nil {
+ t.Error("New or Reset did not set fixed32:", *pb.F_Fixed32Defaulted)
+ }
+ if pb.F_Fixed64Defaulted != nil {
+ t.Error("New or Reset did not set fixed64:", *pb.F_Fixed64Defaulted)
+ }
+ if pb.F_Uint32Defaulted != nil {
+ t.Error("New or Reset did not set uint32:", *pb.F_Uint32Defaulted)
+ }
+ if pb.F_Uint64Defaulted != nil {
+ t.Error("New or Reset did not set uint64:", *pb.F_Uint64Defaulted)
+ }
+ if pb.F_FloatDefaulted != nil {
+ t.Error("New or Reset did not set float:", *pb.F_FloatDefaulted)
+ }
+ if pb.F_DoubleDefaulted != nil {
+ t.Error("New or Reset did not set double:", *pb.F_DoubleDefaulted)
+ }
+ if pb.F_StringDefaulted != nil {
+ t.Error("New or Reset did not set string:", *pb.F_StringDefaulted)
+ }
+ if pb.F_BytesDefaulted != nil {
+ t.Error("New or Reset did not set bytes:", string(pb.F_BytesDefaulted))
+ }
+ if pb.F_Sint32Defaulted != nil {
+ t.Error("New or Reset did not set int32:", *pb.F_Sint32Defaulted)
+ }
+ if pb.F_Sint64Defaulted != nil {
+ t.Error("New or Reset did not set int64:", *pb.F_Sint64Defaulted)
+ }
+}
+
+// Does Reset() reset?
+func TestReset(t *testing.T) {
+ pb := initGoTest(true)
+ // muck with some values
+ pb.F_BoolDefaulted = Bool(false)
+ pb.F_Int32Defaulted = Int32(237)
+ pb.F_Int64Defaulted = Int64(12346)
+ pb.F_Fixed32Defaulted = Uint32(32000)
+ pb.F_Fixed64Defaulted = Uint64(666)
+ pb.F_Uint32Defaulted = Uint32(323232)
+ pb.F_Uint64Defaulted = nil
+ pb.F_FloatDefaulted = nil
+ pb.F_DoubleDefaulted = Float64(0)
+ pb.F_StringDefaulted = String("gotcha")
+ pb.F_BytesDefaulted = []byte("asdfasdf")
+ pb.F_Sint32Defaulted = Int32(123)
+ pb.F_Sint64Defaulted = Int64(789)
+ pb.Reset()
+ checkInitialized(pb, t)
+}
+
+// All required fields set, no defaults provided.
+func TestEncodeDecode1(t *testing.T) {
+ pb := initGoTest(false)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 0x20
+ "714000000000000000"+ // field 14, encoding 1, value 0x40
+ "78a019"+ // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032"+ // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2, string "string"
+ "b304"+ // field 70, encoding 3, start group
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // field 70, encoding 4, end group
+ "aa0605"+"6279746573"+ // field 101, encoding 2, string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f") // field 103, encoding 0, 0x7f zigzag64
+}
+
+// All required fields set, defaults provided.
+func TestEncodeDecode2(t *testing.T) {
+ pb := initGoTest(true)
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All default fields set to their default value by hand
+func TestEncodeDecode3(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolDefaulted = Bool(true)
+ pb.F_Int32Defaulted = Int32(32)
+ pb.F_Int64Defaulted = Int64(64)
+ pb.F_Fixed32Defaulted = Uint32(320)
+ pb.F_Fixed64Defaulted = Uint64(640)
+ pb.F_Uint32Defaulted = Uint32(3200)
+ pb.F_Uint64Defaulted = Uint64(6400)
+ pb.F_FloatDefaulted = Float32(314159)
+ pb.F_DoubleDefaulted = Float64(271828)
+ pb.F_StringDefaulted = String("hello, \"world!\"\n")
+ pb.F_BytesDefaulted = []byte("Bignose")
+ pb.F_Sint32Defaulted = Int32(-32)
+ pb.F_Sint64Defaulted = Int64(-64)
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all non-defaulted optional fields have values.
+func TestEncodeDecode4(t *testing.T) {
+ pb := initGoTest(true)
+ pb.Table = String("hello")
+ pb.Param = Int32(7)
+ pb.OptionalField = initGoTestField()
+ pb.F_BoolOptional = Bool(true)
+ pb.F_Int32Optional = Int32(32)
+ pb.F_Int64Optional = Int64(64)
+ pb.F_Fixed32Optional = Uint32(3232)
+ pb.F_Fixed64Optional = Uint64(6464)
+ pb.F_Uint32Optional = Uint32(323232)
+ pb.F_Uint64Optional = Uint64(646464)
+ pb.F_FloatOptional = Float32(32.)
+ pb.F_DoubleOptional = Float64(64.)
+ pb.F_StringOptional = String("hello")
+ pb.F_BytesOptional = []byte("Bignose")
+ pb.F_Sint32Optional = Int32(-32)
+ pb.F_Sint64Optional = Int64(-64)
+ pb.Optionalgroup = initGoTest_OptionalGroup()
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "1205"+"68656c6c6f"+ // field 2, encoding 2, string "hello"
+ "1807"+ // field 3, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "320d"+"0a056c6162656c120474797065"+ // field 6, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "f00101"+ // field 30, encoding 0, value 1
+ "f80120"+ // field 31, encoding 0, value 32
+ "800240"+ // field 32, encoding 0, value 64
+ "8d02a00c0000"+ // field 33, encoding 5, value 3232
+ "91024019000000000000"+ // field 34, encoding 1, value 6464
+ "9802a0dd13"+ // field 35, encoding 0, value 323232
+ "a002c0ba27"+ // field 36, encoding 0, value 646464
+ "ad0200000042"+ // field 37, encoding 5, value 32.0
+ "b1020000000000005040"+ // field 38, encoding 1, value 64.0
+ "ba0205"+"68656c6c6f"+ // field 39, encoding 2, string "hello"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "d305"+ // start group field 90 level 1
+ "da0508"+"6f7074696f6e616c"+ // field 91, encoding 2, string "optional"
+ "d405"+ // end group field 90 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ea1207"+"4269676e6f7365"+ // field 301, encoding 2, string "Bignose"
+ "f0123f"+ // field 302, encoding 0, value 63
+ "f8127f"+ // field 303, encoding 0, value 127
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+func TestEncodeDecode5(t *testing.T) {
+ pb := initGoTest(true)
+ pb.RepeatedField = []*GoTestField{initGoTestField(), initGoTestField()}
+ pb.F_BoolRepeated = []bool{false, true}
+ pb.F_Int32Repeated = []int32{32, 33}
+ pb.F_Int64Repeated = []int64{64, 65}
+ pb.F_Fixed32Repeated = []uint32{3232, 3333}
+ pb.F_Fixed64Repeated = []uint64{6464, 6565}
+ pb.F_Uint32Repeated = []uint32{323232, 333333}
+ pb.F_Uint64Repeated = []uint64{646464, 656565}
+ pb.F_FloatRepeated = []float32{32., 33.}
+ pb.F_DoubleRepeated = []float64{64., 65.}
+ pb.F_StringRepeated = []string{"hello", "sailor"}
+ pb.F_BytesRepeated = [][]byte{[]byte("big"), []byte("nose")}
+ pb.F_Sint32Repeated = []int32{32, -32}
+ pb.F_Sint64Repeated = []int64{64, -64}
+ pb.Repeatedgroup = []*GoTest_RepeatedGroup{initGoTest_RepeatedGroup(), initGoTest_RepeatedGroup()}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "2a0d"+"0a056c6162656c120474797065"+ // field 5, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "a00100"+ // field 20, encoding 0, value 0
+ "a00101"+ // field 20, encoding 0, value 1
+ "a80120"+ // field 21, encoding 0, value 32
+ "a80121"+ // field 21, encoding 0, value 33
+ "b00140"+ // field 22, encoding 0, value 64
+ "b00141"+ // field 22, encoding 0, value 65
+ "bd01a00c0000"+ // field 23, encoding 5, value 3232
+ "bd01050d0000"+ // field 23, encoding 5, value 3333
+ "c1014019000000000000"+ // field 24, encoding 1, value 6464
+ "c101a519000000000000"+ // field 24, encoding 1, value 6565
+ "c801a0dd13"+ // field 25, encoding 0, value 323232
+ "c80195ac14"+ // field 25, encoding 0, value 333333
+ "d001c0ba27"+ // field 26, encoding 0, value 646464
+ "d001b58928"+ // field 26, encoding 0, value 656565
+ "dd0100000042"+ // field 27, encoding 5, value 32.0
+ "dd0100000442"+ // field 27, encoding 5, value 33.0
+ "e1010000000000005040"+ // field 28, encoding 1, value 64.0
+ "e1010000000000405040"+ // field 28, encoding 1, value 65.0
+ "ea0105"+"68656c6c6f"+ // field 29, encoding 2, string "hello"
+ "ea0106"+"7361696c6f72"+ // field 29, encoding 2, string "sailor"
+ "c00201"+ // field 40, encoding 0, value 1
+ "c80220"+ // field 41, encoding 0, value 32
+ "d00240"+ // field 42, encoding 0, value 64
+ "dd0240010000"+ // field 43, encoding 5, value 320
+ "e1028002000000000000"+ // field 44, encoding 1, value 640
+ "e8028019"+ // field 45, encoding 0, value 3200
+ "f0028032"+ // field 46, encoding 0, value 6400
+ "fd02e0659948"+ // field 47, encoding 5, value 314159.0
+ "81030000000050971041"+ // field 48, encoding 1, value 271828.0
+ "8a0310"+"68656c6c6f2c2022776f726c6421220a"+ // field 49, encoding 2 string "hello, \"world!\"\n"
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "8305"+ // start group field 80 level 1
+ "8a0508"+"7265706561746564"+ // field 81, encoding 2, string "repeated"
+ "8405"+ // end group field 80 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "ca0c03"+"626967"+ // field 201, encoding 2, string "big"
+ "ca0c04"+"6e6f7365"+ // field 201, encoding 2, string "nose"
+ "d00c40"+ // field 202, encoding 0, value 32
+ "d00c3f"+ // field 202, encoding 0, value -32
+ "d80c8001"+ // field 203, encoding 0, value 64
+ "d80c7f"+ // field 203, encoding 0, value -64
+ "8a1907"+"4269676e6f7365"+ // field 401, encoding 2, string "Bignose"
+ "90193f"+ // field 402, encoding 0, value 63
+ "98197f") // field 403, encoding 0, value 127
+
+}
+
+// All required fields set, all packed repeated fields given two values.
+func TestEncodeDecode6(t *testing.T) {
+ pb := initGoTest(false)
+ pb.F_BoolRepeatedPacked = []bool{false, true}
+ pb.F_Int32RepeatedPacked = []int32{32, 33}
+ pb.F_Int64RepeatedPacked = []int64{64, 65}
+ pb.F_Fixed32RepeatedPacked = []uint32{3232, 3333}
+ pb.F_Fixed64RepeatedPacked = []uint64{6464, 6565}
+ pb.F_Uint32RepeatedPacked = []uint32{323232, 333333}
+ pb.F_Uint64RepeatedPacked = []uint64{646464, 656565}
+ pb.F_FloatRepeatedPacked = []float32{32., 33.}
+ pb.F_DoubleRepeatedPacked = []float64{64., 65.}
+ pb.F_Sint32RepeatedPacked = []int32{32, -32}
+ pb.F_Sint64RepeatedPacked = []int64{64, -64}
+
+ overify(t, pb,
+ "0807"+ // field 1, encoding 0, value 7
+ "220d"+"0a056c6162656c120474797065"+ // field 4, encoding 2 (GoTestField)
+ "5001"+ // field 10, encoding 0, value 1
+ "5803"+ // field 11, encoding 0, value 3
+ "6006"+ // field 12, encoding 0, value 6
+ "6d20000000"+ // field 13, encoding 5, value 32
+ "714000000000000000"+ // field 14, encoding 1, value 64
+ "78a019"+ // field 15, encoding 0, value 3232
+ "8001c032"+ // field 16, encoding 0, value 6464
+ "8d0100004a45"+ // field 17, encoding 5, value 3232.0
+ "9101000000000040b940"+ // field 18, encoding 1, value 6464.0
+ "9a0106"+"737472696e67"+ // field 19, encoding 2 string "string"
+ "9203020001"+ // field 50, encoding 2, 2 bytes, value 0, value 1
+ "9a03022021"+ // field 51, encoding 2, 2 bytes, value 32, value 33
+ "a203024041"+ // field 52, encoding 2, 2 bytes, value 64, value 65
+ "aa0308"+ // field 53, encoding 2, 8 bytes
+ "a00c0000050d0000"+ // value 3232, value 3333
+ "b20310"+ // field 54, encoding 2, 16 bytes
+ "4019000000000000a519000000000000"+ // value 6464, value 6565
+ "ba0306"+ // field 55, encoding 2, 6 bytes
+ "a0dd1395ac14"+ // value 323232, value 333333
+ "c20306"+ // field 56, encoding 2, 6 bytes
+ "c0ba27b58928"+ // value 646464, value 656565
+ "ca0308"+ // field 57, encoding 2, 8 bytes
+ "0000004200000442"+ // value 32.0, value 33.0
+ "d20310"+ // field 58, encoding 2, 16 bytes
+ "00000000000050400000000000405040"+ // value 64.0, value 65.0
+ "b304"+ // start group field 70 level 1
+ "ba0408"+"7265717569726564"+ // field 71, encoding 2, string "required"
+ "b404"+ // end group field 70 level 1
+ "aa0605"+"6279746573"+ // field 101, encoding 2 string "bytes"
+ "b0063f"+ // field 102, encoding 0, 0x3f zigzag32
+ "b8067f"+ // field 103, encoding 0, 0x7f zigzag64
+ "b21f02"+ // field 502, encoding 2, 2 bytes
+ "403f"+ // value 32, value -32
+ "ba1f03"+ // field 503, encoding 2, 3 bytes
+ "80017f") // value 64, value -64
+}
+
+// Test that we can encode empty bytes fields.
+func TestEncodeDecodeBytes1(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRequired = []byte{}
+ pb.F_BytesRepeated = [][]byte{{}}
+ pb.F_BytesOptional = []byte{}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if pbd.F_BytesRequired == nil || len(pbd.F_BytesRequired) != 0 {
+ t.Error("required empty bytes field is incorrect")
+ }
+ if pbd.F_BytesRepeated == nil || len(pbd.F_BytesRepeated) == 1 && pbd.F_BytesRepeated[0] == nil {
+ t.Error("repeated empty bytes field is incorrect")
+ }
+ if pbd.F_BytesOptional == nil || len(pbd.F_BytesOptional) != 0 {
+ t.Error("optional empty bytes field is incorrect")
+ }
+}
+
+// Test that we encode nil-valued fields of a repeated bytes field correctly.
+// Since entries in a repeated field cannot be nil, nil must mean empty value.
+func TestEncodeDecodeBytes2(t *testing.T) {
+ pb := initGoTest(false)
+
+ // Create our bytes
+ pb.F_BytesRepeated = [][]byte{nil}
+
+ d, err := Marshal(pb)
+ if err != nil {
+ t.Error(err)
+ }
+
+ pbd := new(GoTest)
+ if err := Unmarshal(d, pbd); err != nil {
+ t.Error(err)
+ }
+
+ if len(pbd.F_BytesRepeated) != 1 || pbd.F_BytesRepeated[0] == nil {
+ t.Error("Unexpected value for repeated bytes field")
+ }
+}
+
+// All required fields set, defaults provided, all repeated fields given two values.
+func TestSkippingUnrecognizedFields(t *testing.T) {
+ o := old()
+ pb := initGoTestField()
+
+ // Marshal it normally.
+ o.Marshal(pb)
+
+ // Now new a GoSkipTest record.
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ // Marshal it into same buffer.
+ o.Marshal(skip)
+
+ pbd := new(GoTestField)
+ o.Unmarshal(pbd)
+
+	// The XXX_unrecognized field should be a marshaling of GoSkipTest
+ skipd := new(GoSkipTest)
+
+ o.SetBuf(pbd.XXX_unrecognized)
+ o.Unmarshal(skipd)
+
+ if *skipd.SkipInt32 != *skip.SkipInt32 {
+ t.Error("skip int32", skipd.SkipInt32)
+ }
+ if *skipd.SkipFixed32 != *skip.SkipFixed32 {
+ t.Error("skip fixed32", skipd.SkipFixed32)
+ }
+ if *skipd.SkipFixed64 != *skip.SkipFixed64 {
+ t.Error("skip fixed64", skipd.SkipFixed64)
+ }
+ if *skipd.SkipString != *skip.SkipString {
+ t.Error("skip string", *skipd.SkipString)
+ }
+ if *skipd.Skipgroup.GroupInt32 != *skip.Skipgroup.GroupInt32 {
+ t.Error("skip group int32", skipd.Skipgroup.GroupInt32)
+ }
+ if *skipd.Skipgroup.GroupString != *skip.Skipgroup.GroupString {
+ t.Error("skip group string", *skipd.Skipgroup.GroupString)
+ }
+}
+
+// Check that unrecognized fields of a submessage are preserved.
+func TestSubmessageUnrecognizedFields(t *testing.T) {
+ nm := &NewMessage{
+ Nested: &NewMessage_Nested{
+ Name: String("Nigel"),
+ FoodGroup: String("carbs"),
+ },
+ }
+ b, err := Marshal(nm)
+ if err != nil {
+ t.Fatalf("Marshal of NewMessage: %v", err)
+ }
+
+ // Unmarshal into an OldMessage.
+ om := new(OldMessage)
+ if err := Unmarshal(b, om); err != nil {
+ t.Fatalf("Unmarshal to OldMessage: %v", err)
+ }
+ exp := &OldMessage{
+ Nested: &OldMessage_Nested{
+ Name: String("Nigel"),
+ // normal protocol buffer users should not do this
+ XXX_unrecognized: []byte("\x12\x05carbs"),
+ },
+ }
+ if !Equal(om, exp) {
+ t.Errorf("om = %v, want %v", om, exp)
+ }
+
+ // Clone the OldMessage.
+ om = Clone(om).(*OldMessage)
+ if !Equal(om, exp) {
+ t.Errorf("Clone(om) = %v, want %v", om, exp)
+ }
+
+ // Marshal the OldMessage, then unmarshal it into an empty NewMessage.
+ if b, err = Marshal(om); err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+ t.Logf("Marshal(%v) -> %q", om, b)
+ nm2 := new(NewMessage)
+ if err := Unmarshal(b, nm2); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ if !Equal(nm, nm2) {
+ t.Errorf("NewMessage round-trip: %v => %v", nm, nm2)
+ }
+}
+
+// Check that an int32 field can be upgraded to an int64 field.
+func TestNegativeInt32(t *testing.T) {
+ om := &OldMessage{
+ Num: Int32(-1),
+ }
+ b, err := Marshal(om)
+ if err != nil {
+ t.Fatalf("Marshal of OldMessage: %v", err)
+ }
+
+ // Check the size. It should be 11 bytes;
+ // 1 for the field/wire type, and 10 for the negative number.
+ if len(b) != 11 {
+ t.Errorf("%v marshaled as %q, wanted 11 bytes", om, b)
+ }
+
+ // Unmarshal into a NewMessage.
+ nm := new(NewMessage)
+ if err := Unmarshal(b, nm); err != nil {
+ t.Fatalf("Unmarshal to NewMessage: %v", err)
+ }
+ want := &NewMessage{
+ Num: Int64(-1),
+ }
+ if !Equal(nm, want) {
+ t.Errorf("nm = %v, want %v", nm, want)
+ }
+}
+
+// Check that we can grow an array (repeated field) to have many elements.
+// This test doesn't depend only on our encoding; for variety, it makes sure
+// we create, encode, and decode the correct contents explicitly. It's therefore
+// a bit messier.
+// This test also uses (and hence tests) the Marshal/Unmarshal functions
+// instead of the methods.
+func TestBigRepeated(t *testing.T) {
+ pb := initGoTest(true)
+
+ // Create the arrays
+ const N = 50 // Internally the library starts much smaller.
+ pb.Repeatedgroup = make([]*GoTest_RepeatedGroup, N)
+ pb.F_Sint64Repeated = make([]int64, N)
+ pb.F_Sint32Repeated = make([]int32, N)
+ pb.F_BytesRepeated = make([][]byte, N)
+ pb.F_StringRepeated = make([]string, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ pb.F_FloatRepeated = make([]float32, N)
+ pb.F_Uint64Repeated = make([]uint64, N)
+ pb.F_Uint32Repeated = make([]uint32, N)
+ pb.F_Fixed64Repeated = make([]uint64, N)
+ pb.F_Fixed32Repeated = make([]uint32, N)
+ pb.F_Int64Repeated = make([]int64, N)
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_BoolRepeated = make([]bool, N)
+ pb.RepeatedField = make([]*GoTestField, N)
+
+ // Fill in the arrays with checkable values.
+ igtf := initGoTestField()
+ igtrg := initGoTest_RepeatedGroup()
+ for i := 0; i < N; i++ {
+ pb.Repeatedgroup[i] = igtrg
+ pb.F_Sint64Repeated[i] = int64(i)
+ pb.F_Sint32Repeated[i] = int32(i)
+ s := fmt.Sprint(i)
+ pb.F_BytesRepeated[i] = []byte(s)
+ pb.F_StringRepeated[i] = s
+ pb.F_DoubleRepeated[i] = float64(i)
+ pb.F_FloatRepeated[i] = float32(i)
+ pb.F_Uint64Repeated[i] = uint64(i)
+ pb.F_Uint32Repeated[i] = uint32(i)
+ pb.F_Fixed64Repeated[i] = uint64(i)
+ pb.F_Fixed32Repeated[i] = uint32(i)
+ pb.F_Int64Repeated[i] = int64(i)
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_BoolRepeated[i] = i%2 == 0
+ pb.RepeatedField[i] = igtf
+ }
+
+ // Marshal.
+ buf, _ := Marshal(pb)
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ Unmarshal(buf, pbd)
+
+ // Check the checkable values
+ for i := uint64(0); i < N; i++ {
+ if pbd.Repeatedgroup[i] == nil { // TODO: more checking?
+ t.Error("pbd.Repeatedgroup bad")
+ }
+ var x uint64
+ x = uint64(pbd.F_Sint64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Sint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Sint32Repeated bad", x, i)
+ }
+ s := fmt.Sprint(i)
+ equalbytes(pbd.F_BytesRepeated[i], []byte(s), t)
+ if pbd.F_StringRepeated[i] != s {
+ t.Error("pbd.F_Sint32Repeated bad", pbd.F_StringRepeated[i], i)
+ }
+ x = uint64(pbd.F_DoubleRepeated[i])
+ if x != i {
+ t.Error("pbd.F_DoubleRepeated bad", x, i)
+ }
+ x = uint64(pbd.F_FloatRepeated[i])
+ if x != i {
+ t.Error("pbd.F_FloatRepeated bad", x, i)
+ }
+ x = pbd.F_Uint64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Uint64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Uint32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Uint32Repeated bad", x, i)
+ }
+ x = pbd.F_Fixed64Repeated[i]
+ if x != i {
+ t.Error("pbd.F_Fixed64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Fixed32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Fixed32Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int64Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int64Repeated bad", x, i)
+ }
+ x = uint64(pbd.F_Int32Repeated[i])
+ if x != i {
+ t.Error("pbd.F_Int32Repeated bad", x, i)
+ }
+ if pbd.F_BoolRepeated[i] != (i%2 == 0) {
+ t.Error("pbd.F_BoolRepeated bad", x, i)
+ }
+ if pbd.RepeatedField[i] == nil { // TODO: more checking?
+ t.Error("pbd.RepeatedField bad")
+ }
+ }
+}
+
+// Verify we give a useful message when decoding to the wrong structure type.
+func TestTypeMismatch(t *testing.T) {
+ pb1 := initGoTest(true)
+
+ // Marshal
+ o := old()
+ o.Marshal(pb1)
+
+ // Now Unmarshal it to the wrong type.
+ pb2 := initGoTestField()
+ err := o.Unmarshal(pb2)
+ if err == nil {
+ t.Error("expected error, got no error")
+ } else if !strings.Contains(err.Error(), "bad wiretype") {
+ t.Error("expected bad wiretype error, got", err)
+ }
+}
+
+func encodeDecode(t *testing.T, in, out Message, msg string) {
+ buf, err := Marshal(in)
+ if err != nil {
+ t.Fatalf("failed marshaling %v: %v", msg, err)
+ }
+ if err := Unmarshal(buf, out); err != nil {
+ t.Fatalf("failed unmarshaling %v: %v", msg, err)
+ }
+}
+
+func TestPackedNonPackedDecoderSwitching(t *testing.T) {
+ np, p := new(NonPackedTest), new(PackedTest)
+
+ // non-packed -> packed
+ np.A = []int32{0, 1, 1, 2, 3, 5}
+ encodeDecode(t, np, p, "non-packed -> packed")
+ if !reflect.DeepEqual(np.A, p.B) {
+ t.Errorf("failed non-packed -> packed; np.A=%+v, p.B=%+v", np.A, p.B)
+ }
+
+ // packed -> non-packed
+ np.Reset()
+ p.B = []int32{3, 1, 4, 1, 5, 9}
+ encodeDecode(t, p, np, "packed -> non-packed")
+ if !reflect.DeepEqual(p.B, np.A) {
+ t.Errorf("failed packed -> non-packed; p.B=%+v, np.A=%+v", p.B, np.A)
+ }
+}
+
+func TestProto1RepeatedGroup(t *testing.T) {
+ pb := &MessageList{
+ Message: []*MessageList_Message{
+ {
+ Name: String("blah"),
+ Count: Int32(7),
+ },
+ // NOTE: pb.Message[1] is a nil
+ nil,
+ },
+ }
+
+ o := old()
+ if err := o.Marshal(pb); err != ErrRepeatedHasNil {
+ t.Fatalf("unexpected or no error when marshaling: %v", err)
+ }
+}
+
+// Test that enums work. Checks for a bug introduced by making enums
+// named types instead of int32: newInt32FromUint64 would crash with
+// a type mismatch in reflect.PtrTo.
+func TestEnum(t *testing.T) {
+ pb := new(GoEnum)
+ pb.Foo = FOO_FOO1.Enum()
+ o := old()
+ if err := o.Marshal(pb); err != nil {
+ t.Fatal("error encoding enum:", err)
+ }
+ pb1 := new(GoEnum)
+ if err := o.Unmarshal(pb1); err != nil {
+ t.Fatal("error decoding enum:", err)
+ }
+ if *pb1.Foo != FOO_FOO1 {
+ t.Error("expected 7 but got ", *pb1.Foo)
+ }
+}
+
+// Enum types have String methods. Check that enum fields can be printed.
+// We don't care what the value actually is, just as long as it doesn't crash.
+func TestPrintingNilEnumFields(t *testing.T) {
+ pb := new(GoEnum)
+ fmt.Sprintf("%+v", pb)
+}
+
+// Verify that absent required fields cause Marshal/Unmarshal to return errors.
+func TestRequiredFieldEnforcement(t *testing.T) {
+ pb := new(GoTestField)
+ _, err := Marshal(pb)
+ if err == nil {
+ t.Error("marshal: expected error, got nil")
+ } else if strings.Index(err.Error(), "Label") < 0 {
+ t.Errorf("marshal: bad error type: %v", err)
+ }
+
+ // A slightly sneaky, yet valid, proto. It encodes the same required field twice,
+ // so simply counting the required fields is insufficient.
+ // field 1, encoding 2, value "hi"
+ buf := []byte("\x0A\x02hi\x0A\x02hi")
+ err = Unmarshal(buf, pb)
+ if err == nil {
+ t.Error("unmarshal: expected error, got nil")
+ } else if strings.Index(err.Error(), "{Unknown}") < 0 {
+ t.Errorf("unmarshal: bad error type: %v", err)
+ }
+}
+
+func TestTypedNilMarshal(t *testing.T) {
+ // A typed nil should return ErrNil and not crash.
+ _, err := Marshal((*GoEnum)(nil))
+ if err != ErrNil {
+ t.Errorf("Marshal: got err %v, want ErrNil", err)
+ }
+}
+
+// A type that implements the Marshaler interface, but is not nillable.
+type nonNillableInt uint64
+
+func (nni nonNillableInt) Marshal() ([]byte, error) {
+ return EncodeVarint(uint64(nni)), nil
+}
+
+type NNIMessage struct {
+ nni nonNillableInt
+}
+
+func (*NNIMessage) Reset() {}
+func (*NNIMessage) String() string { return "" }
+func (*NNIMessage) ProtoMessage() {}
+
+// A type that implements the Marshaler interface and is nillable.
+type nillableMessage struct {
+ x uint64
+}
+
+func (nm *nillableMessage) Marshal() ([]byte, error) {
+ return EncodeVarint(nm.x), nil
+}
+
+type NMMessage struct {
+ nm *nillableMessage
+}
+
+func (*NMMessage) Reset() {}
+func (*NMMessage) String() string { return "" }
+func (*NMMessage) ProtoMessage() {}
+
+// Verify a type that uses the Marshaler interface, but has a nil pointer.
+func TestNilMarshaler(t *testing.T) {
+ // Try a struct with a Marshaler field that is nil.
+	// It should be directly marshalable.
+ nmm := new(NMMessage)
+ if _, err := Marshal(nmm); err != nil {
+ t.Error("unexpected error marshaling nmm: ", err)
+ }
+
+ // Try a struct with a Marshaler field that is not nillable.
+ nnim := new(NNIMessage)
+ nnim.nni = 7
+ var _ Marshaler = nnim.nni // verify it is truly a Marshaler
+ if _, err := Marshal(nnim); err != nil {
+ t.Error("unexpected error marshaling nnim: ", err)
+ }
+}
+
+func TestAllSetDefaults(t *testing.T) {
+ // Exercise SetDefaults with all scalar field types.
+ m := &Defaults{
+ // NaN != NaN, so override that here.
+ F_Nan: Float32(1.7),
+ }
+ expected := &Defaults{
+ F_Bool: Bool(true),
+ F_Int32: Int32(32),
+ F_Int64: Int64(64),
+ F_Fixed32: Uint32(320),
+ F_Fixed64: Uint64(640),
+ F_Uint32: Uint32(3200),
+ F_Uint64: Uint64(6400),
+ F_Float: Float32(314159),
+ F_Double: Float64(271828),
+ F_String: String(`hello, "world!"` + "\n"),
+ F_Bytes: []byte("Bignose"),
+ F_Sint32: Int32(-32),
+ F_Sint64: Int64(-64),
+ F_Enum: Defaults_GREEN.Enum(),
+ F_Pinf: Float32(float32(math.Inf(1))),
+ F_Ninf: Float32(float32(math.Inf(-1))),
+ F_Nan: Float32(1.7),
+ StrZero: String(""),
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("SetDefaults failed\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithSetField(t *testing.T) {
+ // Check that a set value is not overridden.
+ m := &Defaults{
+ F_Int32: Int32(12),
+ }
+ SetDefaults(m)
+ if v := m.GetF_Int32(); v != 12 {
+ t.Errorf("m.FInt32 = %v, want 12", v)
+ }
+}
+
+func TestSetDefaultsWithSubMessage(t *testing.T) {
+ m := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ },
+ }
+ expected := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("gopher"),
+ Port: Int32(4000),
+ },
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestSetDefaultsWithRepeatedSubMessage(t *testing.T) {
+ m := &MyMessage{
+ RepInner: []*InnerMessage{{}},
+ }
+ expected := &MyMessage{
+ RepInner: []*InnerMessage{{
+ Port: Int32(4000),
+ }},
+ }
+ SetDefaults(m)
+ if !Equal(m, expected) {
+ t.Errorf("\n got %v\nwant %v", m, expected)
+ }
+}
+
+func TestMaximumTagNumber(t *testing.T) {
+ m := &MaxTag{
+ LastField: String("natural goat essence"),
+ }
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("proto.Marshal failed: %v", err)
+ }
+ m2 := new(MaxTag)
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Fatalf("proto.Unmarshal failed: %v", err)
+ }
+ if got, want := m2.GetLastField(), *m.LastField; got != want {
+ t.Errorf("got %q, want %q", got, want)
+ }
+}
+
+func TestJSON(t *testing.T) {
+ m := &MyMessage{
+ Count: Int32(4),
+ Pet: []string{"bunny", "kitty"},
+ Inner: &InnerMessage{
+ Host: String("cauchy"),
+ },
+ Bikeshed: MyMessage_GREEN.Enum(),
+ }
+ const expected = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":1}`
+
+ b, err := json.Marshal(m)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ s := string(b)
+ if s != expected {
+ t.Errorf("got %s\nwant %s", s, expected)
+ }
+
+ received := new(MyMessage)
+ if err := json.Unmarshal(b, received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+
+ // Test unmarshalling of JSON with symbolic enum name.
+ const old = `{"count":4,"pet":["bunny","kitty"],"inner":{"host":"cauchy"},"bikeshed":"GREEN"}`
+ received.Reset()
+ if err := json.Unmarshal([]byte(old), received); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !Equal(received, m) {
+ t.Fatalf("got %s, want %s", received, m)
+ }
+}
+
+func TestBadWireType(t *testing.T) {
+ b := []byte{7<<3 | 6} // field 7, wire type 6
+ pb := new(OtherMessage)
+ if err := Unmarshal(b, pb); err == nil {
+ t.Errorf("Unmarshal did not fail")
+ } else if !strings.Contains(err.Error(), "unknown wire type") {
+ t.Errorf("wrong error: %v", err)
+ }
+}
+
+func TestBytesWithInvalidLength(t *testing.T) {
+ // If a byte sequence has an invalid (negative) length, Unmarshal should not panic.
+ b := []byte{2<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestLengthOverflow(t *testing.T) {
+ // Overflowing a length should not panic.
+ b := []byte{2<<3 | WireBytes, 1, 1, 3<<3 | WireBytes, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x7f, 0x01}
+ Unmarshal(b, new(MyMessage))
+}
+
+func TestVarintOverflow(t *testing.T) {
+ // Overflowing a 64-bit length should not be allowed.
+ b := []byte{1<<3 | WireVarint, 0x01, 3<<3 | WireBytes, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x01}
+ if err := Unmarshal(b, new(MyMessage)); err == nil {
+ t.Fatalf("Overflowed uint64 length without error")
+ }
+}
+
+func TestUnmarshalFuzz(t *testing.T) {
+ const N = 1000
+ seed := time.Now().UnixNano()
+ t.Logf("RNG seed is %d", seed)
+ rng := rand.New(rand.NewSource(seed))
+ buf := make([]byte, 20)
+ for i := 0; i < N; i++ {
+ for j := range buf {
+ buf[j] = byte(rng.Intn(256))
+ }
+ fuzzUnmarshal(t, buf)
+ }
+}
+
+func TestMergeMessages(t *testing.T) {
+ pb := &MessageList{Message: []*MessageList_Message{{Name: String("x"), Count: Int32(1)}}}
+ data, err := Marshal(pb)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ pb1 := new(MessageList)
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("first Unmarshal: %v", err)
+ }
+ if err := Unmarshal(data, pb1); err != nil {
+ t.Fatalf("second Unmarshal: %v", err)
+ }
+ if len(pb1.Message) != 1 {
+ t.Errorf("two Unmarshals produced %d Messages, want 1", len(pb1.Message))
+ }
+
+ pb2 := new(MessageList)
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("first UnmarshalMerge: %v", err)
+ }
+ if err := UnmarshalMerge(data, pb2); err != nil {
+ t.Fatalf("second UnmarshalMerge: %v", err)
+ }
+ if len(pb2.Message) != 2 {
+ t.Errorf("two UnmarshalMerges produced %d Messages, want 2", len(pb2.Message))
+ }
+}
+
+func TestExtensionMarshalOrder(t *testing.T) {
+ m := &MyMessage{Count: Int(123)}
+ if err := SetExtension(m, E_Ext_More, &Ext{Data: String("alpha")}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Text, String("aleph")); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ if err := SetExtension(m, E_Ext_Number, Int32(1)); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ var orig []byte
+ for i := 0; i < 100; i++ {
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if i == 0 {
+ orig = b
+ continue
+ }
+ if !bytes.Equal(b, orig) {
+ t.Errorf("Bytes differ on attempt #%d", i)
+ }
+ }
+}
+
+// Many extensions, because a small map might iterate in the same order on every run, hiding ordering bugs.
+var exts = []*ExtensionDesc{
+ E_X201,
+ E_X202,
+ E_X203,
+ E_X204,
+ E_X205,
+ E_X206,
+ E_X207,
+ E_X208,
+ E_X209,
+ E_X210,
+ E_X211,
+ E_X212,
+ E_X213,
+ E_X214,
+ E_X215,
+ E_X216,
+ E_X217,
+ E_X218,
+ E_X219,
+ E_X220,
+ E_X221,
+ E_X222,
+ E_X223,
+ E_X224,
+ E_X225,
+ E_X226,
+ E_X227,
+ E_X228,
+ E_X229,
+ E_X230,
+ E_X231,
+ E_X232,
+ E_X233,
+ E_X234,
+ E_X235,
+ E_X236,
+ E_X237,
+ E_X238,
+ E_X239,
+ E_X240,
+ E_X241,
+ E_X242,
+ E_X243,
+ E_X244,
+ E_X245,
+ E_X246,
+ E_X247,
+ E_X248,
+ E_X249,
+ E_X250,
+}
+
+func TestMessageSetMarshalOrder(t *testing.T) {
+ m := &MyMessageSet{}
+ for _, x := range exts {
+ if err := SetExtension(m, x, &Empty{}); err != nil {
+ t.Fatalf("SetExtension: %v", err)
+ }
+ }
+
+ buf, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // Serialize m several times, and check we get the same bytes each time.
+ for i := 0; i < 10; i++ {
+ b1, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ if !bytes.Equal(b1, buf) {
+ t.Errorf("Bytes differ on re-Marshal #%d", i)
+ }
+
+ m2 := &MyMessageSet{}
+ if err := Unmarshal(buf, m2); err != nil {
+ t.Errorf("Unmarshal: %v", err)
+ }
+ b2, err := Marshal(m2)
+ if err != nil {
+ t.Errorf("re-Marshal: %v", err)
+ }
+ if !bytes.Equal(b2, buf) {
+ t.Errorf("Bytes differ on round-trip #%d", i)
+ }
+ }
+}
+
+func TestUnmarshalMergesMessages(t *testing.T) {
+ // If a nested message occurs twice in the input,
+ // the fields should be merged when decoding.
+ a := &OtherMessage{
+ Key: Int64(123),
+ Inner: &InnerMessage{
+ Host: String("polhode"),
+ Port: Int32(1234),
+ },
+ }
+ aData, err := Marshal(a)
+ if err != nil {
+ t.Fatalf("Marshal(a): %v", err)
+ }
+ b := &OtherMessage{
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Connected: Bool(true),
+ },
+ }
+ bData, err := Marshal(b)
+ if err != nil {
+ t.Fatalf("Marshal(b): %v", err)
+ }
+ want := &OtherMessage{
+ Key: Int64(123),
+ Weight: Float32(1.2),
+ Inner: &InnerMessage{
+ Host: String("herpolhode"),
+ Port: Int32(1234),
+ Connected: Bool(true),
+ },
+ }
+ got := new(OtherMessage)
+ if err := Unmarshal(append(aData, bData...), got); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ if !Equal(got, want) {
+ t.Errorf("\n got %v\nwant %v", got, want)
+ }
+}
+
+func TestEncodingSizes(t *testing.T) {
+ tests := []struct {
+ m Message
+ n int
+ }{
+ {&Defaults{F_Int32: Int32(math.MaxInt32)}, 6},
+ {&Defaults{F_Int32: Int32(math.MinInt32)}, 11},
+ {&Defaults{F_Uint32: Uint32(uint32(math.MaxInt32) + 1)}, 6},
+ {&Defaults{F_Uint32: Uint32(math.MaxUint32)}, 6},
+ }
+ for _, test := range tests {
+ b, err := Marshal(test.m)
+ if err != nil {
+ t.Errorf("Marshal(%v): %v", test.m, err)
+ continue
+ }
+ if len(b) != test.n {
+ t.Errorf("Marshal(%v) yielded %d bytes, want %d bytes", test.m, len(b), test.n)
+ }
+ }
+}
+
+func TestRequiredNotSetError(t *testing.T) {
+ pb := initGoTest(false)
+ pb.RequiredField.Label = nil
+ pb.F_Int32Required = nil
+ pb.F_Int64Required = nil
+
+ expected := "0807" + // field 1, encoding 0, value 7
+ "2206" + "120474797065" + // field 4, encoding 2 (GoTestField)
+ "5001" + // field 10, encoding 0, value 1
+ "6d20000000" + // field 13, encoding 5, value 0x20
+ "714000000000000000" + // field 14, encoding 1, value 0x40
+ "78a019" + // field 15, encoding 0, value 0xca0 = 3232
+ "8001c032" + // field 16, encoding 0, value 0x1940 = 6464
+ "8d0100004a45" + // field 17, encoding 5, value 3232.0
+ "9101000000000040b940" + // field 18, encoding 1, value 6464.0
+ "9a0106" + "737472696e67" + // field 19, encoding 2, string "string"
+ "b304" + // field 70, encoding 3, start group
+ "ba0408" + "7265717569726564" + // field 71, encoding 2, string "required"
+ "b404" + // field 70, encoding 4, end group
+ "aa0605" + "6279746573" + // field 101, encoding 2, string "bytes"
+ "b0063f" + // field 102, encoding 0, 0x3f zigzag32
+ "b8067f" // field 103, encoding 0, 0x7f zigzag64
+
+ o := old()
+ bytes, err := Marshal(pb)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-1 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 1", bytes)
+ t.Fatalf("expected = %s", expected)
+ }
+
+ // Now test Unmarshal by recreating the original buffer.
+ pbd := new(GoTest)
+ err = Unmarshal(bytes, pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 {
+ t.Errorf("unmarshal wrong err msg: %v", err)
+ }
+ bytes, err = Marshal(pbd)
+ if _, ok := err.(*RequiredNotSetError); !ok {
+ t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err)
+ o.DebugPrint("", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+ if strings.Index(err.Error(), "RequiredField.Label") < 0 {
+ t.Errorf("marshal-2 wrong err msg: %v", err)
+ }
+ if !equal(bytes, expected, t) {
+ o.DebugPrint("neq 2", bytes)
+ t.Fatalf("string = %s", expected)
+ }
+}
+
+func fuzzUnmarshal(t *testing.T, data []byte) {
+ defer func() {
+ if e := recover(); e != nil {
+ t.Errorf("These bytes caused a panic: %+v", data)
+ t.Logf("Stack:\n%s", debug.Stack())
+ t.FailNow()
+ }
+ }()
+
+ pb := new(MyMessage)
+ Unmarshal(data, pb)
+}
+
+func TestMapFieldMarshal(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+
+ // b should be the concatenation of these three byte sequences in some order.
+ parts := []string{
+ "\n\a\b\x01\x12\x03Rob",
+ "\n\a\b\x04\x12\x03Ian",
+ "\n\b\b\x08\x12\x04Dave",
+ }
+ ok := false
+ for i := range parts {
+ for j := range parts {
+ if j == i {
+ continue
+ }
+ for k := range parts {
+ if k == i || k == j {
+ continue
+ }
+ try := parts[i] + parts[j] + parts[k]
+ if bytes.Equal(b, []byte(try)) {
+ ok = true
+ break
+ }
+ }
+ }
+ }
+ if !ok {
+ t.Fatalf("Incorrect Marshal output.\n got %q\nwant %q (or a permutation of that)", b, parts[0]+parts[1]+parts[2])
+ }
+ t.Logf("FYI b: %q", b)
+
+ (new(Buffer)).DebugPrint("Dump of b", b)
+}
+
+func TestMapFieldRoundTrips(t *testing.T) {
+ m := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Rob",
+ 4: "Ian",
+ 8: "Dave",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ 0x7001: &FloatingPoint{F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ false: []byte("that's not right!"),
+ true: []byte("aye, 'tis true!"),
+ },
+ }
+ b, err := Marshal(m)
+ if err != nil {
+ t.Fatalf("Marshal: %v", err)
+ }
+ t.Logf("FYI b: %q", b)
+ m2 := new(MessageWithMap)
+ if err := Unmarshal(b, m2); err != nil {
+ t.Fatalf("Unmarshal: %v", err)
+ }
+ for _, pair := range [][2]interface{}{
+ {m.NameMapping, m2.NameMapping},
+ {m.MsgMapping, m2.MsgMapping},
+ {m.ByteMapping, m2.ByteMapping},
+ } {
+ if !reflect.DeepEqual(pair[0], pair[1]) {
+ t.Errorf("Map did not survive a round trip.\ninitial: %v\n final: %v", pair[0], pair[1])
+ }
+ }
+}
+
+// Benchmarks
+
+func testMsg() *GoTest {
+ pb := initGoTest(true)
+ const N = 1000 // Internally the library starts much smaller.
+ pb.F_Int32Repeated = make([]int32, N)
+ pb.F_DoubleRepeated = make([]float64, N)
+ for i := 0; i < N; i++ {
+ pb.F_Int32Repeated[i] = int32(i)
+ pb.F_DoubleRepeated[i] = float64(i)
+ }
+ return pb
+}
+
+func bytesMsg() *GoTest {
+ pb := initGoTest(true)
+ buf := make([]byte, 4000)
+ for i := range buf {
+ buf[i] = byte(i)
+ }
+ pb.F_BytesDefaulted = buf
+ return pb
+}
+
+func benchmarkMarshal(b *testing.B, pb Message, marshal func(Message) ([]byte, error)) {
+ d, _ := marshal(pb)
+ b.SetBytes(int64(len(d)))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ marshal(pb)
+ }
+}
+
+func benchmarkBufferMarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ p.Reset()
+ err := p.Marshal(pb0)
+ return p.Bytes(), err
+ })
+}
+
+func benchmarkSize(b *testing.B, pb Message) {
+ benchmarkMarshal(b, pb, func(pb0 Message) ([]byte, error) {
+ Size(pb)
+ return nil, nil
+ })
+}
+
+func newOf(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+ return reflect.New(in.Type().Elem()).Interface().(Message)
+}
+
+func benchmarkUnmarshal(b *testing.B, pb Message, unmarshal func([]byte, Message) error) {
+ d, _ := Marshal(pb)
+ b.SetBytes(int64(len(d)))
+ pbd := newOf(pb)
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ unmarshal(d, pbd)
+ }
+}
+
+func benchmarkBufferUnmarshal(b *testing.B, pb Message) {
+ p := NewBuffer(nil)
+ benchmarkUnmarshal(b, pb, func(d []byte, pb0 Message) error {
+ p.SetBuf(d)
+ return p.Unmarshal(pb0)
+ })
+}
+
+// Benchmark{Marshal,BufferMarshal,Size,Unmarshal,BufferUnmarshal}{,Bytes}
+
+func BenchmarkMarshal(b *testing.B) {
+ benchmarkMarshal(b, testMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshal(b *testing.B) {
+ benchmarkBufferMarshal(b, testMsg())
+}
+
+func BenchmarkSize(b *testing.B) {
+ benchmarkSize(b, testMsg())
+}
+
+func BenchmarkUnmarshal(b *testing.B) {
+ benchmarkUnmarshal(b, testMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshal(b *testing.B) {
+ benchmarkBufferUnmarshal(b, testMsg())
+}
+
+func BenchmarkMarshalBytes(b *testing.B) {
+ benchmarkMarshal(b, bytesMsg(), Marshal)
+}
+
+func BenchmarkBufferMarshalBytes(b *testing.B) {
+ benchmarkBufferMarshal(b, bytesMsg())
+}
+
+func BenchmarkSizeBytes(b *testing.B) {
+ benchmarkSize(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalBytes(b *testing.B) {
+ benchmarkUnmarshal(b, bytesMsg(), Unmarshal)
+}
+
+func BenchmarkBufferUnmarshalBytes(b *testing.B) {
+ benchmarkBufferUnmarshal(b, bytesMsg())
+}
+
+func BenchmarkUnmarshalUnrecognizedFields(b *testing.B) {
+ b.StopTimer()
+ pb := initGoTestField()
+ skip := &GoSkipTest{
+ SkipInt32: Int32(32),
+ SkipFixed32: Uint32(3232),
+ SkipFixed64: Uint64(6464),
+ SkipString: String("skipper"),
+ Skipgroup: &GoSkipTest_SkipGroup{
+ GroupInt32: Int32(75),
+ GroupString: String("wxyz"),
+ },
+ }
+
+ pbd := new(GoTestField)
+ p := NewBuffer(nil)
+ p.Marshal(pb)
+ p.Marshal(skip)
+ p2 := NewBuffer(nil)
+
+ b.StartTimer()
+ for i := 0; i < b.N; i++ {
+ p2.SetBuf(p.Bytes())
+ p2.Unmarshal(pbd)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
new file mode 100644
index 000000000000..ae276fd77c0d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone.go
@@ -0,0 +1,197 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer deep copy.
+// TODO: MessageSet and RawMessage.
+
+package proto
+
+import (
+ "log"
+ "reflect"
+ "strings"
+)
+
+// Clone returns a deep copy of a protocol buffer.
+func Clone(pb Message) Message {
+ in := reflect.ValueOf(pb)
+ if in.IsNil() {
+ return pb
+ }
+
+ out := reflect.New(in.Type().Elem())
+ // out is empty so a merge is a deep copy.
+ mergeStruct(out.Elem(), in.Elem())
+ return out.Interface().(Message)
+}
+
+// Merge merges src into dst.
+// Required and optional fields that are set in src will be set to that value in dst.
+// Elements of repeated fields will be appended.
+// Merge panics if src and dst are not the same type, or if dst is nil.
+func Merge(dst, src Message) {
+ in := reflect.ValueOf(src)
+ out := reflect.ValueOf(dst)
+ if out.IsNil() {
+ panic("proto: nil destination")
+ }
+ if in.Type() != out.Type() {
+ // Explicit test prior to mergeStruct so that mistyped nils will fail
+ panic("proto: type mismatch")
+ }
+ if in.IsNil() {
+ // Merging nil into non-nil is a quiet no-op
+ return
+ }
+ mergeStruct(out.Elem(), in.Elem())
+}
+
+func mergeStruct(out, in reflect.Value) {
+ for i := 0; i < in.NumField(); i++ {
+ f := in.Type().Field(i)
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ mergeAny(out.Field(i), in.Field(i))
+ }
+
+ if emIn, ok := in.Addr().Interface().(extendableProto); ok {
+ emOut := out.Addr().Interface().(extendableProto)
+ mergeExtension(emOut.ExtensionMap(), emIn.ExtensionMap())
+ }
+
+ uf := in.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return
+ }
+ uin := uf.Bytes()
+ if len(uin) > 0 {
+ out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...))
+ }
+}
+
+func mergeAny(out, in reflect.Value) {
+ if in.Type() == protoMessageType {
+ if !in.IsNil() {
+ if out.IsNil() {
+ out.Set(reflect.ValueOf(Clone(in.Interface().(Message))))
+ } else {
+ Merge(out.Interface().(Message), in.Interface().(Message))
+ }
+ }
+ return
+ }
+ switch in.Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(in)
+ case reflect.Map:
+ if in.Len() == 0 {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.MakeMap(in.Type()))
+ }
+ // For maps with value types of *T or []byte we need to deep copy each value.
+ elemKind := in.Type().Elem().Kind()
+ for _, key := range in.MapKeys() {
+ var val reflect.Value
+ switch elemKind {
+ case reflect.Ptr:
+ val = reflect.New(in.Type().Elem().Elem())
+ mergeAny(val, in.MapIndex(key))
+ case reflect.Slice:
+ val = in.MapIndex(key)
+ val = reflect.ValueOf(append([]byte{}, val.Bytes()...))
+ default:
+ val = in.MapIndex(key)
+ }
+ out.SetMapIndex(key, val)
+ }
+ case reflect.Ptr:
+ if in.IsNil() {
+ return
+ }
+ if out.IsNil() {
+ out.Set(reflect.New(in.Elem().Type()))
+ }
+ mergeAny(out.Elem(), in.Elem())
+ case reflect.Slice:
+ if in.IsNil() {
+ return
+ }
+ if in.Type().Elem().Kind() == reflect.Uint8 {
+ // []byte is a scalar bytes field, not a repeated field.
+ // Make a deep copy.
+ // Append to []byte{} instead of []byte(nil) so that we never end up
+ // with a nil result.
+ out.SetBytes(append([]byte{}, in.Bytes()...))
+ return
+ }
+ n := in.Len()
+ if out.IsNil() {
+ out.Set(reflect.MakeSlice(in.Type(), 0, n))
+ }
+ switch in.Type().Elem().Kind() {
+ case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64,
+ reflect.String, reflect.Uint32, reflect.Uint64:
+ out.Set(reflect.AppendSlice(out, in))
+ default:
+ for i := 0; i < n; i++ {
+ x := reflect.Indirect(reflect.New(in.Type().Elem()))
+ mergeAny(x, in.Index(i))
+ out.Set(reflect.Append(out, x))
+ }
+ }
+ case reflect.Struct:
+ mergeStruct(out, in)
+ default:
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to copy %v", in)
+ }
+}
+
+func mergeExtension(out, in map[int32]Extension) {
+ for extNum, eIn := range in {
+ eOut := Extension{desc: eIn.desc}
+ if eIn.value != nil {
+ v := reflect.New(reflect.TypeOf(eIn.value)).Elem()
+ mergeAny(v, reflect.ValueOf(eIn.value))
+ eOut.value = v.Interface()
+ }
+ if eIn.enc != nil {
+ eOut.enc = make([]byte, len(eIn.enc))
+ copy(eOut.enc, eIn.enc)
+ }
+
+ out[extNum] = eOut
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
new file mode 100644
index 000000000000..1ac177d216d9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/clone_test.go
@@ -0,0 +1,227 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "./testdata"
+)
+
+var cloneTestMessage = &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+}
+
+func init() {
+ ext := &pb.Ext{
+ Data: proto.String("extension"),
+ }
+ if err := proto.SetExtension(cloneTestMessage, pb.E_Ext_More, ext); err != nil {
+ panic("SetExtension: " + err.Error())
+ }
+}
+
+func TestClone(t *testing.T) {
+ m := proto.Clone(cloneTestMessage).(*pb.MyMessage)
+ if !proto.Equal(m, cloneTestMessage) {
+ t.Errorf("Clone(%v) = %v", cloneTestMessage, m)
+ }
+
+ // Verify it was a deep copy.
+ *m.Inner.Port++
+ if proto.Equal(m, cloneTestMessage) {
+ t.Error("Mutating clone changed the original")
+ }
+ // Byte fields and repeated fields should be copied.
+ if &m.Pet[0] == &cloneTestMessage.Pet[0] {
+ t.Error("Pet: repeated field not copied")
+ }
+ if &m.Others[0] == &cloneTestMessage.Others[0] {
+ t.Error("Others: repeated field not copied")
+ }
+ if &m.Others[0].Value[0] == &cloneTestMessage.Others[0].Value[0] {
+ t.Error("Others[0].Value: bytes field not copied")
+ }
+ if &m.RepBytes[0] == &cloneTestMessage.RepBytes[0] {
+ t.Error("RepBytes: repeated field not copied")
+ }
+ if &m.RepBytes[0][0] == &cloneTestMessage.RepBytes[0][0] {
+ t.Error("RepBytes[0]: bytes field not copied")
+ }
+}
+
+func TestCloneNil(t *testing.T) {
+ var m *pb.MyMessage
+ if c := proto.Clone(m); !proto.Equal(m, c) {
+ t.Errorf("Clone(%v) = %v", m, c)
+ }
+}
+
+var mergeTests = []struct {
+ src, dst, want proto.Message
+}{
+ {
+ src: &pb.MyMessage{
+ Count: proto.Int32(42),
+ },
+ dst: &pb.MyMessage{
+ Name: proto.String("Dave"),
+ },
+ want: &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ },
+ Pet: []string{"horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ dst: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("niles"),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {
+ // Explicitly test a src=nil field
+ Inner: nil,
+ },
+ },
+ },
+ want: &pb.MyMessage{
+ Inner: &pb.InnerMessage{
+ Host: proto.String("hey"),
+ Connected: proto.Bool(true),
+ Port: proto.Int32(9099),
+ },
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(31415926535),
+ },
+ {},
+ {
+ Value: []byte("some bytes"),
+ },
+ },
+ },
+ },
+ {
+ src: &pb.MyMessage{
+ RepBytes: [][]byte{[]byte("wow")},
+ },
+ dst: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham")},
+ },
+ want: &pb.MyMessage{
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(6),
+ },
+ RepBytes: [][]byte{[]byte("sham"), []byte("wow")},
+ },
+ },
+ // Check that a scalar bytes field replaces rather than appends.
+ {
+ src: &pb.OtherMessage{Value: []byte("foo")},
+ dst: &pb.OtherMessage{Value: []byte("bar")},
+ want: &pb.OtherMessage{Value: []byte("foo")},
+ },
+ {
+ src: &pb.MessageWithMap{
+ NameMapping: map[int32]string{6: "Nigel"},
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ dst: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Bruce", // should be overwritten
+ 7: "Andrew",
+ },
+ },
+ want: &pb.MessageWithMap{
+ NameMapping: map[int32]string{
+ 6: "Nigel",
+ 7: "Andrew",
+ },
+ MsgMapping: map[int64]*pb.FloatingPoint{
+ 0x4001: &pb.FloatingPoint{F: proto.Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{true: []byte("wowsa")},
+ },
+ },
+}
+
+func TestMerge(t *testing.T) {
+ for _, m := range mergeTests {
+ got := proto.Clone(m.dst)
+ proto.Merge(got, m.src)
+ if !proto.Equal(got, m.want) {
+ t.Errorf("Merge(%v, %v)\n got %v\nwant %v\n", m.dst, m.src, got, m.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
new file mode 100644
index 000000000000..88622c305a3b
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/decode.go
@@ -0,0 +1,823 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for decoding protocol buffer data to construct in-memory representations.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+)
+
+// errOverflow is returned when an integer is too large to be represented.
+var errOverflow = errors.New("proto: integer overflow")
+
+// The fundamental decoders that interpret bytes on the wire.
+// Those that take integer types all return uint64 and are
+// therefore of type valueDecoder.
+
+// DecodeVarint reads a varint-encoded integer from the slice.
+// It returns the integer and the number of bytes consumed, or
+// zero if there is not enough.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func DecodeVarint(buf []byte) (x uint64, n int) {
+ // x, n already 0
+ for shift := uint(0); shift < 64; shift += 7 {
+ if n >= len(buf) {
+ return 0, 0
+ }
+ b := uint64(buf[n])
+ n++
+ x |= (b & 0x7F) << shift
+ if (b & 0x80) == 0 {
+ return x, n
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ return 0, 0
+}
+
+// DecodeVarint reads a varint-encoded integer from the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) DecodeVarint() (x uint64, err error) {
+ // x, err already 0
+
+ i := p.index
+ l := len(p.buf)
+
+ for shift := uint(0); shift < 64; shift += 7 {
+ if i >= l {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ b := p.buf[i]
+ i++
+ x |= (uint64(b) & 0x7F) << shift
+ if b < 0x80 {
+ p.index = i
+ return
+ }
+ }
+
+ // The number is too large to represent in a 64-bit value.
+ err = errOverflow
+ return
+}
+
+// DecodeFixed64 reads a 64-bit integer from the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) DecodeFixed64() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 8
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-8])
+ x |= uint64(p.buf[i-7]) << 8
+ x |= uint64(p.buf[i-6]) << 16
+ x |= uint64(p.buf[i-5]) << 24
+ x |= uint64(p.buf[i-4]) << 32
+ x |= uint64(p.buf[i-3]) << 40
+ x |= uint64(p.buf[i-2]) << 48
+ x |= uint64(p.buf[i-1]) << 56
+ return
+}
+
+// DecodeFixed32 reads a 32-bit integer from the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) DecodeFixed32() (x uint64, err error) {
+ // x, err already 0
+ i := p.index + 4
+ if i < 0 || i > len(p.buf) {
+ err = io.ErrUnexpectedEOF
+ return
+ }
+ p.index = i
+
+ x = uint64(p.buf[i-4])
+ x |= uint64(p.buf[i-3]) << 8
+ x |= uint64(p.buf[i-2]) << 16
+ x |= uint64(p.buf[i-1]) << 24
+ return
+}
+
+// DecodeZigzag64 reads a zigzag-encoded 64-bit integer
+// from the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) DecodeZigzag64() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63)
+ return
+}
+
+// DecodeZigzag32 reads a zigzag-encoded 32-bit integer
+// from the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) DecodeZigzag32() (x uint64, err error) {
+ x, err = p.DecodeVarint()
+ if err != nil {
+ return
+ }
+ x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31))
+ return
+}
+
+// These are not ValueDecoders: they produce an array of bytes or a string.
+// bytes, embedded messages
+
+// DecodeRawBytes reads a count-delimited byte buffer from the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) {
+ n, err := p.DecodeVarint()
+ if err != nil {
+ return nil, err
+ }
+
+ nb := int(n)
+ if nb < 0 {
+ return nil, fmt.Errorf("proto: bad byte length %d", nb)
+ }
+ end := p.index + nb
+ if end < p.index || end > len(p.buf) {
+ return nil, io.ErrUnexpectedEOF
+ }
+
+ if !alloc {
+ // todo: check if can get more uses of alloc=false
+ buf = p.buf[p.index:end]
+ p.index += nb
+ return
+ }
+
+ buf = make([]byte, nb)
+ copy(buf, p.buf[p.index:])
+ p.index += nb
+ return
+}
+
+// DecodeStringBytes reads an encoded string from the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) DecodeStringBytes() (s string, err error) {
+ buf, err := p.DecodeRawBytes(false)
+ if err != nil {
+ return
+ }
+ return string(buf), nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+// If the protocol buffer has extensions, and the field matches, add it as an extension.
+// Otherwise, if the XXX_unrecognized field exists, append the skipped data there.
+func (o *Buffer) skipAndSave(t reflect.Type, tag, wire int, base structPointer, unrecField field) error {
+ oi := o.index
+
+ err := o.skip(t, tag, wire)
+ if err != nil {
+ return err
+ }
+
+ if !unrecField.IsValid() {
+ return nil
+ }
+
+ ptr := structPointer_Bytes(base, unrecField)
+
+ // Add the skipped field to struct field
+ obuf := o.buf
+
+ o.buf = *ptr
+ o.EncodeVarint(uint64(tag<<3 | wire))
+ *ptr = append(o.buf, obuf[oi:o.index]...)
+
+ o.buf = obuf
+
+ return nil
+}
+
+// Skip the next item in the buffer. Its wire type is decoded and presented as an argument.
+func (o *Buffer) skip(t reflect.Type, tag, wire int) error {
+
+ var u uint64
+ var err error
+
+ switch wire {
+ case WireVarint:
+ _, err = o.DecodeVarint()
+ case WireFixed64:
+ _, err = o.DecodeFixed64()
+ case WireBytes:
+ _, err = o.DecodeRawBytes(false)
+ case WireFixed32:
+ _, err = o.DecodeFixed32()
+ case WireStartGroup:
+ for {
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ fwire := int(u & 0x7)
+ if fwire == WireEndGroup {
+ break
+ }
+ ftag := int(u >> 3)
+ err = o.skip(t, ftag, fwire)
+ if err != nil {
+ break
+ }
+ }
+ default:
+ err = fmt.Errorf("proto: can't skip unknown wire type %d for %s", wire, t)
+ }
+ return err
+}
+
+// Unmarshaler is the interface representing objects that can
+// unmarshal themselves. The method should reset the receiver before
+// decoding starts. The argument points to data that may be
+// overwritten, so implementations should not keep references to the
+// buffer.
+type Unmarshaler interface {
+ Unmarshal([]byte) error
+}
+
+// Unmarshal parses the protocol buffer representation in buf and places the
+// decoded result in pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// Unmarshal resets pb before starting to unmarshal, so any
+// existing data in pb is always removed. Use UnmarshalMerge
+// to preserve and append to existing data.
+func Unmarshal(buf []byte, pb Message) error {
+ pb.Reset()
+ return UnmarshalMerge(buf, pb)
+}
+
+// UnmarshalMerge parses the protocol buffer representation in buf and
+// writes the decoded result to pb. If the struct underlying pb does not match
+// the data in buf, the results can be unpredictable.
+//
+// UnmarshalMerge merges into existing data in pb.
+// Most code should use Unmarshal instead.
+func UnmarshalMerge(buf []byte, pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ return u.Unmarshal(buf)
+ }
+ return NewBuffer(buf).Unmarshal(pb)
+}
+
+// Unmarshal parses the protocol buffer representation in the
+// Buffer and places the decoded result in pb. If the struct
+// underlying pb does not match the data in the buffer, the results can be
+// unpredictable.
+func (p *Buffer) Unmarshal(pb Message) error {
+ // If the object can unmarshal itself, let it.
+ if u, ok := pb.(Unmarshaler); ok {
+ err := u.Unmarshal(p.buf[p.index:])
+ p.index = len(p.buf)
+ return err
+ }
+
+ typ, base, err := getbase(pb)
+ if err != nil {
+ return err
+ }
+
+ err = p.unmarshalType(typ.Elem(), GetProperties(typ.Elem()), false, base)
+
+ if collectStats {
+ stats.Decode++
+ }
+
+ return err
+}
+
+// unmarshalType does the work of unmarshaling a structure.
+func (o *Buffer) unmarshalType(st reflect.Type, prop *StructProperties, is_group bool, base structPointer) error {
+ var state errorState
+ required, reqFields := prop.reqCount, uint64(0)
+
+ var err error
+ for err == nil && o.index < len(o.buf) {
+ oi := o.index
+ var u uint64
+ u, err = o.DecodeVarint()
+ if err != nil {
+ break
+ }
+ wire := int(u & 0x7)
+ if wire == WireEndGroup {
+ if is_group {
+ return nil // input is satisfied
+ }
+ return fmt.Errorf("proto: %s: wiretype end group for non-group", st)
+ }
+ tag := int(u >> 3)
+ if tag <= 0 {
+ return fmt.Errorf("proto: %s: illegal tag %d (wire type %d)", st, tag, wire)
+ }
+ fieldnum, ok := prop.decoderTags.get(tag)
+ if !ok {
+ // Maybe it's an extension?
+ if prop.extendable {
+ if e := structPointer_Interface(base, st).(extendableProto); isExtensionField(e, int32(tag)) {
+ if err = o.skip(st, tag, wire); err == nil {
+ ext := e.ExtensionMap()[int32(tag)] // may be missing
+ ext.enc = append(ext.enc, o.buf[oi:o.index]...)
+ e.ExtensionMap()[int32(tag)] = ext
+ }
+ continue
+ }
+ }
+ err = o.skipAndSave(st, tag, wire, base, prop.unrecField)
+ continue
+ }
+ p := prop.Prop[fieldnum]
+
+ if p.dec == nil {
+ fmt.Fprintf(os.Stderr, "proto: no protobuf decoder for %s.%s\n", st, st.Field(fieldnum).Name)
+ continue
+ }
+ dec := p.dec
+ if wire != WireStartGroup && wire != p.WireType {
+ if wire == WireBytes && p.packedDec != nil {
+ // a packable field
+ dec = p.packedDec
+ } else {
+ err = fmt.Errorf("proto: bad wiretype for field %s.%s: got wiretype %d, want %d", st, st.Field(fieldnum).Name, wire, p.WireType)
+ continue
+ }
+ }
+ decErr := dec(o, p, base)
+ if decErr != nil && !state.shouldContinue(decErr, p) {
+ err = decErr
+ }
+ if err == nil && p.Required {
+ // Successfully decoded a required field.
+ if tag <= 64 {
+ // use bitmap for fields 1-64 to catch field reuse.
+ var mask uint64 = 1 << uint64(tag-1)
+ if reqFields&mask == 0 {
+ // new required field
+ reqFields |= mask
+ required--
+ }
+ } else {
+ // This is imprecise. It can be fooled by a required field
+ // with a tag > 64 that is encoded twice; that's very rare.
+ // A fully correct implementation would require allocating
+ // a data structure, which we would like to avoid.
+ required--
+ }
+ }
+ }
+ if err == nil {
+ if is_group {
+ return io.ErrUnexpectedEOF
+ }
+ if state.err != nil {
+ return state.err
+ }
+ if required > 0 {
+ // Not enough information to determine the exact field. If we use extra
+ // CPU, we could determine the field only if the missing required field
+ // has a tag <= 64 and we check reqFields.
+ return &RequiredNotSetError{"{Unknown}"}
+ }
+ }
+ return err
+}
+
+// Individual type decoders
+// For each,
+// u is the decoded value,
+// v is a pointer to the field (pointer) in the struct
+
+// Sizes of the pools to allocate inside the Buffer.
+// The goal is modest amortization and allocation
+// on at least 16-byte boundaries.
+const (
+ boolPoolSize = 16
+ uint32PoolSize = 8
+ uint64PoolSize = 4
+)
+
+// Decode a bool.
+func (o *Buffer) dec_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ if len(o.bools) == 0 {
+ o.bools = make([]bool, boolPoolSize)
+ }
+ o.bools[0] = u != 0
+ *structPointer_Bool(base, p.field) = &o.bools[0]
+ o.bools = o.bools[1:]
+ return nil
+}
+
+func (o *Buffer) dec_proto3_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ *structPointer_BoolVal(base, p.field) = u != 0
+ return nil
+}
+
+// Decode an int32.
+func (o *Buffer) dec_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32_Set(structPointer_Word32(base, p.field), o, uint32(u))
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word32Val_Set(structPointer_Word32Val(base, p.field), uint32(u))
+ return nil
+}
+
+// Decode an int64.
+func (o *Buffer) dec_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64_Set(structPointer_Word64(base, p.field), o, u)
+ return nil
+}
+
+func (o *Buffer) dec_proto3_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ word64Val_Set(structPointer_Word64Val(base, p.field), o, u)
+ return nil
+}
+
+// Decode a string.
+func (o *Buffer) dec_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ sp := new(string)
+ *sp = s
+ *structPointer_String(base, p.field) = sp
+ return nil
+}
+
+func (o *Buffer) dec_proto3_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ *structPointer_StringVal(base, p.field) = s
+ return nil
+}
+
+// Decode a slice of bytes ([]byte).
+func (o *Buffer) dec_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ *structPointer_Bytes(base, p.field) = b
+ return nil
+}
+
+// Decode a slice of bools ([]bool).
+func (o *Buffer) dec_slice_bool(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BoolSlice(base, p.field)
+ *v = append(*v, u != 0)
+ return nil
+}
+
+// Decode a slice of bools ([]bool) in packed format.
+func (o *Buffer) dec_slice_packed_bool(p *Properties, base structPointer) error {
+ v := structPointer_BoolSlice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded bools
+
+ y := *v
+ for i := 0; i < nb; i++ {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ y = append(y, u != 0)
+ }
+
+ *v = y
+ return nil
+}
+
+// Decode a slice of int32s ([]int32).
+func (o *Buffer) dec_slice_int32(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ structPointer_Word32Slice(base, p.field).Append(uint32(u))
+ return nil
+}
+
+// Decode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) dec_slice_packed_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int32s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(uint32(u))
+ }
+ return nil
+}
+
+// Decode a slice of int64s ([]int64).
+func (o *Buffer) dec_slice_int64(p *Properties, base structPointer) error {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+
+ structPointer_Word64Slice(base, p.field).Append(u)
+ return nil
+}
+
+// Decode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) dec_slice_packed_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Slice(base, p.field)
+
+ nn, err := o.DecodeVarint()
+ if err != nil {
+ return err
+ }
+ nb := int(nn) // number of bytes of encoded int64s
+
+ fin := o.index + nb
+ if fin < o.index {
+ return errOverflow
+ }
+ for o.index < fin {
+ u, err := p.valDec(o)
+ if err != nil {
+ return err
+ }
+ v.Append(u)
+ }
+ return nil
+}
+
+// Decode a slice of strings ([]string).
+func (o *Buffer) dec_slice_string(p *Properties, base structPointer) error {
+ s, err := o.DecodeStringBytes()
+ if err != nil {
+ return err
+ }
+ v := structPointer_StringSlice(base, p.field)
+ *v = append(*v, s)
+ return nil
+}
+
+// Decode a slice of slice of bytes ([][]byte).
+func (o *Buffer) dec_slice_slice_byte(p *Properties, base structPointer) error {
+ b, err := o.DecodeRawBytes(true)
+ if err != nil {
+ return err
+ }
+ v := structPointer_BytesSlice(base, p.field)
+ *v = append(*v, b)
+ return nil
+}
+
+// Decode a map field.
+func (o *Buffer) dec_new_map(p *Properties, base structPointer) error {
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+ oi := o.index // index at the end of this map entry
+ o.index -= len(raw) // move buffer back to start of map entry
+
+ mptr := structPointer_Map(base, p.field, p.mtype) // *map[K]V
+ if mptr.Elem().IsNil() {
+ mptr.Elem().Set(reflect.MakeMap(mptr.Type().Elem()))
+ }
+ v := mptr.Elem() // map[K]V
+
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // See enc_new_map for why.
+ keyptr := reflect.New(reflect.PtrTo(p.mtype.Key())).Elem() // addressable *K
+ keybase := toStructPointer(keyptr.Addr()) // **K
+
+ var valbase structPointer
+ var valptr reflect.Value
+ switch p.mtype.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valptr = reflect.ValueOf(&dummy) // *[]byte
+ valbase = toStructPointer(valptr) // *[]byte
+ case reflect.Ptr:
+ // message; valptr is **Msg; need to allocate the intermediate pointer
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valptr.Set(reflect.New(valptr.Type().Elem()))
+ valbase = toStructPointer(valptr)
+ default:
+ // everything else
+ valptr = reflect.New(reflect.PtrTo(p.mtype.Elem())).Elem() // addressable *V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+
+ // Decode.
+ // This parses a restricted wire format, namely the encoding of a message
+ // with two fields. See enc_new_map for the format.
+ for o.index < oi {
+ // tagcode for key and value properties are always a single byte
+ // because they have tags 1 and 2.
+ tagcode := o.buf[o.index]
+ o.index++
+ switch tagcode {
+ case p.mkeyprop.tagcode[0]:
+ if err := p.mkeyprop.dec(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ case p.mvalprop.tagcode[0]:
+ if err := p.mvalprop.dec(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ default:
+ // TODO: Should we silently skip this instead?
+ return fmt.Errorf("proto: bad map data tag %d", raw[0])
+ }
+ }
+
+ v.SetMapIndex(keyptr.Elem(), valptr.Elem())
+ return nil
+}
+
+// Decode a group.
+func (o *Buffer) dec_struct_group(p *Properties, base structPointer) error {
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+ return o.unmarshalType(p.stype, p.sprop, true, bas)
+}
+
+// Decode an embedded message.
+func (o *Buffer) dec_struct_message(p *Properties, base structPointer) (err error) {
+ raw, e := o.DecodeRawBytes(false)
+ if e != nil {
+ return e
+ }
+
+ bas := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(bas) {
+ // allocate new nested message
+ bas = toStructPointer(reflect.New(p.stype))
+ structPointer_SetStructPointer(base, p.field, bas)
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := structPointer_Interface(bas, p.stype)
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, false, bas)
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
+
+// Decode a slice of embedded messages.
+func (o *Buffer) dec_slice_struct_message(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, false, base)
+}
+
+// Decode a slice of embedded groups.
+func (o *Buffer) dec_slice_struct_group(p *Properties, base structPointer) error {
+ return o.dec_slice_struct(p, true, base)
+}
+
+// Decode a slice of structs ([]*struct).
+func (o *Buffer) dec_slice_struct(p *Properties, is_group bool, base structPointer) error {
+ v := reflect.New(p.stype)
+ bas := toStructPointer(v)
+ structPointer_StructPointerSlice(base, p.field).Append(bas)
+
+ if is_group {
+ err := o.unmarshalType(p.stype, p.sprop, is_group, bas)
+ return err
+ }
+
+ raw, err := o.DecodeRawBytes(false)
+ if err != nil {
+ return err
+ }
+
+ // If the object can unmarshal itself, let it.
+ if p.isUnmarshaler {
+ iv := v.Interface()
+ return iv.(Unmarshaler).Unmarshal(raw)
+ }
+
+ obuf := o.buf
+ oi := o.index
+ o.buf = raw
+ o.index = 0
+
+ err = o.unmarshalType(p.stype, p.sprop, is_group, bas)
+
+ o.buf = obuf
+ o.index = oi
+
+ return err
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
new file mode 100644
index 000000000000..1512d605b2dd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/encode.go
@@ -0,0 +1,1283 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// RequiredNotSetError is the error returned if Marshal is called with
+// a protocol buffer struct whose required fields have not
+// all been initialized. It is also the error returned if Unmarshal is
+// called with an encoded protocol buffer that does not include all the
+// required fields.
+//
+// When printed, RequiredNotSetError reports the first unset required field in a
+// message. If the field cannot be precisely determined, it is reported as
+// "{Unknown}".
+type RequiredNotSetError struct {
+ field string
+}
+
+func (e *RequiredNotSetError) Error() string {
+ return fmt.Sprintf("proto: required field %q not set", e.field)
+}
+
+var (
+ // ErrRepeatedHasNil is the error returned if Marshal is called with
+ // a struct with a repeated field containing a nil element.
+ ErrRepeatedHasNil = errors.New("proto: repeated field has nil element")
+
+ // ErrNil is the error returned if Marshal is called with nil.
+ ErrNil = errors.New("proto: Marshal called with nil")
+)
+
+// The fundamental encoders that put bytes on the wire.
+// Those that take integer types all accept uint64 and are
+// therefore of type valueEncoder.
+
+const maxVarintBytes = 10 // maximum length of a varint
+
+// EncodeVarint returns the varint encoding of x.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+// Not used by the package itself, but helpful to clients
+// wishing to use the same encoding.
+func EncodeVarint(x uint64) []byte {
+ var buf [maxVarintBytes]byte
+ var n int
+ for n = 0; x > 127; n++ {
+ buf[n] = 0x80 | uint8(x&0x7F) // low 7 bits with the continuation bit set
+ x >>= 7
+ }
+ buf[n] = uint8(x) // final byte: continuation bit clear
+ n++
+ return buf[0:n]
+}
+
+// EncodeVarint writes a varint-encoded integer to the Buffer.
+// This is the format for the
+// int32, int64, uint32, uint64, bool, and enum
+// protocol buffer types.
+func (p *Buffer) EncodeVarint(x uint64) error {
+ for x >= 1<<7 {
+ p.buf = append(p.buf, uint8(x&0x7f|0x80)) // low 7 bits with the continuation bit set
+ x >>= 7
+ }
+ p.buf = append(p.buf, uint8(x)) // final byte: continuation bit clear
+ return nil
+}
+
+func sizeVarint(x uint64) (n int) {
+ for {
+ n++
+ x >>= 7
+ if x == 0 {
+ break
+ }
+ }
+ return n
+}
+
+// EncodeFixed64 writes a 64-bit integer to the Buffer.
+// This is the format for the
+// fixed64, sfixed64, and double protocol buffer types.
+func (p *Buffer) EncodeFixed64(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24),
+ uint8(x>>32),
+ uint8(x>>40),
+ uint8(x>>48),
+ uint8(x>>56))
+ return nil
+}
+
+func sizeFixed64(x uint64) int {
+ return 8
+}
+
+// EncodeFixed32 writes a 32-bit integer to the Buffer.
+// This is the format for the
+// fixed32, sfixed32, and float protocol buffer types.
+func (p *Buffer) EncodeFixed32(x uint64) error {
+ p.buf = append(p.buf,
+ uint8(x),
+ uint8(x>>8),
+ uint8(x>>16),
+ uint8(x>>24))
+ return nil
+}
+
+func sizeFixed32(x uint64) int {
+ return 4
+}
+
+// EncodeZigzag64 writes a zigzag-encoded 64-bit integer
+// to the Buffer.
+// This is the format used for the sint64 protocol buffer type.
+func (p *Buffer) EncodeZigzag64(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+func sizeZigzag64(x uint64) int {
+ return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63))))
+}
+
+// EncodeZigzag32 writes a zigzag-encoded 32-bit integer
+// to the Buffer.
+// This is the format used for the sint32 protocol buffer type.
+func (p *Buffer) EncodeZigzag32(x uint64) error {
+ // use signed number to get arithmetic right shift.
+ return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+func sizeZigzag32(x uint64) int {
+ return sizeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31))))
+}
+
+// EncodeRawBytes writes a count-delimited byte buffer to the Buffer.
+// This is the format used for the bytes protocol buffer
+// type and for embedded messages.
+func (p *Buffer) EncodeRawBytes(b []byte) error {
+ p.EncodeVarint(uint64(len(b)))
+ p.buf = append(p.buf, b...)
+ return nil
+}
+
+func sizeRawBytes(b []byte) int {
+ return sizeVarint(uint64(len(b))) +
+ len(b)
+}
+
+// EncodeStringBytes writes an encoded string to the Buffer.
+// This is the format used for the proto2 string type.
+func (p *Buffer) EncodeStringBytes(s string) error {
+ p.EncodeVarint(uint64(len(s)))
+ p.buf = append(p.buf, s...)
+ return nil
+}
+
+func sizeStringBytes(s string) int {
+ return sizeVarint(uint64(len(s))) +
+ len(s)
+}
+
+// Marshaler is the interface representing objects that can marshal themselves.
+type Marshaler interface {
+ Marshal() ([]byte, error)
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, returning the data.
+func Marshal(pb Message) ([]byte, error) {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ return m.Marshal()
+ }
+ p := NewBuffer(nil)
+ err := p.Marshal(pb)
+ var state errorState
+ if err != nil && !state.shouldContinue(err, nil) { // RequiredNotSetError is tolerated; any other error aborts
+ return nil, err
+ }
+ if p.buf == nil && err == nil {
+ // Return a non-nil slice on success.
+ return []byte{}, nil
+ }
+ return p.buf, err // may return both data and a RequiredNotSetError
+}
+
+// Marshal takes the protocol buffer
+// and encodes it into the wire format, writing the result to the
+// Buffer.
+func (p *Buffer) Marshal(pb Message) error {
+ // Can the object marshal itself?
+ if m, ok := pb.(Marshaler); ok {
+ data, err := m.Marshal()
+ if err != nil {
+ return err
+ }
+ p.buf = append(p.buf, data...) // append, not overwrite: the Buffer may already hold data
+ return nil
+ }
+
+ t, base, err := getbase(pb) // t is the pointer-to-struct type, base addresses the struct data
+ if structPointer_IsNil(base) {
+ return ErrNil
+ }
+ if err == nil {
+ err = p.enc_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Encode++ // bookkeeping only; guarded by the collectStats flag
+ }
+
+ return err
+}
+
+// Size returns the encoded size of a protocol buffer.
+func Size(pb Message) (n int) {
+ // Can the object marshal itself? If so, Size is slow.
+ // TODO: add Size to Marshaler, or add a Sizer interface.
+ if m, ok := pb.(Marshaler); ok {
+ b, _ := m.Marshal()
+ return len(b)
+ }
+
+ t, base, err := getbase(pb) // t is the pointer-to-struct type, base addresses the struct data
+ if structPointer_IsNil(base) {
+ return 0
+ }
+ if err == nil {
+ n = size_struct(GetProperties(t.Elem()), base)
+ }
+
+ if collectStats {
+ stats.Size++ // bookkeeping only; guarded by the collectStats flag
+ }
+
+ return
+}
+
+// Individual type encoders.
+
+// Encode a bool.
+func (o *Buffer) enc_bool(p *Properties, base structPointer) error {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := 0
+ if *v {
+ x = 1
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_bool(p *Properties, base structPointer) error {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, 1)
+ return nil
+}
+
+func size_bool(p *Properties, base structPointer) int {
+ v := *structPointer_Bool(base, p.field)
+ if v == nil {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+func size_proto3_bool(p *Properties, base structPointer) int {
+ v := *structPointer_BoolVal(base, p.field)
+ if !v {
+ return 0
+ }
+ return len(p.tagcode) + 1 // each bool takes exactly one byte
+}
+
+// Encode an int32.
+func (o *Buffer) enc_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := int32(word32_Get(v)) // permit sign extension to use full 64-bit range
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_int32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := int32(word32Val_Get(v)) // permit sign extension to use full 64-bit range
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode a uint32.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return ErrNil
+ }
+ x := word32_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func (o *Buffer) enc_proto3_uint32(p *Properties, base structPointer) error {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, uint64(x))
+ return nil
+}
+
+func size_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32(base, p.field)
+ if word32_IsNil(v) {
+ return 0
+ }
+ x := word32_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+func size_proto3_uint32(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word32Val(base, p.field)
+ x := word32Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(uint64(x))
+ return
+}
+
+// Encode an int64.
+func (o *Buffer) enc_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return ErrNil
+ }
+ x := word64_Get(v)
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_int64(p *Properties, base structPointer) error {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, x)
+ return nil
+}
+
+func size_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64(base, p.field)
+ if word64_IsNil(v) {
+ return 0
+ }
+ x := word64_Get(v)
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+func size_proto3_int64(p *Properties, base structPointer) (n int) {
+ v := structPointer_Word64Val(base, p.field)
+ x := word64Val_Get(v)
+ if x == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += p.valSize(x)
+ return
+}
+
+// Encode a string.
+func (o *Buffer) enc_string(p *Properties, base structPointer) error {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return ErrNil
+ }
+ x := *v
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(x)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_string(p *Properties, base structPointer) error {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(v)
+ return nil
+}
+
+func size_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_String(base, p.field)
+ if v == nil {
+ return 0
+ }
+ x := *v
+ n += len(p.tagcode)
+ n += sizeStringBytes(x)
+ return
+}
+
+func size_proto3_string(p *Properties, base structPointer) (n int) {
+ v := *structPointer_StringVal(base, p.field)
+ if v == "" {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeStringBytes(v)
+ return
+}
+
+// All protocol buffer fields are nillable, but be careful.
+func isNil(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ return v.IsNil() // only these kinds support IsNil; calling it on others panics
+ }
+ return false
+}
+
+// Encode a message struct.
+func (o *Buffer) enc_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return ErrNil // unset optional message: emit nothing
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) { // tolerate RequiredNotSetError
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ return nil
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ return o.enc_len_struct(p.sprop, structp, &state) // length-delimited recursive encode
+}
+
+func size_struct_message(p *Properties, base structPointer) int {
+ structp := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(structp) {
+ return 0
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n0 := len(p.tagcode)
+ n1 := sizeRawBytes(data)
+ return n0 + n1
+ }
+
+ n0 := len(p.tagcode)
+ n1 := size_struct(p.sprop, structp)
+ n2 := sizeVarint(uint64(n1)) // size of encoded length
+ return n0 + n1 + n2
+}
+
+// Encode a group struct.
+func (o *Buffer) enc_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return ErrNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ err := o.enc_struct(p.sprop, b)
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return state.err
+}
+
+func size_struct_group(p *Properties, base structPointer) (n int) {
+ b := structPointer_GetStructPointer(base, p.field)
+ if structPointer_IsNil(b) {
+ return 0
+ }
+
+ n += sizeVarint(uint64((p.Tag << 3) | WireStartGroup))
+ n += size_struct(p.sprop, b)
+ n += sizeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ return
+}
+
+// Encode a slice of bools ([]bool).
+func (o *Buffer) enc_slice_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ for _, x := range s {
+ o.buf = append(o.buf, p.tagcode...)
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_bool(p *Properties, base structPointer) int {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ return l * (len(p.tagcode) + 1) // each bool takes exactly one byte
+}
+
+// Encode a slice of bools ([]bool) in packed format.
+func (o *Buffer) enc_slice_packed_bool(p *Properties, base structPointer) error {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(l)) // each bool takes exactly one byte
+ for _, x := range s {
+ v := uint64(0)
+ if x {
+ v = 1
+ }
+ p.valEnc(o, v)
+ }
+ return nil
+}
+
+func size_slice_packed_bool(p *Properties, base structPointer) (n int) {
+ s := *structPointer_BoolSlice(base, p.field)
+ l := len(s)
+ if l == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(l))
+ n += l // each bool takes exactly one byte
+ return
+}
+
+// Encode a slice of bytes ([]byte).
+func (o *Buffer) enc_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func (o *Buffer) enc_proto3_slice_byte(p *Properties, base structPointer) error {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return ErrNil
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(s)
+ return nil
+}
+
+func size_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if s == nil {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+func size_proto3_slice_byte(p *Properties, base structPointer) (n int) {
+ s := *structPointer_Bytes(base, p.field)
+ if len(s) == 0 {
+ return 0
+ }
+ n += len(p.tagcode)
+ n += sizeRawBytes(s)
+ return
+}
+
+// Encode a slice of int32s ([]int32).
+func (o *Buffer) enc_slice_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of int32s ([]int32) in packed format.
+func (o *Buffer) enc_slice_packed_int32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ p.valEnc(buf, uint64(x))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ x := int32(s.Index(i)) // permit sign extension to use full 64-bit range
+ bufSize += p.valSize(uint64(x))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of uint32s ([]uint32).
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ x := s.Index(i)
+ p.valEnc(o, uint64(x))
+ }
+ return nil
+}
+
+func size_slice_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ x := s.Index(i)
+ n += p.valSize(uint64(x))
+ }
+ return
+}
+
+// Encode a slice of uint32s ([]uint32) in packed format.
+// Exactly the same as int32, except for no sign extension.
+func (o *Buffer) enc_slice_packed_uint32(p *Properties, base structPointer) error {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, uint64(s.Index(i)))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_uint32(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word32Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(uint64(s.Index(i)))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of int64s ([]int64).
+func (o *Buffer) enc_slice_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ p.valEnc(o, s.Index(i))
+ }
+ return nil
+}
+
+func size_slice_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ for i := 0; i < l; i++ {
+ n += len(p.tagcode)
+ n += p.valSize(s.Index(i))
+ }
+ return
+}
+
+// Encode a slice of int64s ([]int64) in packed format.
+func (o *Buffer) enc_slice_packed_int64(p *Properties, base structPointer) error {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return ErrNil
+ }
+ // TODO: Reuse a Buffer.
+ buf := NewBuffer(nil)
+ for i := 0; i < l; i++ {
+ p.valEnc(buf, s.Index(i))
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeVarint(uint64(len(buf.buf)))
+ o.buf = append(o.buf, buf.buf...)
+ return nil
+}
+
+func size_slice_packed_int64(p *Properties, base structPointer) (n int) {
+ s := structPointer_Word64Slice(base, p.field)
+ l := s.Len()
+ if l == 0 {
+ return 0
+ }
+ var bufSize int
+ for i := 0; i < l; i++ {
+ bufSize += p.valSize(s.Index(i))
+ }
+
+ n += len(p.tagcode)
+ n += sizeVarint(uint64(bufSize))
+ n += bufSize
+ return
+}
+
+// Encode a slice of slice of bytes ([][]byte).
+func (o *Buffer) enc_slice_slice_byte(p *Properties, base structPointer) error {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return ErrNil
+ }
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_slice_byte(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_BytesSlice(base, p.field)
+ l := len(ss)
+ if l == 0 {
+ return 0
+ }
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeRawBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of strings ([]string).
+func (o *Buffer) enc_slice_string(p *Properties, base structPointer) error {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ for i := 0; i < l; i++ {
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeStringBytes(ss[i])
+ }
+ return nil
+}
+
+func size_slice_string(p *Properties, base structPointer) (n int) {
+ ss := *structPointer_StringSlice(base, p.field)
+ l := len(ss)
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ n += sizeStringBytes(ss[i])
+ }
+ return
+}
+
+// Encode a slice of message structs ([]*struct).
+func (o *Buffer) enc_slice_struct_message(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return ErrRepeatedHasNil
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, err := m.Marshal()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ o.buf = append(o.buf, p.tagcode...)
+ o.EncodeRawBytes(data)
+ continue
+ }
+
+ o.buf = append(o.buf, p.tagcode...)
+ err := o.enc_len_struct(p.sprop, structp, &state)
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return ErrRepeatedHasNil
+ }
+ return err
+ }
+ }
+ return state.err
+}
+
+func size_slice_struct_message(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+ n += l * len(p.tagcode)
+ for i := 0; i < l; i++ {
+ structp := s.Index(i)
+ if structPointer_IsNil(structp) {
+ return // return the size up to this point
+ }
+
+ // Can the object marshal itself?
+ if p.isMarshaler {
+ m := structPointer_Interface(structp, p.stype).(Marshaler)
+ data, _ := m.Marshal()
+ n += len(p.tagcode)
+ n += sizeRawBytes(data)
+ continue
+ }
+
+ n0 := size_struct(p.sprop, structp)
+ n1 := sizeVarint(uint64(n0)) // size of encoded length
+ n += n0 + n1
+ }
+ return
+}
+
+// Encode a slice of group structs ([]*struct).
+func (o *Buffer) enc_slice_struct_group(p *Properties, base structPointer) error {
+ var state errorState
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return ErrRepeatedHasNil
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireStartGroup))
+
+ err := o.enc_struct(p.sprop, b)
+
+ if err != nil && !state.shouldContinue(err, nil) {
+ if err == ErrNil {
+ return ErrRepeatedHasNil
+ }
+ return err
+ }
+
+ o.EncodeVarint(uint64((p.Tag << 3) | WireEndGroup))
+ }
+ return state.err
+}
+
+func size_slice_struct_group(p *Properties, base structPointer) (n int) {
+ s := structPointer_StructPointerSlice(base, p.field)
+ l := s.Len()
+
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireStartGroup))
+ n += l * sizeVarint(uint64((p.Tag<<3)|WireEndGroup))
+ for i := 0; i < l; i++ {
+ b := s.Index(i)
+ if structPointer_IsNil(b) {
+ return // return size up to this point
+ }
+
+ n += size_struct(p.sprop, b)
+ }
+ return
+}
+
+// Encode an extension map.
+func (o *Buffer) enc_map(p *Properties, base structPointer) error {
+ v := *structPointer_ExtMap(base, p.field)
+ if err := encodeExtensionMap(v); err != nil {
+ return err
+ }
+ // Fast-path for common cases: zero or one extensions.
+ if len(v) <= 1 {
+ for _, e := range v {
+ o.buf = append(o.buf, e.enc...)
+ }
+ return nil
+ }
+
+ // Sort keys to provide a deterministic encoding.
+ keys := make([]int, 0, len(v))
+ for k := range v {
+ keys = append(keys, int(k))
+ }
+ sort.Ints(keys)
+
+ for _, k := range keys {
+ o.buf = append(o.buf, v[int32(k)].enc...)
+ }
+ return nil
+}
+
+func size_map(p *Properties, base structPointer) int {
+ v := *structPointer_ExtMap(base, p.field)
+ return sizeExtensionMap(v)
+}
+
+// Encode a map field.
+func (o *Buffer) enc_new_map(p *Properties, base structPointer) error {
+ var state errorState // XXX: or do we need to plumb this through?
+
+ /*
+ A map defined as
+ map map_field = N;
+ is encoded in the same way as
+ message MapFieldEntry {
+ key_type key = 1;
+ value_type value = 2;
+ }
+ repeated MapFieldEntry map_field = N;
+ */
+
+ v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+ if v.Len() == 0 {
+ return nil
+ }
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ enc := func() error {
+ if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil {
+ return err
+ }
+ if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil {
+ return err
+ }
+ return nil
+ }
+
+ keys := v.MapKeys()
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := v.MapIndex(key)
+
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ o.buf = append(o.buf, p.tagcode...)
+ if err := o.enc_len_thing(enc, &state); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+func size_new_map(p *Properties, base structPointer) int {
+ v := structPointer_Map(base, p.field, p.mtype).Elem() // map[K]V
+
+ keycopy, valcopy, keybase, valbase := mapEncodeScratch(p.mtype)
+
+ n := 0
+ for _, key := range v.MapKeys() {
+ val := v.MapIndex(key)
+ keycopy.Set(key)
+ valcopy.Set(val)
+
+ // Tag codes are two bytes per map entry.
+ n += 2
+ n += p.mkeyprop.size(p.mkeyprop, keybase)
+ n += p.mvalprop.size(p.mvalprop, valbase)
+ }
+ return n
+}
+
+// mapEncodeScratch returns a new reflect.Value matching the map's value type,
+// and a structPointer suitable for passing to an encoder or sizer.
+func mapEncodeScratch(mapType reflect.Type) (keycopy, valcopy reflect.Value, keybase, valbase structPointer) {
+ // Prepare addressable doubly-indirect placeholders for the key and value types.
+ // This is needed because the element-type encoders expect **T, but the map iteration produces T.
+
+ keycopy = reflect.New(mapType.Key()).Elem() // addressable K
+ keyptr := reflect.New(reflect.PtrTo(keycopy.Type())).Elem() // addressable *K
+ keyptr.Set(keycopy.Addr()) // point the *K at the scratch K
+ keybase = toStructPointer(keyptr.Addr()) // **K
+
+ // Value types are more varied and require special handling.
+ switch mapType.Elem().Kind() {
+ case reflect.Slice:
+ // []byte
+ var dummy []byte
+ valcopy = reflect.ValueOf(&dummy).Elem() // addressable []byte
+ valbase = toStructPointer(valcopy.Addr())
+ case reflect.Ptr:
+ // message; the generated field type is map[K]*Msg (so V is *Msg),
+ // so we only need one level of indirection.
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valbase = toStructPointer(valcopy.Addr())
+ default:
+ // everything else
+ valcopy = reflect.New(mapType.Elem()).Elem() // addressable V
+ valptr := reflect.New(reflect.PtrTo(valcopy.Type())).Elem() // addressable *V
+ valptr.Set(valcopy.Addr()) // point the *V at the scratch V
+ valbase = toStructPointer(valptr.Addr()) // **V
+ }
+ return
+}
+
+// Encode a struct.
+func (o *Buffer) enc_struct(prop *StructProperties, base structPointer) error {
+ var state errorState
+ // Encode fields in tag order so that decoders may use optimizations
+ // that depend on the ordering.
+ // https://developers.google.com/protocol-buffers/docs/encoding#order
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.enc != nil {
+ err := p.enc(o, p, base)
+ if err != nil {
+ if err == ErrNil {
+ if p.Required && state.err == nil {
+ state.err = &RequiredNotSetError{p.Name} // remember only the first missing required field
+ }
+ } else if !state.shouldContinue(err, p) {
+ return err
+ }
+ }
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ if len(v) > 0 {
+ o.buf = append(o.buf, v...) // XXX_unrecognized bytes are already wire-encoded
+ }
+ }
+
+ return state.err
+}
+
+func size_struct(prop *StructProperties, base structPointer) (n int) {
+ for _, i := range prop.order {
+ p := prop.Prop[i]
+ if p.size != nil {
+ n += p.size(p, base)
+ }
+ }
+
+ // Add unrecognized fields at the end.
+ if prop.unrecField.IsValid() {
+ v := *structPointer_Bytes(base, prop.unrecField)
+ n += len(v)
+ }
+
+ return
+}
+
+var zeroes [20]byte // longer than any conceivable sizeVarint
+
+// Encode a struct, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_struct(prop *StructProperties, base structPointer, state *errorState) error {
+ return o.enc_len_thing(func() error { return o.enc_struct(prop, base) }, state)
+}
+
+// Encode something, preceded by its encoded length (as a varint).
+func (o *Buffer) enc_len_thing(enc func() error, state *errorState) error {
+ iLen := len(o.buf)
+ o.buf = append(o.buf, 0, 0, 0, 0) // reserve four bytes for length
+ iMsg := len(o.buf)
+ err := enc()
+ if err != nil && !state.shouldContinue(err, nil) {
+ return err
+ }
+ lMsg := len(o.buf) - iMsg
+ lLen := sizeVarint(uint64(lMsg)) // bytes actually needed for the length varint
+ switch x := lLen - (iMsg - iLen); {
+ case x > 0: // actual length is x bytes larger than the space we reserved
+ // Move msg x bytes right.
+ o.buf = append(o.buf, zeroes[:x]...)
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ case x < 0: // actual length is x bytes smaller than the space we reserved
+ // Move msg x bytes left.
+ copy(o.buf[iMsg+x:], o.buf[iMsg:iMsg+lMsg])
+ o.buf = o.buf[:len(o.buf)+x] // x is negative
+ }
+ // Encode the length in the reserved space.
+ o.buf = o.buf[:iLen]
+ o.EncodeVarint(uint64(lMsg))
+ o.buf = o.buf[:len(o.buf)+lMsg] // re-extend past the message that was shifted into place
+ return state.err
+}
+
+// errorState maintains the first error that occurs and updates that error
+// with additional context.
+type errorState struct {
+ err error
+}
+
+// shouldContinue reports whether encoding should continue upon encountering the
+// given error. If the error is RequiredNotSetError, shouldContinue returns true
+// and, if this is the first appearance of that error, remembers it for future
+// reporting.
+//
+// If prop is not nil, it may update any error with additional context about the
+// field with the error.
+func (s *errorState) shouldContinue(err error, prop *Properties) bool {
+ // Ignore unset required fields.
+ reqNotSet, ok := err.(*RequiredNotSetError)
+ if !ok {
+ return false // any error other than RequiredNotSetError is fatal
+ }
+ if s.err == nil {
+ if prop != nil {
+ err = &RequiredNotSetError{prop.Name + "." + reqNotSet.field} // qualify with the enclosing field name
+ }
+ s.err = err
+ }
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
new file mode 100644
index 000000000000..d8673a3e97ae
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal.go
@@ -0,0 +1,256 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Protocol buffer comparison.
+// TODO: MessageSet.
+
+package proto
+
+import (
+ "bytes"
+ "log"
+ "reflect"
+ "strings"
+)
+
+/*
+Equal returns true iff protocol buffers a and b are equal.
+The arguments must both be pointers to protocol buffer structs.
+
+Equality is defined in this way:
+ - Two messages are equal iff they are the same type,
+ corresponding fields are equal, unknown field sets
+ are equal, and extensions sets are equal.
+ - Two set scalar fields are equal iff their values are equal.
+ If the fields are of a floating-point type, remember that
+ NaN != x for all x, including NaN.
+ - Two repeated fields are equal iff their lengths are the same,
+ and their corresponding elements are equal (a "bytes" field,
+ although represented by []byte, is not a repeated field)
+ - Two unset fields are equal.
+ - Two unknown field sets are equal if their current
+ encoded state is equal.
+ - Two extension sets are equal iff they have corresponding
+ elements that are pairwise equal.
+ - Every other combination of things are not equal.
+
+The return value is undefined if a and b are not protocol buffers.
+*/
+func Equal(a, b Message) bool {
+ if a == nil || b == nil {
+ // Only nil equals nil; nil vs. non-nil is unequal.
+ return a == b
+ }
+ v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b)
+ if v1.Type() != v2.Type() {
+ return false
+ }
+ if v1.Kind() == reflect.Ptr {
+ // Typed nil pointers: equal only if both are nil.
+ if v1.IsNil() {
+ return v2.IsNil()
+ }
+ if v2.IsNil() {
+ return false
+ }
+ v1, v2 = v1.Elem(), v2.Elem()
+ }
+ if v1.Kind() != reflect.Struct {
+ // Not a (pointer to) struct, so not a protocol buffer.
+ return false
+ }
+ return equalStruct(v1, v2)
+}
+
+// v1 and v2 are known to have the same type.
+// equalStruct compares generated-struct values field by field, then compares
+// the extension map and the unrecognized-bytes field if present.
+func equalStruct(v1, v2 reflect.Value) bool {
+ for i := 0; i < v1.NumField(); i++ {
+ f := v1.Type().Field(i)
+ // XXX_-prefixed bookkeeping fields are handled separately below.
+ if strings.HasPrefix(f.Name, "XXX_") {
+ continue
+ }
+ f1, f2 := v1.Field(i), v2.Field(i)
+ if f.Type.Kind() == reflect.Ptr {
+ if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 {
+ // both unset
+ continue
+ } else if n1 != n2 {
+ // set/unset mismatch
+ return false
+ }
+ b1, ok := f1.Interface().(raw)
+ if ok {
+ b2 := f2.Interface().(raw)
+ // RawMessage
+ if !bytes.Equal(b1.Bytes(), b2.Bytes()) {
+ return false
+ }
+ continue
+ }
+ // Compare the pointed-to values.
+ f1, f2 = f1.Elem(), f2.Elem()
+ }
+ if !equalAny(f1, f2) {
+ return false
+ }
+ }
+
+ // Extension maps, when the type is extendable.
+ if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() {
+ em2 := v2.FieldByName("XXX_extensions")
+ if !equalExtensions(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) {
+ return false
+ }
+ }
+
+ // Unknown fields are compared by their current encoded bytes.
+ uf := v1.FieldByName("XXX_unrecognized")
+ if !uf.IsValid() {
+ return true
+ }
+
+ u1 := uf.Bytes()
+ u2 := v2.FieldByName("XXX_unrecognized").Bytes()
+ if !bytes.Equal(u1, u2) {
+ return false
+ }
+
+ return true
+}
+
+// v1 and v2 are known to have the same type.
+// equalAny compares two values of any field kind that can appear in a
+// generated struct (scalars, pointers, slices, maps, nested structs).
+func equalAny(v1, v2 reflect.Value) bool {
+ if v1.Type() == protoMessageType {
+ // Values stored as the Message interface: recurse through Equal
+ // so nil handling stays consistent.
+ m1, _ := v1.Interface().(Message)
+ m2, _ := v2.Interface().(Message)
+ return Equal(m1, m2)
+ }
+ switch v1.Kind() {
+ case reflect.Bool:
+ return v1.Bool() == v2.Bool()
+ case reflect.Float32, reflect.Float64:
+ // Note: NaN != NaN here, per the documented Equal contract.
+ return v1.Float() == v2.Float()
+ case reflect.Int32, reflect.Int64:
+ return v1.Int() == v2.Int()
+ case reflect.Map:
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for _, key := range v1.MapKeys() {
+ val2 := v2.MapIndex(key)
+ if !val2.IsValid() {
+ // This key was not found in the second map.
+ return false
+ }
+ if !equalAny(v1.MapIndex(key), val2) {
+ return false
+ }
+ }
+ return true
+ case reflect.Ptr:
+ return equalAny(v1.Elem(), v2.Elem())
+ case reflect.Slice:
+ if v1.Type().Elem().Kind() == reflect.Uint8 {
+ // short circuit: []byte
+ // A "bytes" field is a scalar, so nil and empty differ here.
+ if v1.IsNil() != v2.IsNil() {
+ return false
+ }
+ return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte))
+ }
+
+ // Repeated field: elementwise comparison; nil equals empty.
+ if v1.Len() != v2.Len() {
+ return false
+ }
+ for i := 0; i < v1.Len(); i++ {
+ if !equalAny(v1.Index(i), v2.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.String:
+ return v1.Interface().(string) == v2.Interface().(string)
+ case reflect.Struct:
+ return equalStruct(v1, v2)
+ case reflect.Uint32, reflect.Uint64:
+ return v1.Uint() == v2.Uint()
+ }
+
+ // unknown type, so not a protocol buffer
+ log.Printf("proto: don't know how to compare %v", v1)
+ return false
+}
+
+// base is the struct type that the extensions are based on.
+// em1 and em2 are extension maps.
+// Extensions present only in encoded form are decoded (via the registered
+// descriptor) before comparing, so encoded and decoded forms compare equal.
+func equalExtensions(base reflect.Type, em1, em2 map[int32]Extension) bool {
+ if len(em1) != len(em2) {
+ return false
+ }
+
+ for extNum, e1 := range em1 {
+ e2, ok := em2[extNum]
+ if !ok {
+ return false
+ }
+
+ m1, m2 := e1.value, e2.value
+
+ if m1 != nil && m2 != nil {
+ // Both are unencoded.
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ continue
+ }
+
+ // At least one is encoded. To do a semantically correct comparison
+ // we need to unmarshal them first.
+ var desc *ExtensionDesc
+ if m := extensionMaps[base]; m != nil {
+ desc = m[extNum]
+ }
+ if desc == nil {
+ // No registered descriptor: cannot decode, so skip rather
+ // than report a spurious inequality.
+ log.Printf("proto: don't know how to compare extension %d of %v", extNum, base)
+ continue
+ }
+ var err error
+ if m1 == nil {
+ m1, err = decodeExtension(e1.enc, desc)
+ }
+ if m2 == nil && err == nil {
+ m2, err = decodeExtension(e2.enc, desc)
+ }
+ if err != nil {
+ // The encoded form is invalid.
+ log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err)
+ return false
+ }
+ if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2)) {
+ return false
+ }
+ }
+
+ return true
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
new file mode 100644
index 000000000000..cc25833ca444
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/equal_test.go
@@ -0,0 +1,191 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2011 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ pb "./testdata"
+ . "github.com/golang/protobuf/proto"
+)
+
+// Four identical base messages.
+// The init function adds extensions to some of them.
+var messageWithoutExtension = &pb.MyMessage{Count: Int32(7)}
+var messageWithExtension1a = &pb.MyMessage{Count: Int32(7)} // gets ext1, kept unencoded
+var messageWithExtension1b = &pb.MyMessage{Count: Int32(7)} // gets ext1, round-tripped through Marshal/Unmarshal
+var messageWithExtension2 = &pb.MyMessage{Count: Int32(7)} // gets ext2
+
+// Two messages with non-message extensions.
+var messageWithInt32Extension1 = &pb.MyMessage{Count: Int32(8)}
+var messageWithInt32Extension2 = &pb.MyMessage{Count: Int32(8)}
+
+// init attaches extensions to the fixture messages declared above so the
+// EqualTests table can exercise extension comparison.
+func init() {
+ ext1 := &pb.Ext{Data: String("Kirk")}
+ ext2 := &pb.Ext{Data: String("Picard")}
+
+ // messageWithExtension1a has ext1, but never marshals it.
+ if err := SetExtension(messageWithExtension1a, pb.E_Ext_More, ext1); err != nil {
+ panic("SetExtension on 1a failed: " + err.Error())
+ }
+
+ // messageWithExtension1b is the unmarshaled form of messageWithExtension1a.
+ if err := SetExtension(messageWithExtension1b, pb.E_Ext_More, ext1); err != nil {
+ panic("SetExtension on 1b failed: " + err.Error())
+ }
+ buf, err := Marshal(messageWithExtension1b)
+ if err != nil {
+ panic("Marshal of 1b failed: " + err.Error())
+ }
+ messageWithExtension1b.Reset()
+ if err := Unmarshal(buf, messageWithExtension1b); err != nil {
+ panic("Unmarshal of 1b failed: " + err.Error())
+ }
+
+ // messageWithExtension2 has ext2.
+ if err := SetExtension(messageWithExtension2, pb.E_Ext_More, ext2); err != nil {
+ panic("SetExtension on 2 failed: " + err.Error())
+ }
+
+ if err := SetExtension(messageWithInt32Extension1, pb.E_Ext_Number, Int32(23)); err != nil {
+ panic("SetExtension on Int32-1 failed: " + err.Error())
+ }
+ // Fix: this second Int32 extension must go on messageWithInt32Extension2
+ // (the panic message already says "Int32-2"). Previously it was set on
+ // messageWithInt32Extension1 again, overwriting 23 with 24 and leaving
+ // messageWithInt32Extension2 with no extension at all, which defeated the
+ // "int32 extension vs. a different int32" comparison in EqualTests.
+ if err := SetExtension(messageWithInt32Extension2, pb.E_Ext_Number, Int32(24)); err != nil {
+ panic("SetExtension on Int32-2 failed: " + err.Error())
+ }
+}
+
+// EqualTests is the table driving TestEqual: each entry names a scenario,
+// supplies two messages, and records whether Equal should report them equal.
+var EqualTests = []struct {
+ desc string
+ a, b Message
+ exp bool
+}{
+ {"different types", &pb.GoEnum{}, &pb.GoTestField{}, false},
+ {"equal empty", &pb.GoEnum{}, &pb.GoEnum{}, true},
+ {"nil vs nil", nil, nil, true},
+ {"typed nil vs typed nil", (*pb.GoEnum)(nil), (*pb.GoEnum)(nil), true},
+ {"typed nil vs empty", (*pb.GoEnum)(nil), &pb.GoEnum{}, false},
+ {"different typed nil", (*pb.GoEnum)(nil), (*pb.GoTestField)(nil), false},
+
+ // Scalar (optional) fields: set/unset and value mismatches.
+ {"one set field, one unset field", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{}, false},
+ {"one set field zero, one unset field", &pb.GoTest{Param: Int32(0)}, &pb.GoTest{}, false},
+ {"different set fields", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("bar")}, false},
+ {"equal set", &pb.GoTestField{Label: String("foo")}, &pb.GoTestField{Label: String("foo")}, true},
+
+ // Repeated fields: nil and empty compare equal, unlike "bytes" below.
+ {"repeated, one set", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{}, false},
+ {"repeated, different length", &pb.GoTest{F_Int32Repeated: []int32{2, 3}}, &pb.GoTest{F_Int32Repeated: []int32{2}}, false},
+ {"repeated, different value", &pb.GoTest{F_Int32Repeated: []int32{2}}, &pb.GoTest{F_Int32Repeated: []int32{3}}, false},
+ {"repeated, equal", &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, &pb.GoTest{F_Int32Repeated: []int32{2, 4}}, true},
+ {"repeated, nil equal nil", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: nil}, true},
+ {"repeated, nil equal empty", &pb.GoTest{F_Int32Repeated: nil}, &pb.GoTest{F_Int32Repeated: []int32{}}, true},
+ {"repeated, empty equal nil", &pb.GoTest{F_Int32Repeated: []int32{}}, &pb.GoTest{F_Int32Repeated: nil}, true},
+
+ {
+ "nested, different",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("foo")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("bar")}},
+ false,
+ },
+ {
+ "nested, equal",
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ &pb.GoTest{RequiredField: &pb.GoTestField{Label: String("wow")}},
+ true,
+ },
+
+ // "bytes" is a scalar: empty and nil are distinguishable.
+ {"bytes", &pb.OtherMessage{Value: []byte("foo")}, &pb.OtherMessage{Value: []byte("foo")}, true},
+ {"bytes, empty", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: []byte{}}, true},
+ {"bytes, empty vs nil", &pb.OtherMessage{Value: []byte{}}, &pb.OtherMessage{Value: nil}, false},
+ {
+ "repeated bytes",
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ &pb.MyMessage{RepBytes: [][]byte{[]byte("sham"), []byte("wow")}},
+ true,
+ },
+
+ // Extension fixtures are populated in init above.
+ {"extension vs. no extension", messageWithoutExtension, messageWithExtension1a, false},
+ {"extension vs. same extension", messageWithExtension1a, messageWithExtension1b, true},
+ {"extension vs. different extension", messageWithExtension1a, messageWithExtension2, false},
+
+ {"int32 extension vs. itself", messageWithInt32Extension1, messageWithInt32Extension1, true},
+ {"int32 extension vs. a different int32", messageWithInt32Extension1, messageWithInt32Extension2, false},
+
+ {
+ "message with group",
+ &pb.MyMessage{
+ Count: Int32(1),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: Int32(5),
+ },
+ },
+ &pb.MyMessage{
+ Count: Int32(1),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: Int32(5),
+ },
+ },
+ true,
+ },
+
+ {
+ "map same",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ true,
+ },
+ {
+ "map different entry",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{2: "Rob"}},
+ false,
+ },
+ {
+ "map different key only",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{2: "Ken"}},
+ false,
+ },
+ {
+ "map different value only",
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Ken"}},
+ &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob"}},
+ false,
+ },
+}
+
+// TestEqual runs every case in the EqualTests table through Equal and
+// reports any mismatch against the expected result.
+func TestEqual(t *testing.T) {
+ for _, tc := range EqualTests {
+ if res := Equal(tc.a, tc.b); res != tc.exp {
+ t.Errorf("%v: Equal(%v, %v) = %v, want %v", tc.desc, tc.a, tc.b, res, tc.exp)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
new file mode 100644
index 000000000000..f7667fab48c8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions.go
@@ -0,0 +1,353 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Types and routines for supporting protocol buffer extensions.
+ */
+
+import (
+ "errors"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message.
+var ErrMissingExtension = errors.New("proto: missing extension")
+
+// ExtensionRange represents a range of message extensions for a protocol buffer.
+// Used in code generated by the protocol compiler.
+type ExtensionRange struct {
+ Start, End int32 // both inclusive
+}
+
+// extendableProto is an interface implemented by any protocol buffer that may be extended.
+type extendableProto interface {
+ Message
+ ExtensionRangeArray() []ExtensionRange
+ ExtensionMap() map[int32]Extension
+}
+
+// extendableProtoType is the reflect.Type of the extendableProto interface.
+var extendableProtoType = reflect.TypeOf((*extendableProto)(nil)).Elem()
+
+// ExtensionDesc represents an extension specification.
+// Used in generated code from the protocol compiler.
+type ExtensionDesc struct {
+ ExtendedType Message // nil pointer to the type that is being extended
+ ExtensionType interface{} // nil pointer to the extension type
+ Field int32 // field number
+ Name string // fully-qualified name of extension, for text formatting
+ Tag string // protobuf tag style
+}
+
+// repeated reports whether the extension is a repeated field.
+// A []byte ExtensionType is the scalar "bytes" type, not a repeated field.
+func (ed *ExtensionDesc) repeated() bool {
+ t := reflect.TypeOf(ed.ExtensionType)
+ return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8
+}
+
+// Extension represents an extension in a message.
+type Extension struct {
+ // When an extension is stored in a message using SetExtension
+ // only desc and value are set. When the message is marshaled
+ // enc will be set to the encoded form of the message.
+ //
+ // When a message is unmarshaled and contains extensions, each
+ // extension will have only enc set. When such an extension is
+ // accessed using GetExtension (or GetExtensions) desc and value
+ // will be set.
+ desc *ExtensionDesc
+ value interface{}
+ enc []byte
+}
+
+// SetRawExtension is for testing only.
+// It stores pre-encoded bytes for the given field number without a descriptor.
+func SetRawExtension(base extendableProto, id int32, b []byte) {
+ base.ExtensionMap()[id] = Extension{enc: b}
+}
+
+// isExtensionField returns true iff the given field number is in an extension range.
+func isExtensionField(pb extendableProto, field int32) bool {
+ for _, er := range pb.ExtensionRangeArray() {
+ // Ranges are inclusive on both ends.
+ if er.Start <= field && field <= er.End {
+ return true
+ }
+ }
+ return false
+}
+
+// checkExtensionTypes checks that the given extension is valid for pb.
+// It verifies both that pb is the message type the descriptor extends and
+// that the field number falls within one of pb's declared extension ranges.
+func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error {
+ // Check the extended type. This is a pointer-type identity comparison.
+ if a, b := reflect.TypeOf(pb), reflect.TypeOf(extension.ExtendedType); a != b {
+ return errors.New("proto: bad extended type; " + b.String() + " does not extend " + a.String())
+ }
+ // Check the range.
+ if !isExtensionField(pb, extension.Field) {
+ return errors.New("proto: bad extension number; not in declared ranges")
+ }
+ return nil
+}
+
+// extPropKey is sufficient to uniquely identify an extension.
+type extPropKey struct {
+ base reflect.Type
+ field int32
+}
+
+// extProp is a process-wide cache of computed extension Properties,
+// guarded by its embedded RWMutex.
+var extProp = struct {
+ sync.RWMutex
+ m map[extPropKey]*Properties
+}{
+ m: make(map[extPropKey]*Properties),
+}
+
+// extensionProperties returns the (cached) Properties for the extension,
+// computing and memoizing them on first use. It uses double-checked
+// locking: a read-locked fast path, then a re-check under the write lock.
+func extensionProperties(ed *ExtensionDesc) *Properties {
+ key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field}
+
+ extProp.RLock()
+ if prop, ok := extProp.m[key]; ok {
+ extProp.RUnlock()
+ return prop
+ }
+ extProp.RUnlock()
+
+ extProp.Lock()
+ defer extProp.Unlock()
+ // Check again.
+ if prop, ok := extProp.m[key]; ok {
+ return prop
+ }
+
+ prop := new(Properties)
+ prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil)
+ extProp.m[key] = prop
+ return prop
+}
+
+// encodeExtensionMap encodes any unmarshaled (unencoded) extensions in m.
+// It mutates m in place, filling in each entry's enc field.
+func encodeExtensionMap(m map[int32]Extension) error {
+ for k, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ p := NewBuffer(nil)
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ if err := props.enc(p, props, toStructPointer(x)); err != nil {
+ return err
+ }
+ // e is a copy; write the updated entry back into the map.
+ e.enc = p.buf
+ m[k] = e
+ }
+ return nil
+}
+
+// sizeExtensionMap returns the encoded size, in bytes, of all extensions in m.
+func sizeExtensionMap(m map[int32]Extension) (n int) {
+ for _, e := range m {
+ if e.value == nil || e.desc == nil {
+ // Extension is only in its encoded form.
+ n += len(e.enc)
+ continue
+ }
+
+ // We don't skip extensions that have an encoded form set,
+ // because the extension value may have been mutated after
+ // the last time this function was called.
+
+ et := reflect.TypeOf(e.desc.ExtensionType)
+ props := extensionProperties(e.desc)
+
+ // If e.value has type T, the encoder expects a *struct{ X T }.
+ // Pass a *T with a zero field and hope it all works out.
+ x := reflect.New(et)
+ x.Elem().Set(reflect.ValueOf(e.value))
+ n += props.size(props, toStructPointer(x))
+ }
+ return
+}
+
+// HasExtension returns whether the given extension is present in pb.
+// It only consults the extension map; no type or range validation is done.
+func HasExtension(pb extendableProto, extension *ExtensionDesc) bool {
+ // TODO: Check types, field numbers, etc.?
+ _, ok := pb.ExtensionMap()[extension.Field]
+ return ok
+}
+
+// ClearExtension removes the given extension from pb.
+// Clearing an absent extension is a no-op.
+func ClearExtension(pb extendableProto, extension *ExtensionDesc) {
+ // TODO: Check types, field numbers, etc.?
+ delete(pb.ExtensionMap(), extension.Field)
+}
+
+// GetExtension parses and returns the given extension of pb.
+// If the extension is not present it returns ErrMissingExtension.
+// The decoded value is cached in the map, so repeated calls return the
+// same value and the encoded form is dropped after the first decode.
+func GetExtension(pb extendableProto, extension *ExtensionDesc) (interface{}, error) {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return nil, err
+ }
+
+ emap := pb.ExtensionMap()
+ e, ok := emap[extension.Field]
+ if !ok {
+ return nil, ErrMissingExtension
+ }
+ if e.value != nil {
+ // Already decoded. Check the descriptor, though.
+ if e.desc != extension {
+ // This shouldn't happen. If it does, it means that
+ // GetExtension was called twice with two different
+ // descriptors with the same field number.
+ return nil, errors.New("proto: descriptor conflict")
+ }
+ return e.value, nil
+ }
+
+ v, err := decodeExtension(e.enc, extension)
+ if err != nil {
+ return nil, err
+ }
+
+ // Remember the decoded version and drop the encoded version.
+ // That way it is safe to mutate what we return.
+ e.value = v
+ e.desc = extension
+ e.enc = nil
+ emap[extension.Field] = e
+ return e.value, nil
+}
+
+// decodeExtension decodes an extension encoded in b.
+// For repeated extensions, it keeps decoding tag/value pairs until the
+// buffer is exhausted, accumulating into one slice value.
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) {
+ o := NewBuffer(b)
+
+ t := reflect.TypeOf(extension.ExtensionType)
+ rep := extension.repeated()
+
+ props := extensionProperties(extension)
+
+ // t is a pointer to a struct, pointer to basic type or a slice.
+ // Allocate a "field" to store the pointer/slice itself; the
+ // pointer/slice will be stored here. We pass
+ // the address of this field to props.dec.
+ // This passes a zero field and a *t and lets props.dec
+ // interpret it as a *struct{ x t }.
+ value := reflect.New(t).Elem()
+
+ for {
+ // Discard wire type and field number varint. It isn't needed.
+ if _, err := o.DecodeVarint(); err != nil {
+ return nil, err
+ }
+
+ if err := props.dec(o, props, toStructPointer(value.Addr())); err != nil {
+ return nil, err
+ }
+
+ // Non-repeated extensions decode exactly one value.
+ if !rep || o.index >= len(o.buf) {
+ break
+ }
+ }
+ return value.Interface(), nil
+}
+
+// GetExtensions returns a slice of the extensions present in pb that are also listed in es.
+// The returned slice has the same length as es; missing extensions will appear as nil elements.
+func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) {
+ epb, ok := pb.(extendableProto)
+ if !ok {
+ err = errors.New("proto: not an extendable proto")
+ return
+ }
+ extensions = make([]interface{}, len(es))
+ for i, e := range es {
+ extensions[i], err = GetExtension(epb, e)
+ // A missing extension is not an error here; it simply yields
+ // a nil element. Any other error aborts the whole call.
+ if err == ErrMissingExtension {
+ err = nil
+ }
+ if err != nil {
+ return
+ }
+ }
+ return
+}
+
+// SetExtension sets the specified extension of pb to the specified value.
+// It overwrites any existing value for the same field number.
+func SetExtension(pb extendableProto, extension *ExtensionDesc, value interface{}) error {
+ if err := checkExtensionTypes(pb, extension); err != nil {
+ return err
+ }
+ // The value must have exactly the descriptor's declared type.
+ typ := reflect.TypeOf(extension.ExtensionType)
+ if typ != reflect.TypeOf(value) {
+ return errors.New("proto: bad extension value type")
+ }
+
+ pb.ExtensionMap()[extension.Field] = Extension{desc: extension, value: value}
+ return nil
+}
+
+// A global registry of extensions.
+// The generated code will register the generated descriptors by calling RegisterExtension.
+
+// extensionMaps maps an extended struct type to its registered extensions by field number.
+var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc)
+
+// RegisterExtension is called from the generated code.
+// It panics on a duplicate registration for the same (type, field) pair.
+func RegisterExtension(desc *ExtensionDesc) {
+ st := reflect.TypeOf(desc.ExtendedType).Elem()
+ m := extensionMaps[st]
+ if m == nil {
+ m = make(map[int32]*ExtensionDesc)
+ extensionMaps[st] = m
+ }
+ if _, ok := m[desc.Field]; ok {
+ panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field)))
+ }
+ m[desc.Field] = desc
+}
+
+// RegisteredExtensions returns a map of the registered extensions of a
+// protocol buffer struct, indexed by the extension number.
+// The argument pb should be a nil pointer to the struct type.
+func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc {
+ return extensionMaps[reflect.TypeOf(pb).Elem()]
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
new file mode 100644
index 000000000000..451ad871a23a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/extensions_test.go
@@ -0,0 +1,137 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ pb "./testdata"
+ "github.com/golang/protobuf/proto"
+)
+
+// TestGetExtensionsWithMissingExtensions checks that GetExtensions returns
+// set extensions in order and nil (with no error) for absent ones.
+func TestGetExtensionsWithMissingExtensions(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Fatalf("Could not set ext1: %s", ext1)
+ }
+ // E_Ext_More was set above; E_Ext_Text was not.
+ exts, err := proto.GetExtensions(msg, []*proto.ExtensionDesc{
+ pb.E_Ext_More,
+ pb.E_Ext_Text,
+ })
+ if err != nil {
+ t.Fatalf("GetExtensions() failed: %s", err)
+ }
+ if exts[0] != ext1 {
+ t.Errorf("ext1 not in returned extensions: %T %v", exts[0], exts[0])
+ }
+ if exts[1] != nil {
+ t.Errorf("ext2 in returned extensions: %T %v", exts[1], exts[1])
+ }
+}
+
+// TestGetExtensionStability checks that repeated GetExtension calls return
+// the identical value (pointer-equal), both before marshaling and after a
+// marshal/unmarshal round trip (i.e. the decoded value is cached).
+func TestGetExtensionStability(t *testing.T) {
+ check := func(m *pb.MyMessage) bool {
+ ext1, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ ext2, err := proto.GetExtension(m, pb.E_Ext_More)
+ if err != nil {
+ t.Fatalf("GetExtension() failed: %s", err)
+ }
+ // Pointer identity: both calls must yield the same cached value.
+ return ext1 == ext2
+ }
+ msg := &pb.MyMessage{Count: proto.Int32(4)}
+ ext0 := &pb.Ext{}
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext0); err != nil {
+ t.Fatalf("Could not set ext1: %s", ext0)
+ }
+ if !check(msg) {
+ t.Errorf("GetExtension() not stable before marshaling")
+ }
+ bb, err := proto.Marshal(msg)
+ if err != nil {
+ t.Fatalf("Marshal() failed: %s", err)
+ }
+ msg1 := &pb.MyMessage{}
+ err = proto.Unmarshal(bb, msg1)
+ if err != nil {
+ t.Fatalf("Unmarshal() failed: %s", err)
+ }
+ if !check(msg1) {
+ t.Errorf("GetExtension() not stable after unmarshaling")
+ }
+}
+
+// TestExtensionsRoundTrip exercises the Has/Set/Get/Clear extension API:
+// overwrite semantics, clearing, and the error paths for a bad extension
+// number and a mismatched value type.
+func TestExtensionsRoundTrip(t *testing.T) {
+ msg := &pb.MyMessage{}
+ ext1 := &pb.Ext{
+ Data: proto.String("hi"),
+ }
+ ext2 := &pb.Ext{
+ Data: proto.String("there"),
+ }
+ exists := proto.HasExtension(msg, pb.E_Ext_More)
+ if exists {
+ t.Error("Extension More present unexpectedly")
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext1); err != nil {
+ t.Error(err)
+ }
+ // Setting the same extension again must overwrite ext1.
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext2); err != nil {
+ t.Error(err)
+ }
+ e, err := proto.GetExtension(msg, pb.E_Ext_More)
+ if err != nil {
+ t.Error(err)
+ }
+ x, ok := e.(*pb.Ext)
+ if !ok {
+ t.Errorf("e has type %T, expected testdata.Ext", e)
+ } else if *x.Data != "there" {
+ t.Errorf("SetExtension failed to overwrite, got %+v, not 'there'", x)
+ }
+ proto.ClearExtension(msg, pb.E_Ext_More)
+ if _, err = proto.GetExtension(msg, pb.E_Ext_More); err != proto.ErrMissingExtension {
+ t.Errorf("got %v, expected ErrMissingExtension", e)
+ }
+ // E_X215 does not extend MyMessage, so GetExtension must fail.
+ if _, err := proto.GetExtension(msg, pb.E_X215); err == nil {
+ t.Error("expected bad extension error, got nil")
+ }
+ if err := proto.SetExtension(msg, pb.E_X215, 12); err == nil {
+ t.Error("expected extension err")
+ }
+ // Wrong value type (int instead of *pb.Ext) must be rejected.
+ if err := proto.SetExtension(msg, pb.E_Ext_More, 12); err == nil {
+ t.Error("expected some sort of type mismatch error, got nil")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
new file mode 100644
index 000000000000..87c6b9d1acf8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/lib.go
@@ -0,0 +1,751 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+/*
+ Package proto converts data structures to and from the wire format of
+ protocol buffers. It works in concert with the Go source code generated
+ for .proto files by the protocol compiler.
+
+ A summary of the properties of the protocol buffer interface
+ for a protocol buffer variable v:
+
+ - Names are turned from camel_case to CamelCase for export.
+ - There are no methods on v to set fields; just treat
+ them as structure fields.
+ - There are getters that return a field's value if set,
+ and return the field's default value if unset.
+ The getters work even if the receiver is a nil message.
+ - The zero value for a struct is its correct initialization state.
+ All desired fields must be set before marshaling.
+ - A Reset() method will restore a protobuf struct to its zero state.
+ - Non-repeated fields are pointers to the values; nil means unset.
+ That is, optional or required field int32 f becomes F *int32.
+ - Repeated fields are slices.
+ - Helper functions are available to aid the setting of fields.
+ Helpers for getting values are superseded by the
+ GetFoo methods and their use is deprecated.
+ msg.Foo = proto.String("hello") // set field
+ - Constants are defined to hold the default values of all fields that
+ have them. They have the form Default_StructName_FieldName.
+ Because the getter methods handle defaulted values,
+ direct use of these constants should be rare.
+ - Enums are given type names and maps from names to values.
+ Enum values are prefixed with the enum's type name. Enum types have
+ a String method, and a Enum method to assist in message construction.
+ - Nested groups and enums have type names prefixed with the name of
+ the surrounding message type.
+ - Extensions are given descriptor names that start with E_,
+ followed by an underscore-delimited list of the nested messages
+ that contain it (if any) followed by the CamelCased name of the
+ extension field itself. HasExtension, ClearExtension, GetExtension
+ and SetExtension are functions for manipulating extensions.
+ - Marshal and Unmarshal are functions to encode and decode the wire format.
+
+ The simplest way to describe this is to see an example.
+ Given file test.proto, containing
+
+ package example;
+
+ enum FOO { X = 17; };
+
+ message Test {
+ required string label = 1;
+ optional int32 type = 2 [default=77];
+ repeated int64 reps = 3;
+ optional group OptionalGroup = 4 {
+ required string RequiredField = 5;
+ }
+ }
+
+ The resulting file, test.pb.go, is:
+
+ package example
+
+ import "github.com/golang/protobuf/proto"
+
+ type FOO int32
+ const (
+ FOO_X FOO = 17
+ )
+ var FOO_name = map[int32]string{
+ 17: "X",
+ }
+ var FOO_value = map[string]int32{
+ "X": 17,
+ }
+
+ func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+ }
+ func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+ }
+
+ type Test struct {
+ Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"`
+ Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"`
+ Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"`
+ Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (this *Test) Reset() { *this = Test{} }
+ func (this *Test) String() string { return proto.CompactTextString(this) }
+ const Default_Test_Type int32 = 77
+
+ func (this *Test) GetLabel() string {
+ if this != nil && this.Label != nil {
+ return *this.Label
+ }
+ return ""
+ }
+
+ func (this *Test) GetType() int32 {
+ if this != nil && this.Type != nil {
+ return *this.Type
+ }
+ return Default_Test_Type
+ }
+
+ func (this *Test) GetOptionalgroup() *Test_OptionalGroup {
+ if this != nil {
+ return this.Optionalgroup
+ }
+ return nil
+ }
+
+ type Test_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+ }
+ func (this *Test_OptionalGroup) Reset() { *this = Test_OptionalGroup{} }
+ func (this *Test_OptionalGroup) String() string { return proto.CompactTextString(this) }
+
+ func (this *Test_OptionalGroup) GetRequiredField() string {
+ if this != nil && this.RequiredField != nil {
+ return *this.RequiredField
+ }
+ return ""
+ }
+
+ func init() {
+ proto.RegisterEnum("example.FOO", FOO_name, FOO_value)
+ }
+
+ To create and play with a Test object:
+
+ package main
+
+ import (
+ "log"
+
+ "github.com/golang/protobuf/proto"
+ "./example.pb"
+ )
+
+ func main() {
+ test := &example.Test{
+ Label: proto.String("hello"),
+ Type: proto.Int32(17),
+ Optionalgroup: &example.Test_OptionalGroup{
+ RequiredField: proto.String("good bye"),
+ },
+ }
+ data, err := proto.Marshal(test)
+ if err != nil {
+ log.Fatal("marshaling error: ", err)
+ }
+ newTest := new(example.Test)
+ err = proto.Unmarshal(data, newTest)
+ if err != nil {
+ log.Fatal("unmarshaling error: ", err)
+ }
+ // Now test and newTest contain the same data.
+ if test.GetLabel() != newTest.GetLabel() {
+ log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel())
+ }
+ // etc.
+ }
+*/
+package proto
+
+import (
+ "encoding/json"
+ "fmt"
+ "log"
+ "reflect"
+ "strconv"
+ "sync"
+)
+
+// Message is implemented by generated protocol buffer messages.
+// ProtoMessage is a marker method: it prevents unrelated types that happen
+// to have Reset and String from satisfying the interface accidentally.
+type Message interface {
+	Reset()
+	String() string
+	ProtoMessage()
+}
+
+// Stats records allocation details about the protocol buffer encoders
+// and decoders. Useful for tuning the library itself.
+// NOTE(review): counters appear to be maintained only when collectStats
+// (below) is true; as shipped (false) every field stays zero — confirm
+// against encode.go/decode.go.
+type Stats struct {
+	Emalloc uint64 // mallocs in encode
+	Dmalloc uint64 // mallocs in decode
+	Encode  uint64 // number of encodes
+	Decode  uint64 // number of decodes
+	Chit    uint64 // number of cache hits
+	Cmiss   uint64 // number of cache misses
+	Size    uint64 // number of sizes
+}
+
+// Set to true to enable stats collection.
+const collectStats = false
+
+// stats is the package-global counter set; GetStats returns a copy of it.
+var stats Stats
+
+// GetStats returns a copy of the global Stats structure.
+func GetStats() Stats { return stats }
+
+// A Buffer is a buffer manager for marshaling and unmarshaling
+// protocol buffers. It may be reused between invocations to
+// reduce memory usage. It is not necessary to use a Buffer;
+// the global functions Marshal and Unmarshal create a
+// temporary Buffer and are fine for most applications.
+type Buffer struct {
+	buf []byte // encode/decode byte stream
+	// NOTE(review): index is labelled "write point" but Reset and SetBuf
+	// below also use it as the read cursor for decoding.
+	index int // write point
+
+	// pools of basic types to amortize allocation.
+	bools   []bool
+	uint32s []uint32
+	uint64s []uint64
+
+	// extra pools, only used with pointer_reflect.go
+	int32s   []int32
+	int64s   []int64
+	float32s []float32
+	float64s []float64
+}
+
+// NewBuffer allocates a new Buffer and initializes its internal data to
+// the contents of the argument slice.
+// The slice is used directly (not copied), so the caller and Buffer alias it.
+func NewBuffer(e []byte) *Buffer {
+	return &Buffer{buf: e}
+}
+
+// Reset resets the Buffer, ready for marshaling a new protocol buffer.
+// The backing array is retained so repeated use amortizes allocation.
+func (p *Buffer) Reset() {
+	p.buf = p.buf[0:0] // for reading/writing
+	p.index = 0        // for reading
+}
+
+// SetBuf replaces the internal buffer with the slice,
+// ready for unmarshaling the contents of the slice.
+func (p *Buffer) SetBuf(s []byte) {
+	p.buf = s
+	p.index = 0
+}
+
+// Bytes returns the contents of the Buffer.
+// The returned slice aliases the Buffer's internal storage.
+func (p *Buffer) Bytes() []byte { return p.buf }
+
+/*
+ * Helper routines for simplifying the creation of optional fields of basic type.
+ * Each returns a pointer to a fresh copy of its argument, suitable for
+ * assigning to the pointer-typed optional fields of generated structs.
+ */
+
+// Bool is a helper routine that allocates a new bool value
+// to store v and returns a pointer to it.
+func Bool(v bool) *bool {
+	return &v
+}
+
+// Int32 is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it.
+func Int32(v int32) *int32 {
+	return &v
+}
+
+// Int is a helper routine that allocates a new int32 value
+// to store v and returns a pointer to it, but unlike Int32
+// its argument value is an int.
+func Int(v int) *int32 {
+	p := new(int32)
+	*p = int32(v)
+	return p
+}
+
+// Int64 is a helper routine that allocates a new int64 value
+// to store v and returns a pointer to it.
+func Int64(v int64) *int64 {
+	return &v
+}
+
+// Float32 is a helper routine that allocates a new float32 value
+// to store v and returns a pointer to it.
+func Float32(v float32) *float32 {
+	return &v
+}
+
+// Float64 is a helper routine that allocates a new float64 value
+// to store v and returns a pointer to it.
+func Float64(v float64) *float64 {
+	return &v
+}
+
+// Uint32 is a helper routine that allocates a new uint32 value
+// to store v and returns a pointer to it.
+func Uint32(v uint32) *uint32 {
+	return &v
+}
+
+// Uint64 is a helper routine that allocates a new uint64 value
+// to store v and returns a pointer to it.
+func Uint64(v uint64) *uint64 {
+	return &v
+}
+
+// String is a helper routine that allocates a new string value
+// to store v and returns a pointer to it.
+func String(v string) *string {
+	return &v
+}
+
+// EnumName is a helper function to simplify printing protocol buffer enums
+// by name. Given an enum map and a value, it returns a useful string.
+// Values missing from the map fall back to their decimal representation.
+func EnumName(m map[int32]string, v int32) string {
+	s, ok := m[v]
+	if ok {
+		return s
+	}
+	return strconv.Itoa(int(v))
+}
+
+// UnmarshalJSONEnum is a helper function to simplify recovering enum int values
+// from their JSON-encoded representation. Given a map from the enum's symbolic
+// names to its int values, and a byte buffer containing the JSON-encoded
+// value, it returns an int32 that can be cast to the enum type by the caller.
+//
+// The function can deal with both JSON representations, numeric and symbolic.
+//
+// NOTE(review): data[0] panics on empty input — presumably callers
+// (generated code) always pass a non-empty JSON value; confirm. The error
+// paths inconsistently return -1 or 0; callers must ignore the value on error.
+func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) {
+	if data[0] == '"' {
+		// New style: enums are strings.
+		var repr string
+		if err := json.Unmarshal(data, &repr); err != nil {
+			return -1, err
+		}
+		val, ok := m[repr]
+		if !ok {
+			return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr)
+		}
+		return val, nil
+	}
+	// Old style: enums are ints.
+	var val int32
+	if err := json.Unmarshal(data, &val); err != nil {
+		return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName)
+	}
+	return val, nil
+}
+
+// DebugPrint dumps the encoded data in b in a debugging format with a header
+// including the string s. Used in testing but made available for general debugging.
+// It temporarily repoints the Buffer at b for decoding and restores the
+// original buffer and read index before returning. Output goes to stdout.
+func (o *Buffer) DebugPrint(s string, b []byte) {
+	var u uint64
+
+	// Save the Buffer's state so it can be restored on exit.
+	obuf := o.buf
+	index := o.index
+	o.buf = b
+	o.index = 0
+	depth := 0
+
+	fmt.Printf("\n--- %s ---\n", s)
+
+out:
+	for {
+		for i := 0; i < depth; i++ {
+			fmt.Print("  ")
+		}
+
+		// Deliberately shadows the saved outer index: this is the byte
+		// offset of the field about to be decoded, used in the printouts.
+		index := o.index
+		if index == len(o.buf) {
+			break
+		}
+
+		op, err := o.DecodeVarint()
+		if err != nil {
+			fmt.Printf("%3d: fetching op err %v\n", index, err)
+			break out
+		}
+		tag := op >> 3
+		wire := op & 7
+
+		switch wire {
+		default:
+			fmt.Printf("%3d: t=%3d unknown wire=%d\n",
+				index, tag, wire)
+			break out
+
+		case WireBytes:
+			var r []byte
+
+			r, err = o.DecodeRawBytes(false)
+			if err != nil {
+				break out
+			}
+			fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r))
+			if len(r) <= 6 {
+				for i := 0; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			} else {
+				// Long payloads: print the first and last three bytes.
+				for i := 0; i < 3; i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+				fmt.Printf(" ..")
+				for i := len(r) - 3; i < len(r); i++ {
+					fmt.Printf(" %.2x", r[i])
+				}
+			}
+			fmt.Printf("\n")
+
+		case WireFixed32:
+			u, err = o.DecodeFixed32()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u)
+
+		case WireFixed64:
+			u, err = o.DecodeFixed64()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u)
+			// NOTE(review): redundant break; Go switch cases do not fall through.
+			break
+
+		case WireVarint:
+			u, err = o.DecodeVarint()
+			if err != nil {
+				fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u)
+
+		case WireStartGroup:
+			// NOTE(review): err is necessarily nil here (it was checked right
+			// after DecodeVarint above); this guard and the matching one in
+			// WireEndGroup are dead code.
+			if err != nil {
+				fmt.Printf("%3d: t=%3d start err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d start\n", index, tag)
+			depth++
+
+		case WireEndGroup:
+			depth--
+			if err != nil {
+				fmt.Printf("%3d: t=%3d end err %v\n", index, tag, err)
+				break out
+			}
+			fmt.Printf("%3d: t=%3d end\n", index, tag)
+		}
+	}
+
+	if depth != 0 {
+		fmt.Printf("%3d: start-end not balanced %d\n", o.index, depth)
+	}
+	fmt.Printf("\n")
+
+	// Restore the Buffer's original state.
+	o.buf = obuf
+	o.index = index
+}
+
+// SetDefaults sets unset protocol buffer fields to their default values.
+// It only modifies fields that are both unset and have defined defaults.
+// It recursively sets default values in any non-nil sub-messages.
+// pb must be a pointer to a generated message struct (setDefaults calls
+// Elem on the reflect.Value).
+func SetDefaults(pb Message) {
+	setDefaults(reflect.ValueOf(pb), true, false)
+}
+
+// setDefaults fills in defaults on the struct that v points to.
+// zeros additionally assigns zero values to scalar fields that have no
+// explicit proto-declared default.
+// NOTE(review): recur is threaded through the recursion but never
+// consulted; non-nil sub-messages are always descended into.
+// v is a pointer to a struct.
+func setDefaults(v reflect.Value, recur, zeros bool) {
+	v = v.Elem()
+
+	// Fast path: look up the cached default description under the read lock.
+	defaultMu.RLock()
+	dm, ok := defaults[v.Type()]
+	defaultMu.RUnlock()
+	if !ok {
+		// Benign race: two goroutines may build the same description
+		// concurrently; the second store overwrites an identical value.
+		dm = buildDefaultMessage(v.Type())
+		defaultMu.Lock()
+		defaults[v.Type()] = dm
+		defaultMu.Unlock()
+	}
+
+	// Assign defaults (or zeros) to every unset scalar field.
+	for _, sf := range dm.scalars {
+		f := v.Field(sf.index)
+		if !f.IsNil() {
+			// field already set
+			continue
+		}
+		dv := sf.value
+		if dv == nil && !zeros {
+			// no explicit default, and don't want to set zeros
+			continue
+		}
+		fptr := f.Addr().Interface() // **T
+		// TODO: Consider batching the allocations we do here.
+		switch sf.kind {
+		case reflect.Bool:
+			b := new(bool)
+			if dv != nil {
+				*b = dv.(bool)
+			}
+			*(fptr.(**bool)) = b
+		case reflect.Float32:
+			f := new(float32)
+			if dv != nil {
+				*f = dv.(float32)
+			}
+			*(fptr.(**float32)) = f
+		case reflect.Float64:
+			f := new(float64)
+			if dv != nil {
+				*f = dv.(float64)
+			}
+			*(fptr.(**float64)) = f
+		case reflect.Int32:
+			// might be an enum
+			if ft := f.Type(); ft != int32PtrType {
+				// enum
+				f.Set(reflect.New(ft.Elem()))
+				if dv != nil {
+					f.Elem().SetInt(int64(dv.(int32)))
+				}
+			} else {
+				// int32 field
+				i := new(int32)
+				if dv != nil {
+					*i = dv.(int32)
+				}
+				*(fptr.(**int32)) = i
+			}
+		case reflect.Int64:
+			i := new(int64)
+			if dv != nil {
+				*i = dv.(int64)
+			}
+			*(fptr.(**int64)) = i
+		case reflect.String:
+			s := new(string)
+			if dv != nil {
+				*s = dv.(string)
+			}
+			*(fptr.(**string)) = s
+		case reflect.Uint8:
+			// exceptional case: []byte
+			var b []byte
+			if dv != nil {
+				db := dv.([]byte)
+				b = make([]byte, len(db))
+				copy(b, db)
+			} else {
+				b = []byte{}
+			}
+			*(fptr.(*[]byte)) = b
+		case reflect.Uint32:
+			u := new(uint32)
+			if dv != nil {
+				*u = dv.(uint32)
+			}
+			*(fptr.(**uint32)) = u
+		case reflect.Uint64:
+			u := new(uint64)
+			if dv != nil {
+				*u = dv.(uint64)
+			}
+			*(fptr.(**uint64)) = u
+		default:
+			log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind)
+		}
+	}
+
+	// Recurse into non-nil nested messages (single or repeated).
+	for _, ni := range dm.nested {
+		f := v.Field(ni)
+		if f.IsNil() {
+			continue
+		}
+		// f is *T or []*T
+		if f.Kind() == reflect.Ptr {
+			setDefaults(f, recur, zeros)
+		} else {
+			for i := 0; i < f.Len(); i++ {
+				e := f.Index(i)
+				if e.IsNil() {
+					continue
+				}
+				setDefaults(e, recur, zeros)
+			}
+		}
+	}
+}
+
+var (
+	// defaults maps a protocol buffer struct type to a slice of the fields,
+	// with its scalar fields set to their proto-declared non-zero default values.
+	defaultMu sync.RWMutex
+	defaults  = make(map[reflect.Type]defaultMessage)
+
+	// int32PtrType lets setDefaults distinguish plain *int32 fields from
+	// enum fields, whose pointer type differs.
+	int32PtrType = reflect.TypeOf((*int32)(nil))
+)
+
+// defaultMessage represents information about the default values of a message.
+type defaultMessage struct {
+	scalars []scalarField
+	nested  []int // struct field index of nested messages
+}
+
+// scalarField records how to default one scalar field of a message.
+type scalarField struct {
+	index int          // struct field index
+	kind  reflect.Kind // element type (the T in *T or []T)
+	value interface{}  // the proto-declared default value, or nil
+}
+
+// ptrToStruct reports whether t is a pointer to a struct type.
+func ptrToStruct(t reflect.Type) bool {
+	return t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct
+}
+
+// buildDefaultMessage scans t's proto properties and records, for each
+// field, either its nested-message index or its scalar kind plus the
+// parsed default value (nil when no default is declared). Fields whose
+// declared default fails to parse are logged and omitted entirely.
+// t is a struct type.
+func buildDefaultMessage(t reflect.Type) (dm defaultMessage) {
+	sprop := GetProperties(t)
+	for _, prop := range sprop.Prop {
+		fi, ok := sprop.decoderTags.get(prop.Tag)
+		if !ok {
+			// XXX_unrecognized
+			continue
+		}
+		ft := t.Field(fi).Type
+
+		// nested messages
+		if ptrToStruct(ft) || (ft.Kind() == reflect.Slice && ptrToStruct(ft.Elem())) {
+			dm.nested = append(dm.nested, fi)
+			continue
+		}
+
+		sf := scalarField{
+			index: fi,
+			kind:  ft.Elem().Kind(),
+		}
+
+		// scalar fields without defaults
+		if !prop.HasDefault {
+			dm.scalars = append(dm.scalars, sf)
+			continue
+		}
+
+		// a scalar field: either *T or []byte
+		switch ft.Elem().Kind() {
+		case reflect.Bool:
+			x, err := strconv.ParseBool(prop.Default)
+			if err != nil {
+				log.Printf("proto: bad default bool %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = x
+		case reflect.Float32:
+			x, err := strconv.ParseFloat(prop.Default, 32)
+			if err != nil {
+				log.Printf("proto: bad default float32 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = float32(x)
+		case reflect.Float64:
+			x, err := strconv.ParseFloat(prop.Default, 64)
+			if err != nil {
+				log.Printf("proto: bad default float64 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = x
+		case reflect.Int32:
+			x, err := strconv.ParseInt(prop.Default, 10, 32)
+			if err != nil {
+				log.Printf("proto: bad default int32 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = int32(x)
+		case reflect.Int64:
+			x, err := strconv.ParseInt(prop.Default, 10, 64)
+			if err != nil {
+				log.Printf("proto: bad default int64 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = x
+		case reflect.String:
+			sf.value = prop.Default
+		case reflect.Uint8:
+			// []byte (not *uint8)
+			sf.value = []byte(prop.Default)
+		case reflect.Uint32:
+			x, err := strconv.ParseUint(prop.Default, 10, 32)
+			if err != nil {
+				log.Printf("proto: bad default uint32 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = uint32(x)
+		case reflect.Uint64:
+			x, err := strconv.ParseUint(prop.Default, 10, 64)
+			if err != nil {
+				log.Printf("proto: bad default uint64 %q: %v", prop.Default, err)
+				continue
+			}
+			sf.value = x
+		default:
+			log.Printf("proto: unhandled def kind %v", ft.Elem().Kind())
+			continue
+		}
+
+		dm.scalars = append(dm.scalars, sf)
+	}
+
+	return dm
+}
+
+// Map fields may have key types of non-float scalars, strings and enums.
+// The easiest way to sort them in some deterministic order is to use fmt.
+// If this turns out to be inefficient we can always consider other options,
+// such as doing a Schwartzian transform.
+
+// mapKeys implements sort.Interface over reflect.Values by comparing
+// their fmt.Sprint representations.
+type mapKeys []reflect.Value
+
+func (s mapKeys) Len() int      { return len(s) }
+func (s mapKeys) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s mapKeys) Less(i, j int) bool {
+	return fmt.Sprint(s[i].Interface()) < fmt.Sprint(s[j].Interface())
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
new file mode 100644
index 000000000000..9d912bce19bb
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set.go
@@ -0,0 +1,287 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Support for message sets.
+ */
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "sort"
+)
+
+// ErrNoMessageTypeId occurs when a protocol buffer does not have a message type ID.
+// A message type ID is required for storing a protocol buffer in a message set.
+// NOTE(review): Go initialism convention would spell this ErrNoMessageTypeID;
+// the exported name is kept as-is for compatibility with existing callers.
+var ErrNoMessageTypeId = errors.New("proto does not have a message type ID")
+
+// The first two types (_MessageSet_Item and MessageSet)
+// model what the protocol compiler produces for the following protocol message:
+//   message MessageSet {
+//     repeated group Item = 1 {
+//       required int32 type_id = 2;
+//       required string message = 3;
+//     };
+//   }
+// That is the MessageSet wire format. We can't use a proto to generate these
+// because that would introduce a circular dependency between it and this package.
+//
+// When a proto1 proto has a field that looks like:
+//   optional message info = 3;
+// the protocol compiler produces a field in the generated struct that looks like:
+//   Info *_proto_.MessageSet `protobuf:"bytes,3,opt,name=info"`
+// The package is automatically inserted so there is no need for that proto file to
+// import this package.
+
+// _MessageSet_Item is one entry of the message-set wire format: a type ID
+// paired with that message's marshaled bytes.
+type _MessageSet_Item struct {
+	TypeId  *int32 `protobuf:"varint,2,req,name=type_id"`
+	Message []byte `protobuf:"bytes,3,req,name=message"`
+}
+
+// MessageSet is the in-memory form of the message-set wire format.
+type MessageSet struct {
+	Item             []*_MessageSet_Item `protobuf:"group,1,rep"`
+	XXX_unrecognized []byte
+	// TODO: caching?
+}
+
+// Make sure MessageSet is a Message.
+var _ Message = (*MessageSet)(nil)
+
+// messageTypeIder is an interface satisfied by a protocol buffer type
+// that may be stored in a MessageSet.
+type messageTypeIder interface {
+	MessageTypeId() int32
+}
+
+// find returns the item holding pb's message type, or nil if pb's type is
+// absent from the set or pb does not expose a message type ID.
+// The scan is linear in the number of items.
+func (ms *MessageSet) find(pb Message) *_MessageSet_Item {
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return nil
+	}
+	id := mti.MessageTypeId()
+	for _, item := range ms.Item {
+		if *item.TypeId == id {
+			return item
+		}
+	}
+	return nil
+}
+
+// Has reports whether a message of pb's type is stored in the set.
+// NOTE(review): equivalent to `return ms.find(pb) != nil`.
+func (ms *MessageSet) Has(pb Message) bool {
+	if ms.find(pb) != nil {
+		return true
+	}
+	return false
+}
+
+// Unmarshal decodes the stored bytes for pb's message type into pb.
+// If pb's type is not in the set but pb does expose a type ID, it is a
+// no-op returning nil; with no type ID it returns ErrNoMessageTypeId.
+func (ms *MessageSet) Unmarshal(pb Message) error {
+	if item := ms.find(pb); item != nil {
+		return Unmarshal(item.Message, pb)
+	}
+	if _, ok := pb.(messageTypeIder); !ok {
+		return ErrNoMessageTypeId
+	}
+	return nil // TODO: return error instead?
+}
+
+// Marshal encodes pb into the set, replacing the payload of an existing
+// entry with the same message type ID, or appending a new entry.
+func (ms *MessageSet) Marshal(pb Message) error {
+	msg, err := Marshal(pb)
+	if err != nil {
+		return err
+	}
+	if item := ms.find(pb); item != nil {
+		// reuse existing item
+		item.Message = msg
+		return nil
+	}
+
+	mti, ok := pb.(messageTypeIder)
+	if !ok {
+		return ErrNoMessageTypeId
+	}
+
+	mtid := mti.MessageTypeId()
+	ms.Item = append(ms.Item, &_MessageSet_Item{
+		TypeId:  &mtid,
+		Message: msg,
+	})
+	return nil
+}
+
+// Reset, String and ProtoMessage make MessageSet satisfy the Message interface.
+func (ms *MessageSet) Reset()         { *ms = MessageSet{} }
+func (ms *MessageSet) String() string { return CompactTextString(ms) }
+func (*MessageSet) ProtoMessage()     {}
+
+// Support for the message_set_wire_format message option.
+
+// skipVarint returns buf with its leading varint removed.
+// It assumes a complete, well-formed varint and will index past the end
+// of a truncated slice (panic); callers pass already-validated encodings.
+func skipVarint(buf []byte) []byte {
+	i := 0
+	for ; buf[i]&0x80 != 0; i++ {
+	}
+	return buf[i+1:]
+}
+
+// MarshalMessageSet encodes the extension map represented by m in the message set wire format.
+// It is called by generated Marshal methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSet(m map[int32]Extension) ([]byte, error) {
+	// Ensure each Extension's encoded form (e.enc) is populated.
+	if err := encodeExtensionMap(m); err != nil {
+		return nil, err
+	}
+
+	// Sort extension IDs to provide a deterministic encoding.
+	// See also enc_map in encode.go.
+	ids := make([]int, 0, len(m))
+	for id := range m {
+		ids = append(ids, int(id))
+	}
+	sort.Ints(ids)
+
+	ms := &MessageSet{Item: make([]*_MessageSet_Item, 0, len(m))}
+	for _, id := range ids {
+		e := m[int32(id)]
+		// Remove the wire type and field number varint, as well as the length varint.
+		msg := skipVarint(skipVarint(e.enc))
+
+		ms.Item = append(ms.Item, &_MessageSet_Item{
+			TypeId:  Int32(int32(id)),
+			Message: msg,
+		})
+	}
+	return Marshal(ms)
+}
+
+// UnmarshalMessageSet decodes the extension map encoded in buf in the message set wire format.
+// It is called by generated Unmarshal methods on protocol buffer messages with the message_set_wire_format option.
+// Duplicate items sharing a type ID have their payloads concatenated into
+// a single Extension, preserving repeated-occurrence semantics.
+func UnmarshalMessageSet(buf []byte, m map[int32]Extension) error {
+	ms := new(MessageSet)
+	if err := Unmarshal(buf, ms); err != nil {
+		return err
+	}
+	for _, item := range ms.Item {
+		id := *item.TypeId
+		msg := item.Message
+
+		// Restore wire type and field number varint, plus length varint.
+		// Be careful to preserve duplicate items.
+		b := EncodeVarint(uint64(id)<<3 | WireBytes)
+		if ext, ok := m[id]; ok {
+			// Existing data; rip off the tag and length varint
+			// so we join the new data correctly.
+			// We can assume that ext.enc is set because we are unmarshaling.
+			o := ext.enc[len(b):]   // skip wire type and field number
+			_, n := DecodeVarint(o) // calculate length of length varint
+			o = o[n:]               // skip length varint
+			msg = append(o, msg...) // join old data and new data
+		}
+		b = append(b, EncodeVarint(uint64(len(msg)))...)
+		b = append(b, msg...)
+
+		m[id] = Extension{enc: b}
+	}
+	return nil
+}
+
+// MarshalMessageSetJSON encodes the extension map represented by m in JSON format.
+// It is called by generated MarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+func MarshalMessageSetJSON(m map[int32]Extension) ([]byte, error) {
+	var b bytes.Buffer
+	b.WriteByte('{')
+
+	// Process the map in key order for deterministic output.
+	ids := make([]int32, 0, len(m))
+	for id := range m {
+		ids = append(ids, id)
+	}
+	sort.Sort(int32Slice(ids)) // int32Slice defined in text.go
+
+	for i, id := range ids {
+		ext := m[id]
+		msd, ok := messageSetMap[id]
+		if !ok {
+			// Unknown type; we can't render it, so skip it.
+			continue
+		}
+
+		// Write the separator only once something has been emitted
+		// (b.Len() > 1 means we are past the opening '{'). The original
+		// wrote the comma before the unknown-type skip above, producing
+		// invalid JSON such as "{," or ",," when entries were skipped.
+		if i > 0 && b.Len() > 1 {
+			b.WriteByte(',')
+		}
+
+		fmt.Fprintf(&b, `"[%s]":`, msd.name)
+
+		x := ext.value
+		if x == nil {
+			// Decode the stored wire bytes into a fresh value of the
+			// registered concrete type before rendering it as JSON.
+			x = reflect.New(msd.t.Elem()).Interface()
+			if err := Unmarshal(ext.enc, x.(Message)); err != nil {
+				return nil, err
+			}
+		}
+		d, err := json.Marshal(x)
+		if err != nil {
+			return nil, err
+		}
+		b.Write(d)
+	}
+	b.WriteByte('}')
+	return b.Bytes(), nil
+}
+
+// UnmarshalMessageSetJSON decodes the extension map encoded in buf in JSON format.
+// It is called by generated UnmarshalJSON methods on protocol buffer messages with the message_set_wire_format option.
+// Only the empty object (or empty input) is currently accepted; anything
+// else returns an error.
+func UnmarshalMessageSetJSON(buf []byte, m map[int32]Extension) error {
+	// Common-case fast path.
+	if len(buf) == 0 || bytes.Equal(buf, []byte("{}")) {
+		return nil
+	}
+
+	// This is fairly tricky, and it's not clear that it is needed.
+	return errors.New("TODO: UnmarshalMessageSetJSON not yet implemented")
+}
+
+// A global registry of types that can be used in a MessageSet.
+
+var messageSetMap = make(map[int32]messageSetDesc)
+
+// messageSetDesc describes one registered message-set member type.
+type messageSetDesc struct {
+	t    reflect.Type // pointer to struct
+	name string
+}
+
+// RegisterMessageSetType is called from the generated code.
+// NOTE(review): messageSetMap is written without synchronization —
+// presumably safe because generated code registers from init functions;
+// confirm before calling from anywhere else.
+func RegisterMessageSetType(m Message, fieldNum int32, name string) {
+	messageSetMap[fieldNum] = messageSetDesc{
+		t:    reflect.TypeOf(m),
+		name: name,
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
new file mode 100644
index 000000000000..7c29bccf4b1d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/message_set_test.go
@@ -0,0 +1,66 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "bytes"
+ "testing"
+)
+
+// TestUnmarshalMessageSetWithDuplicate verifies that two message-set items
+// sharing a type ID are merged by concatenating their payloads.
+func TestUnmarshalMessageSetWithDuplicate(t *testing.T) {
+	// Check that a repeated message set entry will be concatenated.
+	in := &MessageSet{
+		Item: []*_MessageSet_Item{
+			{TypeId: Int32(12345), Message: []byte("hoo")},
+			{TypeId: Int32(12345), Message: []byte("hah")},
+		},
+	}
+	b, err := Marshal(in)
+	if err != nil {
+		t.Fatalf("Marshal: %v", err)
+	}
+	t.Logf("Marshaled bytes: %q", b)
+
+	m := make(map[int32]Extension)
+	if err := UnmarshalMessageSet(b, m); err != nil {
+		t.Fatalf("UnmarshalMessageSet: %v", err)
+	}
+	ext, ok := m[12345]
+	if !ok {
+		t.Fatalf("Didn't retrieve extension 12345; map is %v", m)
+	}
+	// Skip wire type/field number and length varints.
+	got := skipVarint(skipVarint(ext.enc))
+	if want := []byte("hoohah"); !bytes.Equal(got, want) {
+		t.Errorf("Combined extension is %q, want %q", got, want)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
new file mode 100644
index 000000000000..93259a3d6571
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_reflect.go
@@ -0,0 +1,479 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build appengine,!appenginevm
+
+// This file contains an implementation of proto field accesses using package reflect.
+// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can
+// be used on App Engine.
+
+package proto
+
+import (
+ "math"
+ "reflect"
+)
+
// A structPointer is a pointer to a struct, boxed in a reflect.Value so that
// this implementation needs no package unsafe (see the appengine build tag).
type structPointer struct {
	v reflect.Value
}

// toStructPointer returns a structPointer equivalent to the given reflect value.
// The reflect value must itself be a pointer to a struct.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer{v}
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p.v.IsNil()
}

// Interface returns the struct pointer as an interface value.
// The type argument is ignored here: the boxed reflect.Value already
// carries its own type (the unsafe implementation needs it instead).
func structPointer_Interface(p structPointer, _ reflect.Type) interface{} {
	return p.v.Interface()
}
+
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by the sequence of field indices
// passed to reflect's FieldByIndex.
type field []int

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return f.Index
}

// invalidField is an invalid field identifier.
var invalidField = field(nil)

// IsValid reports whether the field identifier is valid.
// Only nil-ness is tested; a non-nil empty slice would count as valid.
func (f field) IsValid() bool { return f != nil }
+
// field returns the given field in the struct as a reflect value.
func structPointer_field(p structPointer, f field) reflect.Value {
	// Special case: an extension map entry with a value of type T
	// passes a *T to the struct-handling code with a zero field,
	// expecting that it will be treated as equivalent to *struct{ X T },
	// which has the same memory layout. We have to handle that case
	// specially, because reflect will panic if we call FieldByIndex on a
	// non-struct.
	if f == nil {
		return p.v.Elem()
	}

	return p.v.Elem().FieldByIndex(f)
}

// ifield returns the given field in the struct as an interface value
// (specifically, a pointer to the field, suitable for a type assertion
// by the typed accessors below).
func structPointer_ifield(p structPointer, f field) interface{} {
	return structPointer_field(p, f).Addr().Interface()
}
+
// The accessors below all follow the same pattern: take the address of the
// requested field via structPointer_ifield and type-assert it to the concrete
// pointer type the caller needs.

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return structPointer_ifield(p, f).(*[]byte)
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return structPointer_ifield(p, f).(*[][]byte)
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return structPointer_ifield(p, f).(**bool)
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return structPointer_ifield(p, f).(*bool)
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return structPointer_ifield(p, f).(*[]bool)
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return structPointer_ifield(p, f).(**string)
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return structPointer_ifield(p, f).(*string)
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return structPointer_ifield(p, f).(*[]string)
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return structPointer_ifield(p, f).(*map[int32]Extension)
}

// Map returns the reflect.Value for the address of a map field in the struct.
// typ is unused here; the field's reflect.Value already knows its type.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
	return structPointer_field(p, f).Addr()
}
+
// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	structPointer_field(p, f).Set(q.v)
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return structPointer{structPointer_field(p, f)}
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) structPointerSlice {
	return structPointerSlice{structPointer_field(p, f)}
}

// A structPointerSlice represents the address of a slice of pointers to structs
// (themselves messages or groups). That is, v.Type() is *[]*struct{...}.
type structPointerSlice struct {
	v reflect.Value
}

func (p structPointerSlice) Len() int                  { return p.v.Len() }
func (p structPointerSlice) Index(i int) structPointer { return structPointer{p.v.Index(i)} }

// Append grows the slice in place by writing the appended result back
// through the addressable reflect.Value.
func (p structPointerSlice) Append(q structPointer) {
	p.v.Set(reflect.Append(p.v, q.v))
}
+
// Canonical reflect.Types for the numeric kinds handled by the word32/word64
// helpers below; used for exact type comparisons in the Set functions.
var (
	int32Type   = reflect.TypeOf(int32(0))
	uint32Type  = reflect.TypeOf(uint32(0))
	float32Type = reflect.TypeOf(float32(0))
	int64Type   = reflect.TypeOf(int64(0))
	uint64Type  = reflect.TypeOf(uint64(0))
	float64Type = reflect.TypeOf(float64(0))
)

// A word32 represents a field of type *int32, *uint32, *float32, or *enum.
// That is, v.Type() is *int32, *uint32, *float32, or *enum and v is assignable.
type word32 struct {
	v reflect.Value
}

// IsNil reports whether p is nil.
func word32_IsNil(p word32) bool {
	return p.v.IsNil()
}

// Set sets p to point at a newly allocated word with bits set to x.
// For the three exact numeric types it hands out pointers into the Buffer's
// pre-allocated scratch slices (o.int32s etc.), refilling a slice of
// uint32PoolSize entries when one runs dry, to amortize allocations.
func word32_Set(p word32, o *Buffer, x uint32) {
	t := p.v.Type().Elem()
	switch t {
	case int32Type:
		if len(o.int32s) == 0 {
			o.int32s = make([]int32, uint32PoolSize)
		}
		o.int32s[0] = int32(x)
		p.v.Set(reflect.ValueOf(&o.int32s[0]))
		o.int32s = o.int32s[1:]
		return
	case uint32Type:
		if len(o.uint32s) == 0 {
			o.uint32s = make([]uint32, uint32PoolSize)
		}
		o.uint32s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint32s[0]))
		o.uint32s = o.uint32s[1:]
		return
	case float32Type:
		if len(o.float32s) == 0 {
			o.float32s = make([]float32, uint32PoolSize)
		}
		o.float32s[0] = math.Float32frombits(x)
		p.v.Set(reflect.ValueOf(&o.float32s[0]))
		o.float32s = o.float32s[1:]
		return
	}

	// must be enum: a named type with kind int32, so no pool applies;
	// allocate a fresh value of the enum type and store the bits.
	p.v.Set(reflect.New(t))
	p.v.Elem().SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32_Get(p word32) uint32 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32 returns a reference to a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32{structPointer_field(p, f)}
}
+
// A word32Val represents a field of type int32, uint32, float32, or enum.
// That is, v.Type() is int32, uint32, float32, or enum and v is assignable.
type word32Val struct {
	v reflect.Value
}

// Set sets *p to x, reinterpreting the bits for the field's exact type.
func word32Val_Set(p word32Val, x uint32) {
	switch p.v.Type() {
	case int32Type:
		p.v.SetInt(int64(x))
		return
	case uint32Type:
		p.v.SetUint(uint64(x))
		return
	case float32Type:
		p.v.SetFloat(float64(math.Float32frombits(x)))
		return
	}

	// must be enum (named type with kind int32); sign-extend via int32.
	p.v.SetInt(int64(int32(x)))
}

// Get gets the bits pointed at by p, as a uint32.
func word32Val_Get(p word32Val) uint32 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int32:
		return uint32(elem.Int())
	case reflect.Uint32:
		return uint32(elem.Uint())
	case reflect.Float32:
		return math.Float32bits(float32(elem.Float()))
	}
	panic("unreachable")
}

// Word32Val returns a reference to a int32, uint32, float32, or enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val{structPointer_field(p, f)}
}
+
+// A word32Slice is a slice of 32-bit values.
+// That is, v.Type() is []int32, []uint32, []float32, or []enum.
+type word32Slice struct {
+ v reflect.Value
+}
+
+func (p word32Slice) Append(x uint32) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int32:
+ elem.SetInt(int64(int32(x)))
+ case reflect.Uint32:
+ elem.SetUint(uint64(x))
+ case reflect.Float32:
+ elem.SetFloat(float64(math.Float32frombits(x)))
+ }
+}
+
+func (p word32Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word32Slice) Index(i int) uint32 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int32:
+ return uint32(elem.Int())
+ case reflect.Uint32:
+ return uint32(elem.Uint())
+ case reflect.Float32:
+ return math.Float32bits(float32(elem.Float()))
+ }
+ panic("unreachable")
+}
+
// Word32Slice returns a reference to a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) word32Slice {
	return word32Slice{structPointer_field(p, f)}
}
+
// word64 is like word32 but for 64-bit values.
type word64 struct {
	v reflect.Value
}

// Set sets p to point at a newly allocated word with bits set to x.
// Pointers are carved out of the Buffer's pre-allocated scratch slices
// (o.int64s etc.) to amortize allocations, mirroring word32_Set.
// Unlike word32_Set there is no enum fallback: 64-bit enums do not occur.
func word64_Set(p word64, o *Buffer, x uint64) {
	t := p.v.Type().Elem()
	switch t {
	case int64Type:
		if len(o.int64s) == 0 {
			o.int64s = make([]int64, uint64PoolSize)
		}
		o.int64s[0] = int64(x)
		p.v.Set(reflect.ValueOf(&o.int64s[0]))
		o.int64s = o.int64s[1:]
		return
	case uint64Type:
		if len(o.uint64s) == 0 {
			o.uint64s = make([]uint64, uint64PoolSize)
		}
		o.uint64s[0] = x
		p.v.Set(reflect.ValueOf(&o.uint64s[0]))
		o.uint64s = o.uint64s[1:]
		return
	case float64Type:
		if len(o.float64s) == 0 {
			o.float64s = make([]float64, uint64PoolSize)
		}
		o.float64s[0] = math.Float64frombits(x)
		p.v.Set(reflect.ValueOf(&o.float64s[0]))
		o.float64s = o.float64s[1:]
		return
	}
	panic("unreachable")
}

// IsNil reports whether p is nil.
func word64_IsNil(p word64) bool {
	return p.v.IsNil()
}

// Get gets the bits pointed at by p, as a uint64.
func word64_Get(p word64) uint64 {
	elem := p.v.Elem()
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// Word64 returns a reference to a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64{structPointer_field(p, f)}
}
+
// word64Val is like word32Val but for 64-bit values.
type word64Val struct {
	v reflect.Value
}

// Set sets *p to x, reinterpreting the bits for the field's exact type.
// NOTE(review): o is unused in this implementation; presumably the parameter
// is kept so both build-tagged implementations share one call signature —
// confirm against pointer_unsafe.go's callers.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	switch p.v.Type() {
	case int64Type:
		p.v.SetInt(int64(x))
		return
	case uint64Type:
		p.v.SetUint(x)
		return
	case float64Type:
		p.v.SetFloat(math.Float64frombits(x))
		return
	}
	panic("unreachable")
}

// Get gets the bits pointed at by p, as a uint64.
func word64Val_Get(p word64Val) uint64 {
	elem := p.v
	switch elem.Kind() {
	case reflect.Int64:
		return uint64(elem.Int())
	case reflect.Uint64:
		return elem.Uint()
	case reflect.Float64:
		return math.Float64bits(elem.Float())
	}
	panic("unreachable")
}

// Word64Val returns a reference to a int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val{structPointer_field(p, f)}
}
+
+type word64Slice struct {
+ v reflect.Value
+}
+
+func (p word64Slice) Append(x uint64) {
+ n, m := p.v.Len(), p.v.Cap()
+ if n < m {
+ p.v.SetLen(n + 1)
+ } else {
+ t := p.v.Type().Elem()
+ p.v.Set(reflect.Append(p.v, reflect.Zero(t)))
+ }
+ elem := p.v.Index(n)
+ switch elem.Kind() {
+ case reflect.Int64:
+ elem.SetInt(int64(int64(x)))
+ case reflect.Uint64:
+ elem.SetUint(uint64(x))
+ case reflect.Float64:
+ elem.SetFloat(float64(math.Float64frombits(x)))
+ }
+}
+
+func (p word64Slice) Len() int {
+ return p.v.Len()
+}
+
+func (p word64Slice) Index(i int) uint64 {
+ elem := p.v.Index(i)
+ switch elem.Kind() {
+ case reflect.Int64:
+ return uint64(elem.Int())
+ case reflect.Uint64:
+ return uint64(elem.Uint())
+ case reflect.Float64:
+ return math.Float64bits(float64(elem.Float()))
+ }
+ panic("unreachable")
+}
+
// Word64Slice returns a reference to a []int64, []uint64, or []float64 field in the struct.
func structPointer_Word64Slice(p structPointer, f field) word64Slice {
	return word64Slice{structPointer_field(p, f)}
}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
new file mode 100644
index 000000000000..c52db1ca5ecd
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/pointer_unsafe.go
@@ -0,0 +1,266 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// +build !appengine appenginevm
+
+// This file contains the implementation of the proto field accesses using package unsafe.
+
+package proto
+
+import (
+ "reflect"
+ "unsafe"
+)
+
+// NOTE: These type_Foo functions would more idiomatically be methods,
+// but Go does not allow methods on pointer types, and we must preserve
+// some pointer type for the garbage collector. We use these
+// funcs with clunky names as our poor approximation to methods.
+//
+// An alternative would be
+// type structPointer struct { p unsafe.Pointer }
+// but that does not registerize as well.
+
// A structPointer is a pointer to a struct.
type structPointer unsafe.Pointer

// toStructPointer returns a structPointer equivalent to the given reflect value.
func toStructPointer(v reflect.Value) structPointer {
	return structPointer(unsafe.Pointer(v.Pointer()))
}

// IsNil reports whether p is nil.
func structPointer_IsNil(p structPointer) bool {
	return p == nil
}

// Interface returns the struct pointer, assumed to have element type t,
// as an interface value. Unlike the reflect implementation, the type must
// be supplied: a raw unsafe.Pointer carries no type information.
func structPointer_Interface(p structPointer, t reflect.Type) interface{} {
	return reflect.NewAt(t, unsafe.Pointer(p)).Interface()
}
+
// A field identifies a field in a struct, accessible from a structPointer.
// In this implementation, a field is identified by its byte offset from the start of the struct.
type field uintptr

// toField returns a field equivalent to the given reflect field.
func toField(f *reflect.StructField) field {
	return field(f.Offset)
}

// invalidField is an invalid field identifier.
// All ones is used because offset 0 is a valid field (the first one).
const invalidField = ^field(0)

// IsValid reports whether the field identifier is valid.
func (f field) IsValid() bool {
	return f != ^field(0)
}
+
// The accessors below all compute the field's address as base pointer plus
// byte offset, then cast it to the concrete pointer type the caller needs.

// Bytes returns the address of a []byte field in the struct.
func structPointer_Bytes(p structPointer, f field) *[]byte {
	return (*[]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BytesSlice returns the address of a [][]byte field in the struct.
func structPointer_BytesSlice(p structPointer, f field) *[][]byte {
	return (*[][]byte)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Bool returns the address of a *bool field in the struct.
func structPointer_Bool(p structPointer, f field) **bool {
	return (**bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolVal returns the address of a bool field in the struct.
func structPointer_BoolVal(p structPointer, f field) *bool {
	return (*bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// BoolSlice returns the address of a []bool field in the struct.
func structPointer_BoolSlice(p structPointer, f field) *[]bool {
	return (*[]bool)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// String returns the address of a *string field in the struct.
func structPointer_String(p structPointer, f field) **string {
	return (**string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringVal returns the address of a string field in the struct.
func structPointer_StringVal(p structPointer, f field) *string {
	return (*string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StringSlice returns the address of a []string field in the struct.
func structPointer_StringSlice(p structPointer, f field) *[]string {
	return (*[]string)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// ExtMap returns the address of an extension map field in the struct.
func structPointer_ExtMap(p structPointer, f field) *map[int32]Extension {
	return (*map[int32]Extension)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// Map returns the reflect.Value for the address of a map field in the struct.
func structPointer_Map(p structPointer, f field, typ reflect.Type) reflect.Value {
	return reflect.NewAt(typ, unsafe.Pointer(uintptr(p)+uintptr(f)))
}

// SetStructPointer writes a *struct field in the struct.
func structPointer_SetStructPointer(p structPointer, f field, q structPointer) {
	*(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f))) = q
}

// GetStructPointer reads a *struct field in the struct.
func structPointer_GetStructPointer(p structPointer, f field) structPointer {
	return *(*structPointer)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}

// StructPointerSlice the address of a []*struct field in the struct.
func structPointer_StructPointerSlice(p structPointer, f field) *structPointerSlice {
	return (*structPointerSlice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
+
// A structPointerSlice represents a slice of pointers to structs (themselves submessages or groups).
// Pointer-receiver methods let Append write the grown slice back in place.
type structPointerSlice []structPointer

func (v *structPointerSlice) Len() int                  { return len(*v) }
func (v *structPointerSlice) Index(i int) structPointer { return (*v)[i] }
func (v *structPointerSlice) Append(p structPointer)    { *v = append(*v, p) }
+
// A word32 is the address of a "pointer to 32-bit value" field.
// All four supported pointer types (*int32, *uint32, *float32, *enum) have
// the same memory layout, so a single **uint32 view suffices.
type word32 **uint32

// IsNil reports whether *v is nil.
func word32_IsNil(p word32) bool {
	return *p == nil
}

// Set sets *v to point at a newly allocated word set to x.
// The word is carved out of the Buffer's pre-allocated scratch slice to
// amortize allocations (see uint32PoolSize).
func word32_Set(p word32, o *Buffer, x uint32) {
	if len(o.uint32s) == 0 {
		o.uint32s = make([]uint32, uint32PoolSize)
	}
	o.uint32s[0] = x
	*p = &o.uint32s[0]
	o.uint32s = o.uint32s[1:]
}

// Get gets the value pointed at by *v.
func word32_Get(p word32) uint32 {
	return **p
}

// Word32 returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32(p structPointer, f field) word32 {
	return word32((**uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
+
// A word32Val is the address of a 32-bit value field.
type word32Val *uint32

// Set sets *p to x.
func word32Val_Set(p word32Val, x uint32) {
	*p = x
}

// Get gets the value pointed at by p.
func word32Val_Get(p word32Val) uint32 {
	return *p
}

// Word32Val returns the address of a *int32, *uint32, *float32, or *enum field in the struct.
func structPointer_Word32Val(p structPointer, f field) word32Val {
	return word32Val((*uint32)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
+
// A word32Slice is a slice of 32-bit values, viewed through their common
// memory layout regardless of the declared element type.
type word32Slice []uint32

func (v *word32Slice) Append(x uint32)    { *v = append(*v, x) }
func (v *word32Slice) Len() int           { return len(*v) }
func (v *word32Slice) Index(i int) uint32 { return (*v)[i] }

// Word32Slice returns the address of a []int32, []uint32, []float32, or []enum field in the struct.
func structPointer_Word32Slice(p structPointer, f field) *word32Slice {
	return (*word32Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
+
// word64 is like word32 but for 64-bit values.
type word64 **uint64

// Set sets *p to point at a newly allocated word set to x,
// carved out of the Buffer's scratch slice (see uint64PoolSize).
func word64_Set(p word64, o *Buffer, x uint64) {
	if len(o.uint64s) == 0 {
		o.uint64s = make([]uint64, uint64PoolSize)
	}
	o.uint64s[0] = x
	*p = &o.uint64s[0]
	o.uint64s = o.uint64s[1:]
}

// IsNil reports whether *p is nil.
func word64_IsNil(p word64) bool {
	return *p == nil
}

// Get gets the value pointed at by *p.
func word64_Get(p word64) uint64 {
	return **p
}

// Word64 returns the address of a *int64, *uint64, or *float64 field in the struct.
func structPointer_Word64(p structPointer, f field) word64 {
	return word64((**uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
+
// word64Val is like word32Val but for 64-bit values.
type word64Val *uint64

// Set sets *p to x. o is unused; the parameter keeps the signature in step
// with the reflect implementation in pointer_reflect.go.
func word64Val_Set(p word64Val, o *Buffer, x uint64) {
	*p = x
}

// Get gets the value pointed at by p.
func word64Val_Get(p word64Val) uint64 {
	return *p
}

// Word64Val returns the address of a int64, uint64, or float64 field in the struct.
func structPointer_Word64Val(p structPointer, f field) word64Val {
	return word64Val((*uint64)(unsafe.Pointer(uintptr(p) + uintptr(f))))
}
+
// word64Slice is like word32Slice but for 64-bit values.
type word64Slice []uint64

func (v *word64Slice) Append(x uint64)    { *v = append(*v, x) }
func (v *word64Slice) Len() int           { return len(*v) }
func (v *word64Slice) Index(i int) uint64 { return (*v)[i] }

// Word64Slice returns the address of a []int64, []uint64, or []float64 field in the struct.
func structPointer_Word64Slice(p structPointer, f field) *word64Slice {
	return (*word64Slice)(unsafe.Pointer(uintptr(p) + uintptr(f)))
}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
new file mode 100644
index 000000000000..730a59579735
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/properties.go
@@ -0,0 +1,724 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+/*
+ * Routines for encoding data into the wire format for protocol buffers.
+ */
+
+import (
+ "fmt"
+ "os"
+ "reflect"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+)
+
// debug is a package-level switch; its consumers are elsewhere in the package.
const debug bool = false

// Constants that identify the encoding of a value on the wire.
const (
	WireVarint     = 0
	WireFixed64    = 1
	WireBytes      = 2
	WireStartGroup = 3
	WireEndGroup   = 4
	WireFixed32    = 5
)

const startSize = 10 // initial slice/string sizes

// Encoders are defined in encode.go
// An encoder outputs the full representation of a field, including its
// tag and encoder type.
type encoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueEncoder encodes a single integer in a particular encoding.
type valueEncoder func(o *Buffer, x uint64) error

// Sizers are defined in encode.go
// A sizer returns the encoded size of a field, including its tag and encoder
// type.
type sizer func(prop *Properties, base structPointer) int

// A valueSizer returns the encoded size of a single integer in a particular
// encoding.
type valueSizer func(x uint64) int

// Decoders are defined in decode.go
// A decoder creates a value from its wire representation.
// Unrecognized subelements are saved in unrec.
type decoder func(p *Buffer, prop *Properties, base structPointer) error

// A valueDecoder decodes a single integer in a particular encoding.
type valueDecoder func(o *Buffer) (x uint64, err error)
+
// tagMap is an optimization over map[int]int for typical protocol buffer
// use-cases: tags are usually small, so tags below tagMapFastLimit get a
// direct slice lookup and only large (or non-positive) ones use the map.
type tagMap struct {
	fastTags []int
	slowTags map[int]int
}

// tagMapFastLimit is the upper bound on the tag number that will be stored in
// the tagMap slice rather than its map.
const tagMapFastLimit = 1024

// get returns the field index recorded for tag t and whether one is present.
func (p *tagMap) get(t int) (int, bool) {
	if t <= 0 || t >= tagMapFastLimit {
		fi, ok := p.slowTags[t]
		return fi, ok
	}
	if t >= len(p.fastTags) {
		return 0, false
	}
	// A -1 sentinel in the fast slice means "no entry for this tag".
	fi := p.fastTags[t]
	return fi, fi >= 0
}

// put records fi as the field index for tag t.
func (p *tagMap) put(t int, fi int) {
	if t <= 0 || t >= tagMapFastLimit {
		if p.slowTags == nil {
			p.slowTags = make(map[int]int)
		}
		p.slowTags[t] = fi
		return
	}
	// Grow the fast slice up to index t, filling gaps with the -1 sentinel.
	for len(p.fastTags) <= t {
		p.fastTags = append(p.fastTags, -1)
	}
	p.fastTags[t] = fi
}
+
// StructProperties represents properties for all the fields of a struct.
// decoderTags and decoderOrigNames should only be used by the decoder.
type StructProperties struct {
	Prop             []*Properties  // properties for each field
	reqCount         int            // required count
	decoderTags      tagMap         // map from proto tag to struct field number
	decoderOrigNames map[string]int // map from original name to struct field number
	order            []int          // list of struct field numbers in tag order
	unrecField       field          // field id of the XXX_unrecognized []byte field
	extendable       bool           // is this an extendable proto
}

// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec.
// See encode.go, (*Buffer).enc_struct.
// Note: sorting permutes only the order slice; Prop itself keeps struct-field order.

func (sp *StructProperties) Len() int { return len(sp.order) }
func (sp *StructProperties) Less(i, j int) bool {
	return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag
}
func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] }
+
// Properties represents the protocol-specific behavior of a single struct field.
// It is populated from the field's "protobuf" struct tag by Parse and
// round-tripped by String.
type Properties struct {
	Name     string // name of the field, for error messages
	OrigName string // original name before protocol compiler (always set)
	Wire     string
	WireType int
	Tag      int
	Required bool
	Optional bool
	Repeated bool
	Packed   bool   // relevant for repeated primitives only
	Enum     string // set for enum types only
	proto3   bool   // whether this is known to be a proto3 field; set for []byte only

	Default    string // default value
	HasDefault bool   // whether an explicit default was provided
	def_uint64 uint64

	enc           encoder
	valEnc        valueEncoder // set for bool and numeric types only
	field         field
	tagcode       []byte // encoding of EncodeVarint((Tag<<3)|WireType)
	tagbuf        [8]byte
	stype         reflect.Type      // set for struct types only
	sprop         *StructProperties // set for struct types only
	isMarshaler   bool
	isUnmarshaler bool

	mtype    reflect.Type // set for map types only
	mkeyprop *Properties  // set for map types only
	mvalprop *Properties  // set for map types only

	size    sizer
	valSize valueSizer // set for bool and numeric types only

	dec    decoder
	valDec valueDecoder // set for bool and numeric types only

	// If this is a packable field, this will be the decoder for the packed version of the field.
	packedDec decoder
}
+
+// String formats the properties in the protobuf struct field tag style.
+func (p *Properties) String() string {
+ s := p.Wire
+ s = ","
+ s += strconv.Itoa(p.Tag)
+ if p.Required {
+ s += ",req"
+ }
+ if p.Optional {
+ s += ",opt"
+ }
+ if p.Repeated {
+ s += ",rep"
+ }
+ if p.Packed {
+ s += ",packed"
+ }
+ if p.OrigName != p.Name {
+ s += ",name=" + p.OrigName
+ }
+ if p.proto3 {
+ s += ",proto3"
+ }
+ if len(p.Enum) > 0 {
+ s += ",enum=" + p.Enum
+ }
+ if p.HasDefault {
+ s += ",def=" + p.Default
+ }
+ return s
+}
+
// Parse populates p by parsing a string in the protobuf struct field tag style.
// Malformed tags are reported to stderr and otherwise ignored; Parse never
// returns an error.
func (p *Properties) Parse(s string) {
	// "bytes,49,opt,name=foo,def=hello!"
	fields := strings.Split(s, ",") // breaks def=, but handled below.
	if len(fields) < 2 {
		fmt.Fprintf(os.Stderr, "proto: tag has too few fields: %q\n", s)
		return
	}

	// Field 0 is the wire encoding; it selects the value codec functions.
	p.Wire = fields[0]
	switch p.Wire {
	case "varint":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeVarint
		p.valDec = (*Buffer).DecodeVarint
		p.valSize = sizeVarint
	case "fixed32":
		p.WireType = WireFixed32
		p.valEnc = (*Buffer).EncodeFixed32
		p.valDec = (*Buffer).DecodeFixed32
		p.valSize = sizeFixed32
	case "fixed64":
		p.WireType = WireFixed64
		p.valEnc = (*Buffer).EncodeFixed64
		p.valDec = (*Buffer).DecodeFixed64
		p.valSize = sizeFixed64
	case "zigzag32":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeZigzag32
		p.valDec = (*Buffer).DecodeZigzag32
		p.valSize = sizeZigzag32
	case "zigzag64":
		p.WireType = WireVarint
		p.valEnc = (*Buffer).EncodeZigzag64
		p.valDec = (*Buffer).DecodeZigzag64
		p.valSize = sizeZigzag64
	case "bytes", "group":
		p.WireType = WireBytes
		// no numeric converter for non-numeric types
	default:
		fmt.Fprintf(os.Stderr, "proto: tag has unknown wire type: %q\n", s)
		return
	}

	// Field 1 is the tag number. On a malformed number we return with p
	// only partially initialized, matching the lenient handling above.
	var err error
	p.Tag, err = strconv.Atoi(fields[1])
	if err != nil {
		return
	}

	// Remaining fields are option flags and key=value pairs, in any order
	// except that def= (when present) is always last.
	for i := 2; i < len(fields); i++ {
		f := fields[i]
		switch {
		case f == "req":
			p.Required = true
		case f == "opt":
			p.Optional = true
		case f == "rep":
			p.Repeated = true
		case f == "packed":
			p.Packed = true
		case strings.HasPrefix(f, "name="):
			p.OrigName = f[5:]
		case strings.HasPrefix(f, "enum="):
			p.Enum = f[5:]
		case f == "proto3":
			p.proto3 = true
		case strings.HasPrefix(f, "def="):
			p.HasDefault = true
			p.Default = f[4:] // rest of string
			if i+1 < len(fields) {
				// Commas aren't escaped, and def is always last.
				// Re-join everything the initial Split broke apart.
				p.Default += "," + strings.Join(fields[i+1:], ",")
				break
			}
		}
	}
}
+
+func logNoSliceEnc(t1, t2 reflect.Type) { // stderr warning: no encoder exists for a []t2 field in t1
+	fmt.Fprintf(os.Stderr, "proto: no slice oenc for %T = []%T\n", t1, t2)
+}
+
+var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() // reflect.Type of the Message interface
+
+// Initialize the fields for encoding and decoding.
+func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lockGetProp bool) {
+	p.enc = nil // reset; selected below from the field's Go kind
+	p.dec = nil
+	p.size = nil
+
+	switch t1 := typ; t1.Kind() {
+	default:
+		fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1)
+
+	// proto3 scalar types
+
+	case reflect.Bool:
+		p.enc = (*Buffer).enc_proto3_bool
+		p.dec = (*Buffer).dec_proto3_bool
+		p.size = size_proto3_bool
+	case reflect.Int32:
+		p.enc = (*Buffer).enc_proto3_int32
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_int32
+	case reflect.Uint32:
+		p.enc = (*Buffer).enc_proto3_uint32
+		p.dec = (*Buffer).dec_proto3_int32 // can reuse
+		p.size = size_proto3_uint32
+	case reflect.Int64, reflect.Uint64:
+		p.enc = (*Buffer).enc_proto3_int64
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.Float32:
+		p.enc = (*Buffer).enc_proto3_uint32 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int32
+		p.size = size_proto3_uint32
+	case reflect.Float64:
+		p.enc = (*Buffer).enc_proto3_int64 // can just treat them as bits
+		p.dec = (*Buffer).dec_proto3_int64
+		p.size = size_proto3_int64
+	case reflect.String:
+		p.enc = (*Buffer).enc_proto3_string
+		p.dec = (*Buffer).dec_proto3_string
+		p.size = size_proto3_string
+
+	case reflect.Ptr: // pointer fields: messages and (presumably) proto2-style optional scalars
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			fmt.Fprintf(os.Stderr, "proto: no encoder function for %v -> %v\n", t1, t2)
+			break
+		case reflect.Bool:
+			p.enc = (*Buffer).enc_bool
+			p.dec = (*Buffer).dec_bool
+			p.size = size_bool
+		case reflect.Int32:
+			p.enc = (*Buffer).enc_int32
+			p.dec = (*Buffer).dec_int32
+			p.size = size_int32
+		case reflect.Uint32:
+			p.enc = (*Buffer).enc_uint32
+			p.dec = (*Buffer).dec_int32 // can reuse
+			p.size = size_uint32
+		case reflect.Int64, reflect.Uint64:
+			p.enc = (*Buffer).enc_int64
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.Float32:
+			p.enc = (*Buffer).enc_uint32 // can just treat them as bits
+			p.dec = (*Buffer).dec_int32
+			p.size = size_uint32
+		case reflect.Float64:
+			p.enc = (*Buffer).enc_int64 // can just treat them as bits
+			p.dec = (*Buffer).dec_int64
+			p.size = size_int64
+		case reflect.String:
+			p.enc = (*Buffer).enc_string
+			p.dec = (*Buffer).dec_string
+			p.size = size_string
+		case reflect.Struct:
+			p.stype = t1.Elem()
+			p.isMarshaler = isMarshaler(t1)
+			p.isUnmarshaler = isUnmarshaler(t1)
+			if p.Wire == "bytes" { // length-delimited message vs. legacy group encoding
+				p.enc = (*Buffer).enc_struct_message
+				p.dec = (*Buffer).dec_struct_message
+				p.size = size_struct_message
+			} else {
+				p.enc = (*Buffer).enc_struct_group
+				p.dec = (*Buffer).dec_struct_group
+				p.size = size_struct_group
+			}
+		}
+
+	case reflect.Slice: // repeated fields and []byte
+		switch t2 := t1.Elem(); t2.Kind() {
+		default:
+			logNoSliceEnc(t1, t2)
+			break
+		case reflect.Bool:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_bool
+				p.size = size_slice_packed_bool
+			} else {
+				p.enc = (*Buffer).enc_slice_bool
+				p.size = size_slice_bool
+			}
+			p.dec = (*Buffer).dec_slice_bool
+			p.packedDec = (*Buffer).dec_slice_packed_bool
+		case reflect.Int32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int32
+				p.size = size_slice_packed_int32
+			} else {
+				p.enc = (*Buffer).enc_slice_int32
+				p.size = size_slice_int32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Uint32:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_uint32
+				p.size = size_slice_packed_uint32
+			} else {
+				p.enc = (*Buffer).enc_slice_uint32
+				p.size = size_slice_uint32
+			}
+			p.dec = (*Buffer).dec_slice_int32
+			p.packedDec = (*Buffer).dec_slice_packed_int32
+		case reflect.Int64, reflect.Uint64:
+			if p.Packed {
+				p.enc = (*Buffer).enc_slice_packed_int64
+				p.size = size_slice_packed_int64
+			} else {
+				p.enc = (*Buffer).enc_slice_int64
+				p.size = size_slice_int64
+			}
+			p.dec = (*Buffer).dec_slice_int64
+			p.packedDec = (*Buffer).dec_slice_packed_int64
+		case reflect.Uint8: // []byte
+			p.enc = (*Buffer).enc_slice_byte
+			p.dec = (*Buffer).dec_slice_byte
+			p.size = size_slice_byte
+			if p.proto3 { // proto3 omits empty bytes fields
+				p.enc = (*Buffer).enc_proto3_slice_byte
+				p.size = size_proto3_slice_byte
+			}
+		case reflect.Float32, reflect.Float64:
+			switch t2.Bits() {
+			case 32:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_uint32
+					p.size = size_slice_packed_uint32
+				} else {
+					p.enc = (*Buffer).enc_slice_uint32
+					p.size = size_slice_uint32
+				}
+				p.dec = (*Buffer).dec_slice_int32
+				p.packedDec = (*Buffer).dec_slice_packed_int32
+			case 64:
+				// can just treat them as bits
+				if p.Packed {
+					p.enc = (*Buffer).enc_slice_packed_int64
+					p.size = size_slice_packed_int64
+				} else {
+					p.enc = (*Buffer).enc_slice_int64
+					p.size = size_slice_int64
+				}
+				p.dec = (*Buffer).dec_slice_int64
+				p.packedDec = (*Buffer).dec_slice_packed_int64
+			default:
+				logNoSliceEnc(t1, t2)
+				break
+			}
+		case reflect.String:
+			p.enc = (*Buffer).enc_slice_string
+			p.dec = (*Buffer).dec_slice_string
+			p.size = size_slice_string
+		case reflect.Ptr: // repeated message fields
+			switch t3 := t2.Elem(); t3.Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no ptr oenc for %T -> %T -> %T\n", t1, t2, t3)
+				break
+			case reflect.Struct:
+				p.stype = t2.Elem()
+				p.isMarshaler = isMarshaler(t2)
+				p.isUnmarshaler = isUnmarshaler(t2)
+				if p.Wire == "bytes" {
+					p.enc = (*Buffer).enc_slice_struct_message
+					p.dec = (*Buffer).dec_slice_struct_message
+					p.size = size_slice_struct_message
+				} else {
+					p.enc = (*Buffer).enc_slice_struct_group
+					p.dec = (*Buffer).dec_slice_struct_group
+					p.size = size_slice_struct_group
+				}
+			}
+		case reflect.Slice: // [][]byte
+			switch t2.Elem().Kind() {
+			default:
+				fmt.Fprintf(os.Stderr, "proto: no slice elem oenc for %T -> %T -> %T\n", t1, t2, t2.Elem())
+				break
+			case reflect.Uint8:
+				p.enc = (*Buffer).enc_slice_slice_byte
+				p.dec = (*Buffer).dec_slice_slice_byte
+				p.size = size_slice_slice_byte
+			}
+		}
+
+	case reflect.Map: // map fields
+		p.enc = (*Buffer).enc_new_map
+		p.dec = (*Buffer).dec_new_map
+		p.size = size_new_map
+
+		p.mtype = t1
+		p.mkeyprop = &Properties{}
+		p.mkeyprop.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp)
+		p.mvalprop = &Properties{}
+		vtype := p.mtype.Elem()
+		if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice {
+			// The value type is not a message (*T) or bytes ([]byte),
+			// so we need encoders for the pointer to this type.
+			vtype = reflect.PtrTo(vtype)
+		}
+		p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp)
+	}
+
+	// precalculate tag code
+	wire := p.WireType
+	if p.Packed {
+		wire = WireBytes // packed fields are encoded as a length-delimited blob
+	}
+	x := uint32(p.Tag)<<3 | uint32(wire)
+	i := 0
+	for i = 0; x > 127; i++ { // varint-encode (tag<<3 | wire) into tagbuf
+		p.tagbuf[i] = 0x80 | uint8(x&0x7F)
+		x >>= 7
+	}
+	p.tagbuf[i] = uint8(x)
+	p.tagcode = p.tagbuf[0 : i+1]
+
+	if p.stype != nil { // message-typed field: resolve the sub-message's properties
+		if lockGetProp {
+			p.sprop = GetProperties(p.stype)
+		} else {
+			p.sprop = getPropertiesLocked(p.stype)
+		}
+	}
+}
+
+var (
+	marshalerType   = reflect.TypeOf((*Marshaler)(nil)).Elem()
+	unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem()
+)
+
+// isMarshaler reports whether type t implements Marshaler.
+func isMarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isMarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isMarshaler")
+	}
+	return t.Implements(marshalerType) // checks the pointer type's method set
+}
+
+// isUnmarshaler reports whether type t implements Unmarshaler.
+func isUnmarshaler(t reflect.Type) bool {
+	// We're checking for (likely) pointer-receiver methods
+	// so if t is not a pointer, something is very wrong.
+	// The calls above only invoke isUnmarshaler on pointer types.
+	if t.Kind() != reflect.Ptr {
+		panic("proto: misuse of isUnmarshaler")
+	}
+	return t.Implements(unmarshalerType) // checks the pointer type's method set
+}
+
+// Init populates the properties from a protocol buffer struct tag.
+func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) {
+	p.init(typ, name, tag, f, true) // true: sub-properties may be fetched via GetProperties (takes the package mutex)
+}
+
+func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) {
+	// "bytes,49,opt,def=hello!"
+	p.Name = name
+	p.OrigName = name
+	if f != nil {
+		p.field = toField(f) // record the field's location for later access
+	}
+	if tag == "" {
+		return // untagged field: leave encoders/decoders unset
+	}
+	p.Parse(tag)
+	p.setEncAndDec(typ, f, lockGetProp)
+}
+
+var (
+	mutex         sync.Mutex // guards propertiesMap
+	propertiesMap = make(map[reflect.Type]*StructProperties)
+)
+
+// GetProperties returns the list of properties for the type represented by t.
+// t must represent a generated struct type of a protocol message.
+func GetProperties(t reflect.Type) *StructProperties {
+	if t.Kind() != reflect.Struct {
+		panic("proto: type must have kind struct")
+	}
+	mutex.Lock()
+	sprop := getPropertiesLocked(t) // consult or fill the cache under the lock
+	mutex.Unlock()
+	return sprop
+}
+
+// getPropertiesLocked requires that mutex is held.
+func getPropertiesLocked(t reflect.Type) *StructProperties {
+	if prop, ok := propertiesMap[t]; ok { // cache hit
+		if collectStats {
+			stats.Chit++
+		}
+		return prop
+	}
+	if collectStats {
+		stats.Cmiss++
+	}
+
+	prop := new(StructProperties)
+	// in case of recursive protos, fill this in now.
+	propertiesMap[t] = prop
+
+	// build properties
+	prop.extendable = reflect.PtrTo(t).Implements(extendableProtoType)
+	prop.unrecField = invalidField
+	prop.Prop = make([]*Properties, t.NumField())
+	prop.order = make([]int, t.NumField())
+
+	for i := 0; i < t.NumField(); i++ {
+		f := t.Field(i)
+		p := new(Properties)
+		name := f.Name
+		p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) // false: mutex is already held
+
+		if f.Name == "XXX_extensions" { // special case
+			p.enc = (*Buffer).enc_map
+			p.dec = nil // not needed
+			p.size = size_map
+		}
+		if f.Name == "XXX_unrecognized" { // special case
+			prop.unrecField = toField(&f)
+		}
+		prop.Prop[i] = p
+		prop.order[i] = i
+		if debug {
+			print(i, " ", f.Name, " ", t.String(), " ")
+			if p.Tag > 0 {
+				print(p.String())
+			}
+			print("\n")
+		}
+		if p.enc == nil && !strings.HasPrefix(f.Name, "XXX_") {
+			fmt.Fprintln(os.Stderr, "proto: no encoder for", f.Name, f.Type.String(), "[GetProperties]")
+		}
+	}
+
+	// Re-order prop.order.
+	sort.Sort(prop) // ordering is defined by StructProperties' sort.Interface (declared elsewhere)
+
+	// build required counts
+	// build tags
+	reqCount := 0
+	prop.decoderOrigNames = make(map[string]int)
+	for i, p := range prop.Prop {
+		if strings.HasPrefix(p.Name, "XXX_") {
+			// Internal fields should not appear in tags/origNames maps.
+			// They are handled specially when encoding and decoding.
+			continue
+		}
+		if p.Required {
+			reqCount++
+		}
+		prop.decoderTags.put(p.Tag, i)
+		prop.decoderOrigNames[p.OrigName] = i
+	}
+	prop.reqCount = reqCount
+
+	return prop
+}
+
+// Return the Properties object for the x[0]'th field of the structure.
+func propByIndex(t reflect.Type, x []int) *Properties {
+	if len(x) != 1 { // multi-index (embedded struct) fields are not supported
+		fmt.Fprintf(os.Stderr, "proto: field index dimension %d (not 1) for type %s\n", len(x), t)
+		return nil
+	}
+	prop := GetProperties(t)
+	return prop.Prop[x[0]]
+}
+
+// Get the address and type of a pointer to a struct from an interface.
+func getbase(pb Message) (t reflect.Type, b structPointer, err error) {
+	if pb == nil { // NOTE: an interface holding a typed nil pointer is non-nil and passes this check
+		err = ErrNil
+		return
+	}
+	// get the reflect type of the pointer to the struct.
+	t = reflect.TypeOf(pb)
+	// get the address of the struct.
+	value := reflect.ValueOf(pb)
+	b = toStructPointer(value)
+	return
+}
+
+// A global registry of enum types.
+// The generated code will register the generated maps by calling RegisterEnum.
+
+var enumValueMaps = make(map[string]map[string]int32) // enum type name -> (value name -> numeric value)
+
+// RegisterEnum is called from the generated code to install the enum descriptor
+// maps into the global table to aid parsing text format protocol buffers.
+func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) {
+	if _, ok := enumValueMaps[typeName]; ok {
+		panic("proto: duplicate enum registered: " + typeName)
+	}
+	enumValueMaps[typeName] = valueMap // unusedNameMap is accepted for generated-code compatibility but never read
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile
new file mode 100644
index 000000000000..75144b582e6d
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/Makefile
@@ -0,0 +1,44 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2014 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include ../../Make.protobuf # shared protoc invocation rules
+
+all: regenerate # default target: rebuild the generated Go code
+
+regenerate:
+	rm -f proto3.pb.go
+	make proto3.pb.go
+
+# The following rules are just aids to development. Not needed for typical testing.
+
+diff: regenerate
+	git diff proto3.pb.go
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
new file mode 100644
index 000000000000..3e327ded1d2c
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_proto/proto3.proto
@@ -0,0 +1,58 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+syntax = "proto3"; // proto3 syntax: fields below carry no required/optional labels
+
+package proto3_proto;
+
+message Message {
+  enum Humour {
+    UNKNOWN = 0;
+    PUNS = 1;
+    SLAPSTICK = 2;
+    BILL_BAILEY = 3;
+  }
+
+  string name = 1;
+  Humour hilarity = 2;
+  uint32 height_in_cm = 3;
+  bytes data = 4;
+  int64 result_count = 7;
+  bool true_scotsman = 8;
+  float score = 9;
+
+  repeated uint64 key = 5;
+  Nested nested = 6;
+}
+
+message Nested {
+  string bunny = 1;
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
new file mode 100644
index 000000000000..d4c96a9e730f
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/proto3_test.go
@@ -0,0 +1,93 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2014 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "testing"
+
+ pb "./proto3_proto"
+ "github.com/golang/protobuf/proto"
+)
+
+func TestProto3ZeroValues(t *testing.T) { // proto3 zero values must serialize to an empty encoding
+	tests := []struct {
+		desc string
+		m    proto.Message
+	}{
+		{"zero message", &pb.Message{}},
+		{"empty bytes field", &pb.Message{Data: []byte{}}},
+	}
+	for _, test := range tests {
+		b, err := proto.Marshal(test.m)
+		if err != nil {
+			t.Errorf("%s: proto.Marshal: %v", test.desc, err)
+			continue
+		}
+		if len(b) > 0 {
+			t.Errorf("%s: Encoding is non-empty: %q", test.desc, b)
+		}
+	}
+}
+
+func TestRoundTripProto3(t *testing.T) { // Marshal then Unmarshal must reproduce an Equal message
+	m := &pb.Message{
+		Name:         "David",          // (2 | 1<<3): 0x0a 0x05 "David"
+		Hilarity:     pb.Message_PUNS,  // (0 | 2<<3): 0x10 0x01
+		HeightInCm:   178,              // (0 | 3<<3): 0x18 0xb2 0x01
+		Data:         []byte("roboto"), // (2 | 4<<3): 0x20 0x06 "roboto"
+		ResultCount:  47,               // (0 | 7<<3): 0x38 0x2f
+		TrueScotsman: true,             // (0 | 8<<3): 0x40 0x01
+		Score:        8.1,              // (5 | 9<<3): 0x4d <8.1>
+
+		Key: []uint64{1, 0xdeadbeef},
+		Nested: &pb.Nested{
+			Bunny: "Monty",
+		},
+	}
+	t.Logf(" m: %v", m)
+
+	b, err := proto.Marshal(m)
+	if err != nil {
+		t.Fatalf("proto.Marshal: %v", err)
+	}
+	t.Logf(" b: %q", b)
+
+	m2 := new(pb.Message)
+	if err := proto.Unmarshal(b, m2); err != nil {
+		t.Fatalf("proto.Unmarshal: %v", err)
+	}
+	t.Logf("m2: %v", m2)
+
+	if !proto.Equal(m, m2) {
+		t.Errorf("proto.Equal returned false:\n m: %v\nm2: %v", m, m2)
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
new file mode 100644
index 000000000000..a2729c39a1b8
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size2_test.go
@@ -0,0 +1,63 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+import (
+ "testing"
+)
+
+// This is a separate file and package from size_test.go because that one uses
+// generated messages and thus may not be in package proto without having a circular
+// dependency, whereas this file tests unexported details of size.go.
+
+func TestVarintSize(t *testing.T) {
+	// Check the edge cases carefully.
+	testCases := []struct {
+		n    uint64
+		size int
+	}{
+		{0, 1},
+		{1, 1},
+		{127, 1},   // largest 1-byte value
+		{128, 2},   // smallest 2-byte value
+		{16383, 2}, // largest 2-byte value
+		{16384, 3}, // smallest 3-byte value
+		{1<<63 - 1, 9},
+		{1 << 63, 10}, // high bit set: maximum varint width
+	}
+	for _, tc := range testCases {
+		size := sizeVarint(tc.n)
+		if size != tc.size {
+			t.Errorf("sizeVarint(%d) = %d, want %d", tc.n, size, tc.size)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
new file mode 100644
index 000000000000..e5f92d6b90a9
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/size_test.go
@@ -0,0 +1,135 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "log"
+ "testing"
+
+ proto3pb "./proto3_proto"
+ pb "./testdata"
+ . "github.com/golang/protobuf/proto"
+)
+
+var messageWithExtension1 = &pb.MyMessage{Count: Int32(7)}
+
+// messageWithExtension2 is in equal_test.go.
+var messageWithExtension3 = &pb.MyMessage{Count: Int32(8)}
+
+func init() {
+	if err := SetExtension(messageWithExtension1, pb.E_Ext_More, &pb.Ext{Data: String("Abbott")}); err != nil {
+		log.Panicf("SetExtension: %v", err)
+	}
+	if err := SetExtension(messageWithExtension3, pb.E_Ext_More, &pb.Ext{Data: String("Costello")}); err != nil {
+		log.Panicf("SetExtension: %v", err)
+	}
+
+	// Force messageWithExtension3 to have the extension encoded.
+	Marshal(messageWithExtension3) // result and error intentionally discarded; only the encoding side effect matters
+
+}
+
+var SizeTests = []struct { // table-driven cases consumed by TestSize below
+	desc string
+	pb   Message
+}{
+	{"empty", &pb.OtherMessage{}},
+	// Basic types.
+	{"bool", &pb.Defaults{F_Bool: Bool(true)}},
+	{"int32", &pb.Defaults{F_Int32: Int32(12)}},
+	{"negative int32", &pb.Defaults{F_Int32: Int32(-1)}},
+	{"small int64", &pb.Defaults{F_Int64: Int64(1)}},
+	{"big int64", &pb.Defaults{F_Int64: Int64(1 << 20)}},
+	{"negative int64", &pb.Defaults{F_Int64: Int64(-1)}},
+	{"fixed32", &pb.Defaults{F_Fixed32: Uint32(71)}},
+	{"fixed64", &pb.Defaults{F_Fixed64: Uint64(72)}},
+	{"uint32", &pb.Defaults{F_Uint32: Uint32(123)}},
+	{"uint64", &pb.Defaults{F_Uint64: Uint64(124)}},
+	{"float", &pb.Defaults{F_Float: Float32(12.6)}},
+	{"double", &pb.Defaults{F_Double: Float64(13.9)}},
+	{"string", &pb.Defaults{F_String: String("niles")}},
+	{"bytes", &pb.Defaults{F_Bytes: []byte("wowsa")}},
+	{"bytes, empty", &pb.Defaults{F_Bytes: []byte{}}},
+	{"sint32", &pb.Defaults{F_Sint32: Int32(65)}},
+	{"sint64", &pb.Defaults{F_Sint64: Int64(67)}},
+	{"enum", &pb.Defaults{F_Enum: pb.Defaults_BLUE.Enum()}},
+	// Repeated.
+	{"empty repeated bool", &pb.MoreRepeated{Bools: []bool{}}},
+	{"repeated bool", &pb.MoreRepeated{Bools: []bool{false, true, true, false}}},
+	{"packed repeated bool", &pb.MoreRepeated{BoolsPacked: []bool{false, true, true, false, true, true, true}}},
+	{"repeated int32", &pb.MoreRepeated{Ints: []int32{1, 12203, 1729, -1}}},
+	{"repeated int32 packed", &pb.MoreRepeated{IntsPacked: []int32{1, 12203, 1729}}},
+	{"repeated int64 packed", &pb.MoreRepeated{Int64SPacked: []int64{
+		// Need enough large numbers to verify that the header is counting the number of bytes
+		// for the field, not the number of elements.
+		1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+		1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62, 1 << 62,
+	}}},
+	{"repeated string", &pb.MoreRepeated{Strings: []string{"r", "ken", "gri"}}},
+	{"repeated fixed", &pb.MoreRepeated{Fixeds: []uint32{1, 2, 3, 4}}},
+	// Nested.
+	{"nested", &pb.OldMessage{Nested: &pb.OldMessage_Nested{Name: String("whatever")}}},
+	{"group", &pb.GroupOld{G: &pb.GroupOld_G{X: Int32(12345)}}},
+	// Other things.
+	{"unrecognized", &pb.MoreRepeated{XXX_unrecognized: []byte{13<<3 | 0, 4}}},
+	{"extension (unencoded)", messageWithExtension1},
+	{"extension (encoded)", messageWithExtension3},
+	// proto3 message
+	{"proto3 empty", &proto3pb.Message{}},
+	{"proto3 bool", &proto3pb.Message{TrueScotsman: true}},
+	{"proto3 int64", &proto3pb.Message{ResultCount: 1}},
+	{"proto3 uint32", &proto3pb.Message{HeightInCm: 123}},
+	{"proto3 float", &proto3pb.Message{Score: 12.6}},
+	{"proto3 string", &proto3pb.Message{Name: "Snezana"}},
+	{"proto3 bytes", &proto3pb.Message{Data: []byte("wowsa")}},
+	{"proto3 bytes, empty", &proto3pb.Message{Data: []byte{}}},
+	{"proto3 enum", &proto3pb.Message{Hilarity: proto3pb.Message_PUNS}},
+
+	{"map field", &pb.MessageWithMap{NameMapping: map[int32]string{1: "Rob", 7: "Andrew"}}},
+	{"map field with message", &pb.MessageWithMap{MsgMapping: map[int64]*pb.FloatingPoint{0x7001: &pb.FloatingPoint{F: Float64(2.0)}}}},
+	{"map field with bytes", &pb.MessageWithMap{ByteMapping: map[bool][]byte{true: []byte("this time for sure")}}},
+}
+
+func TestSize(t *testing.T) {
+	for _, tc := range SizeTests {
+		size := Size(tc.pb) // predicted size must equal the actual encoded length
+		b, err := Marshal(tc.pb)
+		if err != nil {
+			t.Errorf("%v: Marshal failed: %v", tc.desc, err)
+			continue
+		}
+		if size != len(b) {
+			t.Errorf("%v: Size(%v) = %d, want %d", tc.desc, tc.pb, size, len(b))
+			t.Logf("%v: bytes: %#v", tc.desc, b)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
new file mode 100644
index 000000000000..fc288628a752
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/Makefile
@@ -0,0 +1,50 @@
+# Go support for Protocol Buffers - Google's data interchange format
+#
+# Copyright 2010 The Go Authors. All rights reserved.
+# https://github.com/golang/protobuf
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+include ../../Make.protobuf
+
+all: regenerate
+
+regenerate:
+ rm -f test.pb.go
+ make test.pb.go
+
+# The following rules are just aids to development. Not needed for typical testing.
+
+diff: regenerate
+ git diff test.pb.go
+
+restore:
+ cp test.pb.go.golden test.pb.go
+
+preserve:
+ cp test.pb.go test.pb.go.golden
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
new file mode 100644
index 000000000000..7172d0e96985
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/golden_test.go
@@ -0,0 +1,86 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2012 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Verify that the compiler output for test.proto is unchanged.
+
+package testdata
+
+import (
+ "crypto/sha1"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+// sum returns in string form (for easy comparison) the SHA-1 hash of the named file.
+func sum(t *testing.T, name string) string {
+ data, err := ioutil.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ t.Logf("sum(%q): length is %d", name, len(data))
+ hash := sha1.New()
+ _, err = hash.Write(data)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return fmt.Sprintf("% x", hash.Sum(nil))
+}
+
+func run(t *testing.T, name string, args ...string) {
+ cmd := exec.Command(name, args...)
+ cmd.Stdin = os.Stdin
+ cmd.Stdout = os.Stdout
+ cmd.Stderr = os.Stderr
+ err := cmd.Run()
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func TestGolden(t *testing.T) {
+ // Compute the original checksum.
+ goldenSum := sum(t, "test.pb.go")
+ // Run the proto compiler.
+ run(t, "protoc", "--go_out="+os.TempDir(), "test.proto")
+ newFile := filepath.Join(os.TempDir(), "test.pb.go")
+ defer os.Remove(newFile)
+ // Compute the new checksum.
+ newSum := sum(t, newFile)
+ // Verify
+ if newSum != goldenSum {
+ run(t, "diff", "-u", "test.pb.go", newFile)
+ t.Fatal("Code generated by protoc-gen-go has changed; update test.pb.go")
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
new file mode 100644
index 000000000000..f47d9e0e3949
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.pb.go
@@ -0,0 +1,2389 @@
+// Code generated by protoc-gen-go.
+// source: test.proto
+// DO NOT EDIT!
+
+/*
+Package testdata is a generated protocol buffer package.
+
+It is generated from these files:
+ test.proto
+
+It has these top-level messages:
+ GoEnum
+ GoTestField
+ GoTest
+ GoSkipTest
+ NonPackedTest
+ PackedTest
+ MaxTag
+ OldMessage
+ NewMessage
+ InnerMessage
+ OtherMessage
+ MyMessage
+ Ext
+ MyMessageSet
+ Empty
+ MessageList
+ Strings
+ Defaults
+ SubDefaults
+ RepeatedEnum
+ MoreRepeated
+ GroupOld
+ GroupNew
+ FloatingPoint
+ MessageWithMap
+*/
+package testdata
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type FOO int32
+
+const (
+ FOO_FOO1 FOO = 1
+)
+
+var FOO_name = map[int32]string{
+ 1: "FOO1",
+}
+var FOO_value = map[string]int32{
+ "FOO1": 1,
+}
+
+func (x FOO) Enum() *FOO {
+ p := new(FOO)
+ *p = x
+ return p
+}
+func (x FOO) String() string {
+ return proto.EnumName(FOO_name, int32(x))
+}
+func (x *FOO) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FOO_value, data, "FOO")
+ if err != nil {
+ return err
+ }
+ *x = FOO(value)
+ return nil
+}
+
+// An enum, for completeness.
+type GoTest_KIND int32
+
+const (
+ GoTest_VOID GoTest_KIND = 0
+ // Basic types
+ GoTest_BOOL GoTest_KIND = 1
+ GoTest_BYTES GoTest_KIND = 2
+ GoTest_FINGERPRINT GoTest_KIND = 3
+ GoTest_FLOAT GoTest_KIND = 4
+ GoTest_INT GoTest_KIND = 5
+ GoTest_STRING GoTest_KIND = 6
+ GoTest_TIME GoTest_KIND = 7
+ // Groupings
+ GoTest_TUPLE GoTest_KIND = 8
+ GoTest_ARRAY GoTest_KIND = 9
+ GoTest_MAP GoTest_KIND = 10
+ // Table types
+ GoTest_TABLE GoTest_KIND = 11
+ // Functions
+ GoTest_FUNCTION GoTest_KIND = 12
+)
+
+var GoTest_KIND_name = map[int32]string{
+ 0: "VOID",
+ 1: "BOOL",
+ 2: "BYTES",
+ 3: "FINGERPRINT",
+ 4: "FLOAT",
+ 5: "INT",
+ 6: "STRING",
+ 7: "TIME",
+ 8: "TUPLE",
+ 9: "ARRAY",
+ 10: "MAP",
+ 11: "TABLE",
+ 12: "FUNCTION",
+}
+var GoTest_KIND_value = map[string]int32{
+ "VOID": 0,
+ "BOOL": 1,
+ "BYTES": 2,
+ "FINGERPRINT": 3,
+ "FLOAT": 4,
+ "INT": 5,
+ "STRING": 6,
+ "TIME": 7,
+ "TUPLE": 8,
+ "ARRAY": 9,
+ "MAP": 10,
+ "TABLE": 11,
+ "FUNCTION": 12,
+}
+
+func (x GoTest_KIND) Enum() *GoTest_KIND {
+ p := new(GoTest_KIND)
+ *p = x
+ return p
+}
+func (x GoTest_KIND) String() string {
+ return proto.EnumName(GoTest_KIND_name, int32(x))
+}
+func (x *GoTest_KIND) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(GoTest_KIND_value, data, "GoTest_KIND")
+ if err != nil {
+ return err
+ }
+ *x = GoTest_KIND(value)
+ return nil
+}
+
+type MyMessage_Color int32
+
+const (
+ MyMessage_RED MyMessage_Color = 0
+ MyMessage_GREEN MyMessage_Color = 1
+ MyMessage_BLUE MyMessage_Color = 2
+)
+
+var MyMessage_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var MyMessage_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x MyMessage_Color) Enum() *MyMessage_Color {
+ p := new(MyMessage_Color)
+ *p = x
+ return p
+}
+func (x MyMessage_Color) String() string {
+ return proto.EnumName(MyMessage_Color_name, int32(x))
+}
+func (x *MyMessage_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MyMessage_Color_value, data, "MyMessage_Color")
+ if err != nil {
+ return err
+ }
+ *x = MyMessage_Color(value)
+ return nil
+}
+
+type Defaults_Color int32
+
+const (
+ Defaults_RED Defaults_Color = 0
+ Defaults_GREEN Defaults_Color = 1
+ Defaults_BLUE Defaults_Color = 2
+)
+
+var Defaults_Color_name = map[int32]string{
+ 0: "RED",
+ 1: "GREEN",
+ 2: "BLUE",
+}
+var Defaults_Color_value = map[string]int32{
+ "RED": 0,
+ "GREEN": 1,
+ "BLUE": 2,
+}
+
+func (x Defaults_Color) Enum() *Defaults_Color {
+ p := new(Defaults_Color)
+ *p = x
+ return p
+}
+func (x Defaults_Color) String() string {
+ return proto.EnumName(Defaults_Color_name, int32(x))
+}
+func (x *Defaults_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Defaults_Color_value, data, "Defaults_Color")
+ if err != nil {
+ return err
+ }
+ *x = Defaults_Color(value)
+ return nil
+}
+
+type RepeatedEnum_Color int32
+
+const (
+ RepeatedEnum_RED RepeatedEnum_Color = 1
+)
+
+var RepeatedEnum_Color_name = map[int32]string{
+ 1: "RED",
+}
+var RepeatedEnum_Color_value = map[string]int32{
+ "RED": 1,
+}
+
+func (x RepeatedEnum_Color) Enum() *RepeatedEnum_Color {
+ p := new(RepeatedEnum_Color)
+ *p = x
+ return p
+}
+func (x RepeatedEnum_Color) String() string {
+ return proto.EnumName(RepeatedEnum_Color_name, int32(x))
+}
+func (x *RepeatedEnum_Color) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RepeatedEnum_Color_value, data, "RepeatedEnum_Color")
+ if err != nil {
+ return err
+ }
+ *x = RepeatedEnum_Color(value)
+ return nil
+}
+
+type GoEnum struct {
+ Foo *FOO `protobuf:"varint,1,req,name=foo,enum=testdata.FOO" json:"foo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoEnum) Reset() { *m = GoEnum{} }
+func (m *GoEnum) String() string { return proto.CompactTextString(m) }
+func (*GoEnum) ProtoMessage() {}
+
+func (m *GoEnum) GetFoo() FOO {
+ if m != nil && m.Foo != nil {
+ return *m.Foo
+ }
+ return FOO_FOO1
+}
+
+type GoTestField struct {
+ Label *string `protobuf:"bytes,1,req" json:"Label,omitempty"`
+ Type *string `protobuf:"bytes,2,req" json:"Type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTestField) Reset() { *m = GoTestField{} }
+func (m *GoTestField) String() string { return proto.CompactTextString(m) }
+func (*GoTestField) ProtoMessage() {}
+
+func (m *GoTestField) GetLabel() string {
+ if m != nil && m.Label != nil {
+ return *m.Label
+ }
+ return ""
+}
+
+func (m *GoTestField) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+type GoTest struct {
+ // Some typical parameters
+ Kind *GoTest_KIND `protobuf:"varint,1,req,enum=testdata.GoTest_KIND" json:"Kind,omitempty"`
+ Table *string `protobuf:"bytes,2,opt" json:"Table,omitempty"`
+ Param *int32 `protobuf:"varint,3,opt" json:"Param,omitempty"`
+ // Required, repeated and optional foreign fields.
+ RequiredField *GoTestField `protobuf:"bytes,4,req" json:"RequiredField,omitempty"`
+ RepeatedField []*GoTestField `protobuf:"bytes,5,rep" json:"RepeatedField,omitempty"`
+ OptionalField *GoTestField `protobuf:"bytes,6,opt" json:"OptionalField,omitempty"`
+ // Required fields of all basic types
+ F_BoolRequired *bool `protobuf:"varint,10,req,name=F_Bool_required" json:"F_Bool_required,omitempty"`
+ F_Int32Required *int32 `protobuf:"varint,11,req,name=F_Int32_required" json:"F_Int32_required,omitempty"`
+ F_Int64Required *int64 `protobuf:"varint,12,req,name=F_Int64_required" json:"F_Int64_required,omitempty"`
+ F_Fixed32Required *uint32 `protobuf:"fixed32,13,req,name=F_Fixed32_required" json:"F_Fixed32_required,omitempty"`
+ F_Fixed64Required *uint64 `protobuf:"fixed64,14,req,name=F_Fixed64_required" json:"F_Fixed64_required,omitempty"`
+ F_Uint32Required *uint32 `protobuf:"varint,15,req,name=F_Uint32_required" json:"F_Uint32_required,omitempty"`
+ F_Uint64Required *uint64 `protobuf:"varint,16,req,name=F_Uint64_required" json:"F_Uint64_required,omitempty"`
+ F_FloatRequired *float32 `protobuf:"fixed32,17,req,name=F_Float_required" json:"F_Float_required,omitempty"`
+ F_DoubleRequired *float64 `protobuf:"fixed64,18,req,name=F_Double_required" json:"F_Double_required,omitempty"`
+ F_StringRequired *string `protobuf:"bytes,19,req,name=F_String_required" json:"F_String_required,omitempty"`
+ F_BytesRequired []byte `protobuf:"bytes,101,req,name=F_Bytes_required" json:"F_Bytes_required,omitempty"`
+ F_Sint32Required *int32 `protobuf:"zigzag32,102,req,name=F_Sint32_required" json:"F_Sint32_required,omitempty"`
+ F_Sint64Required *int64 `protobuf:"zigzag64,103,req,name=F_Sint64_required" json:"F_Sint64_required,omitempty"`
+ // Repeated fields of all basic types
+ F_BoolRepeated []bool `protobuf:"varint,20,rep,name=F_Bool_repeated" json:"F_Bool_repeated,omitempty"`
+ F_Int32Repeated []int32 `protobuf:"varint,21,rep,name=F_Int32_repeated" json:"F_Int32_repeated,omitempty"`
+ F_Int64Repeated []int64 `protobuf:"varint,22,rep,name=F_Int64_repeated" json:"F_Int64_repeated,omitempty"`
+ F_Fixed32Repeated []uint32 `protobuf:"fixed32,23,rep,name=F_Fixed32_repeated" json:"F_Fixed32_repeated,omitempty"`
+ F_Fixed64Repeated []uint64 `protobuf:"fixed64,24,rep,name=F_Fixed64_repeated" json:"F_Fixed64_repeated,omitempty"`
+ F_Uint32Repeated []uint32 `protobuf:"varint,25,rep,name=F_Uint32_repeated" json:"F_Uint32_repeated,omitempty"`
+ F_Uint64Repeated []uint64 `protobuf:"varint,26,rep,name=F_Uint64_repeated" json:"F_Uint64_repeated,omitempty"`
+ F_FloatRepeated []float32 `protobuf:"fixed32,27,rep,name=F_Float_repeated" json:"F_Float_repeated,omitempty"`
+ F_DoubleRepeated []float64 `protobuf:"fixed64,28,rep,name=F_Double_repeated" json:"F_Double_repeated,omitempty"`
+ F_StringRepeated []string `protobuf:"bytes,29,rep,name=F_String_repeated" json:"F_String_repeated,omitempty"`
+ F_BytesRepeated [][]byte `protobuf:"bytes,201,rep,name=F_Bytes_repeated" json:"F_Bytes_repeated,omitempty"`
+ F_Sint32Repeated []int32 `protobuf:"zigzag32,202,rep,name=F_Sint32_repeated" json:"F_Sint32_repeated,omitempty"`
+ F_Sint64Repeated []int64 `protobuf:"zigzag64,203,rep,name=F_Sint64_repeated" json:"F_Sint64_repeated,omitempty"`
+ // Optional fields of all basic types
+ F_BoolOptional *bool `protobuf:"varint,30,opt,name=F_Bool_optional" json:"F_Bool_optional,omitempty"`
+ F_Int32Optional *int32 `protobuf:"varint,31,opt,name=F_Int32_optional" json:"F_Int32_optional,omitempty"`
+ F_Int64Optional *int64 `protobuf:"varint,32,opt,name=F_Int64_optional" json:"F_Int64_optional,omitempty"`
+ F_Fixed32Optional *uint32 `protobuf:"fixed32,33,opt,name=F_Fixed32_optional" json:"F_Fixed32_optional,omitempty"`
+ F_Fixed64Optional *uint64 `protobuf:"fixed64,34,opt,name=F_Fixed64_optional" json:"F_Fixed64_optional,omitempty"`
+ F_Uint32Optional *uint32 `protobuf:"varint,35,opt,name=F_Uint32_optional" json:"F_Uint32_optional,omitempty"`
+ F_Uint64Optional *uint64 `protobuf:"varint,36,opt,name=F_Uint64_optional" json:"F_Uint64_optional,omitempty"`
+ F_FloatOptional *float32 `protobuf:"fixed32,37,opt,name=F_Float_optional" json:"F_Float_optional,omitempty"`
+ F_DoubleOptional *float64 `protobuf:"fixed64,38,opt,name=F_Double_optional" json:"F_Double_optional,omitempty"`
+ F_StringOptional *string `protobuf:"bytes,39,opt,name=F_String_optional" json:"F_String_optional,omitempty"`
+ F_BytesOptional []byte `protobuf:"bytes,301,opt,name=F_Bytes_optional" json:"F_Bytes_optional,omitempty"`
+ F_Sint32Optional *int32 `protobuf:"zigzag32,302,opt,name=F_Sint32_optional" json:"F_Sint32_optional,omitempty"`
+ F_Sint64Optional *int64 `protobuf:"zigzag64,303,opt,name=F_Sint64_optional" json:"F_Sint64_optional,omitempty"`
+ // Default-valued fields of all basic types
+ F_BoolDefaulted *bool `protobuf:"varint,40,opt,name=F_Bool_defaulted,def=1" json:"F_Bool_defaulted,omitempty"`
+ F_Int32Defaulted *int32 `protobuf:"varint,41,opt,name=F_Int32_defaulted,def=32" json:"F_Int32_defaulted,omitempty"`
+ F_Int64Defaulted *int64 `protobuf:"varint,42,opt,name=F_Int64_defaulted,def=64" json:"F_Int64_defaulted,omitempty"`
+ F_Fixed32Defaulted *uint32 `protobuf:"fixed32,43,opt,name=F_Fixed32_defaulted,def=320" json:"F_Fixed32_defaulted,omitempty"`
+ F_Fixed64Defaulted *uint64 `protobuf:"fixed64,44,opt,name=F_Fixed64_defaulted,def=640" json:"F_Fixed64_defaulted,omitempty"`
+ F_Uint32Defaulted *uint32 `protobuf:"varint,45,opt,name=F_Uint32_defaulted,def=3200" json:"F_Uint32_defaulted,omitempty"`
+ F_Uint64Defaulted *uint64 `protobuf:"varint,46,opt,name=F_Uint64_defaulted,def=6400" json:"F_Uint64_defaulted,omitempty"`
+ F_FloatDefaulted *float32 `protobuf:"fixed32,47,opt,name=F_Float_defaulted,def=314159" json:"F_Float_defaulted,omitempty"`
+ F_DoubleDefaulted *float64 `protobuf:"fixed64,48,opt,name=F_Double_defaulted,def=271828" json:"F_Double_defaulted,omitempty"`
+ F_StringDefaulted *string `protobuf:"bytes,49,opt,name=F_String_defaulted,def=hello, \"world!\"\n" json:"F_String_defaulted,omitempty"`
+ F_BytesDefaulted []byte `protobuf:"bytes,401,opt,name=F_Bytes_defaulted,def=Bignose" json:"F_Bytes_defaulted,omitempty"`
+ F_Sint32Defaulted *int32 `protobuf:"zigzag32,402,opt,name=F_Sint32_defaulted,def=-32" json:"F_Sint32_defaulted,omitempty"`
+ F_Sint64Defaulted *int64 `protobuf:"zigzag64,403,opt,name=F_Sint64_defaulted,def=-64" json:"F_Sint64_defaulted,omitempty"`
+ // Packed repeated fields (no string or bytes).
+ F_BoolRepeatedPacked []bool `protobuf:"varint,50,rep,packed,name=F_Bool_repeated_packed" json:"F_Bool_repeated_packed,omitempty"`
+ F_Int32RepeatedPacked []int32 `protobuf:"varint,51,rep,packed,name=F_Int32_repeated_packed" json:"F_Int32_repeated_packed,omitempty"`
+ F_Int64RepeatedPacked []int64 `protobuf:"varint,52,rep,packed,name=F_Int64_repeated_packed" json:"F_Int64_repeated_packed,omitempty"`
+ F_Fixed32RepeatedPacked []uint32 `protobuf:"fixed32,53,rep,packed,name=F_Fixed32_repeated_packed" json:"F_Fixed32_repeated_packed,omitempty"`
+ F_Fixed64RepeatedPacked []uint64 `protobuf:"fixed64,54,rep,packed,name=F_Fixed64_repeated_packed" json:"F_Fixed64_repeated_packed,omitempty"`
+ F_Uint32RepeatedPacked []uint32 `protobuf:"varint,55,rep,packed,name=F_Uint32_repeated_packed" json:"F_Uint32_repeated_packed,omitempty"`
+ F_Uint64RepeatedPacked []uint64 `protobuf:"varint,56,rep,packed,name=F_Uint64_repeated_packed" json:"F_Uint64_repeated_packed,omitempty"`
+ F_FloatRepeatedPacked []float32 `protobuf:"fixed32,57,rep,packed,name=F_Float_repeated_packed" json:"F_Float_repeated_packed,omitempty"`
+ F_DoubleRepeatedPacked []float64 `protobuf:"fixed64,58,rep,packed,name=F_Double_repeated_packed" json:"F_Double_repeated_packed,omitempty"`
+ F_Sint32RepeatedPacked []int32 `protobuf:"zigzag32,502,rep,packed,name=F_Sint32_repeated_packed" json:"F_Sint32_repeated_packed,omitempty"`
+ F_Sint64RepeatedPacked []int64 `protobuf:"zigzag64,503,rep,packed,name=F_Sint64_repeated_packed" json:"F_Sint64_repeated_packed,omitempty"`
+ Requiredgroup *GoTest_RequiredGroup `protobuf:"group,70,req,name=RequiredGroup" json:"requiredgroup,omitempty"`
+ Repeatedgroup []*GoTest_RepeatedGroup `protobuf:"group,80,rep,name=RepeatedGroup" json:"repeatedgroup,omitempty"`
+ Optionalgroup *GoTest_OptionalGroup `protobuf:"group,90,opt,name=OptionalGroup" json:"optionalgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest) Reset() { *m = GoTest{} }
+func (m *GoTest) String() string { return proto.CompactTextString(m) }
+func (*GoTest) ProtoMessage() {}
+
+const Default_GoTest_F_BoolDefaulted bool = true
+const Default_GoTest_F_Int32Defaulted int32 = 32
+const Default_GoTest_F_Int64Defaulted int64 = 64
+const Default_GoTest_F_Fixed32Defaulted uint32 = 320
+const Default_GoTest_F_Fixed64Defaulted uint64 = 640
+const Default_GoTest_F_Uint32Defaulted uint32 = 3200
+const Default_GoTest_F_Uint64Defaulted uint64 = 6400
+const Default_GoTest_F_FloatDefaulted float32 = 314159
+const Default_GoTest_F_DoubleDefaulted float64 = 271828
+const Default_GoTest_F_StringDefaulted string = "hello, \"world!\"\n"
+
+var Default_GoTest_F_BytesDefaulted []byte = []byte("Bignose")
+
+const Default_GoTest_F_Sint32Defaulted int32 = -32
+const Default_GoTest_F_Sint64Defaulted int64 = -64
+
+func (m *GoTest) GetKind() GoTest_KIND {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return GoTest_VOID
+}
+
+func (m *GoTest) GetTable() string {
+ if m != nil && m.Table != nil {
+ return *m.Table
+ }
+ return ""
+}
+
+func (m *GoTest) GetParam() int32 {
+ if m != nil && m.Param != nil {
+ return *m.Param
+ }
+ return 0
+}
+
+func (m *GoTest) GetRequiredField() *GoTestField {
+ if m != nil {
+ return m.RequiredField
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedField() []*GoTestField {
+ if m != nil {
+ return m.RepeatedField
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalField() *GoTestField {
+ if m != nil {
+ return m.OptionalField
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolRequired() bool {
+ if m != nil && m.F_BoolRequired != nil {
+ return *m.F_BoolRequired
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Required() int32 {
+ if m != nil && m.F_Int32Required != nil {
+ return *m.F_Int32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Required() int64 {
+ if m != nil && m.F_Int64Required != nil {
+ return *m.F_Int64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Required() uint32 {
+ if m != nil && m.F_Fixed32Required != nil {
+ return *m.F_Fixed32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Required() uint64 {
+ if m != nil && m.F_Fixed64Required != nil {
+ return *m.F_Fixed64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Required() uint32 {
+ if m != nil && m.F_Uint32Required != nil {
+ return *m.F_Uint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Required() uint64 {
+ if m != nil && m.F_Uint64Required != nil {
+ return *m.F_Uint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatRequired() float32 {
+ if m != nil && m.F_FloatRequired != nil {
+ return *m.F_FloatRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleRequired() float64 {
+ if m != nil && m.F_DoubleRequired != nil {
+ return *m.F_DoubleRequired
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringRequired() string {
+ if m != nil && m.F_StringRequired != nil {
+ return *m.F_StringRequired
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesRequired() []byte {
+ if m != nil {
+ return m.F_BytesRequired
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Required() int32 {
+ if m != nil && m.F_Sint32Required != nil {
+ return *m.F_Sint32Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Required() int64 {
+ if m != nil && m.F_Sint64Required != nil {
+ return *m.F_Sint64Required
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolRepeated() []bool {
+ if m != nil {
+ return m.F_BoolRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32Repeated() []int32 {
+ if m != nil {
+ return m.F_Int32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64Repeated() []int64 {
+ if m != nil {
+ return m.F_Int64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Fixed32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Fixed64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32Repeated() []uint32 {
+ if m != nil {
+ return m.F_Uint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64Repeated() []uint64 {
+ if m != nil {
+ return m.F_Uint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeated() []float32 {
+ if m != nil {
+ return m.F_FloatRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeated() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_StringRepeated() []string {
+ if m != nil {
+ return m.F_StringRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BytesRepeated() [][]byte {
+ if m != nil {
+ return m.F_BytesRepeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Repeated() []int32 {
+ if m != nil {
+ return m.F_Sint32Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64Repeated() []int64 {
+ if m != nil {
+ return m.F_Sint64Repeated
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_BoolOptional() bool {
+ if m != nil && m.F_BoolOptional != nil {
+ return *m.F_BoolOptional
+ }
+ return false
+}
+
+func (m *GoTest) GetF_Int32Optional() int32 {
+ if m != nil && m.F_Int32Optional != nil {
+ return *m.F_Int32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Int64Optional() int64 {
+ if m != nil && m.F_Int64Optional != nil {
+ return *m.F_Int64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed32Optional() uint32 {
+ if m != nil && m.F_Fixed32Optional != nil {
+ return *m.F_Fixed32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Fixed64Optional() uint64 {
+ if m != nil && m.F_Fixed64Optional != nil {
+ return *m.F_Fixed64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint32Optional() uint32 {
+ if m != nil && m.F_Uint32Optional != nil {
+ return *m.F_Uint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Uint64Optional() uint64 {
+ if m != nil && m.F_Uint64Optional != nil {
+ return *m.F_Uint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_FloatOptional() float32 {
+ if m != nil && m.F_FloatOptional != nil {
+ return *m.F_FloatOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_DoubleOptional() float64 {
+ if m != nil && m.F_DoubleOptional != nil {
+ return *m.F_DoubleOptional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_StringOptional() string {
+ if m != nil && m.F_StringOptional != nil {
+ return *m.F_StringOptional
+ }
+ return ""
+}
+
+func (m *GoTest) GetF_BytesOptional() []byte {
+ if m != nil {
+ return m.F_BytesOptional
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32Optional() int32 {
+ if m != nil && m.F_Sint32Optional != nil {
+ return *m.F_Sint32Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_Sint64Optional() int64 {
+ if m != nil && m.F_Sint64Optional != nil {
+ return *m.F_Sint64Optional
+ }
+ return 0
+}
+
+func (m *GoTest) GetF_BoolDefaulted() bool {
+ if m != nil && m.F_BoolDefaulted != nil {
+ return *m.F_BoolDefaulted
+ }
+ return Default_GoTest_F_BoolDefaulted
+}
+
+func (m *GoTest) GetF_Int32Defaulted() int32 {
+ if m != nil && m.F_Int32Defaulted != nil {
+ return *m.F_Int32Defaulted
+ }
+ return Default_GoTest_F_Int32Defaulted
+}
+
+func (m *GoTest) GetF_Int64Defaulted() int64 {
+ if m != nil && m.F_Int64Defaulted != nil {
+ return *m.F_Int64Defaulted
+ }
+ return Default_GoTest_F_Int64Defaulted
+}
+
+func (m *GoTest) GetF_Fixed32Defaulted() uint32 {
+ if m != nil && m.F_Fixed32Defaulted != nil {
+ return *m.F_Fixed32Defaulted
+ }
+ return Default_GoTest_F_Fixed32Defaulted
+}
+
+func (m *GoTest) GetF_Fixed64Defaulted() uint64 {
+ if m != nil && m.F_Fixed64Defaulted != nil {
+ return *m.F_Fixed64Defaulted
+ }
+ return Default_GoTest_F_Fixed64Defaulted
+}
+
+func (m *GoTest) GetF_Uint32Defaulted() uint32 {
+ if m != nil && m.F_Uint32Defaulted != nil {
+ return *m.F_Uint32Defaulted
+ }
+ return Default_GoTest_F_Uint32Defaulted
+}
+
+func (m *GoTest) GetF_Uint64Defaulted() uint64 {
+ if m != nil && m.F_Uint64Defaulted != nil {
+ return *m.F_Uint64Defaulted
+ }
+ return Default_GoTest_F_Uint64Defaulted
+}
+
+func (m *GoTest) GetF_FloatDefaulted() float32 {
+ if m != nil && m.F_FloatDefaulted != nil {
+ return *m.F_FloatDefaulted
+ }
+ return Default_GoTest_F_FloatDefaulted
+}
+
+func (m *GoTest) GetF_DoubleDefaulted() float64 {
+ if m != nil && m.F_DoubleDefaulted != nil {
+ return *m.F_DoubleDefaulted
+ }
+ return Default_GoTest_F_DoubleDefaulted
+}
+
+func (m *GoTest) GetF_StringDefaulted() string {
+ if m != nil && m.F_StringDefaulted != nil {
+ return *m.F_StringDefaulted
+ }
+ return Default_GoTest_F_StringDefaulted
+}
+
+func (m *GoTest) GetF_BytesDefaulted() []byte {
+ if m != nil && m.F_BytesDefaulted != nil {
+ return m.F_BytesDefaulted
+ }
+ return append([]byte(nil), Default_GoTest_F_BytesDefaulted...)
+}
+
+func (m *GoTest) GetF_Sint32Defaulted() int32 {
+ if m != nil && m.F_Sint32Defaulted != nil {
+ return *m.F_Sint32Defaulted
+ }
+ return Default_GoTest_F_Sint32Defaulted
+}
+
+func (m *GoTest) GetF_Sint64Defaulted() int64 {
+ if m != nil && m.F_Sint64Defaulted != nil {
+ return *m.F_Sint64Defaulted
+ }
+ return Default_GoTest_F_Sint64Defaulted
+}
+
+func (m *GoTest) GetF_BoolRepeatedPacked() []bool {
+ if m != nil {
+ return m.F_BoolRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Int32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Int64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Int64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Fixed32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Fixed64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Fixed64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint32RepeatedPacked() []uint32 {
+ if m != nil {
+ return m.F_Uint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Uint64RepeatedPacked() []uint64 {
+ if m != nil {
+ return m.F_Uint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_FloatRepeatedPacked() []float32 {
+ if m != nil {
+ return m.F_FloatRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_DoubleRepeatedPacked() []float64 {
+ if m != nil {
+ return m.F_DoubleRepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint32RepeatedPacked() []int32 {
+ if m != nil {
+ return m.F_Sint32RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetF_Sint64RepeatedPacked() []int64 {
+ if m != nil {
+ return m.F_Sint64RepeatedPacked
+ }
+ return nil
+}
+
+func (m *GoTest) GetRequiredgroup() *GoTest_RequiredGroup {
+ if m != nil {
+ return m.Requiredgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetRepeatedgroup() []*GoTest_RepeatedGroup {
+ if m != nil {
+ return m.Repeatedgroup
+ }
+ return nil
+}
+
+func (m *GoTest) GetOptionalgroup() *GoTest_OptionalGroup {
+ if m != nil {
+ return m.Optionalgroup
+ }
+ return nil
+}
+
+// Required, repeated, and optional groups.
+type GoTest_RequiredGroup struct {
+ RequiredField *string `protobuf:"bytes,71,req" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RequiredGroup) Reset() { *m = GoTest_RequiredGroup{} }
+func (m *GoTest_RequiredGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RequiredGroup) ProtoMessage() {}
+
+func (m *GoTest_RequiredGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// GoTest_RepeatedGroup is the group-typed sub-message for GoTest's repeated
+// group field (tag 81).
+type GoTest_RepeatedGroup struct {
+ RequiredField *string `protobuf:"bytes,81,req" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_RepeatedGroup) Reset() { *m = GoTest_RepeatedGroup{} }
+func (m *GoTest_RepeatedGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_RepeatedGroup) ProtoMessage() {}
+
+// GetRequiredField returns the field value, or "" if unset or m is nil.
+func (m *GoTest_RepeatedGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// GoTest_OptionalGroup is the group-typed sub-message for GoTest's optional
+// group field (tag 91).
+type GoTest_OptionalGroup struct {
+ RequiredField *string `protobuf:"bytes,91,req" json:"RequiredField,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoTest_OptionalGroup) Reset() { *m = GoTest_OptionalGroup{} }
+func (m *GoTest_OptionalGroup) String() string { return proto.CompactTextString(m) }
+func (*GoTest_OptionalGroup) ProtoMessage() {}
+
+// GetRequiredField returns the field value, or "" if unset or m is nil.
+func (m *GoTest_OptionalGroup) GetRequiredField() string {
+ if m != nil && m.RequiredField != nil {
+ return *m.RequiredField
+ }
+ return ""
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+type GoSkipTest struct {
+ SkipInt32 *int32 `protobuf:"varint,11,req,name=skip_int32" json:"skip_int32,omitempty"`
+ SkipFixed32 *uint32 `protobuf:"fixed32,12,req,name=skip_fixed32" json:"skip_fixed32,omitempty"`
+ SkipFixed64 *uint64 `protobuf:"fixed64,13,req,name=skip_fixed64" json:"skip_fixed64,omitempty"`
+ SkipString *string `protobuf:"bytes,14,req,name=skip_string" json:"skip_string,omitempty"`
+ Skipgroup *GoSkipTest_SkipGroup `protobuf:"group,15,req,name=SkipGroup" json:"skipgroup,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest) Reset() { *m = GoSkipTest{} }
+func (m *GoSkipTest) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest) ProtoMessage() {}
+
+// Getters below are nil-safe: they return the zero value (or nil) when the
+// receiver or the field pointer is nil.
+
+func (m *GoSkipTest) GetSkipInt32() int32 {
+ if m != nil && m.SkipInt32 != nil {
+ return *m.SkipInt32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed32() uint32 {
+ if m != nil && m.SkipFixed32 != nil {
+ return *m.SkipFixed32
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipFixed64() uint64 {
+ if m != nil && m.SkipFixed64 != nil {
+ return *m.SkipFixed64
+ }
+ return 0
+}
+
+func (m *GoSkipTest) GetSkipString() string {
+ if m != nil && m.SkipString != nil {
+ return *m.SkipString
+ }
+ return ""
+}
+
+func (m *GoSkipTest) GetSkipgroup() *GoSkipTest_SkipGroup {
+ if m != nil {
+ return m.Skipgroup
+ }
+ return nil
+}
+
+// GoSkipTest_SkipGroup is the group-typed sub-message for GoSkipTest's
+// SkipGroup field (tag 15).
+type GoSkipTest_SkipGroup struct {
+ GroupInt32 *int32 `protobuf:"varint,16,req,name=group_int32" json:"group_int32,omitempty"`
+ GroupString *string `protobuf:"bytes,17,req,name=group_string" json:"group_string,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GoSkipTest_SkipGroup) Reset() { *m = GoSkipTest_SkipGroup{} }
+func (m *GoSkipTest_SkipGroup) String() string { return proto.CompactTextString(m) }
+func (*GoSkipTest_SkipGroup) ProtoMessage() {}
+
+// GetGroupInt32 returns the field value, or 0 if unset or m is nil.
+func (m *GoSkipTest_SkipGroup) GetGroupInt32() int32 {
+ if m != nil && m.GroupInt32 != nil {
+ return *m.GroupInt32
+ }
+ return 0
+}
+
+// GetGroupString returns the field value, or "" if unset or m is nil.
+func (m *GoSkipTest_SkipGroup) GetGroupString() string {
+ if m != nil && m.GroupString != nil {
+ return *m.GroupString
+ }
+ return ""
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+type NonPackedTest struct {
+ A []int32 `protobuf:"varint,1,rep,name=a" json:"a,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NonPackedTest) Reset() { *m = NonPackedTest{} }
+func (m *NonPackedTest) String() string { return proto.CompactTextString(m) }
+func (*NonPackedTest) ProtoMessage() {}
+
+// GetA returns the repeated field A, or nil if m is nil.
+func (m *NonPackedTest) GetA() []int32 {
+ if m != nil {
+ return m.A
+ }
+ return nil
+}
+
+// PackedTest is the packed counterpart of NonPackedTest: same tag number,
+// but the repeated field uses packed encoding.
+type PackedTest struct {
+ B []int32 `protobuf:"varint,1,rep,packed,name=b" json:"b,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PackedTest) Reset() { *m = PackedTest{} }
+func (m *PackedTest) String() string { return proto.CompactTextString(m) }
+func (*PackedTest) ProtoMessage() {}
+
+// GetB returns the repeated field B, or nil if m is nil.
+func (m *PackedTest) GetB() []int32 {
+ if m != nil {
+ return m.B
+ }
+ return nil
+}
+
+// MaxTag exercises the largest legal protobuf tag number (2^29 - 1).
+type MaxTag struct {
+ // Maximum possible tag number.
+ LastField *string `protobuf:"bytes,536870911,opt,name=last_field" json:"last_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MaxTag) Reset() { *m = MaxTag{} }
+func (m *MaxTag) String() string { return proto.CompactTextString(m) }
+func (*MaxTag) ProtoMessage() {}
+
+// GetLastField returns the field value, or "" if unset or m is nil.
+func (m *MaxTag) GetLastField() string {
+ if m != nil && m.LastField != nil {
+ return *m.LastField
+ }
+ return ""
+}
+
+// OldMessage is the "old version" half of a wire-compatibility pair; see
+// NewMessage below.
+type OldMessage struct {
+ Nested *OldMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ Num *int32 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage) Reset() { *m = OldMessage{} }
+func (m *OldMessage) String() string { return proto.CompactTextString(m) }
+func (*OldMessage) ProtoMessage() {}
+
+// GetNested returns the nested sub-message, or nil if m is nil.
+func (m *OldMessage) GetNested() *OldMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+// GetNum returns Num, or 0 if unset or m is nil.
+func (m *OldMessage) GetNum() int32 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+// OldMessage_Nested is OldMessage's nested sub-message.
+type OldMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OldMessage_Nested) Reset() { *m = OldMessage_Nested{} }
+func (m *OldMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*OldMessage_Nested) ProtoMessage() {}
+
+// GetName returns Name, or "" if unset or m is nil.
+func (m *OldMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+// Note that Num widens from int32 (OldMessage) to int64 here, which is a
+// wire-compatible change for varint fields.
+type NewMessage struct {
+ Nested *NewMessage_Nested `protobuf:"bytes,1,opt,name=nested" json:"nested,omitempty"`
+ // This is an int32 in OldMessage.
+ Num *int64 `protobuf:"varint,2,opt,name=num" json:"num,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage) Reset() { *m = NewMessage{} }
+func (m *NewMessage) String() string { return proto.CompactTextString(m) }
+func (*NewMessage) ProtoMessage() {}
+
+// GetNested returns the nested sub-message, or nil if m is nil.
+func (m *NewMessage) GetNested() *NewMessage_Nested {
+ if m != nil {
+ return m.Nested
+ }
+ return nil
+}
+
+// GetNum returns Num, or 0 if unset or m is nil.
+func (m *NewMessage) GetNum() int64 {
+ if m != nil && m.Num != nil {
+ return *m.Num
+ }
+ return 0
+}
+
+// NewMessage_Nested is NewMessage's nested sub-message; it adds FoodGroup
+// relative to OldMessage_Nested.
+type NewMessage_Nested struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ FoodGroup *string `protobuf:"bytes,2,opt,name=food_group" json:"food_group,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NewMessage_Nested) Reset() { *m = NewMessage_Nested{} }
+func (m *NewMessage_Nested) String() string { return proto.CompactTextString(m) }
+func (*NewMessage_Nested) ProtoMessage() {}
+
+// GetName returns Name, or "" if unset or m is nil.
+func (m *NewMessage_Nested) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// GetFoodGroup returns FoodGroup, or "" if unset or m is nil.
+func (m *NewMessage_Nested) GetFoodGroup() string {
+ if m != nil && m.FoodGroup != nil {
+ return *m.FoodGroup
+ }
+ return ""
+}
+
+// InnerMessage is a small message with a defaulted field (Port, default
+// 4000) used as a sub-message by OtherMessage and MyMessage.
+type InnerMessage struct {
+ Host *string `protobuf:"bytes,1,req,name=host" json:"host,omitempty"`
+ Port *int32 `protobuf:"varint,2,opt,name=port,def=4000" json:"port,omitempty"`
+ Connected *bool `protobuf:"varint,3,opt,name=connected" json:"connected,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InnerMessage) Reset() { *m = InnerMessage{} }
+func (m *InnerMessage) String() string { return proto.CompactTextString(m) }
+func (*InnerMessage) ProtoMessage() {}
+
+// Default_InnerMessage_Port is the declared default for the Port field.
+const Default_InnerMessage_Port int32 = 4000
+
+// GetHost returns Host, or "" if unset or m is nil.
+func (m *InnerMessage) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+// GetPort returns Port, falling back to the declared default (4000) when
+// the field is unset or m is nil.
+func (m *InnerMessage) GetPort() int32 {
+ if m != nil && m.Port != nil {
+ return *m.Port
+ }
+ return Default_InnerMessage_Port
+}
+
+// GetConnected returns Connected, or false if unset or m is nil.
+func (m *InnerMessage) GetConnected() bool {
+ if m != nil && m.Connected != nil {
+ return *m.Connected
+ }
+ return false
+}
+
+// OtherMessage bundles scalar, bytes, and sub-message fields; it appears as
+// a repeated field of MyMessage.
+type OtherMessage struct {
+ Key *int64 `protobuf:"varint,1,opt,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Weight *float32 `protobuf:"fixed32,3,opt,name=weight" json:"weight,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,4,opt,name=inner" json:"inner,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OtherMessage) Reset() { *m = OtherMessage{} }
+func (m *OtherMessage) String() string { return proto.CompactTextString(m) }
+func (*OtherMessage) ProtoMessage() {}
+
+// Getters below are nil-safe: they return the zero value (or nil) when the
+// receiver or the field pointer is nil.
+
+func (m *OtherMessage) GetKey() int64 {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *OtherMessage) GetWeight() float32 {
+ if m != nil && m.Weight != nil {
+ return *m.Weight
+ }
+ return 0
+}
+
+func (m *OtherMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+// MyMessage is the main extendable test message: it mixes required/optional
+// scalars, repeated fields, sub-messages, a group, an enum, and an
+// extension range [100, 536870911].
+type MyMessage struct {
+ Count *int32 `protobuf:"varint,1,req,name=count" json:"count,omitempty"`
+ Name *string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"`
+ Quote *string `protobuf:"bytes,3,opt,name=quote" json:"quote,omitempty"`
+ Pet []string `protobuf:"bytes,4,rep,name=pet" json:"pet,omitempty"`
+ Inner *InnerMessage `protobuf:"bytes,5,opt,name=inner" json:"inner,omitempty"`
+ Others []*OtherMessage `protobuf:"bytes,6,rep,name=others" json:"others,omitempty"`
+ RepInner []*InnerMessage `protobuf:"bytes,12,rep,name=rep_inner" json:"rep_inner,omitempty"`
+ Bikeshed *MyMessage_Color `protobuf:"varint,7,opt,name=bikeshed,enum=testdata.MyMessage_Color" json:"bikeshed,omitempty"`
+ Somegroup *MyMessage_SomeGroup `protobuf:"group,8,opt,name=SomeGroup" json:"somegroup,omitempty"`
+ // This field becomes [][]byte in the generated code.
+ RepBytes [][]byte `protobuf:"bytes,10,rep,name=rep_bytes" json:"rep_bytes,omitempty"`
+ Bigfloat *float64 `protobuf:"fixed64,11,opt,name=bigfloat" json:"bigfloat,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage) Reset() { *m = MyMessage{} }
+func (m *MyMessage) String() string { return proto.CompactTextString(m) }
+func (*MyMessage) ProtoMessage() {}
+
+// extRange_MyMessage declares the extension tag range for MyMessage.
+var extRange_MyMessage = []proto.ExtensionRange{
+ {100, 536870911},
+}
+
+func (*MyMessage) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessage
+}
+
+// ExtensionMap lazily allocates and returns the extension storage map.
+func (m *MyMessage) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+// Getters below are nil-safe: they return the zero value (or nil) when the
+// receiver or the field pointer is nil. GetBikeshed falls back to the first
+// enum value, MyMessage_RED.
+
+func (m *MyMessage) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *MyMessage) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MyMessage) GetQuote() string {
+ if m != nil && m.Quote != nil {
+ return *m.Quote
+ }
+ return ""
+}
+
+func (m *MyMessage) GetPet() []string {
+ if m != nil {
+ return m.Pet
+ }
+ return nil
+}
+
+func (m *MyMessage) GetInner() *InnerMessage {
+ if m != nil {
+ return m.Inner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetOthers() []*OtherMessage {
+ if m != nil {
+ return m.Others
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepInner() []*InnerMessage {
+ if m != nil {
+ return m.RepInner
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBikeshed() MyMessage_Color {
+ if m != nil && m.Bikeshed != nil {
+ return *m.Bikeshed
+ }
+ return MyMessage_RED
+}
+
+func (m *MyMessage) GetSomegroup() *MyMessage_SomeGroup {
+ if m != nil {
+ return m.Somegroup
+ }
+ return nil
+}
+
+func (m *MyMessage) GetRepBytes() [][]byte {
+ if m != nil {
+ return m.RepBytes
+ }
+ return nil
+}
+
+func (m *MyMessage) GetBigfloat() float64 {
+ if m != nil && m.Bigfloat != nil {
+ return *m.Bigfloat
+ }
+ return 0
+}
+
+// MyMessage_SomeGroup is the group-typed sub-message for MyMessage's
+// SomeGroup field (tag 8).
+type MyMessage_SomeGroup struct {
+ GroupField *int32 `protobuf:"varint,9,opt,name=group_field" json:"group_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessage_SomeGroup) Reset() { *m = MyMessage_SomeGroup{} }
+func (m *MyMessage_SomeGroup) String() string { return proto.CompactTextString(m) }
+func (*MyMessage_SomeGroup) ProtoMessage() {}
+
+// GetGroupField returns GroupField, or 0 if unset or m is nil.
+func (m *MyMessage_SomeGroup) GetGroupField() int32 {
+ if m != nil && m.GroupField != nil {
+ return *m.GroupField
+ }
+ return 0
+}
+
+// Ext is a message used as an extension payload for MyMessage (see the
+// E_Ext_* descriptors that follow).
+type Ext struct {
+ Data *string `protobuf:"bytes,1,opt,name=data" json:"data,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Ext) Reset() { *m = Ext{} }
+func (m *Ext) String() string { return proto.CompactTextString(m) }
+func (*Ext) ProtoMessage() {}
+
+// GetData returns Data, or "" if unset or m is nil.
+func (m *Ext) GetData() string {
+ if m != nil && m.Data != nil {
+ return *m.Data
+ }
+ return ""
+}
+
+// Extension descriptors extending MyMessage: a message-typed extension
+// (field 103), a string extension (field 104), and an int32 extension
+// (field 105). All fall inside MyMessage's [100, 536870911] range.
+var E_Ext_More = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*Ext)(nil),
+ Field: 103,
+ Name: "testdata.Ext.more",
+ Tag: "bytes,103,opt,name=more",
+}
+
+var E_Ext_Text = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*string)(nil),
+ Field: 104,
+ Name: "testdata.Ext.text",
+ Tag: "bytes,104,opt,name=text",
+}
+
+var E_Ext_Number = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: (*int32)(nil),
+ Field: 105,
+ Name: "testdata.Ext.number",
+ Tag: "varint,105,opt,name=number",
+}
+
+// MyMessageSet is a message-set style message: it has no regular fields and
+// carries only extensions in the range [100, 2147483646].
+type MyMessageSet struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MyMessageSet) Reset() { *m = MyMessageSet{} }
+func (m *MyMessageSet) String() string { return proto.CompactTextString(m) }
+func (*MyMessageSet) ProtoMessage() {}
+
+// Marshal encodes the extensions in message-set wire format.
+func (m *MyMessageSet) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+
+// Unmarshal decodes message-set wire data into the extension map.
+func (m *MyMessageSet) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+
+// MarshalJSON encodes the extensions in message-set JSON format. It goes
+// through ExtensionMap() — like Marshal/Unmarshal above — rather than
+// reading XXX_extensions directly, so all four methods share one code path.
+func (m *MyMessageSet) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.ExtensionMap())
+}
+
+// UnmarshalJSON decodes message-set JSON into the extension map. It must use
+// ExtensionMap() (which lazily allocates the map) rather than the raw
+// XXX_extensions field: on a freshly created message the field is nil, and a
+// nil map cannot be written into.
+func (m *MyMessageSet) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.ExtensionMap())
+}
+
+// ensure MyMessageSet satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*MyMessageSet)(nil)
+var _ proto.Unmarshaler = (*MyMessageSet)(nil)
+
+// extRange_MyMessageSet declares the extension tag range for MyMessageSet.
+var extRange_MyMessageSet = []proto.ExtensionRange{
+ {100, 2147483646},
+}
+
+func (*MyMessageSet) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_MyMessageSet
+}
+
+// ExtensionMap lazily allocates and returns the extension storage map.
+func (m *MyMessageSet) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+// Empty is a message with no fields; it serves as the payload type for the
+// E_X2xx message-set extensions below.
+type Empty struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Empty) Reset() { *m = Empty{} }
+func (m *Empty) String() string { return proto.CompactTextString(m) }
+func (*Empty) ProtoMessage() {}
+
+// MessageList holds a repeated group of MessageList_Message entries.
+type MessageList struct {
+ Message []*MessageList_Message `protobuf:"group,1,rep" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList) Reset() { *m = MessageList{} }
+func (m *MessageList) String() string { return proto.CompactTextString(m) }
+func (*MessageList) ProtoMessage() {}
+
+// GetMessage returns the repeated group entries, or nil if m is nil.
+func (m *MessageList) GetMessage() []*MessageList_Message {
+ if m != nil {
+ return m.Message
+ }
+ return nil
+}
+
+// MessageList_Message is the group element type of MessageList.
+type MessageList_Message struct {
+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,3,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageList_Message) Reset() { *m = MessageList_Message{} }
+func (m *MessageList_Message) String() string { return proto.CompactTextString(m) }
+func (*MessageList_Message) ProtoMessage() {}
+
+// GetName returns Name, or "" if unset or m is nil.
+func (m *MessageList_Message) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// GetCount returns Count, or 0 if unset or m is nil.
+func (m *MessageList_Message) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+// Strings pairs a string field with a bytes field for string/bytes
+// round-trip testing.
+type Strings struct {
+ StringField *string `protobuf:"bytes,1,opt,name=string_field" json:"string_field,omitempty"`
+ BytesField []byte `protobuf:"bytes,2,opt,name=bytes_field" json:"bytes_field,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Strings) Reset() { *m = Strings{} }
+func (m *Strings) String() string { return proto.CompactTextString(m) }
+func (*Strings) ProtoMessage() {}
+
+// GetStringField returns StringField, or "" if unset or m is nil.
+func (m *Strings) GetStringField() string {
+ if m != nil && m.StringField != nil {
+ return *m.StringField
+ }
+ return ""
+}
+
+// GetBytesField returns BytesField, or nil if m is nil.
+func (m *Strings) GetBytesField() []byte {
+ if m != nil {
+ return m.BytesField
+ }
+ return nil
+}
+
+// Defaults exercises declared default values for every basic field type,
+// plus special float defaults (inf, -inf, nan), an enum default, a
+// sub-message, and an explicitly empty string default.
+type Defaults struct {
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ F_Bool *bool `protobuf:"varint,1,opt,def=1" json:"F_Bool,omitempty"`
+ F_Int32 *int32 `protobuf:"varint,2,opt,def=32" json:"F_Int32,omitempty"`
+ F_Int64 *int64 `protobuf:"varint,3,opt,def=64" json:"F_Int64,omitempty"`
+ F_Fixed32 *uint32 `protobuf:"fixed32,4,opt,def=320" json:"F_Fixed32,omitempty"`
+ F_Fixed64 *uint64 `protobuf:"fixed64,5,opt,def=640" json:"F_Fixed64,omitempty"`
+ F_Uint32 *uint32 `protobuf:"varint,6,opt,def=3200" json:"F_Uint32,omitempty"`
+ F_Uint64 *uint64 `protobuf:"varint,7,opt,def=6400" json:"F_Uint64,omitempty"`
+ F_Float *float32 `protobuf:"fixed32,8,opt,def=314159" json:"F_Float,omitempty"`
+ F_Double *float64 `protobuf:"fixed64,9,opt,def=271828" json:"F_Double,omitempty"`
+ F_String *string `protobuf:"bytes,10,opt,def=hello, \"world!\"\n" json:"F_String,omitempty"`
+ F_Bytes []byte `protobuf:"bytes,11,opt,def=Bignose" json:"F_Bytes,omitempty"`
+ F_Sint32 *int32 `protobuf:"zigzag32,12,opt,def=-32" json:"F_Sint32,omitempty"`
+ F_Sint64 *int64 `protobuf:"zigzag64,13,opt,def=-64" json:"F_Sint64,omitempty"`
+ F_Enum *Defaults_Color `protobuf:"varint,14,opt,enum=testdata.Defaults_Color,def=1" json:"F_Enum,omitempty"`
+ // More fields with crazy defaults.
+ F_Pinf *float32 `protobuf:"fixed32,15,opt,def=inf" json:"F_Pinf,omitempty"`
+ F_Ninf *float32 `protobuf:"fixed32,16,opt,def=-inf" json:"F_Ninf,omitempty"`
+ F_Nan *float32 `protobuf:"fixed32,17,opt,def=nan" json:"F_Nan,omitempty"`
+ // Sub-message.
+ Sub *SubDefaults `protobuf:"bytes,18,opt,name=sub" json:"sub,omitempty"`
+ // Redundant but explicit defaults.
+ StrZero *string `protobuf:"bytes,19,opt,name=str_zero,def=" json:"str_zero,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Defaults) Reset() { *m = Defaults{} }
+func (m *Defaults) String() string { return proto.CompactTextString(m) }
+func (*Defaults) ProtoMessage() {}
+
+// Declared default values, mirrored from the struct tags above. Pointer-free
+// defaults are constants; bytes and the non-constant floats are vars.
+const Default_Defaults_F_Bool bool = true
+const Default_Defaults_F_Int32 int32 = 32
+const Default_Defaults_F_Int64 int64 = 64
+const Default_Defaults_F_Fixed32 uint32 = 320
+const Default_Defaults_F_Fixed64 uint64 = 640
+const Default_Defaults_F_Uint32 uint32 = 3200
+const Default_Defaults_F_Uint64 uint64 = 6400
+const Default_Defaults_F_Float float32 = 314159
+const Default_Defaults_F_Double float64 = 271828
+const Default_Defaults_F_String string = "hello, \"world!\"\n"
+
+var Default_Defaults_F_Bytes []byte = []byte("Bignose")
+
+const Default_Defaults_F_Sint32 int32 = -32
+const Default_Defaults_F_Sint64 int64 = -64
+const Default_Defaults_F_Enum Defaults_Color = Defaults_GREEN
+
+var Default_Defaults_F_Pinf float32 = float32(math.Inf(1))
+var Default_Defaults_F_Ninf float32 = float32(math.Inf(-1))
+var Default_Defaults_F_Nan float32 = float32(math.NaN())
+
+// Getters below are nil-safe and fall back to the declared default when the
+// field is unset or the receiver is nil.
+
+func (m *Defaults) GetF_Bool() bool {
+ if m != nil && m.F_Bool != nil {
+ return *m.F_Bool
+ }
+ return Default_Defaults_F_Bool
+}
+
+func (m *Defaults) GetF_Int32() int32 {
+ if m != nil && m.F_Int32 != nil {
+ return *m.F_Int32
+ }
+ return Default_Defaults_F_Int32
+}
+
+func (m *Defaults) GetF_Int64() int64 {
+ if m != nil && m.F_Int64 != nil {
+ return *m.F_Int64
+ }
+ return Default_Defaults_F_Int64
+}
+
+func (m *Defaults) GetF_Fixed32() uint32 {
+ if m != nil && m.F_Fixed32 != nil {
+ return *m.F_Fixed32
+ }
+ return Default_Defaults_F_Fixed32
+}
+
+func (m *Defaults) GetF_Fixed64() uint64 {
+ if m != nil && m.F_Fixed64 != nil {
+ return *m.F_Fixed64
+ }
+ return Default_Defaults_F_Fixed64
+}
+
+func (m *Defaults) GetF_Uint32() uint32 {
+ if m != nil && m.F_Uint32 != nil {
+ return *m.F_Uint32
+ }
+ return Default_Defaults_F_Uint32
+}
+
+func (m *Defaults) GetF_Uint64() uint64 {
+ if m != nil && m.F_Uint64 != nil {
+ return *m.F_Uint64
+ }
+ return Default_Defaults_F_Uint64
+}
+
+func (m *Defaults) GetF_Float() float32 {
+ if m != nil && m.F_Float != nil {
+ return *m.F_Float
+ }
+ return Default_Defaults_F_Float
+}
+
+func (m *Defaults) GetF_Double() float64 {
+ if m != nil && m.F_Double != nil {
+ return *m.F_Double
+ }
+ return Default_Defaults_F_Double
+}
+
+func (m *Defaults) GetF_String() string {
+ if m != nil && m.F_String != nil {
+ return *m.F_String
+ }
+ return Default_Defaults_F_String
+}
+
+// GetF_Bytes returns the field when set; otherwise it returns a fresh copy
+// of the default so callers cannot mutate the shared default slice.
+func (m *Defaults) GetF_Bytes() []byte {
+ if m != nil && m.F_Bytes != nil {
+ return m.F_Bytes
+ }
+ return append([]byte(nil), Default_Defaults_F_Bytes...)
+}
+
+func (m *Defaults) GetF_Sint32() int32 {
+ if m != nil && m.F_Sint32 != nil {
+ return *m.F_Sint32
+ }
+ return Default_Defaults_F_Sint32
+}
+
+func (m *Defaults) GetF_Sint64() int64 {
+ if m != nil && m.F_Sint64 != nil {
+ return *m.F_Sint64
+ }
+ return Default_Defaults_F_Sint64
+}
+
+func (m *Defaults) GetF_Enum() Defaults_Color {
+ if m != nil && m.F_Enum != nil {
+ return *m.F_Enum
+ }
+ return Default_Defaults_F_Enum
+}
+
+func (m *Defaults) GetF_Pinf() float32 {
+ if m != nil && m.F_Pinf != nil {
+ return *m.F_Pinf
+ }
+ return Default_Defaults_F_Pinf
+}
+
+func (m *Defaults) GetF_Ninf() float32 {
+ if m != nil && m.F_Ninf != nil {
+ return *m.F_Ninf
+ }
+ return Default_Defaults_F_Ninf
+}
+
+func (m *Defaults) GetF_Nan() float32 {
+ if m != nil && m.F_Nan != nil {
+ return *m.F_Nan
+ }
+ return Default_Defaults_F_Nan
+}
+
+func (m *Defaults) GetSub() *SubDefaults {
+ if m != nil {
+ return m.Sub
+ }
+ return nil
+}
+
+func (m *Defaults) GetStrZero() string {
+ if m != nil && m.StrZero != nil {
+ return *m.StrZero
+ }
+ return ""
+}
+
+// SubDefaults is the sub-message used by Defaults; its single field N has a
+// declared default of 7.
+type SubDefaults struct {
+ N *int64 `protobuf:"varint,1,opt,name=n,def=7" json:"n,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SubDefaults) Reset() { *m = SubDefaults{} }
+func (m *SubDefaults) String() string { return proto.CompactTextString(m) }
+func (*SubDefaults) ProtoMessage() {}
+
+// Default_SubDefaults_N is the declared default for N.
+const Default_SubDefaults_N int64 = 7
+
+// GetN returns N, falling back to the declared default (7) when the field
+// is unset or m is nil.
+func (m *SubDefaults) GetN() int64 {
+ if m != nil && m.N != nil {
+ return *m.N
+ }
+ return Default_SubDefaults_N
+}
+
+// RepeatedEnum holds a repeated enum field.
+type RepeatedEnum struct {
+ Color []RepeatedEnum_Color `protobuf:"varint,1,rep,name=color,enum=testdata.RepeatedEnum_Color" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RepeatedEnum) Reset() { *m = RepeatedEnum{} }
+func (m *RepeatedEnum) String() string { return proto.CompactTextString(m) }
+func (*RepeatedEnum) ProtoMessage() {}
+
+// GetColor returns the repeated Color field, or nil if m is nil.
+func (m *RepeatedEnum) GetColor() []RepeatedEnum_Color {
+ if m != nil {
+ return m.Color
+ }
+ return nil
+}
+
+// MoreRepeated pairs packed and non-packed variants of several repeated
+// field types.
+type MoreRepeated struct {
+ Bools []bool `protobuf:"varint,1,rep,name=bools" json:"bools,omitempty"`
+ BoolsPacked []bool `protobuf:"varint,2,rep,packed,name=bools_packed" json:"bools_packed,omitempty"`
+ Ints []int32 `protobuf:"varint,3,rep,name=ints" json:"ints,omitempty"`
+ IntsPacked []int32 `protobuf:"varint,4,rep,packed,name=ints_packed" json:"ints_packed,omitempty"`
+ Int64SPacked []int64 `protobuf:"varint,7,rep,packed,name=int64s_packed" json:"int64s_packed,omitempty"`
+ Strings []string `protobuf:"bytes,5,rep,name=strings" json:"strings,omitempty"`
+ Fixeds []uint32 `protobuf:"fixed32,6,rep,name=fixeds" json:"fixeds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MoreRepeated) Reset() { *m = MoreRepeated{} }
+func (m *MoreRepeated) String() string { return proto.CompactTextString(m) }
+func (*MoreRepeated) ProtoMessage() {}
+
+// Getters below return the corresponding slice, or nil if m is nil.
+
+func (m *MoreRepeated) GetBools() []bool {
+ if m != nil {
+ return m.Bools
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetBoolsPacked() []bool {
+ if m != nil {
+ return m.BoolsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInts() []int32 {
+ if m != nil {
+ return m.Ints
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetIntsPacked() []int32 {
+ if m != nil {
+ return m.IntsPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetInt64SPacked() []int64 {
+ if m != nil {
+ return m.Int64SPacked
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetStrings() []string {
+ if m != nil {
+ return m.Strings
+ }
+ return nil
+}
+
+func (m *MoreRepeated) GetFixeds() []uint32 {
+ if m != nil {
+ return m.Fixeds
+ }
+ return nil
+}
+
+// GroupOld is the "old version" half of a group wire-compatibility pair;
+// see GroupNew below, which extends the group with an extra field.
+type GroupOld struct {
+ G *GroupOld_G `protobuf:"group,101,opt" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld) Reset() { *m = GroupOld{} }
+func (m *GroupOld) String() string { return proto.CompactTextString(m) }
+func (*GroupOld) ProtoMessage() {}
+
+// GetG returns the group sub-message, or nil if m is nil.
+func (m *GroupOld) GetG() *GroupOld_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+// GroupOld_G is GroupOld's group element type.
+type GroupOld_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupOld_G) Reset() { *m = GroupOld_G{} }
+func (m *GroupOld_G) String() string { return proto.CompactTextString(m) }
+func (*GroupOld_G) ProtoMessage() {}
+
+// GetX returns X, or 0 if unset or m is nil.
+func (m *GroupOld_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+// GroupNew is wire compatible with GroupOld; its group adds a Y field.
+type GroupNew struct {
+ G *GroupNew_G `protobuf:"group,101,opt" json:"g,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew) Reset() { *m = GroupNew{} }
+func (m *GroupNew) String() string { return proto.CompactTextString(m) }
+func (*GroupNew) ProtoMessage() {}
+
+// GetG returns the group sub-message, or nil if m is nil.
+func (m *GroupNew) GetG() *GroupNew_G {
+ if m != nil {
+ return m.G
+ }
+ return nil
+}
+
+// GroupNew_G is GroupNew's group element type; it adds Y relative to
+// GroupOld_G.
+type GroupNew_G struct {
+ X *int32 `protobuf:"varint,2,opt,name=x" json:"x,omitempty"`
+ Y *int32 `protobuf:"varint,3,opt,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GroupNew_G) Reset() { *m = GroupNew_G{} }
+func (m *GroupNew_G) String() string { return proto.CompactTextString(m) }
+func (*GroupNew_G) ProtoMessage() {}
+
+// GetX returns X, or 0 if unset or m is nil.
+func (m *GroupNew_G) GetX() int32 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+// GetY returns Y, or 0 if unset or m is nil.
+func (m *GroupNew_G) GetY() int32 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+// FloatingPoint wraps a single required double; it is the map value type of
+// MessageWithMap.MsgMapping.
+type FloatingPoint struct {
+ F *float64 `protobuf:"fixed64,1,req,name=f" json:"f,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FloatingPoint) Reset() { *m = FloatingPoint{} }
+func (m *FloatingPoint) String() string { return proto.CompactTextString(m) }
+func (*FloatingPoint) ProtoMessage() {}
+
+// GetF returns F, or 0 if unset or m is nil.
+func (m *FloatingPoint) GetF() float64 {
+ if m != nil && m.F != nil {
+ return *m.F
+ }
+ return 0
+}
+
+// MessageWithMap exercises map fields with scalar, message, and bytes value
+// types; the protobuf_key/protobuf_val tags carry the map entry encodings.
+type MessageWithMap struct {
+ NameMapping map[int32]string `protobuf:"bytes,1,rep,name=name_mapping" json:"name_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ MsgMapping map[int64]*FloatingPoint `protobuf:"bytes,2,rep,name=msg_mapping" json:"msg_mapping,omitempty" protobuf_key:"zigzag64,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ ByteMapping map[bool][]byte `protobuf:"bytes,3,rep,name=byte_mapping" json:"byte_mapping,omitempty" protobuf_key:"varint,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MessageWithMap) Reset() { *m = MessageWithMap{} }
+func (m *MessageWithMap) String() string { return proto.CompactTextString(m) }
+func (*MessageWithMap) ProtoMessage() {}
+
+// Getters below return the corresponding map, or nil if m is nil.
+
+func (m *MessageWithMap) GetNameMapping() map[int32]string {
+ if m != nil {
+ return m.NameMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetMsgMapping() map[int64]*FloatingPoint {
+ if m != nil {
+ return m.MsgMapping
+ }
+ return nil
+}
+
+func (m *MessageWithMap) GetByteMapping() map[bool][]byte {
+ if m != nil {
+ return m.ByteMapping
+ }
+ return nil
+}
+
+// E_Greeting extends MyMessage with a repeated string (field 106).
+var E_Greeting = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessage)(nil),
+ ExtensionType: ([]string)(nil),
+ Field: 106,
+ Name: "testdata.greeting",
+ Tag: "bytes,106,rep,name=greeting",
+}
+
+// The E_X2xx descriptors below extend MyMessageSet with Empty payloads at
+// consecutive field numbers, one descriptor per field, for message-set
+// extension tests.
+var E_X201 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 201,
+ Name: "testdata.x201",
+ Tag: "bytes,201,opt,name=x201",
+}
+
+var E_X202 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 202,
+ Name: "testdata.x202",
+ Tag: "bytes,202,opt,name=x202",
+}
+
+var E_X203 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 203,
+ Name: "testdata.x203",
+ Tag: "bytes,203,opt,name=x203",
+}
+
+var E_X204 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 204,
+ Name: "testdata.x204",
+ Tag: "bytes,204,opt,name=x204",
+}
+
+var E_X205 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 205,
+ Name: "testdata.x205",
+ Tag: "bytes,205,opt,name=x205",
+}
+
+var E_X206 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 206,
+ Name: "testdata.x206",
+ Tag: "bytes,206,opt,name=x206",
+}
+
+var E_X207 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 207,
+ Name: "testdata.x207",
+ Tag: "bytes,207,opt,name=x207",
+}
+
+var E_X208 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 208,
+ Name: "testdata.x208",
+ Tag: "bytes,208,opt,name=x208",
+}
+
+var E_X209 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 209,
+ Name: "testdata.x209",
+ Tag: "bytes,209,opt,name=x209",
+}
+
+var E_X210 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 210,
+ Name: "testdata.x210",
+ Tag: "bytes,210,opt,name=x210",
+}
+
+var E_X211 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 211,
+ Name: "testdata.x211",
+ Tag: "bytes,211,opt,name=x211",
+}
+
+var E_X212 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 212,
+ Name: "testdata.x212",
+ Tag: "bytes,212,opt,name=x212",
+}
+
+var E_X213 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 213,
+ Name: "testdata.x213",
+ Tag: "bytes,213,opt,name=x213",
+}
+
+var E_X214 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 214,
+ Name: "testdata.x214",
+ Tag: "bytes,214,opt,name=x214",
+}
+
+var E_X215 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 215,
+ Name: "testdata.x215",
+ Tag: "bytes,215,opt,name=x215",
+}
+
+var E_X216 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 216,
+ Name: "testdata.x216",
+ Tag: "bytes,216,opt,name=x216",
+}
+
+var E_X217 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 217,
+ Name: "testdata.x217",
+ Tag: "bytes,217,opt,name=x217",
+}
+
+var E_X218 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 218,
+ Name: "testdata.x218",
+ Tag: "bytes,218,opt,name=x218",
+}
+
+var E_X219 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 219,
+ Name: "testdata.x219",
+ Tag: "bytes,219,opt,name=x219",
+}
+
+var E_X220 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 220,
+ Name: "testdata.x220",
+ Tag: "bytes,220,opt,name=x220",
+}
+
+var E_X221 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 221,
+ Name: "testdata.x221",
+ Tag: "bytes,221,opt,name=x221",
+}
+
+var E_X222 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 222,
+ Name: "testdata.x222",
+ Tag: "bytes,222,opt,name=x222",
+}
+
+var E_X223 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 223,
+ Name: "testdata.x223",
+ Tag: "bytes,223,opt,name=x223",
+}
+
+var E_X224 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 224,
+ Name: "testdata.x224",
+ Tag: "bytes,224,opt,name=x224",
+}
+
+var E_X225 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 225,
+ Name: "testdata.x225",
+ Tag: "bytes,225,opt,name=x225",
+}
+
+var E_X226 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 226,
+ Name: "testdata.x226",
+ Tag: "bytes,226,opt,name=x226",
+}
+
+var E_X227 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 227,
+ Name: "testdata.x227",
+ Tag: "bytes,227,opt,name=x227",
+}
+
+var E_X228 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 228,
+ Name: "testdata.x228",
+ Tag: "bytes,228,opt,name=x228",
+}
+
+var E_X229 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 229,
+ Name: "testdata.x229",
+ Tag: "bytes,229,opt,name=x229",
+}
+
+var E_X230 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 230,
+ Name: "testdata.x230",
+ Tag: "bytes,230,opt,name=x230",
+}
+
+var E_X231 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 231,
+ Name: "testdata.x231",
+ Tag: "bytes,231,opt,name=x231",
+}
+
+var E_X232 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 232,
+ Name: "testdata.x232",
+ Tag: "bytes,232,opt,name=x232",
+}
+
+var E_X233 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 233,
+ Name: "testdata.x233",
+ Tag: "bytes,233,opt,name=x233",
+}
+
+var E_X234 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 234,
+ Name: "testdata.x234",
+ Tag: "bytes,234,opt,name=x234",
+}
+
+var E_X235 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 235,
+ Name: "testdata.x235",
+ Tag: "bytes,235,opt,name=x235",
+}
+
+var E_X236 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 236,
+ Name: "testdata.x236",
+ Tag: "bytes,236,opt,name=x236",
+}
+
+var E_X237 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 237,
+ Name: "testdata.x237",
+ Tag: "bytes,237,opt,name=x237",
+}
+
+var E_X238 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 238,
+ Name: "testdata.x238",
+ Tag: "bytes,238,opt,name=x238",
+}
+
+var E_X239 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 239,
+ Name: "testdata.x239",
+ Tag: "bytes,239,opt,name=x239",
+}
+
+var E_X240 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 240,
+ Name: "testdata.x240",
+ Tag: "bytes,240,opt,name=x240",
+}
+
+var E_X241 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 241,
+ Name: "testdata.x241",
+ Tag: "bytes,241,opt,name=x241",
+}
+
+var E_X242 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 242,
+ Name: "testdata.x242",
+ Tag: "bytes,242,opt,name=x242",
+}
+
+var E_X243 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 243,
+ Name: "testdata.x243",
+ Tag: "bytes,243,opt,name=x243",
+}
+
+var E_X244 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 244,
+ Name: "testdata.x244",
+ Tag: "bytes,244,opt,name=x244",
+}
+
+var E_X245 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 245,
+ Name: "testdata.x245",
+ Tag: "bytes,245,opt,name=x245",
+}
+
+var E_X246 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 246,
+ Name: "testdata.x246",
+ Tag: "bytes,246,opt,name=x246",
+}
+
+var E_X247 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 247,
+ Name: "testdata.x247",
+ Tag: "bytes,247,opt,name=x247",
+}
+
+var E_X248 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 248,
+ Name: "testdata.x248",
+ Tag: "bytes,248,opt,name=x248",
+}
+
+var E_X249 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 249,
+ Name: "testdata.x249",
+ Tag: "bytes,249,opt,name=x249",
+}
+
+var E_X250 = &proto.ExtensionDesc{
+ ExtendedType: (*MyMessageSet)(nil),
+ ExtensionType: (*Empty)(nil),
+ Field: 250,
+ Name: "testdata.x250",
+ Tag: "bytes,250,opt,name=x250",
+}
+
+func init() {
+ proto.RegisterEnum("testdata.FOO", FOO_name, FOO_value)
+ proto.RegisterEnum("testdata.GoTest_KIND", GoTest_KIND_name, GoTest_KIND_value)
+ proto.RegisterEnum("testdata.MyMessage_Color", MyMessage_Color_name, MyMessage_Color_value)
+ proto.RegisterEnum("testdata.Defaults_Color", Defaults_Color_name, Defaults_Color_value)
+ proto.RegisterEnum("testdata.RepeatedEnum_Color", RepeatedEnum_Color_name, RepeatedEnum_Color_value)
+ proto.RegisterExtension(E_Ext_More)
+ proto.RegisterExtension(E_Ext_Text)
+ proto.RegisterExtension(E_Ext_Number)
+ proto.RegisterExtension(E_Greeting)
+ proto.RegisterExtension(E_X201)
+ proto.RegisterExtension(E_X202)
+ proto.RegisterExtension(E_X203)
+ proto.RegisterExtension(E_X204)
+ proto.RegisterExtension(E_X205)
+ proto.RegisterExtension(E_X206)
+ proto.RegisterExtension(E_X207)
+ proto.RegisterExtension(E_X208)
+ proto.RegisterExtension(E_X209)
+ proto.RegisterExtension(E_X210)
+ proto.RegisterExtension(E_X211)
+ proto.RegisterExtension(E_X212)
+ proto.RegisterExtension(E_X213)
+ proto.RegisterExtension(E_X214)
+ proto.RegisterExtension(E_X215)
+ proto.RegisterExtension(E_X216)
+ proto.RegisterExtension(E_X217)
+ proto.RegisterExtension(E_X218)
+ proto.RegisterExtension(E_X219)
+ proto.RegisterExtension(E_X220)
+ proto.RegisterExtension(E_X221)
+ proto.RegisterExtension(E_X222)
+ proto.RegisterExtension(E_X223)
+ proto.RegisterExtension(E_X224)
+ proto.RegisterExtension(E_X225)
+ proto.RegisterExtension(E_X226)
+ proto.RegisterExtension(E_X227)
+ proto.RegisterExtension(E_X228)
+ proto.RegisterExtension(E_X229)
+ proto.RegisterExtension(E_X230)
+ proto.RegisterExtension(E_X231)
+ proto.RegisterExtension(E_X232)
+ proto.RegisterExtension(E_X233)
+ proto.RegisterExtension(E_X234)
+ proto.RegisterExtension(E_X235)
+ proto.RegisterExtension(E_X236)
+ proto.RegisterExtension(E_X237)
+ proto.RegisterExtension(E_X238)
+ proto.RegisterExtension(E_X239)
+ proto.RegisterExtension(E_X240)
+ proto.RegisterExtension(E_X241)
+ proto.RegisterExtension(E_X242)
+ proto.RegisterExtension(E_X243)
+ proto.RegisterExtension(E_X244)
+ proto.RegisterExtension(E_X245)
+ proto.RegisterExtension(E_X246)
+ proto.RegisterExtension(E_X247)
+ proto.RegisterExtension(E_X248)
+ proto.RegisterExtension(E_X249)
+ proto.RegisterExtension(E_X250)
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
new file mode 100644
index 000000000000..6cc755baef84
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/testdata/test.proto
@@ -0,0 +1,434 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A feature-rich test file for the protocol compiler and libraries.
+
+syntax = "proto2";
+
+package testdata;
+
+enum FOO { FOO1 = 1; };
+
+message GoEnum {
+ required FOO foo = 1;
+}
+
+message GoTestField {
+ required string Label = 1;
+ required string Type = 2;
+}
+
+message GoTest {
+ // An enum, for completeness.
+ enum KIND {
+ VOID = 0;
+
+ // Basic types
+ BOOL = 1;
+ BYTES = 2;
+ FINGERPRINT = 3;
+ FLOAT = 4;
+ INT = 5;
+ STRING = 6;
+ TIME = 7;
+
+ // Groupings
+ TUPLE = 8;
+ ARRAY = 9;
+ MAP = 10;
+
+ // Table types
+ TABLE = 11;
+
+ // Functions
+ FUNCTION = 12; // last tag
+ };
+
+ // Some typical parameters
+ required KIND Kind = 1;
+ optional string Table = 2;
+ optional int32 Param = 3;
+
+ // Required, repeated and optional foreign fields.
+ required GoTestField RequiredField = 4;
+ repeated GoTestField RepeatedField = 5;
+ optional GoTestField OptionalField = 6;
+
+ // Required fields of all basic types
+ required bool F_Bool_required = 10;
+ required int32 F_Int32_required = 11;
+ required int64 F_Int64_required = 12;
+ required fixed32 F_Fixed32_required = 13;
+ required fixed64 F_Fixed64_required = 14;
+ required uint32 F_Uint32_required = 15;
+ required uint64 F_Uint64_required = 16;
+ required float F_Float_required = 17;
+ required double F_Double_required = 18;
+ required string F_String_required = 19;
+ required bytes F_Bytes_required = 101;
+ required sint32 F_Sint32_required = 102;
+ required sint64 F_Sint64_required = 103;
+
+ // Repeated fields of all basic types
+ repeated bool F_Bool_repeated = 20;
+ repeated int32 F_Int32_repeated = 21;
+ repeated int64 F_Int64_repeated = 22;
+ repeated fixed32 F_Fixed32_repeated = 23;
+ repeated fixed64 F_Fixed64_repeated = 24;
+ repeated uint32 F_Uint32_repeated = 25;
+ repeated uint64 F_Uint64_repeated = 26;
+ repeated float F_Float_repeated = 27;
+ repeated double F_Double_repeated = 28;
+ repeated string F_String_repeated = 29;
+ repeated bytes F_Bytes_repeated = 201;
+ repeated sint32 F_Sint32_repeated = 202;
+ repeated sint64 F_Sint64_repeated = 203;
+
+ // Optional fields of all basic types
+ optional bool F_Bool_optional = 30;
+ optional int32 F_Int32_optional = 31;
+ optional int64 F_Int64_optional = 32;
+ optional fixed32 F_Fixed32_optional = 33;
+ optional fixed64 F_Fixed64_optional = 34;
+ optional uint32 F_Uint32_optional = 35;
+ optional uint64 F_Uint64_optional = 36;
+ optional float F_Float_optional = 37;
+ optional double F_Double_optional = 38;
+ optional string F_String_optional = 39;
+ optional bytes F_Bytes_optional = 301;
+ optional sint32 F_Sint32_optional = 302;
+ optional sint64 F_Sint64_optional = 303;
+
+ // Default-valued fields of all basic types
+ optional bool F_Bool_defaulted = 40 [default=true];
+ optional int32 F_Int32_defaulted = 41 [default=32];
+ optional int64 F_Int64_defaulted = 42 [default=64];
+ optional fixed32 F_Fixed32_defaulted = 43 [default=320];
+ optional fixed64 F_Fixed64_defaulted = 44 [default=640];
+ optional uint32 F_Uint32_defaulted = 45 [default=3200];
+ optional uint64 F_Uint64_defaulted = 46 [default=6400];
+ optional float F_Float_defaulted = 47 [default=314159.];
+ optional double F_Double_defaulted = 48 [default=271828.];
+ optional string F_String_defaulted = 49 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes_defaulted = 401 [default="Bignose"];
+ optional sint32 F_Sint32_defaulted = 402 [default = -32];
+ optional sint64 F_Sint64_defaulted = 403 [default = -64];
+
+ // Packed repeated fields (no string or bytes).
+ repeated bool F_Bool_repeated_packed = 50 [packed=true];
+ repeated int32 F_Int32_repeated_packed = 51 [packed=true];
+ repeated int64 F_Int64_repeated_packed = 52 [packed=true];
+ repeated fixed32 F_Fixed32_repeated_packed = 53 [packed=true];
+ repeated fixed64 F_Fixed64_repeated_packed = 54 [packed=true];
+ repeated uint32 F_Uint32_repeated_packed = 55 [packed=true];
+ repeated uint64 F_Uint64_repeated_packed = 56 [packed=true];
+ repeated float F_Float_repeated_packed = 57 [packed=true];
+ repeated double F_Double_repeated_packed = 58 [packed=true];
+ repeated sint32 F_Sint32_repeated_packed = 502 [packed=true];
+ repeated sint64 F_Sint64_repeated_packed = 503 [packed=true];
+
+ // Required, repeated, and optional groups.
+ required group RequiredGroup = 70 {
+ required string RequiredField = 71;
+ };
+
+ repeated group RepeatedGroup = 80 {
+ required string RequiredField = 81;
+ };
+
+ optional group OptionalGroup = 90 {
+ required string RequiredField = 91;
+ };
+}
+
+// For testing skipping of unrecognized fields.
+// Numbers are all big, larger than tag numbers in GoTestField,
+// the message used in the corresponding test.
+message GoSkipTest {
+ required int32 skip_int32 = 11;
+ required fixed32 skip_fixed32 = 12;
+ required fixed64 skip_fixed64 = 13;
+ required string skip_string = 14;
+ required group SkipGroup = 15 {
+ required int32 group_int32 = 16;
+ required string group_string = 17;
+ }
+}
+
+// For testing packed/non-packed decoder switching.
+// A serialized instance of one should be deserializable as the other.
+message NonPackedTest {
+ repeated int32 a = 1;
+}
+
+message PackedTest {
+ repeated int32 b = 1 [packed=true];
+}
+
+message MaxTag {
+ // Maximum possible tag number.
+ optional string last_field = 536870911;
+}
+
+message OldMessage {
+ message Nested {
+ optional string name = 1;
+ }
+ optional Nested nested = 1;
+
+ optional int32 num = 2;
+}
+
+// NewMessage is wire compatible with OldMessage;
+// imagine it as a future version.
+message NewMessage {
+ message Nested {
+ optional string name = 1;
+ optional string food_group = 2;
+ }
+ optional Nested nested = 1;
+
+ // This is an int32 in OldMessage.
+ optional int64 num = 2;
+}
+
+// Smaller tests for ASCII formatting.
+
+message InnerMessage {
+ required string host = 1;
+ optional int32 port = 2 [default=4000];
+ optional bool connected = 3;
+}
+
+message OtherMessage {
+ optional int64 key = 1;
+ optional bytes value = 2;
+ optional float weight = 3;
+ optional InnerMessage inner = 4;
+}
+
+message MyMessage {
+ required int32 count = 1;
+ optional string name = 2;
+ optional string quote = 3;
+ repeated string pet = 4;
+ optional InnerMessage inner = 5;
+ repeated OtherMessage others = 6;
+ repeated InnerMessage rep_inner = 12;
+
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ };
+ optional Color bikeshed = 7;
+
+ optional group SomeGroup = 8 {
+ optional int32 group_field = 9;
+ }
+
+ // This field becomes [][]byte in the generated code.
+ repeated bytes rep_bytes = 10;
+
+ optional double bigfloat = 11;
+
+ extensions 100 to max;
+}
+
+message Ext {
+ extend MyMessage {
+ optional Ext more = 103;
+ optional string text = 104;
+ optional int32 number = 105;
+ }
+
+ optional string data = 1;
+}
+
+extend MyMessage {
+ repeated string greeting = 106;
+}
+
+message MyMessageSet {
+ option message_set_wire_format = true;
+ extensions 100 to max;
+}
+
+message Empty {
+}
+
+extend MyMessageSet {
+ optional Empty x201 = 201;
+ optional Empty x202 = 202;
+ optional Empty x203 = 203;
+ optional Empty x204 = 204;
+ optional Empty x205 = 205;
+ optional Empty x206 = 206;
+ optional Empty x207 = 207;
+ optional Empty x208 = 208;
+ optional Empty x209 = 209;
+ optional Empty x210 = 210;
+ optional Empty x211 = 211;
+ optional Empty x212 = 212;
+ optional Empty x213 = 213;
+ optional Empty x214 = 214;
+ optional Empty x215 = 215;
+ optional Empty x216 = 216;
+ optional Empty x217 = 217;
+ optional Empty x218 = 218;
+ optional Empty x219 = 219;
+ optional Empty x220 = 220;
+ optional Empty x221 = 221;
+ optional Empty x222 = 222;
+ optional Empty x223 = 223;
+ optional Empty x224 = 224;
+ optional Empty x225 = 225;
+ optional Empty x226 = 226;
+ optional Empty x227 = 227;
+ optional Empty x228 = 228;
+ optional Empty x229 = 229;
+ optional Empty x230 = 230;
+ optional Empty x231 = 231;
+ optional Empty x232 = 232;
+ optional Empty x233 = 233;
+ optional Empty x234 = 234;
+ optional Empty x235 = 235;
+ optional Empty x236 = 236;
+ optional Empty x237 = 237;
+ optional Empty x238 = 238;
+ optional Empty x239 = 239;
+ optional Empty x240 = 240;
+ optional Empty x241 = 241;
+ optional Empty x242 = 242;
+ optional Empty x243 = 243;
+ optional Empty x244 = 244;
+ optional Empty x245 = 245;
+ optional Empty x246 = 246;
+ optional Empty x247 = 247;
+ optional Empty x248 = 248;
+ optional Empty x249 = 249;
+ optional Empty x250 = 250;
+}
+
+message MessageList {
+ repeated group Message = 1 {
+ required string name = 2;
+ required int32 count = 3;
+ }
+}
+
+message Strings {
+ optional string string_field = 1;
+ optional bytes bytes_field = 2;
+}
+
+message Defaults {
+ enum Color {
+ RED = 0;
+ GREEN = 1;
+ BLUE = 2;
+ }
+
+ // Default-valued fields of all basic types.
+ // Same as GoTest, but copied here to make testing easier.
+ optional bool F_Bool = 1 [default=true];
+ optional int32 F_Int32 = 2 [default=32];
+ optional int64 F_Int64 = 3 [default=64];
+ optional fixed32 F_Fixed32 = 4 [default=320];
+ optional fixed64 F_Fixed64 = 5 [default=640];
+ optional uint32 F_Uint32 = 6 [default=3200];
+ optional uint64 F_Uint64 = 7 [default=6400];
+ optional float F_Float = 8 [default=314159.];
+ optional double F_Double = 9 [default=271828.];
+ optional string F_String = 10 [default="hello, \"world!\"\n"];
+ optional bytes F_Bytes = 11 [default="Bignose"];
+ optional sint32 F_Sint32 = 12 [default=-32];
+ optional sint64 F_Sint64 = 13 [default=-64];
+ optional Color F_Enum = 14 [default=GREEN];
+
+ // More fields with crazy defaults.
+ optional float F_Pinf = 15 [default=inf];
+ optional float F_Ninf = 16 [default=-inf];
+ optional float F_Nan = 17 [default=nan];
+
+ // Sub-message.
+ optional SubDefaults sub = 18;
+
+ // Redundant but explicit defaults.
+ optional string str_zero = 19 [default=""];
+}
+
+message SubDefaults {
+ optional int64 n = 1 [default=7];
+}
+
+message RepeatedEnum {
+ enum Color {
+ RED = 1;
+ }
+ repeated Color color = 1;
+}
+
+message MoreRepeated {
+ repeated bool bools = 1;
+ repeated bool bools_packed = 2 [packed=true];
+ repeated int32 ints = 3;
+ repeated int32 ints_packed = 4 [packed=true];
+ repeated int64 int64s_packed = 7 [packed=true];
+ repeated string strings = 5;
+ repeated fixed32 fixeds = 6;
+}
+
+// GroupOld and GroupNew have the same wire format.
+// GroupNew has a new field inside a group.
+
+message GroupOld {
+ optional group G = 101 {
+ optional int32 x = 2;
+ }
+}
+
+message GroupNew {
+ optional group G = 101 {
+ optional int32 x = 2;
+ optional int32 y = 3;
+ }
+}
+
+message FloatingPoint {
+ required double f = 1;
+}
+
+message MessageWithMap {
+ map name_mapping = 1;
+ map msg_mapping = 2;
+ map byte_mapping = 3;
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
new file mode 100644
index 000000000000..720eac470505
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text.go
@@ -0,0 +1,789 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for writing the text protocol buffer format.
+
+import (
+ "bufio"
+ "bytes"
+ "encoding"
+ "fmt"
+ "io"
+ "log"
+ "math"
+ "os"
+ "reflect"
+ "sort"
+ "strings"
+)
+
+var (
+ newline = []byte("\n")
+ spaces = []byte(" ")
+ gtNewline = []byte(">\n")
+ endBraceNewline = []byte("}\n")
+ backslashN = []byte{'\\', 'n'}
+ backslashR = []byte{'\\', 'r'}
+ backslashT = []byte{'\\', 't'}
+ backslashDQ = []byte{'\\', '"'}
+ backslashBS = []byte{'\\', '\\'}
+ posInf = []byte("inf")
+ negInf = []byte("-inf")
+ nan = []byte("nan")
+)
+
+type writer interface {
+ io.Writer
+ WriteByte(byte) error
+}
+
+// textWriter is an io.Writer that tracks its indentation level.
+type textWriter struct {
+ ind int
+ complete bool // if the current position is a complete line
+ compact bool // whether to write out as a one-liner
+ w writer
+}
+
+func (w *textWriter) WriteString(s string) (n int, err error) {
+ if !strings.Contains(s, "\n") {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ w.complete = false
+ return io.WriteString(w.w, s)
+ }
+ // WriteString is typically called without newlines, so this
+ // codepath and its copy are rare. We copy to avoid
+ // duplicating all of Write's logic here.
+ return w.Write([]byte(s))
+}
+
+func (w *textWriter) Write(p []byte) (n int, err error) {
+ newlines := bytes.Count(p, newline)
+ if newlines == 0 {
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ n, err = w.w.Write(p)
+ w.complete = false
+ return n, err
+ }
+
+ frags := bytes.SplitN(p, newline, newlines+1)
+ if w.compact {
+ for i, frag := range frags {
+ if i > 0 {
+ if err := w.w.WriteByte(' '); err != nil {
+ return n, err
+ }
+ n++
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ }
+ return n, nil
+ }
+
+ for i, frag := range frags {
+ if w.complete {
+ w.writeIndent()
+ }
+ nn, err := w.w.Write(frag)
+ n += nn
+ if err != nil {
+ return n, err
+ }
+ if i+1 < len(frags) {
+ if err := w.w.WriteByte('\n'); err != nil {
+ return n, err
+ }
+ n++
+ }
+ }
+ w.complete = len(frags[len(frags)-1]) == 0
+ return n, nil
+}
+
+func (w *textWriter) WriteByte(c byte) error {
+ if w.compact && c == '\n' {
+ c = ' '
+ }
+ if !w.compact && w.complete {
+ w.writeIndent()
+ }
+ err := w.w.WriteByte(c)
+ w.complete = c == '\n'
+ return err
+}
+
+func (w *textWriter) indent() { w.ind++ }
+
+func (w *textWriter) unindent() {
+ if w.ind == 0 {
+ log.Printf("proto: textWriter unindented too far")
+ return
+ }
+ w.ind--
+}
+
+func writeName(w *textWriter, props *Properties) error {
+ if _, err := w.WriteString(props.OrigName); err != nil {
+ return err
+ }
+ if props.Wire != "group" {
+ return w.WriteByte(':')
+ }
+ return nil
+}
+
+var (
+ messageSetType = reflect.TypeOf((*MessageSet)(nil)).Elem()
+)
+
+// raw is the interface satisfied by RawMessage.
+type raw interface {
+ Bytes() []byte
+}
+
+func writeStruct(w *textWriter, sv reflect.Value) error {
+ if sv.Type() == messageSetType {
+ return writeMessageSet(w, sv.Addr().Interface().(*MessageSet))
+ }
+
+ st := sv.Type()
+ sprops := GetProperties(st)
+ for i := 0; i < sv.NumField(); i++ {
+ fv := sv.Field(i)
+ props := sprops.Prop[i]
+ name := st.Field(i).Name
+
+ if strings.HasPrefix(name, "XXX_") {
+ // There are two XXX_ fields:
+ // XXX_unrecognized []byte
+ // XXX_extensions map[int32]proto.Extension
+ // The first is handled here;
+ // the second is handled at the bottom of this function.
+ if name == "XXX_unrecognized" && !fv.IsNil() {
+ if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Ptr && fv.IsNil() {
+ // Field not filled in. This could be an optional field or
+ // a required field that wasn't filled in. Either way, there
+ // isn't anything we can show for it.
+ continue
+ }
+ if fv.Kind() == reflect.Slice && fv.IsNil() {
+ // Repeated field that is empty, or a bytes field that is unused.
+ continue
+ }
+
+ if props.Repeated && fv.Kind() == reflect.Slice {
+ // Repeated field.
+ for j := 0; j < fv.Len(); j++ {
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ v := fv.Index(j)
+ if v.Kind() == reflect.Ptr && v.IsNil() {
+ // A nil message in a repeated field is not valid,
+ // but we can handle that more gracefully than panicking.
+ if _, err := w.Write([]byte("\n")); err != nil {
+ return err
+ }
+ continue
+ }
+ if err := writeAny(w, v, props); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if fv.Kind() == reflect.Map {
+ // Map fields are rendered as a repeated struct with key/value fields.
+ keys := fv.MapKeys() // TODO: should we sort these for deterministic output?
+ sort.Sort(mapKeys(keys))
+ for _, key := range keys {
+ val := fv.MapIndex(key)
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ // open struct
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ // key
+ if _, err := w.WriteString("key:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, key, props.mkeyprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // value
+ if _, err := w.WriteString("value:"); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, val, props.mvalprop); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ // close struct
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 {
+ // empty bytes field
+ continue
+ }
+ if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice {
+ // proto3 non-repeated scalar field; skip if zero value
+ switch fv.Kind() {
+ case reflect.Bool:
+ if !fv.Bool() {
+ continue
+ }
+ case reflect.Int32, reflect.Int64:
+ if fv.Int() == 0 {
+ continue
+ }
+ case reflect.Uint32, reflect.Uint64:
+ if fv.Uint() == 0 {
+ continue
+ }
+ case reflect.Float32, reflect.Float64:
+ if fv.Float() == 0 {
+ continue
+ }
+ case reflect.String:
+ if fv.String() == "" {
+ continue
+ }
+ }
+ }
+
+ if err := writeName(w, props); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if b, ok := fv.Interface().(raw); ok {
+ if err := writeRaw(w, b.Bytes()); err != nil {
+ return err
+ }
+ continue
+ }
+
+ // Enums have a String method, so writeAny will work fine.
+ if err := writeAny(w, fv, props); err != nil {
+ return err
+ }
+
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+
+ // Extensions (the XXX_extensions field).
+ pv := sv.Addr()
+ if pv.Type().Implements(extendableProtoType) {
+ if err := writeExtensions(w, pv); err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// writeRaw writes an uninterpreted raw message.
+func writeRaw(w *textWriter, b []byte) error {
+ if err := w.WriteByte('<'); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if err := writeUnknownStruct(w, b); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte('>'); err != nil {
+ return err
+ }
+ return nil
+}
+
+// writeAny writes an arbitrary field.
+func writeAny(w *textWriter, v reflect.Value, props *Properties) error {
+ v = reflect.Indirect(v)
+
+ // Floats have special cases.
+ if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 {
+ x := v.Float()
+ var b []byte
+ switch {
+ case math.IsInf(x, 1):
+ b = posInf
+ case math.IsInf(x, -1):
+ b = negInf
+ case math.IsNaN(x):
+ b = nan
+ }
+ if b != nil {
+ _, err := w.Write(b)
+ return err
+ }
+ // Other values are handled below.
+ }
+
+ // We don't attempt to serialise every possible value type; only those
+ // that can occur in protocol buffers.
+ switch v.Kind() {
+ case reflect.Slice:
+ // Should only be a []byte; repeated fields are handled in writeStruct.
+ if err := writeString(w, string(v.Interface().([]byte))); err != nil {
+ return err
+ }
+ case reflect.String:
+ if err := writeString(w, v.String()); err != nil {
+ return err
+ }
+ case reflect.Struct:
+ // Required/optional group/message.
+ var bra, ket byte = '<', '>'
+ if props != nil && props.Wire == "group" {
+ bra, ket = '{', '}'
+ }
+ if err := w.WriteByte(bra); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ }
+ w.indent()
+ if tm, ok := v.Interface().(encoding.TextMarshaler); ok {
+ text, err := tm.MarshalText()
+ if err != nil {
+ return err
+ }
+ if _, err = w.Write(text); err != nil {
+ return err
+ }
+ } else if err := writeStruct(w, v); err != nil {
+ return err
+ }
+ w.unindent()
+ if err := w.WriteByte(ket); err != nil {
+ return err
+ }
+ default:
+ _, err := fmt.Fprint(w, v.Interface())
+ return err
+ }
+ return nil
+}
+
+// equivalent to C's isprint.
+func isprint(c byte) bool {
+ return c >= 0x20 && c < 0x7f
+}
+
+// writeString writes a string in the protocol buffer text format.
+// It is similar to strconv.Quote except we don't use Go escape sequences,
+// we treat the string as a byte sequence, and we use octal escapes.
+// These differences are to maintain interoperability with the other
+// languages' implementations of the text format.
+func writeString(w *textWriter, s string) error {
+ // use WriteByte here to get any needed indent
+ if err := w.WriteByte('"'); err != nil {
+ return err
+ }
+ // Loop over the bytes, not the runes.
+ for i := 0; i < len(s); i++ {
+ var err error
+ // Divergence from C++: we don't escape apostrophes.
+ // There's no need to escape them, and the C++ parser
+ // copes with a naked apostrophe.
+ switch c := s[i]; c {
+ case '\n':
+ _, err = w.w.Write(backslashN)
+ case '\r':
+ _, err = w.w.Write(backslashR)
+ case '\t':
+ _, err = w.w.Write(backslashT)
+ case '"':
+ _, err = w.w.Write(backslashDQ)
+ case '\\':
+ _, err = w.w.Write(backslashBS)
+ default:
+ if isprint(c) {
+ err = w.w.WriteByte(c)
+ } else {
+ _, err = fmt.Fprintf(w.w, "\\%03o", c)
+ }
+ }
+ if err != nil {
+ return err
+ }
+ }
+ return w.WriteByte('"')
+}
+
// writeMessageSet writes ms in text form: each item appears as
// "[type name]: < ... >". Items whose type is registered in messageSetMap
// are decoded and printed as structs; unknown types are dumped via
// writeUnknownStruct. The returned error comes only from writing to w.
func writeMessageSet(w *textWriter, ms *MessageSet) error {
	for _, item := range ms.Item {
		// NOTE(review): assumes item.TypeId is non-nil; a nil TypeId would panic — confirm callers guarantee this.
		id := *item.TypeId
		if msd, ok := messageSetMap[id]; ok {
			// Known message set type.
			if _, err := fmt.Fprintf(w, "[%s]: <\n", msd.name); err != nil {
				return err
			}
			w.indent()

			pb := reflect.New(msd.t.Elem())
			if err := Unmarshal(item.Message, pb.Interface().(Message)); err != nil {
				// Decode failure is reported inline as a comment rather than aborting.
				if _, err := fmt.Fprintf(w, "/* bad message: %v */\n", err); err != nil {
					return err
				}
			} else {
				if err := writeStruct(w, pb.Elem()); err != nil {
					return err
				}
			}
		} else {
			// Unknown type.
			if _, err := fmt.Fprintf(w, "[%d]: <\n", id); err != nil {
				return err
			}
			w.indent()
			if err := writeUnknownStruct(w, item.Message); err != nil {
				return err
			}
		}
		// Close the "<" opened in either branch above.
		w.unindent()
		if _, err := w.Write(gtNewline); err != nil {
			return err
		}
	}
	return nil
}
+
// writeUnknownStruct prints raw, undecoded protocol buffer bytes in a
// best-effort readable form: "tag: value" lines, braces for groups, and
// /* ... */ comments for anything that fails to decode. The returned error
// reflects only failures writing to w, never decode failures.
func writeUnknownStruct(w *textWriter, data []byte) (err error) {
	if !w.compact {
		if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil {
			return err
		}
	}
	b := NewBuffer(data)
	for b.index < len(b.buf) {
		x, err := b.DecodeVarint()
		if err != nil {
			// Deliberate shadow: the decode error is printed as a comment,
			// and the Fprintf error (usually nil) is what gets returned.
			_, err := fmt.Fprintf(w, "/* %v */\n", err)
			return err
		}
		// Low 3 bits are the wire type, the rest is the field tag.
		wire, tag := x&7, x>>3
		if wire == WireEndGroup {
			w.unindent()
			if _, err := w.Write(endBraceNewline); err != nil {
				return err
			}
			continue
		}
		if _, err := fmt.Fprint(w, tag); err != nil {
			return err
		}
		if wire != WireStartGroup {
			// Groups are printed as "tag {", everything else as "tag: value".
			if err := w.WriteByte(':'); err != nil {
				return err
			}
		}
		if !w.compact || wire == WireStartGroup {
			if err := w.WriteByte(' '); err != nil {
				return err
			}
		}
		switch wire {
		case WireBytes:
			buf, e := b.DecodeRawBytes(false)
			if e == nil {
				_, err = fmt.Fprintf(w, "%q", buf)
			} else {
				_, err = fmt.Fprintf(w, "/* %v */", e)
			}
		case WireFixed32:
			x, err = b.DecodeFixed32()
			err = writeUnknownInt(w, x, err)
		case WireFixed64:
			x, err = b.DecodeFixed64()
			err = writeUnknownInt(w, x, err)
		case WireStartGroup:
			err = w.WriteByte('{')
			w.indent()
		case WireVarint:
			x, err = b.DecodeVarint()
			err = writeUnknownInt(w, x, err)
		default:
			_, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire)
		}
		if err != nil {
			return err
		}
		if err = w.WriteByte('\n'); err != nil {
			return err
		}
	}
	return nil
}
+
+func writeUnknownInt(w *textWriter, x uint64, err error) error {
+ if err == nil {
+ _, err = fmt.Fprint(w, x)
+ } else {
+ _, err = fmt.Fprintf(w, "/* %v */", err)
+ }
+ return err
+}
+
// int32Slice implements sort.Interface for a slice of int32,
// ordering values ascending.
type int32Slice []int32

func (s int32Slice) Len() int { return len(s) }

func (s int32Slice) Less(i, j int) bool {
	return s[i] < s[j]
}

func (s int32Slice) Swap(i, j int) {
	tmp := s[i]
	s[i] = s[j]
	s[j] = tmp
}
+
// writeExtensions writes all the extensions in pv.
// pv is assumed to be a pointer to a protocol message struct that is extendable.
func writeExtensions(w *textWriter, pv reflect.Value) error {
	emap := extensionMaps[pv.Type().Elem()]
	ep := pv.Interface().(extendableProto)

	// Order the extensions by ID.
	// This isn't strictly necessary, but it will give us
	// canonical output, which will also make testing easier.
	m := ep.ExtensionMap()
	ids := make([]int32, 0, len(m))
	for id := range m {
		ids = append(ids, id)
	}
	sort.Sort(int32Slice(ids))

	for _, extNum := range ids {
		ext := m[extNum]
		var desc *ExtensionDesc
		if emap != nil {
			desc = emap[extNum]
		}
		if desc == nil {
			// Unknown extension: dump its still-encoded bytes.
			if err := writeUnknownStruct(w, ext.enc); err != nil {
				return err
			}
			continue
		}

		pb, err := GetExtension(ep, desc)
		if err != nil {
			// Best effort: report on stderr and keep writing the rest.
			if _, err := fmt.Fprintln(os.Stderr, "proto: failed getting extension: ", err); err != nil {
				return err
			}
			continue
		}

		// Repeated extensions will appear as a slice.
		if !desc.repeated() {
			if err := writeExtension(w, desc.Name, pb); err != nil {
				return err
			}
		} else {
			v := reflect.ValueOf(pb)
			for i := 0; i < v.Len(); i++ {
				if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
+
+func writeExtension(w *textWriter, name string, pb interface{}) error {
+ if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil {
+ return err
+ }
+ if !w.compact {
+ if err := w.WriteByte(' '); err != nil {
+ return err
+ }
+ }
+ if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil {
+ return err
+ }
+ if err := w.WriteByte('\n'); err != nil {
+ return err
+ }
+ return nil
+}
+
+func (w *textWriter) writeIndent() {
+ if !w.complete {
+ return
+ }
+ remain := w.ind * 2
+ for remain > 0 {
+ n := remain
+ if n > len(spaces) {
+ n = len(spaces)
+ }
+ w.w.Write(spaces[:n])
+ remain -= n
+ }
+ w.complete = false
+}
+
// marshalText writes pb to w in text form (one field per line, or a single
// line when compact). A nil message writes nothing. Messages implementing
// encoding.TextMarshaler supply their own representation verbatim.
func marshalText(w io.Writer, pb Message, compact bool) error {
	val := reflect.ValueOf(pb)
	if pb == nil || val.IsNil() {
		w.Write([]byte(""))
		return nil
	}
	// Wrap w in a bufio.Writer unless it already supports the byte-level
	// writer interface textWriter needs; bw is non-nil only when we wrapped.
	var bw *bufio.Writer
	ww, ok := w.(writer)
	if !ok {
		bw = bufio.NewWriter(w)
		ww = bw
	}
	aw := &textWriter{
		w:        ww,
		complete: true,
		compact:  compact,
	}

	if tm, ok := pb.(encoding.TextMarshaler); ok {
		text, err := tm.MarshalText()
		if err != nil {
			return err
		}
		if _, err = aw.Write(text); err != nil {
			return err
		}
		// Flush only if we introduced the buffering ourselves.
		if bw != nil {
			return bw.Flush()
		}
		return nil
	}
	// Dereference the received pointer so we don't have outer < and >.
	v := reflect.Indirect(val)
	if err := writeStruct(aw, v); err != nil {
		return err
	}
	if bw != nil {
		return bw.Flush()
	}
	return nil
}
+
+// MarshalText writes a given protocol buffer in text format.
+// The only errors returned are from w.
+func MarshalText(w io.Writer, pb Message) error {
+ return marshalText(w, pb, false)
+}
+
+// MarshalTextString is the same as MarshalText, but returns the string directly.
+func MarshalTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, false)
+ return buf.String()
+}
+
+// CompactText writes a given protocol buffer in compact text format (one line).
+func CompactText(w io.Writer, pb Message) error { return marshalText(w, pb, true) }
+
+// CompactTextString is the same as CompactText, but returns the string directly.
+func CompactTextString(pb Message) string {
+ var buf bytes.Buffer
+ marshalText(&buf, pb, true)
+ return buf.String()
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
new file mode 100644
index 000000000000..ddd9579cdf47
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser.go
@@ -0,0 +1,757 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto
+
+// Functions for parsing the Text protocol buffer format.
+// TODO: message sets.
+
+import (
+ "encoding"
+ "errors"
+ "fmt"
+ "reflect"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
// ParseError describes a failure encountered while parsing text-format input.
type ParseError struct {
	Message string
	Line    int // 1-based line number
	Offset  int // 0-based byte offset from start of input
}

// Error implements the error interface. The byte offset is shown only for
// errors on the first line.
func (p *ParseError) Error() string {
	if p.Line != 1 {
		return fmt.Sprintf("line %d: %v", p.Line, p.Message)
	}
	return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message)
}
+
// token is one lexical element produced by the textParser tokenizer: a
// punctuation symbol, an identifier/number, or a quoted string.
type token struct {
	value    string
	err      *ParseError
	line     int    // line number
	offset   int    // byte number from start of input, not start of line
	unquoted string // the unquoted version of value, if it was a quoted string
}

// String renders the token, or its parse error, for debugging.
func (t *token) String() string {
	if t.err == nil {
		return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset)
	}
	return fmt.Sprintf("parse error: %v", t.err)
}
+
// textParser holds the state of an in-progress text-format parse.
// Input is consumed from the front of s; cur is the most recent token.
type textParser struct {
	s            string // remaining input
	done         bool   // whether the parsing is finished (success or error)
	backed       bool   // whether back() was called
	offset, line int    // position of the front of s within the original input
	cur          token
}
+
+func newTextParser(s string) *textParser {
+ p := new(textParser)
+ p.s = s
+ p.line = 1
+ p.cur.line = 1
+ return p
+}
+
+func (p *textParser) errorf(format string, a ...interface{}) *ParseError {
+ pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset}
+ p.cur.err = pe
+ p.done = true
+ return pe
+}
+
// isIdentOrNumberChar reports whether c may appear in an identifier or
// number token; the accepted set is [-+._A-Za-z0-9].
func isIdentOrNumberChar(c byte) bool {
	switch c {
	case '-', '+', '.', '_':
		return true
	}
	return ('A' <= c && c <= 'Z') ||
		('a' <= c && c <= 'z') ||
		('0' <= c && c <= '9')
}
+
// isWhitespace reports whether c is one of the four ASCII whitespace
// characters the text format recognizes: space, tab, newline, carriage return.
func isWhitespace(c byte) bool {
	return c == ' ' || c == '\t' || c == '\n' || c == '\r'
}
+
// skipWhitespace advances past whitespace and '#' line comments, keeping the
// line counter and byte offset in sync with the consumed input. It sets
// p.done when the input is exhausted.
func (p *textParser) skipWhitespace() {
	i := 0
	for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') {
		if p.s[i] == '#' {
			// comment; skip to end of line or input
			for i < len(p.s) && p.s[i] != '\n' {
				i++
			}
			if i == len(p.s) {
				break
			}
		}
		// Count the newline (either literal or the one terminating a comment).
		if p.s[i] == '\n' {
			p.line++
		}
		i++
	}
	p.offset += i
	p.s = p.s[i:len(p.s)]
	if len(p.s) == 0 {
		p.done = true
	}
}
+
// advance scans the next token from the input into p.cur. A token is a single
// punctuation symbol, a quoted string (single or double quotes), or a run of
// identifier/number characters. Errors are recorded via p.errorf, which also
// sets p.done.
func (p *textParser) advance() {
	// Skip whitespace
	p.skipWhitespace()
	if p.done {
		return
	}

	// Start of non-whitespace
	p.cur.err = nil
	p.cur.offset, p.cur.line = p.offset, p.line
	p.cur.unquoted = ""
	switch p.s[0] {
	case '<', '>', '{', '}', ':', '[', ']', ';', ',':
		// Single symbol
		p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)]
	case '"', '\'':
		// Quoted string; the closing quote must match the opening one and
		// appear on the same line.
		i := 1
		for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' {
			if p.s[i] == '\\' && i+1 < len(p.s) {
				// skip escaped char
				i++
			}
			i++
		}
		if i >= len(p.s) || p.s[i] != p.s[0] {
			p.errorf("unmatched quote")
			return
		}
		unq, err := unquoteC(p.s[1:i], rune(p.s[0]))
		if err != nil {
			p.errorf("invalid quoted string %v", p.s[0:i+1])
			return
		}
		// value keeps the surrounding quotes; unquoted is the decoded form.
		p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)]
		p.cur.unquoted = unq
	default:
		i := 0
		for i < len(p.s) && isIdentOrNumberChar(p.s[i]) {
			i++
		}
		if i == 0 {
			p.errorf("unexpected byte %#x", p.s[0])
			return
		}
		p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)]
	}
	p.offset += len(p.cur.value)
}
+
// Errors returned by the unquoting routines below.
var (
	errBadUTF8 = errors.New("proto: bad UTF-8")
	errBadHex  = errors.New("proto: bad hexadecimal")
)
+
// unquoteC interprets the escaped contents of a quoted string (without its
// surrounding quotes), decoding backslash escapes via unescape. quote is the
// delimiter that enclosed s.
func unquoteC(s string, quote rune) (string, error) {
	// This is based on C++'s tokenizer.cc.
	// Despite its name, this is *not* parsing C syntax.
	// For instance, "\0" is an invalid quoted string.

	// Avoid allocation in trivial cases.
	simple := true
	for _, r := range s {
		if r == '\\' || r == quote {
			simple = false
			break
		}
	}
	if simple {
		return s, nil
	}

	// 3/2 is a heuristic initial capacity: escapes shrink, multi-byte runes keep size.
	buf := make([]byte, 0, 3*len(s)/2)
	for len(s) > 0 {
		r, n := utf8.DecodeRuneInString(s)
		if r == utf8.RuneError && n == 1 {
			return "", errBadUTF8
		}
		s = s[n:]
		if r != '\\' {
			if r < utf8.RuneSelf {
				buf = append(buf, byte(r))
			} else {
				buf = append(buf, string(r)...)
			}
			continue
		}

		// Backslash: decode one escape sequence.
		ch, tail, err := unescape(s)
		if err != nil {
			return "", err
		}
		buf = append(buf, ch...)
		s = tail
	}
	return string(buf), nil
}
+
// unescape decodes one escape sequence from the start of s (the text after a
// backslash), returning the decoded bytes, the remaining input, and any
// error. Supported escapes: single characters (\n, \t, ...), octal (\NNN),
// hex (\xNN), and Unicode (\uNNNN, \UNNNNNNNN).
func unescape(s string) (ch string, tail string, err error) {
	r, n := utf8.DecodeRuneInString(s)
	if r == utf8.RuneError && n == 1 {
		return "", "", errBadUTF8
	}
	s = s[n:]
	switch r {
	case 'a':
		return "\a", s, nil
	case 'b':
		return "\b", s, nil
	case 'f':
		return "\f", s, nil
	case 'n':
		return "\n", s, nil
	case 'r':
		return "\r", s, nil
	case 't':
		return "\t", s, nil
	case 'v':
		return "\v", s, nil
	case '?':
		return "?", s, nil // trigraph workaround
	case '\'', '"', '\\':
		return string(r), s, nil
	case '0', '1', '2', '3', '4', '5', '6', '7', 'x', 'X':
		// Numeric escape: exactly two digits follow (plus the leading octal
		// digit itself for octal form), yielding a single byte.
		if len(s) < 2 {
			return "", "", fmt.Errorf(`\%c requires 2 following digits`, r)
		}
		base := 8
		ss := s[:2]
		s = s[2:]
		if r == 'x' || r == 'X' {
			base = 16
		} else {
			// The first octal digit is r itself.
			ss = string(r) + ss
		}
		// bitSize 8: the result must fit in one byte.
		i, err := strconv.ParseUint(ss, base, 8)
		if err != nil {
			return "", "", err
		}
		return string([]byte{byte(i)}), s, nil
	case 'u', 'U':
		// \u takes 4 hex digits, \U takes 8; the digits are interpreted as
		// raw big-endian bytes, not as a code point.
		n := 4
		if r == 'U' {
			n = 8
		}
		if len(s) < n {
			return "", "", fmt.Errorf(`\%c requires %d digits`, r, n)
		}

		bs := make([]byte, n/2)
		for i := 0; i < n; i += 2 {
			a, ok1 := unhex(s[i])
			b, ok2 := unhex(s[i+1])
			if !ok1 || !ok2 {
				return "", "", errBadHex
			}
			bs[i/2] = a<<4 | b
		}
		s = s[n:]
		return string(bs), s, nil
	}
	return "", "", fmt.Errorf(`unknown escape \%c`, r)
}
+
// unhex converts one ASCII hex digit to its numeric value;
// ok is false for non-hex bytes.
// Adapted from src/pkg/strconv/quote.go.
func unhex(b byte) (v byte, ok bool) {
	switch {
	case b >= '0' && b <= '9':
		return b - '0', true
	case b >= 'a' && b <= 'f':
		return b - 'a' + 10, true
	case b >= 'A' && b <= 'F':
		return b - 'A' + 10, true
	default:
		return 0, false
	}
}
+
// Back off the parser by one token. Can only be done between calls to next().
// It makes the next advance() a no-op.
func (p *textParser) back() { p.backed = true }
+
// Advances the parser and returns the new current token. Consecutive
// double-quoted strings separated only by whitespace are concatenated into a
// single token (note: this merging only triggers for '"', not '\'').
func (p *textParser) next() *token {
	if p.backed || p.done {
		p.backed = false
		return &p.cur
	}
	p.advance()
	if p.done {
		p.cur.value = ""
	} else if len(p.cur.value) > 0 && p.cur.value[0] == '"' {
		// Look for multiple quoted strings separated by whitespace,
		// and concatenate them.
		cat := p.cur
		for {
			p.skipWhitespace()
			if p.done || p.s[0] != '"' {
				break
			}
			p.advance()
			if p.cur.err != nil {
				return &p.cur
			}
			// value keeps the raw quoted pieces; unquoted is the joined text.
			cat.value += " " + p.cur.value
			cat.unquoted += p.cur.unquoted
		}
		p.done = false // parser may have seen EOF, but we want to return cat
		p.cur = cat
	}
	return &p.cur
}
+
+func (p *textParser) consumeToken(s string) error {
+ tok := p.next()
+ if tok.err != nil {
+ return tok.err
+ }
+ if tok.value != s {
+ p.back()
+ return p.errorf("expected %q, found %q", s, tok.value)
+ }
+ return nil
+}
+
// Return a RequiredNotSetError indicating which required field was not set.
// It reports the first nil field whose tag properties mark it required.
func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError {
	st := sv.Type()
	sprops := GetProperties(st)
	for i := 0; i < st.NumField(); i++ {
		if !isNil(sv.Field(i)) {
			continue
		}

		props := sprops.Prop[i]
		if props.Required {
			return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)}
		}
	}
	// Caller believed a required field was missing but none was found.
	return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen
}
+
+// Returns the index in the struct for the named field, as well as the parsed tag properties.
+func structFieldByName(st reflect.Type, name string) (int, *Properties, bool) {
+ sprops := GetProperties(st)
+ i, ok := sprops.decoderOrigNames[name]
+ if ok {
+ return i, sprops.Prop[i], true
+ }
+ return -1, nil, false
+}
+
// Consume a ':' from the input stream (if the next token is a colon),
// returning an error if a colon is needed but not present.
func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value != ":" {
		// Colon is optional when the field is a group or message.
		needColon := true
		switch props.Wire {
		case "group":
			needColon = false
		case "bytes":
			// A "bytes" field is either a message, a string, or a repeated field;
			// those three become *T, *string and []T respectively, so we can check for
			// this field being a pointer to a non-string.
			if typ.Kind() == reflect.Ptr {
				// *T or *string
				if typ.Elem().Kind() == reflect.String {
					break
				}
			} else if typ.Kind() == reflect.Slice {
				// []T or []*T
				if typ.Elem().Kind() != reflect.Ptr {
					break
				}
			} else if typ.Kind() == reflect.String {
				// The proto3 exception is for a string field,
				// which requires a colon.
				break
			}
			needColon = false
		}
		if needColon {
			return p.errorf("expected ':', found %q", tok.value)
		}
		// Message/group with no colon: push the token back so the caller can
		// read the opening brace.
		p.back()
	}
	return nil
}
+
// readStruct parses a sequence of "name: value" fields into sv until
// terminator (">", "}", or "" for end of input) is seen. Names may also be
// extensions written as "[full.name]". If a required field is missing but
// parsing otherwise succeeds, the returned error is a *RequiredNotSetError.
func (p *textParser) readStruct(sv reflect.Value, terminator string) error {
	st := sv.Type()
	reqCount := GetProperties(st).reqCount
	var reqFieldErr error
	fieldSet := make(map[string]bool)
	// A struct is a sequence of "name: value", terminated by one of
	// '>' or '}', or the end of the input. A name may also be
	// "[extension]".
	for {
		tok := p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value == terminator {
			break
		}
		if tok.value == "[" {
			// Looks like an extension.
			//
			// TODO: Check whether we need to handle
			// namespace rooted names (e.g. ".something.Foo").
			tok = p.next()
			if tok.err != nil {
				return tok.err
			}
			var desc *ExtensionDesc
			// This could be faster, but it's functional.
			// TODO: Do something smarter than a linear scan.
			for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) {
				if d.Name == tok.value {
					desc = d
					break
				}
			}
			if desc == nil {
				return p.errorf("unrecognized extension %q", tok.value)
			}
			// Check the extension terminator.
			tok = p.next()
			if tok.err != nil {
				return tok.err
			}
			if tok.value != "]" {
				return p.errorf("unrecognized extension terminator %q", tok.value)
			}

			props := &Properties{}
			props.Parse(desc.Tag)

			typ := reflect.TypeOf(desc.ExtensionType)
			if err := p.checkForColon(props, typ); err != nil {
				return err
			}

			rep := desc.repeated()

			// Read the extension structure, and set it in
			// the value we're constructing.
			var ext reflect.Value
			if !rep {
				ext = reflect.New(typ).Elem()
			} else {
				// Repeated: parse one element of the slice's element type.
				ext = reflect.New(typ.Elem()).Elem()
			}
			if err := p.readAny(ext, props); err != nil {
				// A missing required field inside the extension is deferred,
				// not fatal; any other error aborts.
				if _, ok := err.(*RequiredNotSetError); !ok {
					return err
				}
				reqFieldErr = err
			}
			ep := sv.Addr().Interface().(extendableProto)
			if !rep {
				SetExtension(ep, desc, ext.Interface())
			} else {
				// Append to the existing slice if the extension was already set.
				old, err := GetExtension(ep, desc)
				var sl reflect.Value
				if err == nil {
					sl = reflect.ValueOf(old) // existing slice
				} else {
					sl = reflect.MakeSlice(typ, 0, 1)
				}
				sl = reflect.Append(sl, ext)
				SetExtension(ep, desc, sl.Interface())
			}
		} else {
			// This is a normal, non-extension field.
			name := tok.value
			fi, props, ok := structFieldByName(st, name)
			if !ok {
				return p.errorf("unknown field name %q in %v", name, st)
			}

			dst := sv.Field(fi)

			if dst.Kind() == reflect.Map {
				// Consume any colon.
				if err := p.checkForColon(props, dst.Type()); err != nil {
					return err
				}

				// Construct the map if it doesn't already exist.
				if dst.IsNil() {
					dst.Set(reflect.MakeMap(dst.Type()))
				}
				key := reflect.New(dst.Type().Key()).Elem()
				val := reflect.New(dst.Type().Elem()).Elem()

				// The map entry should be this sequence of tokens:
				// < key : KEY value : VALUE >
				// Technically the "key" and "value" could come in any order,
				// but in practice they won't.

				tok := p.next()
				var terminator string
				switch tok.value {
				case "<":
					terminator = ">"
				case "{":
					terminator = "}"
				default:
					return p.errorf("expected '{' or '<', found %q", tok.value)
				}
				if err := p.consumeToken("key"); err != nil {
					return err
				}
				if err := p.consumeToken(":"); err != nil {
					return err
				}
				if err := p.readAny(key, props.mkeyprop); err != nil {
					return err
				}
				if err := p.consumeToken("value"); err != nil {
					return err
				}
				if err := p.consumeToken(":"); err != nil {
					return err
				}
				if err := p.readAny(val, props.mvalprop); err != nil {
					return err
				}
				if err := p.consumeToken(terminator); err != nil {
					return err
				}

				dst.SetMapIndex(key, val)
				continue
			}

			// Check that it's not already set if it's not a repeated field.
			if !props.Repeated && fieldSet[name] {
				return p.errorf("non-repeated field %q was repeated", name)
			}

			if err := p.checkForColon(props, st.Field(fi).Type); err != nil {
				return err
			}

			// Parse into the field.
			fieldSet[name] = true
			if err := p.readAny(dst, props); err != nil {
				// Defer missing-required errors from nested messages so the
				// rest of the struct is still consumed.
				if _, ok := err.(*RequiredNotSetError); !ok {
					return err
				}
				reqFieldErr = err
			} else if props.Required {
				reqCount--
			}
		}

		// For backward compatibility, permit a semicolon or comma after a field.
		tok = p.next()
		if tok.err != nil {
			return tok.err
		}
		if tok.value != ";" && tok.value != "," {
			p.back()
		}
	}

	if reqCount > 0 {
		return p.missingRequiredFieldError(sv)
	}
	return reqFieldErr
}
+
// readAny parses one value from the token stream into v, dispatching on v's
// reflect.Kind. props supplies proto-specific metadata (enum name, map
// key/value props). Falls through to a generic "invalid TYPE" error when the
// token doesn't fit the target kind.
func (p *textParser) readAny(v reflect.Value, props *Properties) error {
	tok := p.next()
	if tok.err != nil {
		return tok.err
	}
	if tok.value == "" {
		return p.errorf("unexpected EOF")
	}

	switch fv := v; fv.Kind() {
	case reflect.Slice:
		at := v.Type()
		if at.Elem().Kind() == reflect.Uint8 {
			// Special case for []byte
			if tok.value[0] != '"' && tok.value[0] != '\'' {
				// Deliberately written out here, as the error after
				// this switch statement would write "invalid []byte: ...",
				// which is not as user-friendly.
				return p.errorf("invalid string: %v", tok.value)
			}
			bytes := []byte(tok.unquoted)
			fv.Set(reflect.ValueOf(bytes))
			return nil
		}
		// Repeated field. May already exist.
		// Grow the slice by one (doubling capacity when full) and recurse to
		// parse the new element.
		flen := fv.Len()
		if flen == fv.Cap() {
			nav := reflect.MakeSlice(at, flen, 2*flen+1)
			reflect.Copy(nav, fv)
			fv.Set(nav)
		}
		fv.SetLen(flen + 1)

		// Read one.
		p.back()
		return p.readAny(fv.Index(flen), props)
	case reflect.Bool:
		// Either "true", "false", 1 or 0.
		switch tok.value {
		case "true", "1":
			fv.SetBool(true)
			return nil
		case "false", "0":
			fv.SetBool(false)
			return nil
		}
	case reflect.Float32, reflect.Float64:
		v := tok.value
		// Ignore 'f' for compatibility with output generated by C++, but don't
		// remove 'f' when the value is "-inf" or "inf".
		if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" {
			v = v[:len(v)-1]
		}
		if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil {
			fv.SetFloat(f)
			return nil
		}
	case reflect.Int32:
		if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil {
			fv.SetInt(x)
			return nil
		}

		// Not a number; try the token as an enum value name.
		if len(props.Enum) == 0 {
			break
		}
		m, ok := enumValueMaps[props.Enum]
		if !ok {
			break
		}
		x, ok := m[tok.value]
		if !ok {
			break
		}
		fv.SetInt(int64(x))
		return nil
	case reflect.Int64:
		if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil {
			fv.SetInt(x)
			return nil
		}

	case reflect.Ptr:
		// A basic field (indirected through pointer), or a repeated message/group
		p.back()
		fv.Set(reflect.New(fv.Type().Elem()))
		return p.readAny(fv.Elem(), props)
	case reflect.String:
		if tok.value[0] == '"' || tok.value[0] == '\'' {
			fv.SetString(tok.unquoted)
			return nil
		}
	case reflect.Struct:
		var terminator string
		switch tok.value {
		case "{":
			terminator = "}"
		case "<":
			terminator = ">"
		default:
			return p.errorf("expected '{' or '<', found %q", tok.value)
		}
		// TODO: Handle nested messages which implement encoding.TextUnmarshaler.
		return p.readStruct(fv, terminator)
	case reflect.Uint32:
		if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil {
			fv.SetUint(uint64(x))
			return nil
		}
	case reflect.Uint64:
		if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil {
			fv.SetUint(x)
			return nil
		}
	}
	return p.errorf("invalid %v: %v", v.Type(), tok.value)
}
+
+// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb
+// before starting to unmarshal, so any existing data in pb is always removed.
+// If a required field is not set and no other error occurs,
+// UnmarshalText returns *RequiredNotSetError.
+func UnmarshalText(s string, pb Message) error {
+ if um, ok := pb.(encoding.TextUnmarshaler); ok {
+ err := um.UnmarshalText([]byte(s))
+ return err
+ }
+ pb.Reset()
+ v := reflect.ValueOf(pb)
+ if pe := newTextParser(s).readStruct(v.Elem(), ""); pe != nil {
+ return pe
+ }
+ return nil
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
new file mode 100644
index 000000000000..e5ee8b922639
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_parser_test.go
@@ -0,0 +1,509 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "math"
+ "reflect"
+ "testing"
+
+ proto3pb "./proto3_proto"
+ . "./testdata"
+ . "github.com/golang/protobuf/proto"
+)
+
// UnmarshalTextTest is one UnmarshalText test case: the input text, the
// expected error string, and the expected decoded message on success.
type UnmarshalTextTest struct {
	in  string
	err string // if "", no error expected
	out *MyMessage
}
+
+func buildExtStructTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_More, &Ext{
+ Data: String("Hello, world!"),
+ })
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtDataTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ SetExtension(msg, E_Ext_Text, String("Hello, world!"))
+ SetExtension(msg, E_Ext_Number, Int32(1729))
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+func buildExtRepStringTest(text string) UnmarshalTextTest {
+ msg := &MyMessage{
+ Count: Int32(42),
+ }
+ if err := SetExtension(msg, E_Greeting, []string{"bula", "hola"}); err != nil {
+ panic(err)
+ }
+ return UnmarshalTextTest{in: text, out: msg}
+}
+
+var unMarshalTextTests = []UnmarshalTextTest{
+ // Basic
+ {
+ in: " count:42\n name:\"Dave\" ",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ },
+ },
+
+ // Empty quoted string
+ {
+ in: `count:42 name:""`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(""),
+ },
+ },
+
+ // Quoted string concatenation
+ {
+ in: `count:42 name: "My name is "` + "\n" + `"elsewhere"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("My name is elsewhere"),
+ },
+ },
+
+ // Quoted string with escaped apostrophe
+ {
+ in: `count:42 name: "HOLIDAY - New Year\'s Day"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("HOLIDAY - New Year's Day"),
+ },
+ },
+
+ // Quoted string with single quote
+ {
+ in: `count:42 name: 'Roger "The Ramster" Ramjet'`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`Roger "The Ramster" Ramjet`),
+ },
+ },
+
+ // Quoted string with all the accepted special characters from the C++ test
+ {
+ in: `count:42 name: ` + "\"\\\"A string with \\' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces"),
+ },
+ },
+
+ // Quoted string with quoted backslash
+ {
+ in: `count:42 name: "\\'xyz"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String(`\'xyz`),
+ },
+ },
+
+ // Quoted string with UTF-8 bytes.
+ {
+ in: "count:42 name: '\303\277\302\201\xAB'",
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("\303\277\302\201\xAB"),
+ },
+ },
+
+ // Bad quoted string
+ {
+ in: `inner: < host: "\0" >` + "\n",
+ err: `line 1.15: invalid quoted string "\0"`,
+ },
+
+ // Number too large for int64
+ {
+ in: "count: 1 others { key: 123456789012345678901 }",
+ err: "line 1.23: invalid int64: 123456789012345678901",
+ },
+
+ // Number too large for int32
+ {
+ in: "count: 1234567890123",
+ err: "line 1.7: invalid int32: 1234567890123",
+ },
+
+ // Number in hexadecimal
+ {
+ in: "count: 0x2beef",
+ out: &MyMessage{
+ Count: Int32(0x2beef),
+ },
+ },
+
+ // Number in octal
+ {
+ in: "count: 024601",
+ out: &MyMessage{
+ Count: Int32(024601),
+ },
+ },
+
+ // Floating point number with "f" suffix
+ {
+ in: "count: 4 others:< weight: 17.0f >",
+ out: &MyMessage{
+ Count: Int32(4),
+ Others: []*OtherMessage{
+ {
+ Weight: Float32(17),
+ },
+ },
+ },
+ },
+
+ // Floating point positive infinity
+ {
+ in: "count: 4 bigfloat: inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(1)),
+ },
+ },
+
+ // Floating point negative infinity
+ {
+ in: "count: 4 bigfloat: -inf",
+ out: &MyMessage{
+ Count: Int32(4),
+ Bigfloat: Float64(math.Inf(-1)),
+ },
+ },
+
+ // Number too large for float32
+ {
+ in: "others:< weight: 12345678901234567890123456789012345678901234567890 >",
+ err: "line 1.17: invalid float32: 12345678901234567890123456789012345678901234567890",
+ },
+
+ // Number posing as a quoted string
+ {
+ in: `inner: < host: 12 >` + "\n",
+ err: `line 1.15: invalid string: 12`,
+ },
+
+ // Quoted string posing as int32
+ {
+ in: `count: "12"`,
+ err: `line 1.7: invalid int32: "12"`,
+ },
+
+ // Quoted string posing a float32
+ {
+ in: `others:< weight: "17.4" >`,
+ err: `line 1.17: invalid float32: "17.4"`,
+ },
+
+ // Enum
+ {
+ in: `count:42 bikeshed: BLUE`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Bikeshed: MyMessage_BLUE.Enum(),
+ },
+ },
+
+ // Repeated field
+ {
+ in: `count:42 pet: "horsey" pet:"bunny"`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Pet: []string{"horsey", "bunny"},
+ },
+ },
+
+ // Repeated message with/without colon and <>/{}
+ {
+ in: `count:42 others:{} others{} others:<> others:{}`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Others: []*OtherMessage{
+ {},
+ {},
+ {},
+ {},
+ },
+ },
+ },
+
+ // Missing colon for inner message
+ {
+ in: `count:42 inner < host: "cauchy.syd" >`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Inner: &InnerMessage{
+ Host: String("cauchy.syd"),
+ },
+ },
+ },
+
+ // Missing colon for string field
+ {
+ in: `name "Dave"`,
+ err: `line 1.5: expected ':', found "\"Dave\""`,
+ },
+
+ // Missing colon for int32 field
+ {
+ in: `count 42`,
+ err: `line 1.6: expected ':', found "42"`,
+ },
+
+ // Missing required field
+ {
+ in: `name: "Pawel"`,
+ err: `proto: required field "testdata.MyMessage.count" not set`,
+ out: &MyMessage{
+ Name: String("Pawel"),
+ },
+ },
+
+ // Repeated non-repeated field
+ {
+ in: `name: "Rob" name: "Russ"`,
+ err: `line 1.12: non-repeated field "name" was repeated`,
+ },
+
+ // Group
+ {
+ in: `count: 17 SomeGroup { group_field: 12 }`,
+ out: &MyMessage{
+ Count: Int32(17),
+ Somegroup: &MyMessage_SomeGroup{
+ GroupField: Int32(12),
+ },
+ },
+ },
+
+ // Semicolon between fields
+ {
+ in: `count:3;name:"Calvin"`,
+ out: &MyMessage{
+ Count: Int32(3),
+ Name: String("Calvin"),
+ },
+ },
+ // Comma between fields
+ {
+ in: `count:4,name:"Ezekiel"`,
+ out: &MyMessage{
+ Count: Int32(4),
+ Name: String("Ezekiel"),
+ },
+ },
+
+ // Extension
+ buildExtStructTest(`count: 42 [testdata.Ext.more]:`),
+ buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`),
+ buildExtDataTest(`count: 42 [testdata.Ext.text]:"Hello, world!" [testdata.Ext.number]:1729`),
+ buildExtRepStringTest(`count: 42 [testdata.greeting]:"bula" [testdata.greeting]:"hola"`),
+
+ // Big all-in-one
+ {
+ in: "count:42 # Meaning\n" +
+ `name:"Dave" ` +
+ `quote:"\"I didn't want to go.\"" ` +
+ `pet:"bunny" ` +
+ `pet:"kitty" ` +
+ `pet:"horsey" ` +
+ `inner:<` +
+ ` host:"footrest.syd" ` +
+ ` port:7001 ` +
+ ` connected:true ` +
+ `> ` +
+ `others:<` +
+ ` key:3735928559 ` +
+ ` value:"\x01A\a\f" ` +
+ `> ` +
+ `others:<` +
+ " weight:58.9 # Atomic weight of Co\n" +
+ ` inner:<` +
+ ` host:"lesha.mtv" ` +
+ ` port:8002 ` +
+ ` >` +
+ `>`,
+ out: &MyMessage{
+ Count: Int32(42),
+ Name: String("Dave"),
+ Quote: String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &InnerMessage{
+ Host: String("footrest.syd"),
+ Port: Int32(7001),
+ Connected: Bool(true),
+ },
+ Others: []*OtherMessage{
+ {
+ Key: Int64(3735928559),
+ Value: []byte{0x1, 'A', '\a', '\f'},
+ },
+ {
+ Weight: Float32(58.9),
+ Inner: &InnerMessage{
+ Host: String("lesha.mtv"),
+ Port: Int32(8002),
+ },
+ },
+ },
+ },
+ },
+}
+
+func TestUnmarshalText(t *testing.T) {
+ for i, test := range unMarshalTextTests {
+ pb := new(MyMessage)
+ err := UnmarshalText(test.in, pb)
+ if test.err == "" {
+ // We don't expect failure.
+ if err != nil {
+ t.Errorf("Test %d: Unexpected error: %v", i, err)
+ } else if !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ } else {
+ // We do expect failure.
+ if err == nil {
+ t.Errorf("Test %d: Didn't get expected error: %v", i, test.err)
+ } else if err.Error() != test.err {
+ t.Errorf("Test %d: Incorrect error.\nHave: %v\nWant: %v",
+ i, err.Error(), test.err)
+ } else if _, ok := err.(*RequiredNotSetError); ok && test.out != nil && !reflect.DeepEqual(pb, test.out) {
+ t.Errorf("Test %d: Incorrect populated \nHave: %v\nWant: %v",
+ i, pb, test.out)
+ }
+ }
+ }
+}
+
+func TestUnmarshalTextCustomMessage(t *testing.T) {
+ msg := &textMessage{}
+ if err := UnmarshalText("custom", msg); err != nil {
+ t.Errorf("Unexpected error from custom unmarshal: %v", err)
+ }
+ if UnmarshalText("not custom", msg) == nil {
+ t.Errorf("Didn't get expected error from custom unmarshal")
+ }
+}
+
+// Regression test; this caused a panic.
+func TestRepeatedEnum(t *testing.T) {
+ pb := new(RepeatedEnum)
+ if err := UnmarshalText("color: RED", pb); err != nil {
+ t.Fatal(err)
+ }
+ exp := &RepeatedEnum{
+ Color: []RepeatedEnum_Color{RepeatedEnum_RED},
+ }
+ if !Equal(pb, exp) {
+ t.Errorf("Incorrect populated \nHave: %v\nWant: %v", pb, exp)
+ }
+}
+
+func TestProto3TextParsing(t *testing.T) {
+ m := new(proto3pb.Message)
+ const in = `name: "Wallace" true_scotsman: true`
+ want := &proto3pb.Message{
+ Name: "Wallace",
+ TrueScotsman: true,
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+func TestMapParsing(t *testing.T) {
+ m := new(MessageWithMap)
+ const in = `name_mapping: name_mapping:` +
+ `msg_mapping:>` +
+ `byte_mapping:`
+ want := &MessageWithMap{
+ NameMapping: map[int32]string{
+ 1: "Beatles",
+ 1234: "Feist",
+ },
+ MsgMapping: map[int64]*FloatingPoint{
+ -4: {F: Float64(2.0)},
+ },
+ ByteMapping: map[bool][]byte{
+ true: []byte("so be it"),
+ },
+ }
+ if err := UnmarshalText(in, m); err != nil {
+ t.Fatal(err)
+ }
+ if !Equal(m, want) {
+ t.Errorf("\n got %v\nwant %v", m, want)
+ }
+}
+
+var benchInput string
+
+func init() {
+ benchInput = "count: 4\n"
+ for i := 0; i < 1000; i++ {
+ benchInput += "pet: \"fido\"\n"
+ }
+
+ // Check it is valid input.
+ pb := new(MyMessage)
+ err := UnmarshalText(benchInput, pb)
+ if err != nil {
+ panic("Bad benchmark input: " + err.Error())
+ }
+}
+
+func BenchmarkUnmarshalText(b *testing.B) {
+ pb := new(MyMessage)
+ for i := 0; i < b.N; i++ {
+ UnmarshalText(benchInput, pb)
+ }
+ b.SetBytes(int64(len(benchInput)))
+}
diff --git a/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
new file mode 100644
index 000000000000..707bedd0006a
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/golang/protobuf/proto/text_test.go
@@ -0,0 +1,436 @@
+// Go support for Protocol Buffers - Google's data interchange format
+//
+// Copyright 2010 The Go Authors. All rights reserved.
+// https://github.com/golang/protobuf
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+package proto_test
+
+import (
+ "bytes"
+ "errors"
+ "io/ioutil"
+ "math"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ proto3pb "./proto3_proto"
+ pb "./testdata"
+)
+
+// textMessage implements the methods that allow it to marshal and unmarshal
+// itself as text.
+type textMessage struct {
+}
+
+func (*textMessage) MarshalText() ([]byte, error) {
+ return []byte("custom"), nil
+}
+
+func (*textMessage) UnmarshalText(bytes []byte) error {
+ if string(bytes) != "custom" {
+ return errors.New("expected 'custom'")
+ }
+ return nil
+}
+
+func (*textMessage) Reset() {}
+func (*textMessage) String() string { return "" }
+func (*textMessage) ProtoMessage() {}
+
+func newTestMessage() *pb.MyMessage {
+ msg := &pb.MyMessage{
+ Count: proto.Int32(42),
+ Name: proto.String("Dave"),
+ Quote: proto.String(`"I didn't want to go."`),
+ Pet: []string{"bunny", "kitty", "horsey"},
+ Inner: &pb.InnerMessage{
+ Host: proto.String("footrest.syd"),
+ Port: proto.Int32(7001),
+ Connected: proto.Bool(true),
+ },
+ Others: []*pb.OtherMessage{
+ {
+ Key: proto.Int64(0xdeadbeef),
+ Value: []byte{1, 65, 7, 12},
+ },
+ {
+ Weight: proto.Float32(6.022),
+ Inner: &pb.InnerMessage{
+ Host: proto.String("lesha.mtv"),
+ Port: proto.Int32(8002),
+ },
+ },
+ },
+ Bikeshed: pb.MyMessage_BLUE.Enum(),
+ Somegroup: &pb.MyMessage_SomeGroup{
+ GroupField: proto.Int32(8),
+ },
+ // One normally wouldn't do this.
+ // This is an undeclared tag 13, as a varint (wire type 0) with value 4.
+ XXX_unrecognized: []byte{13<<3 | 0, 4},
+ }
+ ext := &pb.Ext{
+ Data: proto.String("Big gobs for big rats"),
+ }
+ if err := proto.SetExtension(msg, pb.E_Ext_More, ext); err != nil {
+ panic(err)
+ }
+ greetings := []string{"adg", "easy", "cow"}
+ if err := proto.SetExtension(msg, pb.E_Greeting, greetings); err != nil {
+ panic(err)
+ }
+
+ // Add an unknown extension. We marshal a pb.Ext, and fake the ID.
+ b, err := proto.Marshal(&pb.Ext{Data: proto.String("3G skiing")})
+ if err != nil {
+ panic(err)
+ }
+ b = append(proto.EncodeVarint(201<<3|proto.WireBytes), b...)
+ proto.SetRawExtension(msg, 201, b)
+
+ // Extensions can be plain fields, too, so let's test that.
+ b = append(proto.EncodeVarint(202<<3|proto.WireVarint), 19)
+ proto.SetRawExtension(msg, 202, b)
+
+ return msg
+}
+
+const text = `count: 42
+name: "Dave"
+quote: "\"I didn't want to go.\""
+pet: "bunny"
+pet: "kitty"
+pet: "horsey"
+inner: <
+ host: "footrest.syd"
+ port: 7001
+ connected: true
+>
+others: <
+ key: 3735928559
+ value: "\001A\007\014"
+>
+others: <
+ weight: 6.022
+ inner: <
+ host: "lesha.mtv"
+ port: 8002
+ >
+>
+bikeshed: BLUE
+SomeGroup {
+ group_field: 8
+}
+/* 2 unknown bytes */
+13: 4
+[testdata.Ext.more]: <
+ data: "Big gobs for big rats"
+>
+[testdata.greeting]: "adg"
+[testdata.greeting]: "easy"
+[testdata.greeting]: "cow"
+/* 13 unknown bytes */
+201: "\t3G skiing"
+/* 3 unknown bytes */
+202: 19
+`
+
+func TestMarshalText(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, newTestMessage()); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != text {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, text)
+ }
+}
+
+func TestMarshalTextCustomMessage(t *testing.T) {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, &textMessage{}); err != nil {
+ t.Fatalf("proto.MarshalText: %v", err)
+ }
+ s := buf.String()
+ if s != "custom" {
+ t.Errorf("Got %q, expected %q", s, "custom")
+ }
+}
+func TestMarshalTextNil(t *testing.T) {
+ want := ""
+ tests := []proto.Message{nil, (*pb.MyMessage)(nil)}
+ for i, test := range tests {
+ buf := new(bytes.Buffer)
+ if err := proto.MarshalText(buf, test); err != nil {
+ t.Fatal(err)
+ }
+ if got := buf.String(); got != want {
+ t.Errorf("%d: got %q want %q", i, got, want)
+ }
+ }
+}
+
+func TestMarshalTextUnknownEnum(t *testing.T) {
+ // The Color enum only specifies values 0-2.
+ m := &pb.MyMessage{Bikeshed: pb.MyMessage_Color(3).Enum()}
+ got := m.String()
+ const want = `bikeshed:3 `
+ if got != want {
+ t.Errorf("\n got %q\nwant %q", got, want)
+ }
+}
+
+func BenchmarkMarshalTextBuffered(b *testing.B) {
+ buf := new(bytes.Buffer)
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ buf.Reset()
+ proto.MarshalText(buf, m)
+ }
+}
+
+func BenchmarkMarshalTextUnbuffered(b *testing.B) {
+ w := ioutil.Discard
+ m := newTestMessage()
+ for i := 0; i < b.N; i++ {
+ proto.MarshalText(w, m)
+ }
+}
+
+func compact(src string) string {
+ // s/[ \n]+/ /g; s/ $//;
+ dst := make([]byte, len(src))
+ space, comment := false, false
+ j := 0
+ for i := 0; i < len(src); i++ {
+ if strings.HasPrefix(src[i:], "/*") {
+ comment = true
+ i++
+ continue
+ }
+ if comment && strings.HasPrefix(src[i:], "*/") {
+ comment = false
+ i++
+ continue
+ }
+ if comment {
+ continue
+ }
+ c := src[i]
+ if c == ' ' || c == '\n' {
+ space = true
+ continue
+ }
+ if j > 0 && (dst[j-1] == ':' || dst[j-1] == '<' || dst[j-1] == '{') {
+ space = false
+ }
+ if c == '{' {
+ space = false
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ space = false
+ }
+ dst[j] = c
+ j++
+ }
+ if space {
+ dst[j] = ' '
+ j++
+ }
+ return string(dst[0:j])
+}
+
+var compactText = compact(text)
+
+func TestCompactText(t *testing.T) {
+ s := proto.CompactTextString(newTestMessage())
+ if s != compactText {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v\n===\n", s, compactText)
+ }
+}
+
+func TestStringEscaping(t *testing.T) {
+ testCases := []struct {
+ in *pb.Strings
+ out string
+ }{
+ {
+ // Test data from C++ test (TextFormatTest.StringEscape).
+ // Single divergence: we don't escape apostrophes.
+ &pb.Strings{StringField: proto.String("\"A string with ' characters \n and \r newlines and \t tabs and \001 slashes \\ and multiple spaces")},
+ "string_field: \"\\\"A string with ' characters \\n and \\r newlines and \\t tabs and \\001 slashes \\\\ and multiple spaces\"\n",
+ },
+ {
+ // Test data from the same C++ test.
+ &pb.Strings{StringField: proto.String("\350\260\267\346\255\214")},
+ "string_field: \"\\350\\260\\267\\346\\255\\214\"\n",
+ },
+ {
+ // Some UTF-8.
+ &pb.Strings{StringField: proto.String("\x00\x01\xff\x81")},
+ `string_field: "\000\001\377\201"` + "\n",
+ },
+ }
+
+ for i, tc := range testCases {
+ var buf bytes.Buffer
+ if err := proto.MarshalText(&buf, tc.in); err != nil {
+ t.Errorf("proto.MarsalText: %v", err)
+ continue
+ }
+ s := buf.String()
+ if s != tc.out {
+ t.Errorf("#%d: Got:\n%s\nExpected:\n%s\n", i, s, tc.out)
+ continue
+ }
+
+ // Check round-trip.
+ pb := new(pb.Strings)
+ if err := proto.UnmarshalText(s, pb); err != nil {
+ t.Errorf("#%d: UnmarshalText: %v", i, err)
+ continue
+ }
+ if !proto.Equal(pb, tc.in) {
+ t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb)
+ }
+ }
+}
+
+// A limitedWriter accepts some output before it fails.
+// This is a proxy for something like a nearly-full or imminently-failing disk,
+// or a network connection that is about to die.
+type limitedWriter struct {
+ b bytes.Buffer
+ limit int
+}
+
+var outOfSpace = errors.New("proto: insufficient space")
+
+func (w *limitedWriter) Write(p []byte) (n int, err error) {
+ var avail = w.limit - w.b.Len()
+ if avail <= 0 {
+ return 0, outOfSpace
+ }
+ if len(p) <= avail {
+ return w.b.Write(p)
+ }
+ n, _ = w.b.Write(p[:avail])
+ return n, outOfSpace
+}
+
+func TestMarshalTextFailing(t *testing.T) {
+ // Try lots of different sizes to exercise more error code-paths.
+ for lim := 0; lim < len(text); lim++ {
+ buf := new(limitedWriter)
+ buf.limit = lim
+ err := proto.MarshalText(buf, newTestMessage())
+ // We expect a certain error, but also some partial results in the buffer.
+ if err != outOfSpace {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", err, outOfSpace)
+ }
+ s := buf.b.String()
+ x := text[:buf.limit]
+ if s != x {
+ t.Errorf("Got:\n===\n%v===\nExpected:\n===\n%v===\n", s, x)
+ }
+ }
+}
+
+func TestFloats(t *testing.T) {
+ tests := []struct {
+ f float64
+ want string
+ }{
+ {0, "0"},
+ {4.7, "4.7"},
+ {math.Inf(1), "inf"},
+ {math.Inf(-1), "-inf"},
+ {math.NaN(), "nan"},
+ }
+ for _, test := range tests {
+ msg := &pb.FloatingPoint{F: &test.f}
+ got := strings.TrimSpace(msg.String())
+ want := `f:` + test.want
+ if got != want {
+ t.Errorf("f=%f: got %q, want %q", test.f, got, want)
+ }
+ }
+}
+
+func TestRepeatedNilText(t *testing.T) {
+ m := &pb.MessageList{
+ Message: []*pb.MessageList_Message{
+ nil,
+ &pb.MessageList_Message{
+ Name: proto.String("Horse"),
+ },
+ nil,
+ },
+ }
+ want := `Message
+Message {
+ name: "Horse"
+}
+Message
+`
+ if s := proto.MarshalTextString(m); s != want {
+ t.Errorf(" got: %s\nwant: %s", s, want)
+ }
+}
+
+func TestProto3Text(t *testing.T) {
+ tests := []struct {
+ m proto.Message
+ want string
+ }{
+ // zero message
+ {&proto3pb.Message{}, ``},
+ // zero message except for an empty byte slice
+ {&proto3pb.Message{Data: []byte{}}, ``},
+ // trivial case
+ {&proto3pb.Message{Name: "Rob", HeightInCm: 175}, `name:"Rob" height_in_cm:175`},
+ // empty map
+ {&pb.MessageWithMap{}, ``},
+ // non-empty map; current map format is the same as a repeated struct
+ {
+ &pb.MessageWithMap{NameMapping: map[int32]string{1234: "Feist"}},
+ `name_mapping:`,
+ },
+ }
+ for _, test := range tests {
+ got := strings.TrimSpace(test.m.String())
+ if got != test.want {
+ t.Errorf("\n got %s\nwant %s", got, test.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool.go
index 010576c39686..70e2e0a6b960 100644
--- a/Godeps/_workspace/src/github.com/spf13/pflag/bool.go
+++ b/Godeps/_workspace/src/github.com/spf13/pflag/bool.go
@@ -5,6 +5,13 @@ import (
"strconv"
)
+// optional interface to indicate boolean flags that can be
+// supplied without "=value" text
+type boolFlag interface {
+ Value
+ IsBoolFlag() bool
+}
+
// -- bool Value
type boolValue bool
@@ -25,6 +32,8 @@ func (b *boolValue) Type() string {
func (b *boolValue) String() string { return fmt.Sprintf("%v", *b) }
+func (b *boolValue) IsBoolFlag() bool { return true }
+
// BoolVar defines a bool flag with specified name, default value, and usage string.
// The argument p points to a bool variable in which to store the value of the flag.
func (f *FlagSet) BoolVar(p *bool, name string, value bool, usage string) {
diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go
new file mode 100644
index 000000000000..72a12beca985
--- /dev/null
+++ b/Godeps/_workspace/src/github.com/spf13/pflag/bool_test.go
@@ -0,0 +1,163 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pflag_test
+
+import (
+ "fmt"
+ "strconv"
+ "testing"
+
+ . "github.com/ogier/pflag"
+)
+
+// This value can be a boolean ("true", "false") or "maybe"
+type triStateValue int
+
+const (
+ triStateFalse triStateValue = 0
+ triStateTrue triStateValue = 1
+ triStateMaybe triStateValue = 2
+)
+
+const strTriStateMaybe = "maybe"
+
+func (v *triStateValue) IsBoolFlag() bool {
+ return true
+}
+
+func (v *triStateValue) Get() interface{} {
+ return triStateValue(*v)
+}
+
+func (v *triStateValue) Set(s string) error {
+ if s == strTriStateMaybe {
+ *v = triStateMaybe
+ return nil
+ }
+ boolVal, err := strconv.ParseBool(s)
+ if boolVal {
+ *v = triStateTrue
+ } else {
+ *v = triStateFalse
+ }
+ return err
+}
+
+func (v *triStateValue) String() string {
+ if *v == triStateMaybe {
+ return strTriStateMaybe
+ }
+ return fmt.Sprintf("%v", bool(*v == triStateTrue))
+}
+
+// The type of the flag as required by the pflag.Value interface
+func (v *triStateValue) Type() string {
+ return "version"
+}
+
+func setUpFlagSet(tristate *triStateValue) *FlagSet {
+ f := NewFlagSet("test", ContinueOnError)
+ *tristate = triStateFalse
+ f.VarP(tristate, "tristate", "t", "tristate value (true, maybe or false)")
+ return f
+}
+
+func TestExplicitTrue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=true"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestImplicitTrue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestShortFlag(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"-t"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+}
+
+func TestShortFlagExtraArgument(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ // The "maybe" turns into an arg, since short boolean options will only do true/false
+ err := f.Parse([]string{"-t", "maybe"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateTrue {
+ t.Fatal("expected", triStateTrue, "(triStateTrue) but got", tristate, "instead")
+ }
+ args := f.Args()
+ if len(args) != 1 || args[0] != "maybe" {
+ t.Fatal("expected an extra 'maybe' argument to stick around")
+ }
+}
+
+func TestExplicitMaybe(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=maybe"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateMaybe {
+ t.Fatal("expected", triStateMaybe, "(triStateMaybe) but got", tristate, "instead")
+ }
+}
+
+func TestExplicitFalse(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=false"})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateFalse {
+ t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+ }
+}
+
+func TestImplicitFalse(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{})
+ if err != nil {
+ t.Fatal("expected no error; got", err)
+ }
+ if tristate != triStateFalse {
+ t.Fatal("expected", triStateFalse, "(triStateFalse) but got", tristate, "instead")
+ }
+}
+
+func TestInvalidValue(t *testing.T) {
+ var tristate triStateValue
+ f := setUpFlagSet(&tristate)
+ err := f.Parse([]string{"--tristate=invalid"})
+ if err == nil {
+ t.Fatal("expected an error but did not get any, tristate has value", tristate)
+ }
+}
diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
index 03ebeaad4590..6aaed3c8082a 100644
--- a/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
+++ b/Godeps/_workspace/src/github.com/spf13/pflag/example_test.go
@@ -29,6 +29,10 @@ func (i *interval) String() string {
return fmt.Sprint(*i)
}
+func (i *interval) Type() string {
+ return "interval"
+}
+
// Set is the method to set the flag value, part of the flag.Value interface.
// Set's argument is a string to be parsed to set the flag.
// It's a comma-separated list, so we split it.
diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go
index 47761a0e8088..2e4cac08493a 100644
--- a/Godeps/_workspace/src/github.com/spf13/pflag/flag.go
+++ b/Godeps/_workspace/src/github.com/spf13/pflag/flag.go
@@ -466,7 +466,7 @@ func (f *FlagSet) parseLongArg(s string, args []string) (a []string, err error)
return
}
if len(split) == 1 {
- if _, ok := flag.Value.(*boolValue); !ok {
+ if bv, ok := flag.Value.(boolFlag); !ok || !bv.IsBoolFlag() {
err = f.failf("flag needs an argument: %s", s)
return
}
@@ -500,7 +500,7 @@ func (f *FlagSet) parseShortArg(s string, args []string) (a []string, err error)
}
}
if alreadythere {
- if _, ok := flag.Value.(*boolValue); ok {
+ if bv, ok := flag.Value.(boolFlag); ok && bv.IsBoolFlag() {
f.setFlag(flag, "true", s)
continue
}
diff --git a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
index 4d95b1596bc6..47865bd96ba9 100644
--- a/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
+++ b/Godeps/_workspace/src/github.com/spf13/pflag/flag_test.go
@@ -245,6 +245,10 @@ func (f *flagVar) Set(value string) error {
return nil
}
+func (f *flagVar) Type() string {
+ return "flagVar"
+}
+
func TestUserDefined(t *testing.T) {
var flags FlagSet
flags.Init("test", ContinueOnError)
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
new file mode 100644
index 000000000000..01bb8d44ee96
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.3
+ - 1.4
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/golang.org/x"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/golang.org/x/oauth2"
+ - go get -v -t -d -tags='appengine appenginevm' golang.org/x/oauth2/...
+
+script:
+ - go test -v -tags='appengine appenginevm' golang.org/x/oauth2/...
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
new file mode 100644
index 000000000000..15167cd746c5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/AUTHORS
@@ -0,0 +1,3 @@
+# This source code refers to The Go Authors for copyright purposes.
+# The master list of authors is in the main Go distribution,
+# visible at http://tip.golang.org/AUTHORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
new file mode 100644
index 000000000000..d76faef21a82
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTING.md
@@ -0,0 +1,25 @@
+# Contributing
+
+We don't use GitHub pull requests but use Gerrit for code reviews,
+similar to the Go project.
+
+1. Sign one of the contributor license agreements below.
+2. `go get golang.org/x/review/git-codereview` to install the code reviewing tool.
+3. Get the package by running `go get -d golang.org/x/oauth2`.
+Make changes and create a change by running `git codereview change <name>`, provide a commit message, and use `git codereview mail` to create a Gerrit CL.
+Keep amending to the change and mail as you receive feedback.
+
+For more information about the workflow, see Go's [Contribution Guidelines](https://golang.org/doc/contribute.html).
+
+Before we can accept any pull requests
+we have to jump through a couple of legal hurdles,
+primarily a Contributor License Agreement (CLA):
+
+- **If you are an individual writing original source code**
+ and you're sure you own the intellectual property,
+ then you'll need to sign an [individual CLA](http://code.google.com/legal/individual-cla-v1.0.html).
+- **If you work for a company that wants to allow you to contribute your work**,
+ then you'll need to sign a [corporate CLA](http://code.google.com/legal/corporate-cla-v1.0.html).
+
+You can sign these electronically (just scroll to the bottom).
+After that, we'll be able to accept your pull requests.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
new file mode 100644
index 000000000000..1c4577e96806
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/CONTRIBUTORS
@@ -0,0 +1,3 @@
+# This source code was written by the Go contributors.
+# The master list of contributors is in the main Go distribution,
+# visible at http://tip.golang.org/CONTRIBUTORS.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
new file mode 100644
index 000000000000..d02f24fd5288
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/LICENSE
@@ -0,0 +1,27 @@
+Copyright (c) 2009 The oauth2 Authors. All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Google Inc. nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/README.md b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
new file mode 100644
index 000000000000..ecf9c4e022f3
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/README.md
@@ -0,0 +1,18 @@
+# OAuth2 for Go
+
+[](https://travis-ci.org/golang/oauth2)
+
+oauth2 package contains a client implementation for OAuth 2.0 spec.
+
+## Installation
+
+~~~~
+go get golang.org/x/oauth2
+~~~~
+
+See godoc for further documentation and examples.
+
+* [godoc.org/golang.org/x/oauth2](http://godoc.org/golang.org/x/oauth2)
+* [godoc.org/golang.org/x/oauth2/google](http://godoc.org/golang.org/x/oauth2/google)
+
+
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
new file mode 100644
index 000000000000..d9ce8045ba4b
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/client_appengine.go
@@ -0,0 +1,39 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine,!appenginevm
+
+// App Engine hooks.
+
+package oauth2
+
+import (
+ "log"
+ "net/http"
+ "sync"
+
+ "appengine"
+ "appengine/urlfetch"
+)
+
+var warnOnce sync.Once
+
+func init() {
+ registerContextClientFunc(contextClientAppEngine)
+}
+
+func contextClientAppEngine(ctx Context) (*http.Client, error) {
+ if actx, ok := ctx.(appengine.Context); ok {
+ return urlfetch.Client(actx), nil
+ }
+ // The user did it wrong. We'll log once (and hope they see it
+ // in dev_appserver), but still return (nil, nil) in case some
+ // other contextClientFunc hook finds a way to proceed.
+ warnOnce.Do(gaeDoingItWrongHelp)
+ return nil, nil
+}
+
+func gaeDoingItWrongHelp() {
+ log.Printf("WARNING: you attempted to use the oauth2 package without passing a valid appengine.Context or *http.Request as the oauth2.Context. App Engine requires that all service RPCs (including urlfetch) be associated with an *http.Request/appengine.Context.")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
new file mode 100644
index 000000000000..e4fef7d772a6
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/example_test.go
@@ -0,0 +1,50 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2_test
+
+import (
+ "fmt"
+ "log"
+ "testing"
+
+ "golang.org/x/oauth2"
+)
+
+// TODO(jbd): Remove after Go 1.4.
+// Related to https://codereview.appspot.com/107320046
+func TestA(t *testing.T) {}
+
+func ExampleConfig() {
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ Scopes: []string{"SCOPE1", "SCOPE2"},
+ Endpoint: oauth2.Endpoint{
+ AuthURL: "https://provider.com/o/oauth2/auth",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ },
+ }
+
+ // Redirect user to consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state", oauth2.AccessTypeOffline)
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Use the authorization code that is pushed to the redirect URL.
+ // NewTransportWithCode will do the handshake to retrieve
+ // an access token and initiate a Transport that is
+ // authorized and authenticated by the retrieved token.
+ var code string
+ if _, err := fmt.Scan(&code); err != nil {
+ log.Fatal(err)
+ }
+ tok, err := conf.Exchange(oauth2.NoContext, code)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ client := conf.Client(oauth2.NoContext, tok)
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
new file mode 100644
index 000000000000..1648cb58daaf
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/github/github.go
@@ -0,0 +1,16 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package github provides constants for using OAuth2 to access Github.
+package github // import "golang.org/x/oauth2/github"
+
+import (
+ "golang.org/x/oauth2"
+)
+
+// Endpoint is Github's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://github.com/login/oauth/authorize",
+ TokenURL: "https://github.com/login/oauth/access_token",
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
new file mode 100644
index 000000000000..c6213d9cea33
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appengine.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appengine,!appenginevm
+
+package google
+
+import (
+ "time"
+
+ "appengine"
+
+ "golang.org/x/oauth2"
+)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// You are required to provide a valid appengine.Context as context.
+func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource {
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scope,
+ fetcherFunc: aeFetcherFunc,
+ }
+}
+
+var aeFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) {
+ c, ok := ctx.(appengine.Context)
+ if !ok {
+ return "", time.Time{}, errInvalidContext
+ }
+ return appengine.AccessToken(c, scope...)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go
new file mode 100644
index 000000000000..12af742d2f59
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/appenginevm.go
@@ -0,0 +1,36 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm !appengine
+
+package google
+
+import (
+ "time"
+
+ "golang.org/x/oauth2"
+ "google.golang.org/appengine"
+)
+
+// AppEngineTokenSource returns a token source that fetches tokens
+// issued to the current App Engine application's service account.
+// If you are implementing a 3-legged OAuth 2.0 flow on App Engine
+// that involves user accounts, see oauth2.Config instead.
+//
+// You are required to provide a valid appengine.Context as context.
+func AppEngineTokenSource(ctx appengine.Context, scope ...string) oauth2.TokenSource {
+ return &appEngineTokenSource{
+ ctx: ctx,
+ scopes: scope,
+ fetcherFunc: aeVMFetcherFunc,
+ }
+}
+
+var aeVMFetcherFunc = func(ctx oauth2.Context, scope ...string) (string, time.Time, error) {
+ c, ok := ctx.(appengine.Context)
+ if !ok {
+ return "", time.Time{}, errInvalidContext
+ }
+ return appengine.AccessToken(c, scope...)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
new file mode 100644
index 000000000000..2958692cefc3
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/example_test.go
@@ -0,0 +1,133 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build appenginevm !appengine
+
+package google_test
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "testing"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/urlfetch"
+)
+
+// Remove after Go 1.4.
+// Related to https://codereview.appspot.com/107320046
+func TestA(t *testing.T) {}
+
+func Example_webServer() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &oauth2.Config{
+ ClientID: "YOUR_CLIENT_ID",
+ ClientSecret: "YOUR_CLIENT_SECRET",
+ RedirectURL: "YOUR_REDIRECT_URL",
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ Endpoint: google.Endpoint,
+ }
+ // Redirect user to Google's consent page to ask for permission
+ // for the scopes specified above.
+ url := conf.AuthCodeURL("state")
+ fmt.Printf("Visit the URL for the auth dialog: %v", url)
+
+ // Handle the exchange code to initiate a transport.
+ tok, err := conf.Exchange(oauth2.NoContext, "authorization-code")
+ if err != nil {
+ log.Fatal(err)
+ }
+ client := conf.Client(oauth2.NoContext, tok)
+ client.Get("...")
+}
+
+func ExampleJWTConfigFromJSON() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ // Navigate to your project, then see the "Credentials" page
+ // under "APIs & Auth".
+ // To create a service account client, click "Create new Client ID",
+ // select "Service Account", and click "Create Client ID". A JSON
+ // key file will then be downloaded to your computer.
+ data, err := ioutil.ReadFile("/path/to/your-project-key.json")
+ if err != nil {
+ log.Fatal(err)
+ }
+ conf, err := google.JWTConfigFromJSON(data, "https://www.googleapis.com/auth/bigquery")
+ if err != nil {
+ log.Fatal(err)
+ }
+ // Initiate an http.Client. The following GET request will be
+ // authorized and authenticated on the behalf of
+ // your service account.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func Example_serviceAccount() {
+ // Your credentials should be obtained from the Google
+ // Developer Console (https://console.developers.google.com).
+ conf := &jwt.Config{
+ Email: "xxx@developer.gserviceaccount.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -passin pass:notasecret -out key.pem -nodes
+ //
+ // The field only supports PEM containers with no passphrase.
+ // The openssl command will convert p12 keys to passphrase-less PEM containers.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Scopes: []string{
+ "https://www.googleapis.com/auth/bigquery",
+ "https://www.googleapis.com/auth/blogger",
+ },
+ TokenURL: google.JWTTokenURL,
+ // If you would like to impersonate a user, you can
+ // create a transport with a subject. The following GET
+ // request will be made on the behalf of user@example.com.
+ // Optional.
+ Subject: "user@example.com",
+ }
+ // Initiate an http.Client, the following GET request will be
+ // authorized and authenticated on the behalf of user@example.com.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
+
+func ExampleAppEngineTokenSource() {
+ var req *http.Request // from the ServeHTTP handler
+ ctx := appengine.NewContext(req)
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ Source: google.AppEngineTokenSource(ctx, "https://www.googleapis.com/auth/bigquery"),
+ Base: &urlfetch.Transport{
+ Context: ctx,
+ },
+ },
+ }
+ client.Get("...")
+}
+
+func ExampleComputeTokenSource() {
+ client := &http.Client{
+ Transport: &oauth2.Transport{
+ // Fetch from Google Compute Engine's metadata server to retrieve
+ // an access token for the provided account.
+ // If no account is specified, "default" is used.
+ Source: google.ComputeTokenSource(""),
+ },
+ }
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
new file mode 100644
index 000000000000..bbe6a386f4ee
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/google.go
@@ -0,0 +1,103 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package google provides support for making
+// OAuth2 authorized and authenticated HTTP requests
+// to Google APIs. It supports Web server, client-side,
+// service accounts, Google Compute Engine service accounts,
+// and Google App Engine service accounts authorization
+// and authentications flows:
+//
+// For more information, please read
+// https://developers.google.com/accounts/docs/OAuth2.
+package google // import "golang.org/x/oauth2/google"
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+ "google.golang.org/cloud/compute/metadata"
+)
+
+// NOTE: "google.golang.org/cloud/compute/metadata" is already imported above
+// and used by computeSource.Token; the TODO to switch to it has been done.
+
+// Endpoint is Google's OAuth 2.0 endpoint.
+var Endpoint = oauth2.Endpoint{
+ AuthURL: "https://accounts.google.com/o/oauth2/auth",
+ TokenURL: "https://accounts.google.com/o/oauth2/token",
+}
+
+// JWTTokenURL is Google's OAuth 2.0 token URL to use with the JWT flow.
+const JWTTokenURL = "https://accounts.google.com/o/oauth2/token"
+
+// JWTConfigFromJSON uses a Google Developers service account JSON key file to read
+// the credentials that authorize and authenticate the requests.
+// Create a service account on "Credentials" page under "APIs & Auth" for your
+// project at https://console.developers.google.com to download a JSON key file.
+func JWTConfigFromJSON(jsonKey []byte, scope ...string) (*jwt.Config, error) {
+ var key struct {
+ Email string `json:"client_email"`
+ PrivateKey string `json:"private_key"`
+ }
+ if err := json.Unmarshal(jsonKey, &key); err != nil {
+ return nil, err
+ }
+ return &jwt.Config{
+ Email: key.Email,
+ PrivateKey: []byte(key.PrivateKey),
+ Scopes: scope,
+ TokenURL: JWTTokenURL,
+ }, nil
+}
+
+// ComputeTokenSource returns a token source that fetches access tokens
+// from Google Compute Engine (GCE)'s metadata server. It's only valid to use
+// this token source if your program is running on a GCE instance.
+// If no account is specified, "default" is used.
+// Further information about retrieving access tokens from the GCE metadata
+// server can be found at https://cloud.google.com/compute/docs/authentication.
+func ComputeTokenSource(account string) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, computeSource{account: account})
+}
+
+type computeSource struct {
+ account string
+}
+
+func (cs computeSource) Token() (*oauth2.Token, error) {
+ if !metadata.OnGCE() {
+ return nil, errors.New("oauth2/google: can't get a token from the metadata service; not running on GCE")
+ }
+ acct := cs.account
+ if acct == "" {
+ acct = "default"
+ }
+ tokenJSON, err := metadata.Get("instance/service-accounts/" + acct + "/token")
+ if err != nil {
+ return nil, err
+ }
+ var res struct {
+ AccessToken string `json:"access_token"`
+ ExpiresInSec int `json:"expires_in"`
+ TokenType string `json:"token_type"`
+ }
+ err = json.NewDecoder(strings.NewReader(tokenJSON)).Decode(&res)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2/google: invalid token JSON from metadata: %v", err)
+ }
+ if res.ExpiresInSec == 0 || res.AccessToken == "" {
+ return nil, fmt.Errorf("oauth2/google: incomplete token received from metadata")
+ }
+ return &oauth2.Token{
+ AccessToken: res.AccessToken,
+ TokenType: res.TokenType,
+ Expiry: time.Now().Add(time.Duration(res.ExpiresInSec) * time.Second),
+ }, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go b/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go
new file mode 100644
index 000000000000..d0eb3da0c6aa
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/google/source_appengine.go
@@ -0,0 +1,71 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package google
+
+import (
+ "errors"
+ "sort"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/oauth2"
+)
+
+var (
+ aeTokensMu sync.Mutex // guards aeTokens and appEngineTokenSource.key
+
+ // aeTokens helps the fetched tokens to be reused until their expiration.
+ aeTokens = make(map[string]*tokenLock) // key is '\0'-separated scopes
+)
+
+var errInvalidContext = errors.New("oauth2: a valid appengine.Context is required")
+
+type tokenLock struct {
+ mu sync.Mutex // guards t; held while updating t
+ t *oauth2.Token
+}
+
+type appEngineTokenSource struct {
+ ctx oauth2.Context
+
+ // fetcherFunc makes the actual RPC to fetch a new access
+ // token with an expiry time. Provider of this function is
+ // responsible to assert that the given context is valid.
+ fetcherFunc func(ctx oauth2.Context, scope ...string) (accessToken string, expiry time.Time, err error)
+
+ // scopes and key are guarded by the package-level mutex aeTokensMu
+ scopes []string
+ key string
+}
+
+func (ts *appEngineTokenSource) Token() (*oauth2.Token, error) {
+ aeTokensMu.Lock()
+ if ts.key == "" {
+ sort.Sort(sort.StringSlice(ts.scopes))
+ ts.key = strings.Join(ts.scopes, string(0))
+ }
+ tok, ok := aeTokens[ts.key]
+ if !ok {
+ tok = &tokenLock{}
+ aeTokens[ts.key] = tok
+ }
+ aeTokensMu.Unlock()
+
+ tok.mu.Lock()
+ defer tok.mu.Unlock()
+ if tok.t.Valid() {
+ return tok.t, nil
+ }
+ access, exp, err := ts.fetcherFunc(ts.ctx, ts.scopes...)
+ if err != nil {
+ return nil, err
+ }
+ tok.t = &oauth2.Token{
+ AccessToken: access,
+ Expiry: exp,
+ }
+ return tok.t, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
new file mode 100644
index 000000000000..47c8f14317c0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/internal/oauth2.go
@@ -0,0 +1,37 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package internal contains support packages for oauth2 package.
+package internal
+
+import (
+ "crypto/rsa"
+ "crypto/x509"
+ "encoding/pem"
+ "errors"
+)
+
+// ParseKey converts the binary contents of a private key file
+// to an *rsa.PrivateKey. It detects whether the private key is in a
+// PEM container or not. If so, it extracts the private key
+// from PEM container before conversion. It only supports PEM
+// containers with no passphrase.
+func ParseKey(key []byte) (*rsa.PrivateKey, error) {
+ block, _ := pem.Decode(key)
+ if block != nil {
+ key = block.Bytes
+ }
+ parsedKey, err := x509.ParsePKCS8PrivateKey(key)
+ if err != nil {
+ parsedKey, err = x509.ParsePKCS1PrivateKey(key)
+ if err != nil {
+ return nil, err
+ }
+ }
+ parsed, ok := parsedKey.(*rsa.PrivateKey)
+ if !ok {
+ return nil, errors.New("oauth2: private key is invalid")
+ }
+ return parsed, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
new file mode 100644
index 000000000000..362323c4e745
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jws/jws.go
@@ -0,0 +1,160 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jws provides encoding and decoding utilities for
+// signed JWS messages.
+package jws // import "golang.org/x/oauth2/jws"
+
+import (
+ "bytes"
+ "crypto"
+ "crypto/rand"
+ "crypto/rsa"
+ "crypto/sha256"
+ "encoding/base64"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+)
+
+// ClaimSet contains information about the JWT signature including the
+// permissions being requested (scopes), the target of the token, the issuer,
+// the time the token was issued, and the lifetime of the token.
+type ClaimSet struct {
+ Iss string `json:"iss"` // email address of the client_id of the application making the access token request
+ Scope string `json:"scope,omitempty"` // space-delimited list of the permissions the application requests
+ Aud string `json:"aud"` // descriptor of the intended target of the assertion (Optional).
+ Exp int64 `json:"exp"` // the expiration time of the assertion
+ Iat int64 `json:"iat"` // the time the assertion was issued.
+ Typ string `json:"typ,omitempty"` // token type (Optional).
+
+ // Email for which the application is requesting delegated access (Optional).
+ Sub string `json:"sub,omitempty"`
+
+ // The old name of Sub. Client keeps setting Prn to be
+ // compliant with legacy OAuth 2.0 providers. (Optional)
+ Prn string `json:"prn,omitempty"`
+
+ // See http://tools.ietf.org/html/draft-jones-json-web-token-10#section-4.3
+ // This array is marshalled using custom code (see (c *ClaimSet) encode()).
+ PrivateClaims map[string]interface{} `json:"-"`
+
+ exp time.Time
+ iat time.Time
+}
+
+func (c *ClaimSet) encode() (string, error) {
+ if c.exp.IsZero() || c.iat.IsZero() {
+ // Reverting time back for machines whose time is not perfectly in sync.
+ // If client machine's time is in the future according
+ // to Google servers, an access token will not be issued.
+ now := time.Now().Add(-10 * time.Second)
+ c.iat = now
+ c.exp = now.Add(time.Hour)
+ }
+
+ c.Exp = c.exp.Unix()
+ c.Iat = c.iat.Unix()
+
+ b, err := json.Marshal(c)
+ if err != nil {
+ return "", err
+ }
+
+ if len(c.PrivateClaims) == 0 {
+ return base64Encode(b), nil
+ }
+
+ // Marshal private claim set and then append it to b.
+ prv, err := json.Marshal(c.PrivateClaims)
+ if err != nil {
+ return "", fmt.Errorf("jws: invalid map of private claims %v", c.PrivateClaims)
+ }
+
+ // Concatenate public and private claim JSON objects.
+ if !bytes.HasSuffix(b, []byte{'}'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", b)
+ }
+ if !bytes.HasPrefix(prv, []byte{'{'}) {
+ return "", fmt.Errorf("jws: invalid JSON %s", prv)
+ }
+ b[len(b)-1] = ',' // Replace closing curly brace with a comma.
+ b = append(b, prv[1:]...) // Append private claims.
+ return base64Encode(b), nil
+}
+
+// Header represents the header for the signed JWS payloads.
+type Header struct {
+ // The algorithm used for signature.
+ Algorithm string `json:"alg"`
+
+ // Represents the token type.
+ Typ string `json:"typ"`
+}
+
+func (h *Header) encode() (string, error) {
+ b, err := json.Marshal(h)
+ if err != nil {
+ return "", err
+ }
+ return base64Encode(b), nil
+}
+
+// Decode decodes a claim set from a JWS payload.
+func Decode(payload string) (*ClaimSet, error) {
+ // decode returned id token to get expiry
+ s := strings.Split(payload, ".")
+ if len(s) < 2 {
+ // TODO(jbd): Provide more context about the error.
+ return nil, errors.New("jws: invalid token received")
+ }
+ decoded, err := base64Decode(s[1])
+ if err != nil {
+ return nil, err
+ }
+ c := &ClaimSet{}
+ err = json.NewDecoder(bytes.NewBuffer(decoded)).Decode(c)
+ return c, err
+}
+
+// Encode encodes a signed JWS with provided header and claim set.
+func Encode(header *Header, c *ClaimSet, signature *rsa.PrivateKey) (string, error) {
+ head, err := header.encode()
+ if err != nil {
+ return "", err
+ }
+ cs, err := c.encode()
+ if err != nil {
+ return "", err
+ }
+ ss := fmt.Sprintf("%s.%s", head, cs)
+ h := sha256.New()
+ h.Write([]byte(ss))
+ b, err := rsa.SignPKCS1v15(rand.Reader, signature, crypto.SHA256, h.Sum(nil))
+ if err != nil {
+ return "", err
+ }
+ sig := base64Encode(b)
+ return fmt.Sprintf("%s.%s", ss, sig), nil
+}
+
+// base64Encode returns a Base64url encoded version of the input with any
+// trailing "=" stripped.
+func base64Encode(b []byte) string {
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// base64Decode decodes the Base64url encoded string
+func base64Decode(s string) ([]byte, error) {
+ // add back missing padding
+ switch len(s) % 4 {
+ case 2:
+ s += "=="
+ case 3:
+ s += "="
+ }
+ return base64.URLEncoding.DecodeString(s)
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
new file mode 100644
index 000000000000..6d618836ea37
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/example_test.go
@@ -0,0 +1,31 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt_test
+
+import (
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/jwt"
+)
+
+func ExampleJWTConfig() {
+ conf := &jwt.Config{
+ Email: "xxx@developer.com",
+ // The contents of your RSA private key or your PEM file
+ // that contains a private key.
+ // If you have a p12 file instead, you
+ // can use `openssl` to export the private key into a pem file.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ // It only supports PEM containers with no passphrase.
+ PrivateKey: []byte("-----BEGIN RSA PRIVATE KEY-----..."),
+ Subject: "user@example.com",
+ TokenURL: "https://provider.com/o/oauth2/token",
+ }
+ // Initiate an http.Client, the following GET request will be
+ // authorized and authenticated on the behalf of user@example.com.
+ client := conf.Client(oauth2.NoContext)
+ client.Get("...")
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
new file mode 100644
index 000000000000..a8e21388fc64
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt.go
@@ -0,0 +1,146 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package jwt implements the OAuth 2.0 JSON Web Token flow, commonly
+// known as "two-legged OAuth 2.0".
+//
+// See: https://tools.ietf.org/html/draft-ietf-oauth-jwt-bearer-12
+package jwt
+
+import (
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/internal"
+ "golang.org/x/oauth2/jws"
+)
+
+var (
+ defaultGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer"
+ defaultHeader = &jws.Header{Algorithm: "RS256", Typ: "JWT"}
+)
+
+// Config is the configuration for using JWT to fetch tokens,
+// commonly known as "two-legged OAuth 2.0".
+type Config struct {
+ // Email is the OAuth client identifier used when communicating with
+ // the configured OAuth provider.
+ Email string
+
+ // PrivateKey contains the contents of an RSA private key or the
+ // contents of a PEM file that contains a private key. The provided
+ // private key is used to sign JWT payloads.
+ // PEM containers with a passphrase are not supported.
+ // Use the following command to convert a PKCS 12 file into a PEM.
+ //
+ // $ openssl pkcs12 -in key.p12 -out key.pem -nodes
+ //
+ PrivateKey []byte
+
+ // Subject is the optional user to impersonate.
+ Subject string
+
+ // Scopes optionally specifies a list of requested permission scopes.
+ Scopes []string
+
+ // TokenURL is the endpoint required to complete the 2-legged JWT flow.
+ TokenURL string
+}
+
+// TokenSource returns a JWT TokenSource using the configuration
+// in c and the HTTP client from the provided context.
+func (c *Config) TokenSource(ctx oauth2.Context) oauth2.TokenSource {
+ return oauth2.ReuseTokenSource(nil, jwtSource{ctx, c})
+}
+
+// Client returns an HTTP client wrapping the context's
+// HTTP transport and adding Authorization headers with tokens
+// obtained from c.
+//
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx oauth2.Context) *http.Client {
+ return oauth2.NewClient(ctx, c.TokenSource(ctx))
+}
+
+// jwtSource is a source that always does a signed JWT request for a token.
+// It should typically be wrapped with a reuseTokenSource.
+type jwtSource struct {
+ ctx oauth2.Context
+ conf *Config
+}
+
+func (js jwtSource) Token() (*oauth2.Token, error) {
+ pk, err := internal.ParseKey(js.conf.PrivateKey)
+ if err != nil {
+ return nil, err
+ }
+ hc := oauth2.NewClient(js.ctx, nil)
+ claimSet := &jws.ClaimSet{
+ Iss: js.conf.Email,
+ Scope: strings.Join(js.conf.Scopes, " "),
+ Aud: js.conf.TokenURL,
+ }
+ if subject := js.conf.Subject; subject != "" {
+ claimSet.Sub = subject
+ // prn is the old name of sub. Keep setting it
+ // to be compatible with legacy OAuth 2.0 providers.
+ claimSet.Prn = subject
+ }
+ payload, err := jws.Encode(defaultHeader, claimSet, pk)
+ if err != nil {
+ return nil, err
+ }
+ v := url.Values{}
+ v.Set("grant_type", defaultGrantType)
+ v.Set("assertion", payload)
+ resp, err := hc.PostForm(js.conf.TokenURL, v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(resp.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if c := resp.StatusCode; c < 200 || c > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", resp.Status, body)
+ }
+ // tokenRes is the JSON response body.
+ var tokenRes struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ IDToken string `json:"id_token"`
+ ExpiresIn int64 `json:"expires_in"` // relative seconds from now
+ }
+ if err := json.Unmarshal(body, &tokenRes); err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ token := &oauth2.Token{
+ AccessToken: tokenRes.AccessToken,
+ TokenType: tokenRes.TokenType,
+ }
+ raw := make(map[string]interface{})
+ json.Unmarshal(body, &raw) // no error checks for optional fields
+ token = token.WithExtra(raw)
+
+ if secs := tokenRes.ExpiresIn; secs > 0 {
+ token.Expiry = time.Now().Add(time.Duration(secs) * time.Second)
+ }
+ if v := tokenRes.IDToken; v != "" {
+ // decode returned id token to get expiry
+ claimSet, err := jws.Decode(v)
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: error decoding JWT token: %v", err)
+ }
+ token.Expiry = time.Unix(claimSet.Exp, 0)
+ }
+ return token, nil
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
new file mode 100644
index 000000000000..da922c3d00d0
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/jwt/jwt_test.go
@@ -0,0 +1,134 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package jwt
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/oauth2"
+)
+
+var dummyPrivateKey = []byte(`-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAx4fm7dngEmOULNmAs1IGZ9Apfzh+BkaQ1dzkmbUgpcoghucE
+DZRnAGd2aPyB6skGMXUytWQvNYav0WTR00wFtX1ohWTfv68HGXJ8QXCpyoSKSSFY
+fuP9X36wBSkSX9J5DVgiuzD5VBdzUISSmapjKm+DcbRALjz6OUIPEWi1Tjl6p5RK
+1w41qdbmt7E5/kGhKLDuT7+M83g4VWhgIvaAXtnhklDAggilPPa8ZJ1IFe31lNlr
+k4DRk38nc6sEutdf3RL7QoH7FBusI7uXV03DC6dwN1kP4GE7bjJhcRb/7jYt7CQ9
+/E9Exz3c0yAp0yrTg0Fwh+qxfH9dKwN52S7SBwIDAQABAoIBAQCaCs26K07WY5Jt
+3a2Cw3y2gPrIgTCqX6hJs7O5ByEhXZ8nBwsWANBUe4vrGaajQHdLj5OKfsIDrOvn
+2NI1MqflqeAbu/kR32q3tq8/Rl+PPiwUsW3E6Pcf1orGMSNCXxeducF2iySySzh3
+nSIhCG5uwJDWI7a4+9KiieFgK1pt/Iv30q1SQS8IEntTfXYwANQrfKUVMmVF9aIK
+6/WZE2yd5+q3wVVIJ6jsmTzoDCX6QQkkJICIYwCkglmVy5AeTckOVwcXL0jqw5Kf
+5/soZJQwLEyBoQq7Kbpa26QHq+CJONetPP8Ssy8MJJXBT+u/bSseMb3Zsr5cr43e
+DJOhwsThAoGBAPY6rPKl2NT/K7XfRCGm1sbWjUQyDShscwuWJ5+kD0yudnT/ZEJ1
+M3+KS/iOOAoHDdEDi9crRvMl0UfNa8MAcDKHflzxg2jg/QI+fTBjPP5GOX0lkZ9g
+z6VePoVoQw2gpPFVNPPTxKfk27tEzbaffvOLGBEih0Kb7HTINkW8rIlzAoGBAM9y
+1yr+jvfS1cGFtNU+Gotoihw2eMKtIqR03Yn3n0PK1nVCDKqwdUqCypz4+ml6cxRK
+J8+Pfdh7D+ZJd4LEG6Y4QRDLuv5OA700tUoSHxMSNn3q9As4+T3MUyYxWKvTeu3U
+f2NWP9ePU0lV8ttk7YlpVRaPQmc1qwooBA/z/8AdAoGAW9x0HWqmRICWTBnpjyxx
+QGlW9rQ9mHEtUotIaRSJ6K/F3cxSGUEkX1a3FRnp6kPLcckC6NlqdNgNBd6rb2rA
+cPl/uSkZP42Als+9YMoFPU/xrrDPbUhu72EDrj3Bllnyb168jKLa4VBOccUvggxr
+Dm08I1hgYgdN5huzs7y6GeUCgYEAj+AZJSOJ6o1aXS6rfV3mMRve9bQ9yt8jcKXw
+5HhOCEmMtaSKfnOF1Ziih34Sxsb7O2428DiX0mV/YHtBnPsAJidL0SdLWIapBzeg
+KHArByIRkwE6IvJvwpGMdaex1PIGhx5i/3VZL9qiq/ElT05PhIb+UXgoWMabCp84
+OgxDK20CgYAeaFo8BdQ7FmVX2+EEejF+8xSge6WVLtkaon8bqcn6P0O8lLypoOhd
+mJAYH8WU+UAy9pecUnDZj14LAGNVmYcse8HFX71MoshnvCTFEPVo4rZxIAGwMpeJ
+5jgQ3slYLpqrGlcbLgUXBUgzEO684Wk/UV9DFPlHALVqCfXQ9dpJPg==
+-----END RSA PRIVATE KEY-----`)
+
+func TestJWTFetch_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{
+ "access_token": "90d64460d14870c08c81352a05dedd3465940a7c",
+ "scope": "user",
+ "token_type": "bearer",
+ "expires_in": 3600
+ }`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !tok.Valid() {
+ t.Errorf("Token invalid")
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v", tok.TokenType)
+ }
+ if tok.Expiry.IsZero() {
+ t.Errorf("Unexpected token expiry, %#v", tok.Expiry)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
+
+func TestJWTFetch_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok == nil {
+ t.Fatalf("token is nil")
+ }
+ if tok.Valid() {
+ t.Errorf("token is valid. want invalid.")
+ }
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected non-empty access token %q.", tok.AccessToken)
+ }
+ if want := "bearer"; tok.TokenType != want {
+ t.Errorf("TokenType = %q; want %q", tok.TokenType, want)
+ }
+ scope := tok.Extra("scope")
+ if want := "user"; scope != want {
+ t.Errorf("token scope = %q; want %q", scope, want)
+ }
+}
+
+func TestJWTFetch_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := &Config{
+ Email: "aaa@xxx.com",
+ PrivateKey: dummyPrivateKey,
+ TokenURL: ts.URL,
+ }
+ tok, err := conf.TokenSource(oauth2.NoContext).Token()
+ if err == nil {
+ t.Error("got a token; expected error")
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
new file mode 100644
index 000000000000..90f983bc3605
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2.go
@@ -0,0 +1,462 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package oauth2 provides support for making
+// OAuth2 authorized and authenticated HTTP requests.
+// It can additionally grant authorization with Bearer JWT.
+package oauth2 // import "golang.org/x/oauth2"
+
+import (
+ "bytes"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "sync"
+ "time"
+
+ "golang.org/x/net/context"
+)
+
+// Context can be a golang.org/x/net/context.Context, or an App Engine Context.
+// If you don't care and aren't running on App Engine, you may use NoContext.
+type Context interface{}
+
+// NoContext is the default context. If you're not running this code
+// on App Engine or not using golang.org/x/net/context to provide a custom
+// HTTP client, you should use NoContext.
+var NoContext Context = nil
+
+// Config describes a typical 3-legged OAuth2 flow, with both the
+// client application information and the server's endpoint URLs.
+type Config struct {
+ // ClientID is the application's ID.
+ ClientID string
+
+ // ClientSecret is the application's secret.
+ ClientSecret string
+
+ // Endpoint contains the resource server's token endpoint
+ // URLs. These are constants specific to each server and are
+ // often available via site-specific packages, such as
+ // google.Endpoint or github.Endpoint.
+ Endpoint Endpoint
+
+ // RedirectURL is the URL to redirect users going through
+ // the OAuth flow, after the resource owner's URLs.
+ RedirectURL string
+
+ // Scope specifies optional requested permissions.
+ Scopes []string
+}
+
+// A TokenSource is anything that can return a token.
+type TokenSource interface {
+ // Token returns a token or an error.
+ // Token must be safe for concurrent use by multiple goroutines.
+ // The returned Token must not be modified.
+ Token() (*Token, error)
+}
+
+// Endpoint contains the OAuth 2.0 provider's authorization and token
+// endpoint URLs.
+type Endpoint struct {
+ AuthURL string
+ TokenURL string
+}
+
+var (
+ // AccessTypeOnline and AccessTypeOffline are options passed
+ // to the Options.AuthCodeURL method. They modify the
+ // "access_type" field that gets sent in the URL returned by
+ // AuthCodeURL.
+ //
+ // Online (the default if neither is specified) is the default.
+ // If your application needs to refresh access tokens when the
+ // user is not present at the browser, then use offline. This
+ // will result in your application obtaining a refresh token
+ // the first time your application exchanges an authorization
+ // code for a user.
+ AccessTypeOnline AuthCodeOption = setParam{"access_type", "online"}
+ AccessTypeOffline AuthCodeOption = setParam{"access_type", "offline"}
+
+ // ApprovalForce forces the users to view the consent dialog
+ // and confirm the permissions request at the URL returned
+ // from AuthCodeURL, even if they've already done so.
+ ApprovalForce AuthCodeOption = setParam{"approval_prompt", "force"}
+)
+
+type setParam struct{ k, v string }
+
+func (p setParam) setValue(m url.Values) { m.Set(p.k, p.v) }
+
+// An AuthCodeOption is passed to Config.AuthCodeURL.
+type AuthCodeOption interface {
+ setValue(url.Values)
+}
+
+// AuthCodeURL returns a URL to OAuth 2.0 provider's consent page
+// that asks for permissions for the required scopes explicitly.
+//
+// State is a token to protect the user from CSRF attacks. You must
+// always provide a non-zero string and validate that it matches
+// the state query parameter on your redirect callback.
+// See http://tools.ietf.org/html/rfc6749#section-10.12 for more info.
+//
+// Opts may include AccessTypeOnline or AccessTypeOffline, as well
+// as ApprovalForce.
+func (c *Config) AuthCodeURL(state string, opts ...AuthCodeOption) string {
+	var buf bytes.Buffer
+	buf.WriteString(c.Endpoint.AuthURL)
+	v := url.Values{
+		"response_type": {"code"},
+		"client_id":     {c.ClientID},
+		"redirect_uri":  condVal(c.RedirectURL),
+		"scope":         condVal(strings.Join(c.Scopes, " ")),
+		"state":         condVal(state),
+	}
+	for _, opt := range opts {
+		opt.setValue(v)
+	}
+	if strings.Contains(c.Endpoint.AuthURL, "?") {
+		buf.WriteByte('&')
+	} else {
+		buf.WriteByte('?')
+	}
+	buf.WriteString(v.Encode())
+	return buf.String()
+}
+
+// Exchange converts an authorization code into a token.
+//
+// It is used after a resource provider redirects the user back
+// to the Redirect URI (the URL obtained from AuthCodeURL).
+//
+// The HTTP client to use is derived from the context. If nil,
+// http.DefaultClient is used. See the Context type's documentation.
+//
+// The code will be in the *http.Request.FormValue("code"). Before
+// calling Exchange, be sure to validate FormValue("state").
+func (c *Config) Exchange(ctx Context, code string) (*Token, error) {
+ return retrieveToken(ctx, c, url.Values{
+ "grant_type": {"authorization_code"},
+ "code": {code},
+ "redirect_uri": condVal(c.RedirectURL),
+ "scope": condVal(strings.Join(c.Scopes, " ")),
+ })
+}
+
+// contextClientFunc is a func which tries to return an *http.Client
+// given a Context value. If it returns an error, the search stops
+// with that error. If it returns (nil, nil), the search continues
+// down the list of registered funcs.
+type contextClientFunc func(Context) (*http.Client, error)
+
+var contextClientFuncs []contextClientFunc
+
+func registerContextClientFunc(fn contextClientFunc) {
+ contextClientFuncs = append(contextClientFuncs, fn)
+}
+
+func contextClient(ctx Context) (*http.Client, error) {
+ for _, fn := range contextClientFuncs {
+ c, err := fn(ctx)
+ if err != nil {
+ return nil, err
+ }
+ if c != nil {
+ return c, nil
+ }
+ }
+ if xc, ok := ctx.(context.Context); ok {
+ if hc, ok := xc.Value(HTTPClient).(*http.Client); ok {
+ return hc, nil
+ }
+ }
+ return http.DefaultClient, nil
+}
+
+func contextTransport(ctx Context) http.RoundTripper {
+ hc, err := contextClient(ctx)
+ if err != nil {
+ // This is a rare error case (somebody using nil on App Engine),
+ // so I'd rather not everybody do an error check on this Client
+ // method. They can get the error that they're doing it wrong
+ // later, at client.Get/PostForm time.
+ return errorTransport{err}
+ }
+ return hc.Transport
+}
+
+// Client returns an HTTP client using the provided token.
+// The token will auto-refresh as necessary. The underlying
+// HTTP transport will be obtained using the provided context.
+// The returned client and its Transport should not be modified.
+func (c *Config) Client(ctx Context, t *Token) *http.Client {
+ return NewClient(ctx, c.TokenSource(ctx, t))
+}
+
+// TokenSource returns a TokenSource that returns t until t expires,
+// automatically refreshing it as necessary using the provided context.
+// See the Context documentation.
+//
+// Most users will use Config.Client instead.
+func (c *Config) TokenSource(ctx Context, t *Token) TokenSource {
+	nwn := &reuseTokenSource{t: t}
+	nwn.new = tokenRefresher{
+		ctx:      ctx,
+		conf:     c,
+		oldToken: &nwn.t,
+	}
+	return nwn
+}
+
+// tokenRefresher is a TokenSource that makes "grant_type"=="refresh_token"
+// HTTP requests to renew a token using a RefreshToken.
+type tokenRefresher struct {
+ ctx Context // used to get HTTP requests
+ conf *Config
+ oldToken **Token // pointer to old *Token w/ RefreshToken
+}
+
+func (tf tokenRefresher) Token() (*Token, error) {
+ t := *tf.oldToken
+ if t == nil {
+ return nil, errors.New("oauth2: attempted use of nil Token")
+ }
+ if t.RefreshToken == "" {
+ return nil, errors.New("oauth2: token expired and refresh token is not set")
+ }
+ return retrieveToken(tf.ctx, tf.conf, url.Values{
+ "grant_type": {"refresh_token"},
+ "refresh_token": {t.RefreshToken},
+ })
+}
+
+// reuseTokenSource is a TokenSource that holds a single token in memory
+// and validates its expiry before each call to retrieve it with
+// Token. If it's expired, it will be auto-refreshed using the
+// new TokenSource.
+//
+// The initial token t may be nil; Token then refreshes via new.
+type reuseTokenSource struct {
+	new TokenSource // called when t is expired.
+
+	mu sync.Mutex // guards t
+	t  *Token
+}
+
+// Token returns the current token if it's still valid, else will
+// refresh the current token (using r.Context for HTTP client
+// information) and return the new one.
+func (s *reuseTokenSource) Token() (*Token, error) {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if s.t.Valid() {
+ return s.t, nil
+ }
+ t, err := s.new.Token()
+ if err != nil {
+ return nil, err
+ }
+ s.t = t
+ return t, nil
+}
+
+func retrieveToken(ctx Context, c *Config, v url.Values) (*Token, error) {
+ hc, err := contextClient(ctx)
+ if err != nil {
+ return nil, err
+ }
+ v.Set("client_id", c.ClientID)
+ bustedAuth := !providerAuthHeaderWorks(c.Endpoint.TokenURL)
+ if bustedAuth && c.ClientSecret != "" {
+ v.Set("client_secret", c.ClientSecret)
+ }
+ req, err := http.NewRequest("POST", c.Endpoint.TokenURL, strings.NewReader(v.Encode()))
+ if err != nil {
+ return nil, err
+ }
+ req.Header.Set("Content-Type", "application/x-www-form-urlencoded")
+ if !bustedAuth && c.ClientSecret != "" {
+ req.SetBasicAuth(c.ClientID, c.ClientSecret)
+ }
+ r, err := hc.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer r.Body.Close()
+ body, err := ioutil.ReadAll(io.LimitReader(r.Body, 1<<20))
+ if err != nil {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v", err)
+ }
+ if code := r.StatusCode; code < 200 || code > 299 {
+ return nil, fmt.Errorf("oauth2: cannot fetch token: %v\nResponse: %s", r.Status, body)
+ }
+
+ var token *Token
+ content, _, _ := mime.ParseMediaType(r.Header.Get("Content-Type"))
+ switch content {
+ case "application/x-www-form-urlencoded", "text/plain":
+ vals, err := url.ParseQuery(string(body))
+ if err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: vals.Get("access_token"),
+ TokenType: vals.Get("token_type"),
+ RefreshToken: vals.Get("refresh_token"),
+ raw: vals,
+ }
+ e := vals.Get("expires_in")
+ if e == "" {
+ // TODO(jbd): Facebook's OAuth2 implementation is broken and
+ // returns expires_in field in expires. Remove the fallback to expires,
+ // when Facebook fixes their implementation.
+ e = vals.Get("expires")
+ }
+ expires, _ := strconv.Atoi(e)
+ if expires != 0 {
+ token.Expiry = time.Now().Add(time.Duration(expires) * time.Second)
+ }
+ default:
+ var tj tokenJSON
+ if err = json.Unmarshal(body, &tj); err != nil {
+ return nil, err
+ }
+ token = &Token{
+ AccessToken: tj.AccessToken,
+ TokenType: tj.TokenType,
+ RefreshToken: tj.RefreshToken,
+ Expiry: tj.expiry(),
+ raw: make(map[string]interface{}),
+ }
+ json.Unmarshal(body, &token.raw) // no error checks for optional fields
+ }
+ // Don't overwrite `RefreshToken` with an empty value
+ // if this was a token refreshing request.
+ if token.RefreshToken == "" {
+ token.RefreshToken = v.Get("refresh_token")
+ }
+ return token, nil
+}
+
+// tokenJSON is the struct representing the HTTP response from OAuth2
+// providers returning a token in JSON form.
+type tokenJSON struct {
+ AccessToken string `json:"access_token"`
+ TokenType string `json:"token_type"`
+ RefreshToken string `json:"refresh_token"`
+ ExpiresIn int32 `json:"expires_in"`
+ Expires int32 `json:"expires"` // broken Facebook spelling of expires_in
+}
+
+func (e *tokenJSON) expiry() (t time.Time) {
+ if v := e.ExpiresIn; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ if v := e.Expires; v != 0 {
+ return time.Now().Add(time.Duration(v) * time.Second)
+ }
+ return
+}
+
+func condVal(v string) []string {
+ if v == "" {
+ return nil
+ }
+ return []string{v}
+}
+
+// providerAuthHeaderWorks reports whether the OAuth2 server identified by the tokenURL
+// implements the OAuth2 spec correctly
+// See https://code.google.com/p/goauth2/issues/detail?id=31 for background.
+// In summary:
+// - Reddit only accepts client secret in the Authorization header
+// - Dropbox accepts either it in URL param or Auth header, but not both.
+// - Google only accepts URL param (not spec compliant?), not Auth header
+func providerAuthHeaderWorks(tokenURL string) bool {
+ if strings.HasPrefix(tokenURL, "https://accounts.google.com/") ||
+ strings.HasPrefix(tokenURL, "https://github.com/") ||
+ strings.HasPrefix(tokenURL, "https://api.instagram.com/") ||
+ strings.HasPrefix(tokenURL, "https://www.douban.com/") ||
+ strings.HasPrefix(tokenURL, "https://api.dropbox.com/") ||
+ strings.HasPrefix(tokenURL, "https://api.soundcloud.com/") ||
+ strings.HasPrefix(tokenURL, "https://www.linkedin.com/") {
+ // Some sites fail to implement the OAuth2 spec fully.
+ return false
+ }
+
+ // Assume the provider implements the spec properly
+ // otherwise. We can add more exceptions as they're
+ // discovered. We will _not_ be adding configurable hooks
+ // to this package to let users select server bugs.
+ return true
+}
+
+// HTTPClient is the context key to use with golang.org/x/net/context's
+// WithValue function to associate an *http.Client value with a context.
+var HTTPClient contextKey
+
+// contextKey is just an empty struct. It exists so HTTPClient can be
+// an immutable public variable with a unique type. It's immutable
+// because nobody else can create a contextKey, being unexported.
+type contextKey struct{}
+
+// NewClient creates an *http.Client from a Context and TokenSource.
+// The returned client is not valid beyond the lifetime of the context.
+//
+// As a special case, if src is nil, a non-OAuth2 client is returned
+// using the provided context. This exists to support related OAuth2
+// packages.
+func NewClient(ctx Context, src TokenSource) *http.Client {
+ if src == nil {
+ c, err := contextClient(ctx)
+ if err != nil {
+ return &http.Client{Transport: errorTransport{err}}
+ }
+ return c
+ }
+ return &http.Client{
+ Transport: &Transport{
+ Base: contextTransport(ctx),
+ Source: ReuseTokenSource(nil, src),
+ },
+ }
+}
+
+// ReuseTokenSource returns a TokenSource which repeatedly returns the
+// same token as long as it's valid, starting with t.
+// When its cached token is invalid, a new token is obtained from src.
+//
+// ReuseTokenSource is typically used to reuse tokens from a cache
+// (such as a file on disk) between runs of a program, rather than
+// obtaining new tokens unnecessarily.
+//
+// The initial token t may be nil, in which case the TokenSource is
+// wrapped in a caching version if it isn't one already. This also
+// means it's always safe to wrap ReuseTokenSource around any other
+// TokenSource without adverse effects.
+func ReuseTokenSource(t *Token, src TokenSource) TokenSource {
+ // Don't wrap a reuseTokenSource in itself. That would work,
+ // but cause an unnecessary number of mutex operations.
+ // Just build the equivalent one.
+ if rt, ok := src.(*reuseTokenSource); ok {
+ if t == nil {
+ // Just use it directly.
+ return rt
+ }
+ src = rt.new
+ }
+ return &reuseTokenSource{
+ t: t,
+ new: src,
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
new file mode 100644
index 000000000000..804098ac9df8
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/oauth2_test.go
@@ -0,0 +1,260 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+
+ "golang.org/x/net/context"
+)
+
+type mockTransport struct {
+ rt func(req *http.Request) (resp *http.Response, err error)
+}
+
+func (t *mockTransport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
+ return t.rt(req)
+}
+
+type mockCache struct {
+ token *Token
+ readErr error
+}
+
+func (c *mockCache) ReadToken() (*Token, error) {
+ return c.token, c.readErr
+}
+
+func (c *mockCache) WriteToken(*Token) {
+ // do nothing
+}
+
+func newConf(url string) *Config {
+ return &Config{
+ ClientID: "CLIENT_ID",
+ ClientSecret: "CLIENT_SECRET",
+ RedirectURL: "REDIRECT_URL",
+ Scopes: []string{"scope1", "scope2"},
+ Endpoint: Endpoint{
+ AuthURL: url + "/auth",
+ TokenURL: url + "/token",
+ },
+ }
+}
+
+func TestAuthCodeURL(t *testing.T) {
+ conf := newConf("server")
+ url := conf.AuthCodeURL("foo", AccessTypeOffline, ApprovalForce)
+ if url != "server/auth?access_type=offline&approval_prompt=force&client_id=CLIENT_ID&redirect_uri=REDIRECT_URL&response_type=code&scope=scope1+scope2&state=foo" {
+ t.Errorf("Auth code URL doesn't match the expected, found: %v", url)
+ }
+}
+
+func TestAuthCodeURL_Optional(t *testing.T) {
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "/auth-url",
+ TokenURL: "/token-url",
+ },
+ }
+ url := conf.AuthCodeURL("")
+ if url != "/auth-url?client_id=CLIENT_ID&response_type=code" {
+ t.Fatalf("Auth code URL doesn't match the expected, found: %v", url)
+ }
+}
+
+func TestExchangeRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/x-www-form-urlencoded")
+ w.Write([]byte("access_token=90d64460d14870c08c81352a05dedd3465940a7c&scope=user&token_type=bearer"))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
+
+func TestExchangeRequest_JSONResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected exchange request URL, %v is found.", r.URL)
+ }
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "Basic Q0xJRU5UX0lEOkNMSUVOVF9TRUNSRVQ=" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ t.Errorf("Failed reading request body: %s.", err)
+ }
+ if string(body) != "client_id=CLIENT_ID&code=exchange-code&grant_type=authorization_code&redirect_uri=REDIRECT_URL&scope=scope1+scope2" {
+ t.Errorf("Unexpected exchange payload, %v is found.", string(body))
+ }
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token": "90d64460d14870c08c81352a05dedd3465940a7c", "scope": "user", "token_type": "bearer", "expires_in": 86400}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "exchange-code")
+ if err != nil {
+ t.Error(err)
+ }
+ if !tok.Valid() {
+ t.Fatalf("Token invalid. Got: %#v", tok)
+ }
+ if tok.AccessToken != "90d64460d14870c08c81352a05dedd3465940a7c" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+ if tok.TokenType != "bearer" {
+ t.Errorf("Unexpected token type, %#v.", tok.TokenType)
+ }
+ scope := tok.Extra("scope")
+ if scope != "user" {
+ t.Errorf("Unexpected value for scope: %v", scope)
+ }
+}
+
+func TestExchangeRequest_BadResponse(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ tok, err := conf.Exchange(NoContext, "code")
+ if err != nil {
+ t.Fatal(err)
+ }
+ if tok.AccessToken != "" {
+ t.Errorf("Unexpected access token, %#v.", tok.AccessToken)
+ }
+}
+
+func TestExchangeRequest_BadResponseType(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ w.Header().Set("Content-Type", "application/json")
+ w.Write([]byte(`{"access_token":123, "scope": "user", "token_type": "bearer"}`))
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ _, err := conf.Exchange(NoContext, "exchange-code")
+ if err == nil {
+ t.Error("expected error from invalid access_token type")
+ }
+}
+
+func TestExchangeRequest_NonBasicAuth(t *testing.T) {
+ tr := &mockTransport{
+ rt: func(r *http.Request) (w *http.Response, err error) {
+ headerAuth := r.Header.Get("Authorization")
+ if headerAuth != "" {
+ t.Errorf("Unexpected authorization header, %v is found.", headerAuth)
+ }
+ return nil, errors.New("no response")
+ },
+ }
+ c := &http.Client{Transport: tr}
+ conf := &Config{
+ ClientID: "CLIENT_ID",
+ Endpoint: Endpoint{
+ AuthURL: "https://accounts.google.com/auth",
+ TokenURL: "https://accounts.google.com/token",
+ },
+ }
+
+ ctx := context.WithValue(context.Background(), HTTPClient, c)
+ conf.Exchange(ctx, "code")
+}
+
+func TestTokenRefreshRequest(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(NoContext, &Token{RefreshToken: "REFRESH_TOKEN"})
+ c.Get(ts.URL + "/somethingelse")
+}
+
+func TestFetchWithNoRefreshToken(t *testing.T) {
+ ts := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+ if r.URL.String() == "/somethingelse" {
+ return
+ }
+ if r.URL.String() != "/token" {
+ t.Errorf("Unexpected token refresh request URL, %v is found.", r.URL)
+ }
+ headerContentType := r.Header.Get("Content-Type")
+ if headerContentType != "application/x-www-form-urlencoded" {
+ t.Errorf("Unexpected Content-Type header, %v is found.", headerContentType)
+ }
+ body, _ := ioutil.ReadAll(r.Body)
+ if string(body) != "client_id=CLIENT_ID&grant_type=refresh_token&refresh_token=REFRESH_TOKEN" {
+ t.Errorf("Unexpected refresh token payload, %v is found.", string(body))
+ }
+ }))
+ defer ts.Close()
+ conf := newConf(ts.URL)
+ c := conf.Client(NoContext, nil)
+ _, err := c.Get(ts.URL + "/somethingelse")
+ if err == nil {
+ t.Errorf("Fetch should return an error if no refresh token is set")
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token.go b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
new file mode 100644
index 000000000000..e04a2dd9cfc7
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token.go
@@ -0,0 +1,99 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "net/http"
+ "net/url"
+ "time"
+)
+
+// Token represents the credentials used to authorize
+// the requests to access protected resources on the OAuth 2.0
+// provider's backend.
+//
+// Most users of this package should not access fields of Token
+// directly. They're exported mostly for use by related packages
+// implementing derivative OAuth2 flows.
+type Token struct {
+	// AccessToken is the token that authorizes and authenticates
+	// the requests.
+	AccessToken string `json:"access_token"`
+
+	// TokenType is the type of token.
+	// The Type method returns either this or "Bearer", the default.
+	TokenType string `json:"token_type,omitempty"`
+
+	// RefreshToken is a token that's used by the application
+	// (as opposed to the user) to refresh the access token
+	// if it expires.
+	RefreshToken string `json:"refresh_token,omitempty"`
+
+	// Expiry is the optional expiration time of the access token.
+	//
+	// If zero, TokenSource implementations will reuse the same
+	// token forever and RefreshToken or equivalent
+	// mechanisms for that TokenSource will not be used.
+	Expiry time.Time `json:"expiry,omitempty"`
+
+	// raw optionally contains extra metadata from the server
+	// when updating a token.
+	raw interface{}
+}
+
+// Type returns t.TokenType if non-empty, else "Bearer".
+func (t *Token) Type() string {
+ if t.TokenType != "" {
+ return t.TokenType
+ }
+ return "Bearer"
+}
+
+// SetAuthHeader sets the Authorization header to r using the access
+// token in t.
+//
+// This method is unnecessary when using Transport or an HTTP Client
+// returned by this package.
+func (t *Token) SetAuthHeader(r *http.Request) {
+ r.Header.Set("Authorization", t.Type()+" "+t.AccessToken)
+}
+
+// WithExtra returns a new Token that's a clone of t, but using the
+// provided raw extra map. This is only intended for use by packages
+// implementing derivative OAuth2 flows.
+func (t *Token) WithExtra(extra interface{}) *Token {
+ t2 := new(Token)
+ *t2 = *t
+ t2.raw = extra
+ return t2
+}
+
+// Extra returns an extra field.
+// Extra fields are key-value pairs returned by the server as a
+// part of the token retrieval response.
+func (t *Token) Extra(key string) interface{} {
+ if vals, ok := t.raw.(url.Values); ok {
+ // TODO(jbd): Cast numeric values to int64 or float64.
+ return vals.Get(key)
+ }
+ if raw, ok := t.raw.(map[string]interface{}); ok {
+ return raw[key]
+ }
+ return nil
+}
+
+// expired reports whether the token is expired.
+// t must be non-nil.
+func (t *Token) expired() bool {
+ if t.Expiry.IsZero() {
+ return false
+ }
+ return t.Expiry.Before(time.Now())
+}
+
+// Valid reports whether t is non-nil, has an AccessToken, and is not expired.
+func (t *Token) Valid() bool {
+ return t != nil && t.AccessToken != "" && !t.expired()
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
new file mode 100644
index 000000000000..74d6366568a5
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/token_test.go
@@ -0,0 +1,30 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import "testing"
+
+func TestTokenExtra(t *testing.T) {
+ type testCase struct {
+ key string
+ val interface{}
+ want interface{}
+ }
+ const key = "extra-key"
+ cases := []testCase{
+ {key: key, val: "abc", want: "abc"},
+ {key: key, val: 123, want: 123},
+ {key: key, val: "", want: ""},
+ {key: "other-key", val: "def", want: nil},
+ }
+ for _, tc := range cases {
+ extra := make(map[string]interface{})
+ extra[tc.key] = tc.val
+ tok := &Token{raw: extra}
+ if got, want := tok.Extra(key), tc.want; got != want {
+ t.Errorf("Extra(%q) = %q; want %q", key, got, want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
new file mode 100644
index 000000000000..10339a0be7d4
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport.go
@@ -0,0 +1,138 @@
+// Copyright 2014 The oauth2 Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package oauth2
+
+import (
+ "errors"
+ "io"
+ "net/http"
+ "sync"
+)
+
+// Transport is an http.RoundTripper that makes OAuth 2.0 HTTP requests,
+// wrapping a base RoundTripper and adding an Authorization header
+// with a token from the supplied Sources.
+//
+// Transport is a low-level mechanism. Most code will use the
+// higher-level Config.Client method instead.
+type Transport struct {
+ // Source supplies the token to add to outgoing requests'
+ // Authorization headers.
+ Source TokenSource
+
+ // Base is the base RoundTripper used to make HTTP requests.
+ // If nil, http.DefaultTransport is used.
+ Base http.RoundTripper
+
+ mu sync.Mutex // guards modReq
+ modReq map[*http.Request]*http.Request // original -> modified
+}
+
+// RoundTrip authorizes and authenticates the request with an
+// access token. If no token exists or token is expired,
+// tries to refresh/fetch a new token.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ if t.Source == nil {
+ return nil, errors.New("oauth2: Transport's Source is nil")
+ }
+ token, err := t.Source.Token()
+ if err != nil {
+ return nil, err
+ }
+
+ req2 := cloneRequest(req) // per RoundTripper contract
+ token.SetAuthHeader(req2)
+ t.setModReq(req, req2)
+ res, err := t.base().RoundTrip(req2)
+ if err != nil {
+ t.setModReq(req, nil)
+ return nil, err
+ }
+ res.Body = &onEOFReader{
+ rc: res.Body,
+ fn: func() { t.setModReq(req, nil) },
+ }
+ return res, nil
+}
+
+// CancelRequest cancels an in-flight request by closing its connection.
+func (t *Transport) CancelRequest(req *http.Request) {
+ type canceler interface {
+ CancelRequest(*http.Request)
+ }
+ if cr, ok := t.base().(canceler); ok {
+ t.mu.Lock()
+ modReq := t.modReq[req]
+ delete(t.modReq, req)
+ t.mu.Unlock()
+ cr.CancelRequest(modReq)
+ }
+}
+
+func (t *Transport) base() http.RoundTripper {
+ if t.Base != nil {
+ return t.Base
+ }
+ return http.DefaultTransport
+}
+
+func (t *Transport) setModReq(orig, mod *http.Request) {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+ if t.modReq == nil {
+ t.modReq = make(map[*http.Request]*http.Request)
+ }
+ if mod == nil {
+ delete(t.modReq, orig)
+ } else {
+ t.modReq[orig] = mod
+ }
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header, len(r.Header))
+ for k, s := range r.Header {
+ r2.Header[k] = append([]string(nil), s...)
+ }
+ return r2
+}
+
+type onEOFReader struct {
+ rc io.ReadCloser
+ fn func()
+}
+
+func (r *onEOFReader) Read(p []byte) (n int, err error) {
+ n, err = r.rc.Read(p)
+ if err == io.EOF {
+ r.runFunc()
+ }
+ return
+}
+
+func (r *onEOFReader) Close() error {
+ err := r.rc.Close()
+ r.runFunc()
+ return err
+}
+
+func (r *onEOFReader) runFunc() {
+ if fn := r.fn; fn != nil {
+ fn()
+ r.fn = nil
+ }
+}
+
+type errorTransport struct{ err error }
+
+func (t errorTransport) RoundTrip(*http.Request) (*http.Response, error) {
+ return nil, t.err
+}
diff --git a/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
new file mode 100644
index 000000000000..efb8232ac4c7
--- /dev/null
+++ b/Godeps/_workspace/src/golang.org/x/oauth2/transport_test.go
@@ -0,0 +1,53 @@
+package oauth2
+
+import (
+ "net/http"
+ "net/http/httptest"
+ "testing"
+ "time"
+)
+
+type tokenSource struct{ token *Token }
+
+func (t *tokenSource) Token() (*Token, error) {
+ return t.token, nil
+}
+
+func TestTransportTokenSource(t *testing.T) {
+ ts := &tokenSource{
+ token: &Token{
+ AccessToken: "abc",
+ },
+ }
+ tr := &Transport{
+ Source: ts,
+ }
+ server := newMockServer(func(w http.ResponseWriter, r *http.Request) {
+ if r.Header.Get("Authorization") != "Bearer abc" {
+ t.Errorf("Transport doesn't set the Authorization header from the fetched token")
+ }
+ })
+ defer server.Close()
+ client := http.Client{Transport: tr}
+ client.Get(server.URL)
+}
+
+func TestTokenValidNoAccessToken(t *testing.T) {
+ token := &Token{}
+ if token.Valid() {
+ t.Errorf("Token should not be valid with no access token")
+ }
+}
+
+func TestExpiredWithExpiry(t *testing.T) {
+ token := &Token{
+ Expiry: time.Now().Add(-5 * time.Hour),
+ }
+ if token.Valid() {
+ t.Errorf("Token should not be valid if it expired in the past")
+ }
+}
+
+func newMockServer(handler func(w http.ResponseWriter, r *http.Request)) *httptest.Server {
+ return httptest.NewServer(http.HandlerFunc(handler))
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/.travis.yml b/Godeps/_workspace/src/google.golang.org/appengine/.travis.yml
new file mode 100644
index 000000000000..bfa8658a41a5
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/.travis.yml
@@ -0,0 +1,14 @@
+language: go
+
+go:
+ - 1.4
+
+install:
+ - export GOPATH="$HOME/gopath"
+ - mkdir -p "$GOPATH/src/google.golang.org"
+ - mv "$TRAVIS_BUILD_DIR" "$GOPATH/src/google.golang.org/appengine"
+ - go get -v -t -d google.golang.org/appengine/...
+
+script:
+ - go test -v google.golang.org/appengine/...
+ - go test -v -race google.golang.org/appengine/...
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/LICENSE b/Godeps/_workspace/src/google.golang.org/appengine/LICENSE
new file mode 100644
index 000000000000..d64569567334
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/LICENSE
@@ -0,0 +1,202 @@
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ APPENDIX: How to apply the Apache License to your work.
+
+ To apply the Apache License to your work, attach the following
+ boilerplate notice, with the fields enclosed by brackets "[]"
+ replaced with your own identifying information. (Don't include
+ the brackets!) The text should be enclosed in the appropriate
+ comment syntax for the file format. We also recommend that a
+ file or class name and description of purpose be included on the
+ same "printed page" as the copyright notice for easier
+ identification within third-party archives.
+
+ Copyright [yyyy] [name of copyright owner]
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/README.md b/Godeps/_workspace/src/google.golang.org/appengine/README.md
new file mode 100644
index 000000000000..06d1e60bf728
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/README.md
@@ -0,0 +1,65 @@
+# Go App Engine for Managed VMs
+
+[![Build Status](https://travis-ci.org/golang/appengine.svg)](https://travis-ci.org/golang/appengine)
+
+This repository supports the Go runtime for Managed VMs on App Engine.
+It provides APIs for interacting with App Engine services.
+Its canonical import path is `google.golang.org/appengine`.
+
+See https://cloud.google.com/appengine/docs/go/managed-vms/
+for more information.
+
+## Directory structure
+The top level directory of this repository is the `appengine` package. It
+contains the
+basic types (e.g. `appengine.Context`) that are used across APIs. Specific API
+packages are in subdirectories (e.g. `datastore`).
+
+There is an `internal` subdirectory that contains service protocol buffers,
+plus packages required for connectivity to make API calls. App Engine apps
+should not directly import any package under `internal`.
+
+## Updating a Go App Engine app
+
+This section describes how to update a traditional Go App Engine app to run on Managed VMs.
+
+### 1. Update YAML files
+
+The `app.yaml` file (and YAML files for modules) should have these new lines added:
+```
+vm: true
+manual_scaling:
+ instances: 1
+```
+See https://cloud.google.com/appengine/docs/go/modules/#Go_Instance_scaling_and_class for details.
+
+### 2. Update import paths
+
+The import paths for App Engine packages are now fully qualified, based at `google.golang.org/appengine`.
+You will need to update your code to use import paths starting with that; for instance,
+code importing `appengine/datastore` will now need to import `google.golang.org/appengine/datastore`.
+You can do that manually, or by running this command to recursively update all Go source files in the current directory:
+(may require GNU sed)
+```
+sed -i '/"appengine/{s,"appengine,"google.golang.org/appengine,;s,appengine_,appengine/,}' \
+ $(find . -name '*.go')
+```
+
+### 3. Update code using deprecated, removed or modified APIs
+
+Most App Engine services are available with exactly the same API.
+A few APIs were cleaned up, and some are not available yet.
+This list summarises the differences:
+
+* `appengine.Datacenter` now takes an `appengine.Context` argument.
+* `datastore.PropertyLoadSaver` has been simplified to use slices in place of channels.
+* `search.FieldLoadSaver` now handles document metadata.
+* `taskqueue.QueueStats` no longer takes a maxTasks argument. That argument has been
+ deprecated and unused for a long time.
+* `appengine/aetest`, `appengine/blobstore`, `appengine/cloudsql`
+ and `appengine/runtime` have not been ported yet.
+* `appengine.BackendHostname` and `appengine.BackendInstance` were for the deprecated backends feature.
+  Use `appengine.ModuleHostname` and `appengine.ModuleName` instead.
+* `appengine.IsCapabilityDisabled` and `appengine/capability` are obsolete.
+* Most of `appengine/file` is deprecated. Use [Google Cloud Storage](https://godoc.org/google.golang.org/cloud/storage) instead.
+* `appengine/socket` is deprecated. Use the standard `net` package instead.
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/appengine.go b/Godeps/_workspace/src/google.golang.org/appengine/appengine.go
new file mode 100644
index 000000000000..af12492929cb
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/appengine.go
@@ -0,0 +1,78 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package appengine provides basic functionality for Google App Engine.
+//
+// For more information on how to write Go apps for Google App Engine, see:
+// https://cloud.google.com/appengine/docs/go/
+package appengine // import "google.golang.org/appengine"
+
+import (
+ "net/http"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsDevAppServer reports whether the App Engine app is running in the
+// development App Server.
+func IsDevAppServer() bool {
+ // TODO(dsymonds): Detect this.
+ return false
+}
+
+// Context represents the context of an in-flight HTTP request.
+type Context interface {
+ // Debugf formats its arguments according to the format, analogous to fmt.Printf,
+ // and records the text as a log message at Debug level.
+ Debugf(format string, args ...interface{})
+
+ // Infof is like Debugf, but at Info level.
+ Infof(format string, args ...interface{})
+
+ // Warningf is like Debugf, but at Warning level.
+ Warningf(format string, args ...interface{})
+
+ // Errorf is like Debugf, but at Error level.
+ Errorf(format string, args ...interface{})
+
+ // Criticalf is like Debugf, but at Critical level.
+ Criticalf(format string, args ...interface{})
+
+ // The remaining methods are for internal use only.
+ // Developer-facing APIs wrap these methods to provide a more friendly API.
+
+ // Internal use only.
+ Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error
+ // Internal use only. Use AppID instead.
+ FullyQualifiedAppID() string
+ // Internal use only.
+ Request() interface{}
+}
+
+// NewContext returns a context for an in-flight HTTP request.
+// Repeated calls will return the same value.
+func NewContext(req *http.Request) Context {
+ return internal.NewContext(req)
+}
+
+// TODO(dsymonds): Add BackgroundContext function?
+
+// BlobKey is a key for a blobstore blob.
+//
+// Conceptually, this type belongs in the blobstore package, but it lives in
+// the appengine package to avoid a circular dependency: blobstore depends on
+// datastore, and datastore needs to refer to the BlobKey type.
+type BlobKey string
+
+// GeoPoint represents a location as latitude/longitude in degrees.
+type GeoPoint struct {
+ Lat, Lng float64
+}
+
+// Valid returns whether a GeoPoint is within [-90, 90] latitude and [-180, 180] longitude.
+func (g GeoPoint) Valid() bool {
+ return -90 <= g.Lat && g.Lat <= 90 && -180 <= g.Lng && g.Lng <= 180
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/appengine_test.go b/Godeps/_workspace/src/google.golang.org/appengine/appengine_test.go
new file mode 100644
index 000000000000..e1cfb82cce09
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/appengine_test.go
@@ -0,0 +1,45 @@
+package appengine
+
+import (
+ "testing"
+)
+
+func TestValidGeoPoint(t *testing.T) {
+ testCases := []struct {
+ desc string
+ pt GeoPoint
+ want bool
+ }{
+ {
+ "valid",
+ GeoPoint{67.21, 13.37},
+ true,
+ },
+ {
+			"low lat",
+ GeoPoint{-90.01, 13.37},
+ false,
+ },
+ {
+			"high lat",
+ GeoPoint{90.01, 13.37},
+ false,
+ },
+ {
+ "high lng",
+ GeoPoint{67.21, 182},
+ false,
+ },
+ {
+ "low lng",
+ GeoPoint{67.21, -181},
+ false,
+ },
+ }
+
+ for _, tc := range testCases {
+ if got := tc.pt.Valid(); got != tc.want {
+ t.Errorf("%s: got %v, want %v", tc.desc, got, tc.want)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/channel/channel.go b/Godeps/_workspace/src/google.golang.org/appengine/channel/channel.go
new file mode 100644
index 000000000000..5782fffcfb40
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/channel/channel.go
@@ -0,0 +1,81 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package channel implements the server side of App Engine's Channel API.
+
+Create creates a new channel associated with the given clientID,
+which must be unique to the client that will use the returned token.
+
+ token, err := channel.Create(c, "player1")
+ if err != nil {
+ // handle error
+ }
+ // return token to the client in an HTTP response
+
+Send sends a message to the client over the channel identified by clientID.
+
+ channel.Send(c, "player1", "Game over!")
+*/
+package channel // import "google.golang.org/appengine/channel"
+
+import (
+ "encoding/json"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/channel"
+)
+
+// Create creates a channel and returns a token for use by the client.
+// The clientID is an application-provided string used to identify the client.
+func Create(c appengine.Context, clientID string) (token string, err error) {
+ req := &pb.CreateChannelRequest{
+ ApplicationKey: &clientID,
+ }
+ resp := &pb.CreateChannelResponse{}
+ err = c.Call(service, "CreateChannel", req, resp, nil)
+ token = resp.GetToken()
+ return token, remapError(err)
+}
+
+// Send sends a message on the channel associated with clientID.
+func Send(c appengine.Context, clientID, message string) error {
+ req := &pb.SendMessageRequest{
+ ApplicationKey: &clientID,
+ Message: &message,
+ }
+ resp := &basepb.VoidProto{}
+ return remapError(c.Call(service, "SendChannelMessage", req, resp, nil))
+}
+
+// SendJSON is a helper function that sends a JSON-encoded value
+// on the channel associated with clientID.
+func SendJSON(c appengine.Context, clientID string, value interface{}) error {
+ m, err := json.Marshal(value)
+ if err != nil {
+ return err
+ }
+ return Send(c, clientID, string(m))
+}
+
+// remapError fixes any APIError referencing "xmpp" into one referencing "channel".
+func remapError(err error) error {
+ if e, ok := err.(*internal.APIError); ok {
+ if e.Service == "xmpp" {
+ e.Service = "channel"
+ }
+ }
+ return err
+}
+
+var service = "xmpp" // prod
+
+func init() {
+ if appengine.IsDevAppServer() {
+ service = "channel" // dev
+ }
+ internal.RegisterErrorCodeMap("channel", pb.ChannelServiceError_ErrorCode_name)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/channel/channel_test.go b/Godeps/_workspace/src/google.golang.org/appengine/channel/channel_test.go
new file mode 100644
index 000000000000..d04733798cee
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/channel/channel_test.go
@@ -0,0 +1,17 @@
+package channel
+
+import (
+ "testing"
+
+ "google.golang.org/appengine/internal"
+)
+
+func TestRemapError(t *testing.T) {
+ err := &internal.APIError{
+ Service: "xmpp",
+ }
+ err = remapError(err).(*internal.APIError)
+ if err.Service != "channel" {
+ t.Errorf("err.Service = %q, want %q", err.Service, "channel")
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore.go
new file mode 100644
index 000000000000..e29b96db6492
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore.go
@@ -0,0 +1,405 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ // ErrInvalidEntityType is returned when functions like Get or Next are
+ // passed a dst or src argument of invalid type.
+ ErrInvalidEntityType = errors.New("datastore: invalid entity type")
+ // ErrInvalidKey is returned when an invalid key is presented.
+ ErrInvalidKey = errors.New("datastore: invalid key")
+ // ErrNoSuchEntity is returned when no entity was found for a given key.
+ ErrNoSuchEntity = errors.New("datastore: no such entity")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+// StructType is the type of the struct pointed to by the destination argument
+// passed to Get or to Iterator.Next.
+type ErrFieldMismatch struct {
+ StructType reflect.Type
+ FieldName string
+ Reason string
+}
+
+func (e *ErrFieldMismatch) Error() string {
+ return fmt.Sprintf("datastore: cannot load field %q into a %q: %s",
+ e.FieldName, e.StructType, e.Reason)
+}
+
+// protoToKey converts a Reference proto to a *Key.
+func protoToKey(r *pb.Reference) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Path.Element {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToProto converts a *Key to a Reference proto.
+func keyToProto(defaultAppID string, k *Key) *pb.Reference {
+ appID := k.appID
+ if appID == "" {
+ appID = defaultAppID
+ }
+ n := 0
+ for i := k; i != nil; i = i.parent {
+ n++
+ }
+ e := make([]*pb.Path_Element, n)
+ for i := k; i != nil; i = i.parent {
+ n--
+ e[n] = &pb.Path_Element{
+ Type: &i.kind,
+ }
+ // At most one of {Name,Id} should be set.
+ // Neither will be set for incomplete keys.
+ if i.stringID != "" {
+ e[n].Name = &i.stringID
+ } else if i.intID != 0 {
+ e[n].Id = &i.intID
+ }
+ }
+ var namespace *string
+ if k.namespace != "" {
+ namespace = proto.String(k.namespace)
+ }
+ return &pb.Reference{
+ App: proto.String(appID),
+ NameSpace: namespace,
+ Path: &pb.Path{
+ Element: e,
+ },
+ }
+}
+
+// multiKeyToProto is a batch version of keyToProto.
+func multiKeyToProto(appID string, key []*Key) []*pb.Reference {
+ ret := make([]*pb.Reference, len(key))
+ for i, k := range key {
+ ret[i] = keyToProto(appID, k)
+ }
+ return ret
+}
+
+// multiValid is a batch version of Key.valid. It returns an error, not a
+// []bool.
+func multiValid(key []*Key) error {
+ invalid := false
+ for _, k := range key {
+ if !k.valid() {
+ invalid = true
+ break
+ }
+ }
+ if !invalid {
+ return nil
+ }
+ err := make(appengine.MultiError, len(key))
+ for i, k := range key {
+ if !k.valid() {
+ err[i] = ErrInvalidKey
+ }
+ }
+ return err
+}
+
+// It's unfortunate that the two semantically equivalent concepts pb.Reference
+// and pb.PropertyValue_ReferenceValue aren't the same type. For example, the
+// two have different protobuf field numbers.
+
+// referenceValueToKey is the same as protoToKey except the input is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func referenceValueToKey(r *pb.PropertyValue_ReferenceValue) (k *Key, err error) {
+ appID := r.GetApp()
+ namespace := r.GetNameSpace()
+ for _, e := range r.Pathelement {
+ k = &Key{
+ kind: e.GetType(),
+ stringID: e.GetName(),
+ intID: e.GetId(),
+ parent: k,
+ appID: appID,
+ namespace: namespace,
+ }
+ if !k.valid() {
+ return nil, ErrInvalidKey
+ }
+ }
+ return
+}
+
+// keyToReferenceValue is the same as keyToProto except the output is a
+// PropertyValue_ReferenceValue instead of a Reference.
+func keyToReferenceValue(defaultAppID string, k *Key) *pb.PropertyValue_ReferenceValue {
+ ref := keyToProto(defaultAppID, k)
+ pe := make([]*pb.PropertyValue_ReferenceValue_PathElement, len(ref.Path.Element))
+ for i, e := range ref.Path.Element {
+ pe[i] = &pb.PropertyValue_ReferenceValue_PathElement{
+ Type: e.Type,
+ Id: e.Id,
+ Name: e.Name,
+ }
+ }
+ return &pb.PropertyValue_ReferenceValue{
+ App: ref.App,
+ NameSpace: ref.NameSpace,
+ Pathelement: pe,
+ }
+}
+
+type multiArgType int
+
+const (
+ multiArgTypeInvalid multiArgType = iota
+ multiArgTypePropertyLoadSaver
+ multiArgTypeStruct
+ multiArgTypeStructPtr
+ multiArgTypeInterface
+)
+
+// checkMultiArg checks that v has type []S, []*S, []I, or []P, for some struct
+// type S, for some interface type I, or some non-interface non-pointer type P
+// such that P or *P implements PropertyLoadSaver.
+//
+// It returns what category the slice's elements are, and the reflect.Type
+// that represents S, I or P.
+//
+// As a special case, PropertyList is an invalid type for v.
+func checkMultiArg(v reflect.Value) (m multiArgType, elemType reflect.Type) {
+ if v.Kind() != reflect.Slice {
+ return multiArgTypeInvalid, nil
+ }
+ if v.Type() == typeOfPropertyList {
+ return multiArgTypeInvalid, nil
+ }
+ elemType = v.Type().Elem()
+ if reflect.PtrTo(elemType).Implements(typeOfPropertyLoadSaver) {
+ return multiArgTypePropertyLoadSaver, elemType
+ }
+ switch elemType.Kind() {
+ case reflect.Struct:
+ return multiArgTypeStruct, elemType
+ case reflect.Interface:
+ return multiArgTypeInterface, elemType
+ case reflect.Ptr:
+ elemType = elemType.Elem()
+ if elemType.Kind() == reflect.Struct {
+ return multiArgTypeStructPtr, elemType
+ }
+ }
+ return multiArgTypeInvalid, nil
+}
+
+// Get loads the entity stored for k into dst, which must be a struct pointer
+// or implement PropertyLoadSaver. If there is no such entity for the key, Get
+// returns ErrNoSuchEntity.
+//
+// The values of dst's unmatched struct fields are not modified, and matching
+// slice-typed fields are not reset before appending to them. In particular, it
+// is recommended to pass a pointer to a zero valued struct on each Get call.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer.
+func Get(c appengine.Context, key *Key, dst interface{}) error {
+ if dst == nil { // GetMulti catches nil interface; we need to catch nil ptr here
+ return ErrInvalidEntityType
+ }
+ err := GetMulti(c, []*Key{key}, []interface{}{dst})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// GetMulti is a batch version of Get.
+//
+// dst must be a []S, []*S, []I or []P, for some struct type S, some interface
+// type I, or some non-interface non-pointer type P such that P or *P
+// implements PropertyLoadSaver. If an []I, each element must be a valid dst
+// for Get: it must be a struct pointer or implement PropertyLoadSaver.
+//
+// As a special case, PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when []PropertyList was intended.
+func GetMulti(c appengine.Context, key []*Key, dst interface{}) error {
+ v := reflect.ValueOf(dst)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return errors.New("datastore: dst has invalid type")
+ }
+ if len(key) != v.Len() {
+ return errors.New("datastore: key and dst slices have different length")
+ }
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.GetRequest{
+ Key: multiKeyToProto(c.FullyQualifiedAppID(), key),
+ }
+ res := &pb.GetResponse{}
+ if err := c.Call("datastore_v3", "Get", req, res, nil); err != nil {
+ return err
+ }
+ if len(key) != len(res.Entity) {
+ return errors.New("datastore: internal error: server returned the wrong number of entities")
+ }
+ multiErr, any := make(appengine.MultiError, len(key)), false
+ for i, e := range res.Entity {
+ if e.Entity == nil {
+ multiErr[i] = ErrNoSuchEntity
+ } else {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
+ elem.Set(reflect.New(elem.Type().Elem()))
+ }
+ multiErr[i] = loadEntity(elem.Interface(), e.Entity)
+ }
+ if multiErr[i] != nil {
+ any = true
+ }
+ }
+ if any {
+ return multiErr
+ }
+ return nil
+}
+
+// Put saves the entity src into the datastore with key k. src must be a struct
+// pointer or implement PropertyLoadSaver; if a struct pointer then any
+// unexported fields of that struct will be skipped. If k is an incomplete key,
+// the returned key will be a unique key generated by the datastore.
+func Put(c appengine.Context, key *Key, src interface{}) (*Key, error) {
+ k, err := PutMulti(c, []*Key{key}, []interface{}{src})
+ if err != nil {
+ if me, ok := err.(appengine.MultiError); ok {
+ return nil, me[0]
+ }
+ return nil, err
+ }
+ return k[0], nil
+}
+
+// PutMulti is a batch version of Put.
+//
+// src must satisfy the same conditions as the dst argument to GetMulti.
+func PutMulti(c appengine.Context, key []*Key, src interface{}) ([]*Key, error) {
+ v := reflect.ValueOf(src)
+ multiArgType, _ := checkMultiArg(v)
+ if multiArgType == multiArgTypeInvalid {
+ return nil, errors.New("datastore: src has invalid type")
+ }
+ if len(key) != v.Len() {
+ return nil, errors.New("datastore: key and src slices have different length")
+ }
+ if len(key) == 0 {
+ return nil, nil
+ }
+ appID := c.FullyQualifiedAppID()
+ if err := multiValid(key); err != nil {
+ return nil, err
+ }
+ req := &pb.PutRequest{}
+ for i := range key {
+ elem := v.Index(i)
+ if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
+ elem = elem.Addr()
+ }
+ sProto, err := saveEntity(appID, key[i], elem.Interface())
+ if err != nil {
+ return nil, err
+ }
+ req.Entity = append(req.Entity, sProto)
+ }
+ res := &pb.PutResponse{}
+ if err := c.Call("datastore_v3", "Put", req, res, nil); err != nil {
+ return nil, err
+ }
+ if len(key) != len(res.Key) {
+ return nil, errors.New("datastore: internal error: server returned the wrong number of keys")
+ }
+ ret := make([]*Key, len(key))
+ for i := range ret {
+ var err error
+ ret[i], err = protoToKey(res.Key[i])
+ if err != nil || ret[i].Incomplete() {
+ return nil, errors.New("datastore: internal error: server returned an invalid key")
+ }
+ }
+ return ret, nil
+}
+
+// Delete deletes the entity for the given key.
+func Delete(c appengine.Context, key *Key) error {
+ err := DeleteMulti(c, []*Key{key})
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// DeleteMulti is a batch version of Delete.
+func DeleteMulti(c appengine.Context, key []*Key) error {
+ if len(key) == 0 {
+ return nil
+ }
+ if err := multiValid(key); err != nil {
+ return err
+ }
+ req := &pb.DeleteRequest{
+ Key: multiKeyToProto(c.FullyQualifiedAppID(), key),
+ }
+ res := &pb.DeleteResponse{}
+ return c.Call("datastore_v3", "Delete", req, res, nil)
+}
+
+func namespaceMod(m proto.Message, namespace string) {
+ // pb.Query is the only type that has a name_space field.
+ // All other namespace support in datastore is in the keys.
+ switch m := m.(type) {
+ case *pb.Query:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ }
+}
+
+func init() {
+ internal.NamespaceMods["datastore_v3"] = namespaceMod
+ internal.RegisterErrorCodeMap("datastore_v3", pb.Error_ErrorCode_name)
+ internal.RegisterTimeoutErrorCode("datastore_v3", int32(pb.Error_TIMEOUT))
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore_test.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore_test.go
new file mode 100644
index 000000000000..593f9fac7d86
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/datastore_test.go
@@ -0,0 +1,1499 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/json"
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+const testAppID = "testApp"
+
+type (
+ myBlob []byte
+ myByte byte
+ myString string
+)
+
+func makeMyByteSlice(n int) []myByte {
+ b := make([]myByte, n)
+ for i := range b {
+ b[i] = myByte(i)
+ }
+ return b
+}
+
// makeInt8Slice returns an n-element []int8 whose i'th element is int8(i).
func makeInt8Slice(n int) []int8 {
	s := make([]int8, n)
	for i := 0; i < n; i++ {
		s[i] = int8(i)
	}
	return s
}
+
// makeUint8Slice returns an n-element []uint8 whose i'th element is uint8(i).
func makeUint8Slice(n int) []uint8 {
	s := make([]uint8, n)
	for i := 0; i < n; i++ {
		s[i] = uint8(i)
	}
	return s
}
+
+func newKey(stringID string, parent *Key) *Key {
+ return &Key{
+ kind: "kind",
+ stringID: stringID,
+ intID: 0,
+ parent: parent,
+ appID: testAppID,
+ }
+}
+
+var (
+ testKey0 = newKey("name0", nil)
+ testKey1a = newKey("name1", nil)
+ testKey1b = newKey("name1", nil)
+ testKey2a = newKey("name2", testKey0)
+ testKey2b = newKey("name2", testKey0)
+ testGeoPt0 = appengine.GeoPoint{Lat: 1.2, Lng: 3.4}
+ testGeoPt1 = appengine.GeoPoint{Lat: 5, Lng: 10}
+ testBadGeoPt = appengine.GeoPoint{Lat: 1000, Lng: 34}
+)
+
+type B0 struct {
+ B []byte
+}
+
+type B1 struct {
+ B []int8
+}
+
+type B2 struct {
+ B myBlob
+}
+
+type B3 struct {
+ B []myByte
+}
+
+type B4 struct {
+ B [][]byte
+}
+
+type B5 struct {
+ B ByteString
+}
+
+type C0 struct {
+ I int
+ C chan int
+}
+
+type C1 struct {
+ I int
+ C *chan int
+}
+
+type C2 struct {
+ I int
+ C []chan int
+}
+
+type C3 struct {
+ C string
+}
+
+type E struct{}
+
+type G0 struct {
+ G appengine.GeoPoint
+}
+
+type G1 struct {
+ G []appengine.GeoPoint
+}
+
+type K0 struct {
+ K *Key
+}
+
+type K1 struct {
+ K []*Key
+}
+
+type N0 struct {
+ X0
+ Nonymous X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N1 struct {
+ X0
+ Nonymous []X0
+ Ignore string `datastore:"-"`
+ Other string
+}
+
+type N2 struct {
+ N1 `datastore:"red"`
+ Green N1 `datastore:"green"`
+ Blue N1
+ White N1 `datastore:"-"`
+}
+
+type O0 struct {
+ I int64
+}
+
+type O1 struct {
+ I int32
+}
+
+type U0 struct {
+ U uint
+}
+
+type U1 struct {
+ U string
+}
+
+type T struct {
+ T time.Time
+}
+
+type X0 struct {
+ S string
+ I int
+ i int
+}
+
+type X1 struct {
+ S myString
+ I int32
+ J int64
+}
+
+type X2 struct {
+ Z string
+ i int
+}
+
+type X3 struct {
+ S bool
+ I int
+}
+
+type Y0 struct {
+ B bool
+ F []float64
+ G []float64
+}
+
+type Y1 struct {
+ B bool
+ F float64
+}
+
+type Y2 struct {
+ B bool
+ F []int64
+}
+
+type Tagged struct {
+ A int `datastore:"a,noindex"`
+ B []int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+
+ Y0 `datastore:"-"`
+ Z chan int `datastore:"-,"`
+}
+
+type InvalidTagged1 struct {
+ I int `datastore:"\t"`
+}
+
+type InvalidTagged2 struct {
+ I int
+ J int `datastore:"I"`
+}
+
+type Inner1 struct {
+ W int32
+ X string
+}
+
+type Inner2 struct {
+ Y float64
+}
+
+type Inner3 struct {
+ Z bool
+}
+
+type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+}
+
+type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+}
+
+type Dotted struct {
+ A DottedA `datastore:"A0.A1.A2"`
+}
+
+type DottedA struct {
+ B DottedB `datastore:"B3"`
+}
+
+type DottedB struct {
+ C int `datastore:"C4.C5"`
+}
+
+type SliceOfSlices struct {
+ I int
+ S []struct {
+ J int
+ F []float64
+ }
+}
+
+type Recursive struct {
+ I int
+ R []Recursive
+}
+
+type MutuallyRecursive0 struct {
+ I int
+ R []MutuallyRecursive1
+}
+
+type MutuallyRecursive1 struct {
+ I int
+ R []MutuallyRecursive0
+}
+
+type Doubler struct {
+ S string
+ I int64
+ B bool
+}
+
// Load implements PropertyLoadSaver.Load by delegating to the default
// struct-field based loading.
func (d *Doubler) Load(props []Property) error {
	return LoadStruct(d, props)
}
+
+func (d *Doubler) Save() ([]Property, error) {
+ // Save the default Property slice to an in-memory buffer (a PropertyList).
+ props, err := SaveStruct(d)
+ if err != nil {
+ return nil, err
+ }
+ var list PropertyList
+ if err := list.Load(props); err != nil {
+ return nil, err
+ }
+
+ // Edit that PropertyList, and send it on.
+ for i := range list {
+ switch v := list[i].Value.(type) {
+ case string:
+ // + means string concatenation.
+ list[i].Value = v + v
+ case int64:
+ // + means integer addition.
+ list[i].Value = v + v
+ }
+ }
+ return list.Save()
+}
+
+var _ PropertyLoadSaver = (*Doubler)(nil)
+
+type Deriver struct {
+ S, Derived, Ignored string
+}
+
+func (e *Deriver) Load(props []Property) error {
+ for _, p := range props {
+ if p.Name != "S" {
+ continue
+ }
+ e.S = p.Value.(string)
+ e.Derived = "derived+" + e.S
+ }
+ return nil
+}
+
+func (e *Deriver) Save() ([]Property, error) {
+ return []Property{
+ {
+ Name: "S",
+ Value: e.S,
+ },
+ }, nil
+}
+
+var _ PropertyLoadSaver = (*Deriver)(nil)
+
+type BadMultiPropEntity struct{}
+
// Load implements PropertyLoadSaver.Load and always reports an error.
func (e *BadMultiPropEntity) Load(props []Property) error {
	return errors.New("unimplemented")
}
+
+func (e *BadMultiPropEntity) Save() ([]Property, error) {
+ // Write multiple properties with the same name "I", but Multiple is false.
+ var props []Property
+ for i := 0; i < 3; i++ {
+ props = append(props, Property{
+ Name: "I",
+ Value: int64(i),
+ })
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = (*BadMultiPropEntity)(nil)
+
+type BK struct {
+ Key appengine.BlobKey
+}
+
+type testCase struct {
+ desc string
+ src interface{}
+ want interface{}
+ putErr string
+ getErr string
+}
+
+var testCases = []testCase{
+ {
+ "chan save fails",
+ &C0{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "*chan save fails",
+ &C1{I: -1},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "[]chan save fails",
+ &C2{I: -1, C: make([]chan int, 8)},
+ &E{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "chan load fails",
+ &C3{C: "not a chan"},
+ &C0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "*chan load fails",
+ &C3{C: "not a *chan"},
+ &C1{},
+ "",
+ "type mismatch",
+ },
+ {
+ "[]chan load fails",
+ &C3{C: "not a []chan"},
+ &C2{},
+ "",
+ "type mismatch",
+ },
+ {
+ "empty struct",
+ &E{},
+ &E{},
+ "",
+ "",
+ },
+ {
+ "geopoint",
+ &G0{G: testGeoPt0},
+ &G0{G: testGeoPt0},
+ "",
+ "",
+ },
+ {
+ "geopoint invalid",
+ &G0{G: testBadGeoPt},
+ &G0{},
+ "invalid GeoPoint value",
+ "",
+ },
+ {
+ "geopoint as props",
+ &G0{G: testGeoPt0},
+ &PropertyList{
+ Property{Name: "G", Value: testGeoPt0, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "geopoint slice",
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ &G1{G: []appengine.GeoPoint{testGeoPt0, testGeoPt1}},
+ "",
+ "",
+ },
+ {
+ "key",
+ &K0{K: testKey1a},
+ &K0{K: testKey1b},
+ "",
+ "",
+ },
+ {
+ "key with parent",
+ &K0{K: testKey2a},
+ &K0{K: testKey2b},
+ "",
+ "",
+ },
+ {
+ "nil key",
+ &K0{},
+ &K0{},
+ "",
+ "",
+ },
+ {
+ "all nil keys in slice",
+ &K1{[]*Key{nil, nil}},
+ &K1{[]*Key{nil, nil}},
+ "",
+ "",
+ },
+ {
+ "some nil keys in slice",
+ &K1{[]*Key{testKey1a, nil, testKey2a}},
+ &K1{[]*Key{testKey1b, nil, testKey2b}},
+ "",
+ "",
+ },
+ {
+ "overflow",
+ &O0{I: 1 << 48},
+ &O1{},
+ "",
+ "overflow",
+ },
+ {
+ "time",
+ &T{T: time.Unix(1e9, 0)},
+ &T{T: time.Unix(1e9, 0)},
+ "",
+ "",
+ },
+ {
+ "time as props",
+ &T{T: time.Unix(1e9, 0)},
+ &PropertyList{
+ Property{Name: "T", Value: time.Unix(1e9, 0), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "uint save",
+ &U0{U: 1},
+ &U0{},
+ "unsupported struct field",
+ "",
+ },
+ {
+ "uint load",
+ &U1{U: "not a uint"},
+ &U0{},
+ "",
+ "type mismatch",
+ },
+ {
+ "zero",
+ &X0{},
+ &X0{},
+ "",
+ "",
+ },
+ {
+ "basic",
+ &X0{S: "one", I: 2, i: 3},
+ &X0{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "save string/int load myString/int32",
+ &X0{S: "one", I: 2, i: 3},
+ &X1{S: "one", I: 2},
+ "",
+ "",
+ },
+ {
+ "missing fields",
+ &X0{S: "one", I: 2, i: 3},
+ &X2{},
+ "",
+ "no such struct field",
+ },
+ {
+ "save string load bool",
+ &X0{S: "one", I: 2, i: 3},
+ &X3{I: 2},
+ "",
+ "type mismatch",
+ },
+ {
+ "basic slice",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ "",
+ "",
+ },
+ {
+ "save []float64 load float64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y1{B: true},
+ "",
+ "requires a slice",
+ },
+ {
+ "save []float64 load []int64",
+ &Y0{B: true, F: []float64{7, 8, 9}},
+ &Y2{B: true},
+ "",
+ "type mismatch",
+ },
+ {
+ "single slice is too long",
+ &Y0{F: make([]float64, maxIndexedProperties+1)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "two slices are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), G: make([]float64, maxIndexedProperties)},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "one slice and one scalar are too long",
+ &Y0{F: make([]float64, maxIndexedProperties), B: true},
+ &Y0{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "long blob",
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B0{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "long []int8 is too long",
+ &B1{B: makeInt8Slice(maxIndexedProperties + 1)},
+ &B1{},
+ "too many indexed properties",
+ "",
+ },
+ {
+ "short []int8",
+ &B1{B: makeInt8Slice(3)},
+ &B1{B: makeInt8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long myBlob",
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ &B2{B: makeUint8Slice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short myBlob",
+ &B2{B: makeUint8Slice(3)},
+ &B2{B: makeUint8Slice(3)},
+ "",
+ "",
+ },
+ {
+ "long []myByte",
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ &B3{B: makeMyByteSlice(maxIndexedProperties + 1)},
+ "",
+ "",
+ },
+ {
+ "short []myByte",
+ &B3{B: makeMyByteSlice(3)},
+ &B3{B: makeMyByteSlice(3)},
+ "",
+ "",
+ },
+ {
+ "slice of blobs",
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ &B4{B: [][]byte{
+ makeUint8Slice(3),
+ makeUint8Slice(4),
+ makeUint8Slice(5),
+ }},
+ "",
+ "",
+ },
+ {
+ "short ByteString",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &B5{B: ByteString(makeUint8Slice(3))},
+ "",
+ "",
+ },
+ {
+ "short ByteString as props",
+ &B5{B: ByteString(makeUint8Slice(3))},
+ &PropertyList{
+ Property{Name: "B", Value: ByteString(makeUint8Slice(3)), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "short ByteString into string",
+ &B5{B: ByteString("legacy")},
+ &struct{ B string }{"legacy"},
+ "",
+ "",
+ },
+ {
+ "[]byte must be noindex",
+ &PropertyList{
+ Property{Name: "B", Value: makeUint8Slice(3), NoIndex: false},
+ },
+ nil,
+ "cannot index a []byte valued Property",
+ "",
+ },
+ {
+ "save tagged load props",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7},
+ &PropertyList{
+ // A and B are renamed to a and b; A and C are noindex, I is ignored.
+ // Indexed properties are loaded before raw properties. Thus, the
+ // result is: b, b, b, D, E, a, c.
+ Property{Name: "b", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(22), NoIndex: false, Multiple: true},
+ Property{Name: "b", Value: int64(23), NoIndex: false, Multiple: true},
+ Property{Name: "D", Value: int64(4), NoIndex: false, Multiple: false},
+ Property{Name: "E", Value: int64(5), NoIndex: false, Multiple: false},
+ Property{Name: "a", Value: int64(1), NoIndex: true, Multiple: false},
+ Property{Name: "C", Value: int64(3), NoIndex: true, Multiple: false},
+ Property{Name: "J", Value: int64(7), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save tagged load tagged",
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, I: 6, J: 7},
+ &Tagged{A: 1, B: []int{21, 22, 23}, C: 3, D: 4, E: 5, J: 7},
+ "",
+ "",
+ },
+ {
+ "save props load tagged",
+ &PropertyList{
+ Property{Name: "A", Value: int64(11), NoIndex: true, Multiple: false},
+ Property{Name: "a", Value: int64(12), NoIndex: true, Multiple: false},
+ },
+ &Tagged{A: 12},
+ "",
+ `cannot load field "A"`,
+ },
+ {
+ "invalid tagged1",
+ &InvalidTagged1{I: 1},
+ &InvalidTagged1{},
+ "struct tag has invalid property name",
+ "",
+ },
+ {
+ "invalid tagged2",
+ &InvalidTagged2{I: 1, J: 2},
+ &InvalidTagged2{},
+ "struct tag has repeated property name",
+ "",
+ },
+ {
+ "doubler",
+ &Doubler{S: "s", I: 1, B: true},
+ &Doubler{S: "ss", I: 2, B: true},
+ "",
+ "",
+ },
+ {
+ "save struct load props",
+ &X0{S: "s", I: 1},
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load struct",
+ &PropertyList{
+ Property{Name: "S", Value: "s", NoIndex: false, Multiple: false},
+ Property{Name: "I", Value: int64(1), NoIndex: false, Multiple: false},
+ },
+ &X0{S: "s", I: 1},
+ "",
+ "",
+ },
+ {
+ "nil-value props",
+ &PropertyList{
+ Property{Name: "I", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "B", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "S", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "F", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "K", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "T", Value: nil, NoIndex: false, Multiple: false},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: int64(7), NoIndex: false, Multiple: true},
+ Property{Name: "J", Value: nil, NoIndex: false, Multiple: true},
+ },
+ &struct {
+ I int64
+ B bool
+ S string
+ F float64
+ K *Key
+ T time.Time
+ J []int64
+ }{
+ J: []int64{0, 7, 0},
+ },
+ "",
+ "",
+ },
+ {
+ "save outer load props",
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load outer-equivalent",
+ &PropertyList{
+ Property{Name: "A", Value: int64(1), NoIndex: false, Multiple: false},
+ Property{Name: "I.W", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "ten", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "twenty", NoIndex: false, Multiple: true},
+ Property{Name: "I.W", Value: int64(30), NoIndex: false, Multiple: true},
+ Property{Name: "I.X", Value: "thirty", NoIndex: false, Multiple: true},
+ Property{Name: "J.Y", Value: float64(3.14), NoIndex: false, Multiple: false},
+ Property{Name: "Z", Value: true, NoIndex: false, Multiple: false},
+ },
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ "",
+ "",
+ },
+ {
+ "save outer-equivalent load outer",
+ &OuterEquivalent{
+ A: 1,
+ IDotW: []int32{10, 20, 30},
+ IDotX: []string{"ten", "twenty", "thirty"},
+ JDotY: 3.14,
+ Z: true,
+ },
+ &Outer{
+ A: 1,
+ I: []Inner1{
+ {10, "ten"},
+ {20, "twenty"},
+ {30, "thirty"},
+ },
+ J: Inner2{
+ Y: 3.14,
+ },
+ Inner3: Inner3{
+ Z: true,
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names save",
+ &Dotted{A: DottedA{B: DottedB{C: 88}}},
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(88), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "dotted names load",
+ &PropertyList{
+ Property{Name: "A0.A1.A2.B3.C4.C5", Value: int64(99), NoIndex: false, Multiple: false},
+ },
+ &Dotted{A: DottedA{B: DottedB{C: 99}}},
+ "",
+ "",
+ },
+ {
+ "save struct load deriver",
+ &X0{S: "s", I: 1},
+ &Deriver{S: "s", Derived: "derived+s"},
+ "",
+ "",
+ },
+ {
+ "save deriver load struct",
+ &Deriver{S: "s", Derived: "derived+s", Ignored: "ignored"},
+ &X0{S: "s"},
+ "",
+ "",
+ },
+ {
+ "bad multi-prop entity",
+ &BadMultiPropEntity{},
+ &BadMultiPropEntity{},
+ "Multiple is false",
+ "",
+ },
+ // Regression: CL 25062824 broke handling of appengine.BlobKey fields.
+ {
+ "appengine.BlobKey",
+ &BK{Key: "blah"},
+ &BK{Key: "blah"},
+ "",
+ "",
+ },
+ {
+ "zero time.Time",
+ &T{T: time.Time{}},
+ &T{T: time.Time{}},
+ "",
+ "",
+ },
+ {
+ "time.Time near Unix zero time",
+ &T{T: time.Unix(0, 4e3)},
+ &T{T: time.Unix(0, 4e3)},
+ "",
+ "",
+ },
+ {
+ "time.Time, far in the future",
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{T: time.Date(99999, 1, 1, 0, 0, 0, 0, time.UTC)},
+ "",
+ "",
+ },
+ {
+ "time.Time, very far in the past",
+ &T{T: time.Date(-300000, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "time.Time, very far in the future",
+ &T{T: time.Date(294248, 1, 1, 0, 0, 0, 0, time.UTC)},
+ &T{},
+ "time value out of range",
+ "",
+ },
+ {
+ "structs",
+ &N0{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: X0{S: "four", I: 5, i: 6},
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N0{
+ X0: X0{S: "one", I: 2},
+ Nonymous: X0{S: "four", I: 5},
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "slice of structs",
+ &N1{
+ X0: X0{S: "one", I: 2, i: 3},
+ Nonymous: []X0{
+ {S: "four", I: 5, i: 6},
+ {S: "seven", I: 8, i: 9},
+ {S: "ten", I: 11, i: 12},
+ {S: "thirteen", I: 14, i: 15},
+ },
+ Ignore: "ignore",
+ Other: "other",
+ },
+ &N1{
+ X0: X0{S: "one", I: 2},
+ Nonymous: []X0{
+ {S: "four", I: 5},
+ {S: "seven", I: 8},
+ {S: "ten", I: 11},
+ {S: "thirteen", I: 14},
+ },
+ Other: "other",
+ },
+ "",
+ "",
+ },
+ {
+ "structs with slices of structs",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs load props",
+ &N2{
+ N1: N1{
+ X0: X0{S: "rouge"},
+ Nonymous: []X0{
+ {S: "rosso0"},
+ {S: "rosso1"},
+ },
+ },
+ Green: N1{
+ X0: X0{S: "vert"},
+ Nonymous: []X0{
+ {S: "verde0"},
+ {S: "verde1"},
+ {S: "verde2"},
+ },
+ },
+ Blue: N1{
+ X0: X0{S: "bleu"},
+ Nonymous: []X0{
+ {S: "blu0"},
+ {S: "blu1"},
+ {S: "blu2"},
+ {S: "blu3"},
+ },
+ },
+ },
+ &PropertyList{
+ Property{Name: "red.S", Value: "rouge", NoIndex: false, Multiple: false},
+ Property{Name: "red.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "red.Nonymous.S", Value: "rosso0", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.S", Value: "rosso1", NoIndex: false, Multiple: true},
+ Property{Name: "red.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "red.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "green.S", Value: "vert", NoIndex: false, Multiple: false},
+ Property{Name: "green.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.S", Value: "verde0", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde1", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.S", Value: "verde2", NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "green.Other", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.S", Value: "bleu", NoIndex: false, Multiple: false},
+ Property{Name: "Blue.I", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "Blue.Nonymous.S", Value: "blu0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu2", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blu3", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(0), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Other", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "save props load structs with ragged fields",
+ &PropertyList{
+ Property{Name: "red.S", Value: "rot", NoIndex: false, Multiple: false},
+ Property{Name: "green.Nonymous.I", Value: int64(10), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(11), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(12), NoIndex: false, Multiple: true},
+ Property{Name: "green.Nonymous.I", Value: int64(13), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau0", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(20), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau1", NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.I", Value: int64(21), NoIndex: false, Multiple: true},
+ Property{Name: "Blue.Nonymous.S", Value: "blau2", NoIndex: false, Multiple: true},
+ },
+ &N2{
+ N1: N1{
+ X0: X0{S: "rot"},
+ },
+ Green: N1{
+ Nonymous: []X0{
+ {I: 10},
+ {I: 11},
+ {I: 12},
+ {I: 13},
+ },
+ },
+ Blue: N1{
+ Nonymous: []X0{
+ {S: "blau0", I: 20},
+ {S: "blau1", I: 21},
+ {S: "blau2"},
+ },
+ },
+ },
+ "",
+ "",
+ },
+ {
+ "save structs with noindex tags",
+ &struct {
+ A struct {
+ X string `datastore:",noindex"`
+ Y string
+ } `datastore:",noindex"`
+ B struct {
+ X string `datastore:",noindex"`
+ Y string
+ }
+ }{},
+ &PropertyList{
+ Property{Name: "B.Y", Value: "", NoIndex: false, Multiple: false},
+ Property{Name: "A.X", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "A.Y", Value: "", NoIndex: true, Multiple: false},
+ Property{Name: "B.X", Value: "", NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "embedded struct with name override",
+ &struct {
+ Inner1 `datastore:"foo"`
+ }{},
+ &PropertyList{
+ Property{Name: "foo.W", Value: int64(0), NoIndex: false, Multiple: false},
+ Property{Name: "foo.X", Value: "", NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "slice of slices",
+ &SliceOfSlices{},
+ nil,
+ "flattening nested structs leads to a slice of slices",
+ "",
+ },
+ {
+ "recursive struct",
+ &Recursive{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "mutually recursive struct",
+ &MutuallyRecursive0{},
+ nil,
+ "recursive struct",
+ "",
+ },
+ {
+ "non-exported struct fields",
+ &struct {
+ i, J int64
+ }{i: 1, J: 2},
+ &PropertyList{
+ Property{Name: "J", Value: int64(2), NoIndex: false, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage",
+ &struct {
+ J json.RawMessage
+ }{
+ J: json.RawMessage("rawr"),
+ },
+ &PropertyList{
+ Property{Name: "J", Value: []byte("rawr"), NoIndex: true, Multiple: false},
+ },
+ "",
+ "",
+ },
+ {
+ "json.RawMessage to myBlob",
+ &struct {
+ B json.RawMessage
+ }{
+ B: json.RawMessage("rawr"),
+ },
+ &B2{B: myBlob("rawr")},
+ "",
+ "",
+ },
+}
+
+// checkErr returns the empty string if either both want and err are zero,
+// or if want is a non-empty substring of err's string representation.
+func checkErr(want string, err error) string {
+ if err != nil {
+ got := err.Error()
+ if want == "" || strings.Index(got, want) == -1 {
+ return got
+ }
+ } else if want != "" {
+ return fmt.Sprintf("want error %q", want)
+ }
+ return ""
+}
+
// TestRoundTrip saves each test case's src entity and reloads it into a
// fresh value of want's type, checking the expected save error, the
// expected load error, and the final loaded value.
func TestRoundTrip(t *testing.T) {
	for _, tc := range testCases {
		p, err := saveEntity(testAppID, testKey0, tc.src)
		if s := checkErr(tc.putErr, err); s != "" {
			t.Errorf("%s: save: %s", tc.desc, s)
			continue
		}
		// No entity proto means there is nothing to load back.
		if p == nil {
			continue
		}
		// Load into a *PropertyList directly, or into a fresh zero value
		// of the same concrete type as tc.want.
		var got interface{}
		if _, ok := tc.want.(*PropertyList); ok {
			got = new(PropertyList)
		} else {
			got = reflect.New(reflect.TypeOf(tc.want).Elem()).Interface()
		}
		err = loadEntity(got, p)
		if s := checkErr(tc.getErr, err); s != "" {
			t.Errorf("%s: load: %s", tc.desc, s)
			continue
		}
		equal := false
		if gotT, ok := got.(*T); ok {
			// Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
			// We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
			equal = gotT.T.Equal(tc.want.(*T).T)
		} else {
			equal = reflect.DeepEqual(got, tc.want)
		}
		if !equal {
			t.Errorf("%s: compare: got %v want %v", tc.desc, got, tc.want)
			continue
		}
	}
}
+
// TestQueryConstruction verifies that NewQuery plus Filter/Order calls
// build the expected internal Query state — including trimming of
// whitespace around field names and operators — and that malformed
// filter/order strings record the expected construction error.
func TestQueryConstruction(t *testing.T) {
	tests := []struct {
		q, exp *Query
		err string
	}{
		{
			q: NewQuery("Foo"),
			exp: &Query{
				kind: "Foo",
				limit: -1,
			},
		},
		{
			// Regular filtered query with standard spacing.
			q: NewQuery("Foo").Filter("foo >", 7),
			exp: &Query{
				kind: "Foo",
				filter: []filter{
					{
						FieldName: "foo",
						Op: greaterThan,
						Value: 7,
					},
				},
				limit: -1,
			},
		},
		{
			// Filtered query with no spacing.
			q: NewQuery("Foo").Filter("foo=", 6),
			exp: &Query{
				kind: "Foo",
				filter: []filter{
					{
						FieldName: "foo",
						Op: equal,
						Value: 6,
					},
				},
				limit: -1,
			},
		},
		{
			// Filtered query with funky spacing.
			q: NewQuery("Foo").Filter(" foo< ", 8),
			exp: &Query{
				kind: "Foo",
				filter: []filter{
					{
						FieldName: "foo",
						Op: lessThan,
						Value: 8,
					},
				},
				limit: -1,
			},
		},
		{
			// Filtered query with multicharacter op.
			q: NewQuery("Foo").Filter("foo >=", 9),
			exp: &Query{
				kind: "Foo",
				filter: []filter{
					{
						FieldName: "foo",
						Op: greaterEq,
						Value: 9,
					},
				},
				limit: -1,
			},
		},
		{
			// Query with ordering.
			q: NewQuery("Foo").Order("bar"),
			exp: &Query{
				kind: "Foo",
				order: []order{
					{
						FieldName: "bar",
						Direction: ascending,
					},
				},
				limit: -1,
			},
		},
		{
			// Query with reverse ordering, and funky spacing.
			q: NewQuery("Foo").Order(" - bar"),
			exp: &Query{
				kind: "Foo",
				order: []order{
					{
						FieldName: "bar",
						Direction: descending,
					},
				},
				limit: -1,
			},
		},
		{
			// Query with an empty ordering.
			q: NewQuery("Foo").Order(""),
			err: "empty order",
		},
		{
			// Query with a + ordering.
			q: NewQuery("Foo").Order("+bar"),
			err: "invalid order",
		},
	}
	for i, test := range tests {
		// A query that failed to construct records the error on the
		// Query value itself.
		if test.q.err != nil {
			got := test.q.err.Error()
			if !strings.Contains(got, test.err) {
				t.Errorf("%d: error mismatch: got %q want something containing %q", i, got, test.err)
			}
			continue
		}
		if !reflect.DeepEqual(test.q, test.exp) {
			t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
		}
	}
}
+
// TestStringMeaning checks the proto "meaning" annotation attached to
// string-like properties: a plain indexed string gets no meaning, an
// unindexed string is marked TEXT, and []byte values (always unindexed)
// are marked BLOB.
func TestStringMeaning(t *testing.T) {
	var xx [4]interface{}
	xx[0] = &struct {
		X string
	}{"xx0"}
	xx[1] = &struct {
		X string `datastore:",noindex"`
	}{"xx1"}
	xx[2] = &struct {
		X []byte
	}{[]byte("xx2")}
	xx[3] = &struct {
		X []byte `datastore:",noindex"`
	}{[]byte("xx3")}

	indexed := [4]bool{
		true,
		false,
		false, // A []byte is always no-index.
		false,
	}
	want := [4]pb.Property_Meaning{
		pb.Property_NO_MEANING,
		pb.Property_TEXT,
		pb.Property_BLOB,
		pb.Property_BLOB,
	}

	for i, x := range xx {
		props, err := SaveStruct(x)
		if err != nil {
			t.Errorf("i=%d: SaveStruct: %v", i, err)
			continue
		}
		e, err := propertiesToProto("appID", testKey0, props)
		if err != nil {
			t.Errorf("i=%d: propertiesToProto: %v", i, err)
			continue
		}
		// Indexed values land in Property, unindexed ones in RawProperty;
		// each case expects exactly one property in the relevant slice.
		var p *pb.Property
		switch {
		case indexed[i] && len(e.Property) == 1:
			p = e.Property[0]
		case !indexed[i] && len(e.RawProperty) == 1:
			p = e.RawProperty[0]
		default:
			t.Errorf("i=%d: EntityProto did not have expected property slice", i)
			continue
		}
		if got := p.GetMeaning(); got != want[i] {
			t.Errorf("i=%d: meaning: got %v, want %v", i, got, want[i])
			continue
		}
	}
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/doc.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/doc.go
new file mode 100644
index 000000000000..38164e936e6f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/doc.go
@@ -0,0 +1,316 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package datastore provides a client for App Engine's datastore service.
+
+
+Basic Operations
+
+Entities are the unit of storage and are associated with a key. A key
+consists of an optional parent key, a string application ID, a string kind
+(also known as an entity type), and either a StringID or an IntID. A
+StringID is also known as an entity name or key name.
+
+It is valid to create a key with a zero StringID and a zero IntID; this is
+called an incomplete key, and does not refer to any saved entity. Putting an
+entity into the datastore under an incomplete key will cause a unique key
+to be generated for that entity, with a non-zero IntID.
+
+An entity's contents are a mapping from case-sensitive field names to values.
+Valid value types are:
+ - signed integers (int, int8, int16, int32 and int64),
+ - bool,
+ - string,
+ - float32 and float64,
+ - []byte (up to 1 megabyte in length),
+ - any type whose underlying type is one of the above predeclared types,
+ - ByteString,
+ - *Key,
+ - time.Time (stored with microsecond precision),
+ - appengine.BlobKey,
+ - appengine.GeoPoint,
+ - structs whose fields are all valid value types,
+ - slices of any of the above.
+
+Slices of structs are valid, as are structs that contain slices. However, if
+one struct contains another, then at most one of those can be repeated. This
+disqualifies recursively defined struct types: any struct T that (directly or
+indirectly) contains a []T.
+
+The Get and Put functions load and save an entity's contents. An entity's
+contents are typically represented by a struct pointer.
+
+Example code:
+
+ type Entity struct {
+ Value string
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+
+ k := datastore.NewKey(c, "Entity", "stringID", 0, nil)
+ e := new(Entity)
+ if err := datastore.Get(c, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ old := e.Value
+ e.Value = r.URL.Path
+
+ if _, err := datastore.Put(c, k, e); err != nil {
+ http.Error(w, err.Error(), 500)
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "old=%q\nnew=%q\n", old, e.Value)
+ }
+
+GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
+Delete functions. They take a []*Key instead of a *Key, and may return an
+appengine.MultiError when encountering partial failure.
+
+
+Properties
+
+An entity's contents can be represented by a variety of types. These are
+typically struct pointers, but can also be any type that implements the
+PropertyLoadSaver interface. If using a struct pointer, you do not have to
+explicitly implement the PropertyLoadSaver interface; the datastore will
+automatically convert via reflection. If a struct pointer does implement that
+interface then those methods will be used in preference to the default
+behavior for struct pointers. Struct pointers are more strongly typed and are
+easier to use; PropertyLoadSavers are more flexible.
+
+The actual types passed do not have to match between Get and Put calls or even
+across different App Engine requests. It is valid to put a *PropertyList and
+get that same entity as a *myStruct, or put a *myStruct0 and get a *myStruct1.
+Conceptually, any entity is saved as a sequence of properties, and is loaded
+into the destination value on a property-by-property basis. When loading into
+a struct pointer, an entity that cannot be completely represented (such as a
+missing field) will result in an ErrFieldMismatch error but it is up to the
+caller whether this error is fatal, recoverable or ignorable.
+
+By default, for struct pointers, all properties are potentially indexed, and
+the property name is the same as the field name (and hence must start with an
+upper case letter). Fields may have a `datastore:"name,options"` tag. The tag
+name is the property name, which must be one or more valid Go identifiers
+joined by ".", but may start with a lower case letter. An empty tag name means
+to just use the field name. A "-" tag name means that the datastore will
+ignore that field. If options is "noindex" then the field will not be indexed.
+If the options is "" then the comma may be omitted. There are no other
+recognized options.
+
+Fields (except for []byte) are indexed by default. Strings longer than 500
+characters cannot be indexed; fields used to store long strings should be
+tagged with "noindex". Similarly, ByteStrings longer than 500 bytes cannot be
+indexed.
+
+Example code:
+
+ // A and B are renamed to a and b.
+ // A, C and J are not indexed.
+ // D's tag is equivalent to having no tag at all (E).
+ // I is ignored entirely by the datastore.
+ // J has tag information for both the datastore and json packages.
+ type TaggedStruct struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ }
+
+
+Structured Properties
+
+If the struct pointed to contains other structs, then the nested or embedded
+structs are flattened. For example, given these definitions:
+
+ type Inner1 struct {
+ W int32
+ X string
+ }
+
+ type Inner2 struct {
+ Y float64
+ }
+
+ type Inner3 struct {
+ Z bool
+ }
+
+ type Outer struct {
+ A int16
+ I []Inner1
+ J Inner2
+ Inner3
+ }
+
+then an Outer's properties would be equivalent to those of:
+
+ type OuterEquivalent struct {
+ A int16
+ IDotW []int32 `datastore:"I.W"`
+ IDotX []string `datastore:"I.X"`
+ JDotY float64 `datastore:"J.Y"`
+ Z bool
+ }
+
+If Outer's embedded Inner3 field was tagged as `datastore:"Foo"` then the
+equivalent field would instead be: FooDotZ bool `datastore:"Foo.Z"`.
+
+If an outer struct is tagged "noindex" then all of its implicit flattened
+fields are effectively "noindex".
+
+
+The PropertyLoadSaver Interface
+
+An entity's contents can also be represented by any type that implements the
+PropertyLoadSaver interface. This type may be a struct pointer, but it does
+not have to be. The datastore package will call Load when getting the entity's
+contents, and Save when putting the entity's contents.
+Possible uses include deriving non-stored fields, verifying fields, or indexing
+a field only if its value is positive.
+
+Example code:
+
+ type CustomPropsExample struct {
+ I, J int
+ // Sum is not stored, but should always be equal to I + J.
+ Sum int `datastore:"-"`
+ }
+
+ func (x *CustomPropsExample) Load(c <-chan Property) error {
+ // Load I and J as usual.
+ if err := datastore.LoadStruct(x, c); err != nil {
+ return err
+ }
+ // Derive the Sum field.
+ x.Sum = x.I + x.J
+ return nil
+ }
+
+ func (x *CustomPropsExample) Save(c chan<- Property) error {
+ defer close(c)
+ // Validate the Sum field.
+ if x.Sum != x.I + x.J {
+ return errors.New("CustomPropsExample has inconsistent sum")
+ }
+ // Save I and J as usual. The code below is equivalent to calling
+ // "return datastore.SaveStruct(x, c)", but is done manually for
+ // demonstration purposes.
+ c <- datastore.Property{
+ Name: "I",
+ Value: int64(x.I),
+ }
+ c <- datastore.Property{
+ Name: "J",
+ Value: int64(x.J),
+ }
+ return nil
+ }
+
+The *PropertyList type implements PropertyLoadSaver, and can therefore hold an
+arbitrary entity's contents.
+
+
+Queries
+
+Queries retrieve entities based on their properties or key's ancestry. Running
+a query yields an iterator of results: either keys or (key, entity) pairs.
+Queries are re-usable and it is safe to call Query.Run from concurrent
+goroutines. Iterators are not safe for concurrent use.
+
+Queries are immutable, and are either created by calling NewQuery, or derived
+from an existing query by calling a method like Filter or Order that returns a
+new query value. A query is typically constructed by calling NewQuery followed
+by a chain of zero or more such methods. These methods are:
+ - Ancestor and Filter constrain the entities returned by running a query.
+ - Order affects the order in which they are returned.
+ - Project constrains the fields returned.
+ - Distinct de-duplicates projected entities.
+ - KeysOnly makes the iterator return only keys, not (key, entity) pairs.
+ - Start, End, Offset and Limit define which sub-sequence of matching entities
+ to return. Start and End take cursors, Offset and Limit take integers. Start
+ and Offset affect the first result, End and Limit affect the last result.
+ If both Start and Offset are set, then the offset is relative to Start.
+ If both End and Limit are set, then the earliest constraint wins. Limit is
+ relative to Start+Offset, not relative to End. As a special case, a
+ negative limit means unlimited.
+
+Example code:
+
+ type Widget struct {
+ Description string
+ Price int
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ q := datastore.NewQuery("Widget").
+ Filter("Price <", 1000).
+ Order("-Price")
+ b := new(bytes.Buffer)
+ for t := q.Run(c); ; {
+ var x Widget
+ key, err := t.Next(&x)
+ if err == datastore.Done {
+ break
+ }
+ if err != nil {
+ serveError(c, w, err)
+ return
+ }
+ fmt.Fprintf(b, "Key=%v\nWidget=%#v\n\n", key, x)
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ io.Copy(w, b)
+ }
+
+
+Transactions
+
+RunInTransaction runs a function in a transaction.
+
+Example code:
+
+ type Counter struct {
+ Count int
+ }
+
+ func inc(c appengine.Context, key *datastore.Key) (int, error) {
+ var x Counter
+ if err := datastore.Get(c, key, &x); err != nil && err != datastore.ErrNoSuchEntity {
+ return 0, err
+ }
+ x.Count++
+ if _, err := datastore.Put(c, key, &x); err != nil {
+ return 0, err
+ }
+ return x.Count, nil
+ }
+
+ func handle(w http.ResponseWriter, r *http.Request) {
+ c := appengine.NewContext(r)
+ var count int
+ err := datastore.RunInTransaction(c, func(c appengine.Context) error {
+ var err1 error
+ count, err1 = inc(c, datastore.NewKey(c, "Counter", "singleton", 0, nil))
+ return err1
+ }, nil)
+ if err != nil {
+ serveError(c, w, err)
+ return
+ }
+ w.Header().Set("Content-Type", "text/plain; charset=utf-8")
+ fmt.Fprintf(w, "Count=%d", count)
+ }
+*/
+package datastore // import "google.golang.org/appengine/datastore"
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/key.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/key.go
new file mode 100644
index 000000000000..77dd9e03bea3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/key.go
@@ -0,0 +1,309 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/base64"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "strconv"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// Key represents the datastore key for a stored entity, and is immutable.
+type Key struct {
+ kind string
+ stringID string
+ intID int64
+ parent *Key
+ appID string
+ namespace string
+}
+
+// Kind returns the key's kind (also known as entity type).
+func (k *Key) Kind() string {
+ return k.kind
+}
+
+// StringID returns the key's string ID (also known as an entity name or key
+// name), which may be "".
+func (k *Key) StringID() string {
+ return k.stringID
+}
+
+// IntID returns the key's integer ID, which may be 0.
+func (k *Key) IntID() int64 {
+ return k.intID
+}
+
+// Parent returns the key's parent key, which may be nil.
+func (k *Key) Parent() *Key {
+ return k.parent
+}
+
+// AppID returns the key's application ID.
+func (k *Key) AppID() string {
+ return k.appID
+}
+
+// Namespace returns the key's namespace.
+func (k *Key) Namespace() string {
+ return k.namespace
+}
+
+// Incomplete returns whether the key does not refer to a stored entity.
+// In particular, whether the key has a zero StringID and a zero IntID.
+func (k *Key) Incomplete() bool {
+ return k.stringID == "" && k.intID == 0
+}
+
+// valid returns whether the key is valid.
+func (k *Key) valid() bool {
+ if k == nil {
+ return false
+ }
+ for ; k != nil; k = k.parent {
+ if k.kind == "" || k.appID == "" {
+ return false
+ }
+ if k.stringID != "" && k.intID != 0 {
+ return false
+ }
+ if k.parent != nil {
+ if k.parent.Incomplete() {
+ return false
+ }
+ if k.parent.appID != k.appID || k.parent.namespace != k.namespace {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// Equal returns whether two keys are equal.
+func (k *Key) Equal(o *Key) bool {
+ for k != nil && o != nil {
+ if k.kind != o.kind || k.stringID != o.stringID || k.intID != o.intID || k.appID != o.appID || k.namespace != o.namespace {
+ return false
+ }
+ k, o = k.parent, o.parent
+ }
+ return k == o
+}
+
+// root returns the furthest ancestor of a key, which may be itself.
+func (k *Key) root() *Key {
+ for k.parent != nil {
+ k = k.parent
+ }
+ return k
+}
+
+// marshal marshals the key's string representation to the buffer.
+func (k *Key) marshal(b *bytes.Buffer) {
+ if k.parent != nil {
+ k.parent.marshal(b)
+ }
+ b.WriteByte('/')
+ b.WriteString(k.kind)
+ b.WriteByte(',')
+ if k.stringID != "" {
+ b.WriteString(k.stringID)
+ } else {
+ b.WriteString(strconv.FormatInt(k.intID, 10))
+ }
+}
+
+// String returns a string representation of the key.
+func (k *Key) String() string {
+ if k == nil {
+ return ""
+ }
+ b := bytes.NewBuffer(make([]byte, 0, 512))
+ k.marshal(b)
+ return b.String()
+}
+
+type gobKey struct {
+ Kind string
+ StringID string
+ IntID int64
+ Parent *gobKey
+ AppID string
+ Namespace string
+}
+
+func keyToGobKey(k *Key) *gobKey {
+ if k == nil {
+ return nil
+ }
+ return &gobKey{
+ Kind: k.kind,
+ StringID: k.stringID,
+ IntID: k.intID,
+ Parent: keyToGobKey(k.parent),
+ AppID: k.appID,
+ Namespace: k.namespace,
+ }
+}
+
+func gobKeyToKey(gk *gobKey) *Key {
+ if gk == nil {
+ return nil
+ }
+ return &Key{
+ kind: gk.Kind,
+ stringID: gk.StringID,
+ intID: gk.IntID,
+ parent: gobKeyToKey(gk.Parent),
+ appID: gk.AppID,
+ namespace: gk.Namespace,
+ }
+}
+
+func (k *Key) GobEncode() ([]byte, error) {
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(keyToGobKey(k)); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+func (k *Key) GobDecode(buf []byte) error {
+ gk := new(gobKey)
+ if err := gob.NewDecoder(bytes.NewBuffer(buf)).Decode(gk); err != nil {
+ return err
+ }
+ *k = *gobKeyToKey(gk)
+ return nil
+}
+
+func (k *Key) MarshalJSON() ([]byte, error) {
+ return []byte(`"` + k.Encode() + `"`), nil
+}
+
+func (k *Key) UnmarshalJSON(buf []byte) error {
+ if len(buf) < 2 || buf[0] != '"' || buf[len(buf)-1] != '"' {
+ return errors.New("datastore: bad JSON key")
+ }
+ k2, err := DecodeKey(string(buf[1 : len(buf)-1]))
+ if err != nil {
+ return err
+ }
+ *k = *k2
+ return nil
+}
+
+// Encode returns an opaque representation of the key
+// suitable for use in HTML and URLs.
+// This is compatible with the Python and Java runtimes.
+func (k *Key) Encode() string {
+ ref := keyToProto("", k)
+
+ b, err := proto.Marshal(ref)
+ if err != nil {
+ panic(err)
+ }
+
+ // Trailing padding is stripped.
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// DecodeKey decodes a key from the opaque representation returned by Encode.
+func DecodeKey(encoded string) (*Key, error) {
+ // Re-add padding.
+ if m := len(encoded) % 4; m != 0 {
+ encoded += strings.Repeat("=", 4-m)
+ }
+
+ b, err := base64.URLEncoding.DecodeString(encoded)
+ if err != nil {
+ return nil, err
+ }
+
+ ref := new(pb.Reference)
+ if err := proto.Unmarshal(b, ref); err != nil {
+ return nil, err
+ }
+
+ return protoToKey(ref)
+}
+
+// NewIncompleteKey creates a new incomplete key.
+// kind cannot be empty.
+func NewIncompleteKey(c appengine.Context, kind string, parent *Key) *Key {
+ return NewKey(c, kind, "", 0, parent)
+}
+
+// NewKey creates a new key.
+// kind cannot be empty.
+// Either one or both of stringID and intID must be zero. If both are zero,
+// the key returned is incomplete.
+// parent must either be a complete key or nil.
+func NewKey(c appengine.Context, kind, stringID string, intID int64, parent *Key) *Key {
+ // If there's a parent key, use its namespace.
+ // Otherwise, do a fake RPC to try to get a namespace if c is a namespacedContext (or wraps one).
+ var namespace string
+ if parent != nil {
+ namespace = parent.namespace
+ } else {
+ namespace = internal.VirtAPI(c, "GetNamespace")
+ }
+
+ return &Key{
+ kind: kind,
+ stringID: stringID,
+ intID: intID,
+ parent: parent,
+ appID: c.FullyQualifiedAppID(),
+ namespace: namespace,
+ }
+}
+
+// AllocateIDs returns a range of n integer IDs with the given kind and parent
+// combination. kind cannot be empty; parent may be nil. The IDs in the range
+// returned will not be used by the datastore's automatic ID sequence generator
+// and may be used with NewKey without conflict.
+//
+// The range is inclusive at the low end and exclusive at the high end. In
+// other words, valid intIDs x satisfy low <= x && x < high.
+//
+// If no error is returned, low + n == high.
+func AllocateIDs(c appengine.Context, kind string, parent *Key, n int) (low, high int64, err error) {
+ if kind == "" {
+ return 0, 0, errors.New("datastore: AllocateIDs given an empty kind")
+ }
+ if n < 0 {
+ return 0, 0, fmt.Errorf("datastore: AllocateIDs given a negative count: %d", n)
+ }
+ if n == 0 {
+ return 0, 0, nil
+ }
+ req := &pb.AllocateIdsRequest{
+ ModelKey: keyToProto("", NewIncompleteKey(c, kind, parent)),
+ Size: proto.Int64(int64(n)),
+ }
+ res := &pb.AllocateIdsResponse{}
+ if err := c.Call("datastore_v3", "AllocateIds", req, res, nil); err != nil {
+ return 0, 0, err
+ }
+ // The protobuf is inclusive at both ends. Idiomatic Go (e.g. slices, for loops)
+ // is inclusive at the low end and exclusive at the high end, so we add 1.
+ low = res.GetStart()
+ high = res.GetEnd() + 1
+ if low+int64(n) != high {
+ return 0, 0, fmt.Errorf("datastore: internal error: could not allocate %d IDs", n)
+ }
+ return low, high, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/key_test.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/key_test.go
new file mode 100644
index 000000000000..0944983ea783
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/key_test.go
@@ -0,0 +1,214 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+)
+
+func TestKeyEncoding(t *testing.T) {
+ testCases := []struct {
+ desc string
+ key *Key
+ exp string
+ }{
+ {
+ desc: "A simple key with an int ID",
+ key: &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIMCxIGUGVyc29uGAEM",
+ },
+ {
+ desc: "A simple key with a string ID",
+ key: &Key{
+ kind: "Graph",
+ stringID: "graph:7-day-active",
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIdCxIFR3JhcGgiEmdyYXBoOjctZGF5LWFjdGl2ZQw",
+ },
+ {
+ desc: "A key with a parent",
+ key: &Key{
+ kind: "WordIndex",
+ intID: 1033,
+ parent: &Key{
+ kind: "WordIndex",
+ intID: 1020032,
+ appID: "glibrary",
+ },
+ appID: "glibrary",
+ },
+ exp: "aghnbGlicmFyeXIhCxIJV29yZEluZGV4GIChPgwLEglXb3JkSW5kZXgYiQgM",
+ },
+ }
+ for _, tc := range testCases {
+ enc := tc.key.Encode()
+ if enc != tc.exp {
+ t.Errorf("%s: got %q, want %q", tc.desc, enc, tc.exp)
+ }
+
+ key, err := DecodeKey(tc.exp)
+ if err != nil {
+ t.Errorf("%s: failed decoding key: %v", tc.desc, err)
+ continue
+ }
+ if !key.Equal(tc.key) {
+ t.Errorf("%s: decoded key %v, want %v", tc.desc, key, tc.key)
+ }
+ }
+}
+
+func TestKeyGob(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 3,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(k); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ k2 := new(Key)
+ if err := gob.NewDecoder(buf).Decode(k2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("gob round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyGob(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(s1); err != nil {
+ t.Fatalf("gob encode failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := gob.NewDecoder(buf).Decode(s2); err != nil {
+ t.Fatalf("gob decode failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("gob round trip of nil key produced %v", s2.Key)
+ }
+}
+
+func TestKeyJSON(t *testing.T) {
+ k := &Key{
+ kind: "Gopher",
+ intID: 2,
+ parent: &Key{
+ kind: "Mom",
+ stringID: "narwhal",
+ appID: "gopher-con",
+ },
+ appID: "gopher-con",
+ }
+ exp := `"` + k.Encode() + `"`
+
+ buf, err := json.Marshal(k)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+ if s := string(buf); s != exp {
+ t.Errorf("JSON encoding of key %v: got %q, want %q", k, s, exp)
+ }
+
+ k2 := new(Key)
+ if err := json.Unmarshal(buf, k2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if !k2.Equal(k) {
+ t.Errorf("JSON round trip of %v produced %v", k, k2)
+ }
+}
+
+func TestNilKeyJSON(t *testing.T) {
+ type S struct {
+ Key *Key
+ }
+ s1 := new(S)
+
+ buf, err := json.Marshal(s1)
+ if err != nil {
+ t.Fatalf("json.Marshal failed: %v", err)
+ }
+
+ s2 := new(S)
+ if err := json.Unmarshal(buf, s2); err != nil {
+ t.Fatalf("json.Unmarshal failed: %v", err)
+ }
+ if s2.Key != nil {
+ t.Errorf("JSON round trip of nil key produced %v", s2.Key)
+ }
+}
+
+type fakeKeyer struct {
+ appengine.Context
+}
+
+func (fakeKeyer) FullyQualifiedAppID() string { return "s~some-app" }
+func (fakeKeyer) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
+ return nil
+}
+
+func TestIncompleteKeyWithParent(t *testing.T) {
+ var c appengine.Context = fakeKeyer{}
+
+ // fadduh is a complete key.
+ fadduh := NewKey(c, "Person", "", 1, nil)
+ if fadduh.Incomplete() {
+ t.Fatalf("fadduh is incomplete")
+ }
+
+ // robert is an incomplete key with fadduh as a parent.
+ robert := NewIncompleteKey(c, "Person", fadduh)
+ if !robert.Incomplete() {
+ t.Fatalf("robert is complete")
+ }
+
+ // Both should be valid keys.
+ if !fadduh.valid() {
+ t.Errorf("fadduh is invalid: %v", fadduh)
+ }
+ if !robert.valid() {
+ t.Errorf("robert is invalid: %v", robert)
+ }
+}
+
+func TestNamespace(t *testing.T) {
+ key := &Key{
+ kind: "Person",
+ intID: 1,
+ appID: "s~some-app",
+ namespace: "mynamespace",
+ }
+ if g, w := key.Namespace(), "mynamespace"; g != w {
+ t.Errorf("key.Namespace() = %q, want %q", g, w)
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/load.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/load.go
new file mode 100644
index 000000000000..3f3c80c36d5e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/load.go
@@ -0,0 +1,334 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "time"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var (
+ typeOfBlobKey = reflect.TypeOf(appengine.BlobKey(""))
+ typeOfByteSlice = reflect.TypeOf([]byte(nil))
+ typeOfByteString = reflect.TypeOf(ByteString(nil))
+ typeOfGeoPoint = reflect.TypeOf(appengine.GeoPoint{})
+ typeOfTime = reflect.TypeOf(time.Time{})
+)
+
+// typeMismatchReason returns a string explaining why the property p could not
+// be stored in an entity field of type v.Type().
+func typeMismatchReason(p Property, v reflect.Value) string {
+ entityType := "empty"
+ switch p.Value.(type) {
+ case int64:
+ entityType = "int"
+ case bool:
+ entityType = "bool"
+ case string:
+ entityType = "string"
+ case float64:
+ entityType = "float"
+ case *Key:
+ entityType = "*datastore.Key"
+ case time.Time:
+ entityType = "time.Time"
+ case appengine.BlobKey:
+ entityType = "appengine.BlobKey"
+ case appengine.GeoPoint:
+ entityType = "appengine.GeoPoint"
+ case ByteString:
+ entityType = "datastore.ByteString"
+ case []byte:
+ entityType = "[]byte"
+ }
+ return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
+}
+
+type propertyLoader struct {
+ // m holds the number of times a substruct field like "Foo.Bar.Baz" has
+ // been seen so far. The map is constructed lazily.
+ m map[string]int
+}
+
+func (l *propertyLoader) load(codec *structCodec, structValue reflect.Value, p Property, requireSlice bool) string {
+ var v reflect.Value
+ // Traverse a struct's struct-typed fields.
+ for name := p.Name; ; {
+ decoder, ok := codec.byName[name]
+ if !ok {
+ return "no such struct field"
+ }
+ v = structValue.Field(decoder.index)
+ if !v.IsValid() {
+ return "no such struct field"
+ }
+ if !v.CanSet() {
+ return "cannot set struct field"
+ }
+
+ if decoder.substructCodec == nil {
+ break
+ }
+
+ if v.Kind() == reflect.Slice {
+ if l.m == nil {
+ l.m = make(map[string]int)
+ }
+ index := l.m[p.Name]
+ l.m[p.Name] = index + 1
+ for v.Len() <= index {
+ v.Set(reflect.Append(v, reflect.New(v.Type().Elem()).Elem()))
+ }
+ structValue = v.Index(index)
+ requireSlice = false
+ } else {
+ structValue = v
+ }
+ // Strip the "I." from "I.X".
+ name = name[len(codec.byIndex[decoder.index].name):]
+ codec = decoder.substructCodec
+ }
+
+ var slice reflect.Value
+ if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
+ slice = v
+ v = reflect.New(v.Type().Elem()).Elem()
+ } else if requireSlice {
+ return "multiple-valued property requires a slice field type"
+ }
+
+ // Convert indexValues to a Go value with a meaning derived from the
+ // destination type.
+ pValue := p.Value
+ if iv, ok := pValue.(indexValue); ok {
+ meaning := pb.Property_NO_MEANING
+ switch v.Type() {
+ case typeOfBlobKey:
+ meaning = pb.Property_BLOBKEY
+ case typeOfByteSlice:
+ meaning = pb.Property_BLOB
+ case typeOfByteString:
+ meaning = pb.Property_BYTESTRING
+ case typeOfGeoPoint:
+ meaning = pb.Property_GEORSS_POINT
+ case typeOfTime:
+ meaning = pb.Property_GD_WHEN
+ }
+ var err error
+ pValue, err = propValue(iv.value, meaning)
+ if err != nil {
+ return err.Error()
+ }
+ }
+
+ switch v.Kind() {
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ x, ok := pValue.(int64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowInt(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetInt(x)
+ case reflect.Bool:
+ x, ok := pValue.(bool)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBool(x)
+ case reflect.String:
+ switch x := pValue.(type) {
+ case appengine.BlobKey:
+ v.SetString(string(x))
+ case ByteString:
+ v.SetString(string(x))
+ case string:
+ v.SetString(x)
+ default:
+ if pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ }
+ case reflect.Float32, reflect.Float64:
+ x, ok := pValue.(float64)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.OverflowFloat(x) {
+ return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
+ }
+ v.SetFloat(x)
+ case reflect.Ptr:
+ x, ok := pValue.(*Key)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if _, ok := v.Interface().(*Key); !ok {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case reflect.Struct:
+ switch v.Type() {
+ case typeOfTime:
+ x, ok := pValue.(time.Time)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ case typeOfGeoPoint:
+ x, ok := pValue.(appengine.GeoPoint)
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ v.Set(reflect.ValueOf(x))
+ default:
+ return typeMismatchReason(p, v)
+ }
+ case reflect.Slice:
+ x, ok := pValue.([]byte)
+ if !ok {
+ if y, yok := pValue.(ByteString); yok {
+ x, ok = []byte(y), true
+ }
+ }
+ if !ok && pValue != nil {
+ return typeMismatchReason(p, v)
+ }
+ if v.Type().Elem().Kind() != reflect.Uint8 {
+ return typeMismatchReason(p, v)
+ }
+ v.SetBytes(x)
+ default:
+ return typeMismatchReason(p, v)
+ }
+ if slice.IsValid() {
+ slice.Set(reflect.Append(slice, v))
+ }
+ return ""
+}
+
+// loadEntity loads an EntityProto into PropertyLoadSaver or struct pointer.
+func loadEntity(dst interface{}, src *pb.EntityProto) (err error) {
+ props, err := protoToProperties(src)
+ if err != nil {
+ return err
+ }
+ if e, ok := dst.(PropertyLoadSaver); ok {
+ return e.Load(props)
+ }
+ return LoadStruct(dst, props)
+}
+
+func (s structPLS) Load(props []Property) error {
+ var fieldName, reason string
+ var l propertyLoader
+ for _, p := range props {
+ if errStr := l.load(s.codec, s.v, p, p.Multiple); errStr != "" {
+ // We don't return early, as we try to load as many properties as possible.
+ // It is valid to load an entity into a struct that cannot fully represent it.
+ // That case returns an error, but the caller is free to ignore it.
+ fieldName, reason = p.Name, errStr
+ }
+ }
+ if reason != "" {
+ return &ErrFieldMismatch{
+ StructType: s.v.Type(),
+ FieldName: fieldName,
+ Reason: reason,
+ }
+ }
+ return nil
+}
+
+func protoToProperties(src *pb.EntityProto) ([]Property, error) {
+ props, rawProps := src.Property, src.RawProperty
+ out := make([]Property, 0, len(props)+len(rawProps))
+ for {
+ var (
+ x *pb.Property
+ noIndex bool
+ )
+ if len(props) > 0 {
+ x, props = props[0], props[1:]
+ } else if len(rawProps) > 0 {
+ x, rawProps = rawProps[0], rawProps[1:]
+ noIndex = true
+ } else {
+ break
+ }
+
+ var value interface{}
+ if x.Meaning != nil && *x.Meaning == pb.Property_INDEX_VALUE {
+ value = indexValue{x.Value}
+ } else {
+ var err error
+ value, err = propValue(x.Value, x.GetMeaning())
+ if err != nil {
+ return nil, err
+ }
+ }
+ out = append(out, Property{
+ Name: x.GetName(),
+ Value: value,
+ NoIndex: noIndex,
+ Multiple: x.GetMultiple(),
+ })
+ }
+ return out, nil
+}
+
+// propValue returns a Go value that combines the raw PropertyValue with a
+// meaning. For example, an Int64Value with GD_WHEN becomes a time.Time.
+func propValue(v *pb.PropertyValue, m pb.Property_Meaning) (interface{}, error) {
+ switch {
+ case v.Int64Value != nil:
+ if m == pb.Property_GD_WHEN {
+ return fromUnixMicro(*v.Int64Value), nil
+ } else {
+ return *v.Int64Value, nil
+ }
+ case v.BooleanValue != nil:
+ return *v.BooleanValue, nil
+ case v.StringValue != nil:
+ if m == pb.Property_BLOB {
+ return []byte(*v.StringValue), nil
+ } else if m == pb.Property_BLOBKEY {
+ return appengine.BlobKey(*v.StringValue), nil
+ } else if m == pb.Property_BYTESTRING {
+ return ByteString(*v.StringValue), nil
+ } else {
+ return *v.StringValue, nil
+ }
+ case v.DoubleValue != nil:
+ return *v.DoubleValue, nil
+ case v.Referencevalue != nil:
+ key, err := referenceValueToKey(v.Referencevalue)
+ if err != nil {
+ return nil, err
+ }
+ return key, nil
+ case v.Pointvalue != nil:
+ // NOTE: Strangely, latitude maps to X, longitude to Y.
+ return appengine.GeoPoint{Lat: v.Pointvalue.GetX(), Lng: v.Pointvalue.GetY()}, nil
+ }
+ return nil, nil
+}
+
+// indexValue is a Property value that is created when entities are loaded from
+// an index, such as from a projection query.
+//
+// Such Property values do not contain all of the metadata required to be
+// faithfully represented as a Go value, and are instead represented as an
+// opaque indexValue. Load the properties into a concrete struct type (e.g. by
+// passing a struct pointer to Iterator.Next) to reconstruct actual Go values
+// of type int, string, time.Time, etc.
+type indexValue struct {
+ value *pb.PropertyValue
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop.go
new file mode 100644
index 000000000000..b0a1e0fd8dff
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop.go
@@ -0,0 +1,294 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "fmt"
+ "reflect"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// Entities with more than this many indexed properties will not be saved.
+const maxIndexedProperties = 5000
+
+// []byte fields more than 1 megabyte long will not be loaded or saved.
+// (1 << 20 == 1 MiB.)
+const maxBlobLen = 1 << 20
+
+// Property is a name/value pair plus some metadata. A datastore entity's
+// contents are loaded and saved as a sequence of Properties. An entity can
+// have multiple Properties with the same name, provided that p.Multiple is
+// true on all of that entity's Properties with that name.
+type Property struct {
+ // Name is the property name.
+ Name string
+ // Value is the property value. The valid types are:
+ // - int64
+ // - bool
+ // - string
+ // - float64
+ // - ByteString
+ // - *Key
+ // - time.Time
+ // - appengine.BlobKey
+ // - appengine.GeoPoint
+ // - []byte (up to 1 megabyte in length)
+ // This set is smaller than the set of valid struct field types that the
+ // datastore can load and save. A Property Value cannot be a slice (apart
+ // from []byte); use multiple Properties instead. Also, a Value's type
+ // must be explicitly on the list above; it is not sufficient for the
+ // underlying type to be on that list. For example, a Value of "type
+ // myInt64 int64" is invalid. Smaller-width integers and floats are also
+ // invalid. Again, this is more restrictive than the set of valid struct
+ // field types.
+ //
+ // A Value will have an opaque type when loading entities from an index,
+ // such as via a projection query. Load entities into a struct instead
+ // of a PropertyLoadSaver when using a projection query.
+ //
+ // A Value may also be the nil interface value; this is equivalent to
+ // Python's None but not directly representable by a Go struct. Loading
+ // a nil-valued property into a struct will set that field to the zero
+ // value.
+ Value interface{}
+ // NoIndex is whether the datastore cannot index this property.
+ NoIndex bool
+ // Multiple is whether the entity can have multiple properties with
+ // the same name. Even if a particular instance only has one property with
+ // a certain name, Multiple should be true if a struct would best represent
+ // it as a field of type []T instead of type T.
+ Multiple bool
+}
+
+// ByteString is a short byte slice (up to 500 bytes) that can be indexed.
+type ByteString []byte
+
+// PropertyLoadSaver can be converted from and to a slice of Properties.
+// Types implementing it take full control of their datastore representation.
+type PropertyLoadSaver interface {
+ Load([]Property) error
+ Save() ([]Property, error)
+}
+
+// PropertyList converts a []Property to implement PropertyLoadSaver.
+type PropertyList []Property
+
+// Reflection types used to special-case PropertyLoadSaver implementations
+// and to reject *PropertyList destinations.
+var (
+ typeOfPropertyLoadSaver = reflect.TypeOf((*PropertyLoadSaver)(nil)).Elem()
+ typeOfPropertyList = reflect.TypeOf(PropertyList(nil))
+)
+
+// Load loads all of the provided properties into l.
+// It does not first reset *l to an empty slice.
+func (l *PropertyList) Load(p []Property) error {
+ *l = append(*l, p...)
+ return nil
+}
+
+// Save saves all of l's properties as a slice of Properties.
+// The returned slice aliases *l; it is not a copy.
+func (l *PropertyList) Save() ([]Property, error) {
+ return *l, nil
+}
+
+// validPropertyName returns whether name consists of one or more valid Go
+// identifiers joined by ".". Each dot-separated component must follow the
+// Go identifier rule: a leading letter or underscore, then letters, digits
+// or underscores (Unicode letters/digits are accepted).
+func validPropertyName(name string) bool {
+ if name == "" {
+ return false
+ }
+ for _, s := range strings.Split(name, ".") {
+ // An empty component means a leading, trailing or doubled dot.
+ if s == "" {
+ return false
+ }
+ first := true
+ for _, c := range s {
+ if first {
+ first = false
+ if c != '_' && !unicode.IsLetter(c) {
+ return false
+ }
+ } else {
+ if c != '_' && !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+ return false
+ }
+ }
+ }
+ }
+ return true
+}
+
+// structTag is the parsed `datastore:"name,options"` tag of a struct field.
+// If a field has no tag, or the tag has an empty name, then the structTag's
+// name is just the field name. A "-" name means that the datastore ignores
+// that field.
+type structTag struct {
+ name string
+ noIndex bool
+}
+
+// structCodec describes how to convert a struct to and from a sequence of
+// properties.
+type structCodec struct {
+ // byIndex gives the structTag for the i'th field.
+ byIndex []structTag
+ // byName gives the field codec for the structTag with the given name.
+ byName map[string]fieldCodec
+ // hasSlice is whether a struct or any of its nested or embedded structs
+ // has a slice-typed field (other than []byte).
+ hasSlice bool
+ // complete is whether the structCodec is complete. An incomplete
+ // structCodec may be encountered when walking a recursive struct.
+ complete bool
+}
+
+// fieldCodec is a struct field's index and, if that struct field's type is
+// itself a struct, that substruct's structCodec.
+type fieldCodec struct {
+ index int
+ substructCodec *structCodec
+}
+
+// structCodecs collects the structCodecs that have already been calculated,
+// keyed by struct type. Guarded by structCodecsMutex.
+var (
+ structCodecsMutex sync.Mutex
+ structCodecs = make(map[reflect.Type]*structCodec)
+)
+
+// getStructCodec returns the structCodec for the given struct type.
+// It is safe for concurrent use; the codec cache is protected by a mutex.
+func getStructCodec(t reflect.Type) (*structCodec, error) {
+ structCodecsMutex.Lock()
+ defer structCodecsMutex.Unlock()
+ return getStructCodecLocked(t)
+}
+
+// getStructCodecLocked implements getStructCodec. The structCodecsMutex must
+// be held when calling this function. It returns a cached codec when one
+// exists, otherwise builds (and caches) one by walking t's fields, recursing
+// into nested and embedded structs.
+func getStructCodecLocked(t reflect.Type) (ret *structCodec, retErr error) {
+ c, ok := structCodecs[t]
+ if ok {
+ return c, nil
+ }
+ c = &structCodec{
+ byIndex: make([]structTag, t.NumField()),
+ byName: make(map[string]fieldCodec),
+ }
+
+ // Add c to the structCodecs map before we are sure it is good. If t is
+ // a recursive type, it needs to find the incomplete entry for itself in
+ // the map.
+ structCodecs[t] = c
+ defer func() {
+ // Do not cache a codec that failed to build.
+ if retErr != nil {
+ delete(structCodecs, t)
+ }
+ }()
+
+ for i := range c.byIndex {
+ f := t.Field(i)
+ name, opts := f.Tag.Get("datastore"), ""
+ if i := strings.Index(name, ","); i != -1 {
+ name, opts = name[:i], name[i+1:]
+ }
+ if name == "" {
+ // An untagged anonymous (embedded) field keeps an empty name so
+ // its sub-fields are flattened into the parent without a prefix.
+ if !f.Anonymous {
+ name = f.Name
+ }
+ } else if name == "-" {
+ c.byIndex[i] = structTag{name: name}
+ continue
+ } else if !validPropertyName(name) {
+ return nil, fmt.Errorf("datastore: struct tag has invalid property name: %q", name)
+ }
+
+ substructType, fIsSlice := reflect.Type(nil), false
+ switch f.Type.Kind() {
+ case reflect.Struct:
+ substructType = f.Type
+ case reflect.Slice:
+ if f.Type.Elem().Kind() == reflect.Struct {
+ substructType = f.Type.Elem()
+ }
+ // []byte is the one slice type that is not "a slice" for
+ // flattening purposes.
+ fIsSlice = f.Type != typeOfByteSlice
+ c.hasSlice = c.hasSlice || fIsSlice
+ }
+
+ if substructType != nil && substructType != typeOfTime && substructType != typeOfGeoPoint {
+ if name != "" {
+ name = name + "."
+ }
+ sub, err := getStructCodecLocked(substructType)
+ if err != nil {
+ return nil, err
+ }
+ if !sub.complete {
+ return nil, fmt.Errorf("datastore: recursive struct: field %q", f.Name)
+ }
+ if fIsSlice && sub.hasSlice {
+ return nil, fmt.Errorf(
+ "datastore: flattening nested structs leads to a slice of slices: field %q", f.Name)
+ }
+ c.hasSlice = c.hasSlice || sub.hasSlice
+ for relName := range sub.byName {
+ absName := name + relName
+ if _, ok := c.byName[absName]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", absName)
+ }
+ c.byName[absName] = fieldCodec{index: i, substructCodec: sub}
+ }
+ } else {
+ if _, ok := c.byName[name]; ok {
+ return nil, fmt.Errorf("datastore: struct tag has repeated property name: %q", name)
+ }
+ c.byName[name] = fieldCodec{index: i}
+ }
+
+ c.byIndex[i] = structTag{
+ name: name,
+ noIndex: opts == "noindex",
+ }
+ }
+ c.complete = true
+ return c, nil
+}
+
+// structPLS adapts a struct to be a PropertyLoadSaver.
+type structPLS struct {
+ v reflect.Value
+ codec *structCodec
+}
+
+// newStructPLS returns a PropertyLoadSaver for the struct pointer p.
+// It returns ErrInvalidEntityType unless p is a non-nil pointer to a struct.
+func newStructPLS(p interface{}) (PropertyLoadSaver, error) {
+ v := reflect.ValueOf(p)
+ if v.Kind() != reflect.Ptr || v.Elem().Kind() != reflect.Struct {
+ return nil, ErrInvalidEntityType
+ }
+ v = v.Elem()
+ codec, err := getStructCodec(v.Type())
+ if err != nil {
+ return nil, err
+ }
+ return structPLS{v, codec}, nil
+}
+
+// LoadStruct loads the properties from p to dst.
+// dst must be a struct pointer.
+func LoadStruct(dst interface{}, p []Property) error {
+ x, err := newStructPLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(p)
+}
+
+// SaveStruct returns the properties from src as a slice of Properties.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Property, error) {
+ x, err := newStructPLS(src)
+ if err != nil {
+ return nil, err
+ }
+ return x.Save()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop_test.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop_test.go
new file mode 100644
index 000000000000..db2e54290ee5
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/prop_test.go
@@ -0,0 +1,559 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "google.golang.org/appengine"
+)
+
+// TestValidPropertyName table-tests validPropertyName with invalid and valid
+// dotted identifiers, including Unicode letters.
+func TestValidPropertyName(t *testing.T) {
+ testCases := []struct {
+ name string
+ want bool
+ }{
+ // Invalid names.
+ {"", false},
+ {"'", false},
+ {".", false},
+ {"..", false},
+ {".foo", false},
+ {"0", false},
+ {"00", false},
+ {"X.X.4.X.X", false},
+ {"\n", false},
+ {"\x00", false},
+ {"abc\xffz", false},
+ {"foo.", false},
+ {"foo..", false},
+ {"foo..bar", false},
+ {"☃", false},
+ {`"`, false},
+ // Valid names.
+ {"AB", true},
+ {"Abc", true},
+ {"X.X.X.X.X", true},
+ {"_", true},
+ {"_0", true},
+ {"a", true},
+ {"a_B", true},
+ {"f00", true},
+ {"f0o", true},
+ {"fo0", true},
+ {"foo", true},
+ {"foo.bar", true},
+ {"foo.bar.baz", true},
+ {"世界", true},
+ }
+ for _, tc := range testCases {
+ got := validPropertyName(tc.name)
+ if got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.name, got, tc.want)
+ }
+ }
+}
+
+// TestStructCodec checks getStructCodec's output codec (field tags, name
+// mapping, substruct flattening) for a range of struct shapes: plain, nested,
+// embedded, tagged, unexported and noindex cases.
+func TestStructCodec(t *testing.T) {
+ type oStruct struct {
+ O int
+ }
+ type pStruct struct {
+ P int
+ Q int
+ }
+ type rStruct struct {
+ R int
+ S pStruct
+ T oStruct
+ oStruct
+ }
+ type uStruct struct {
+ U int
+ v int
+ }
+ oStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "O"},
+ },
+ byName: map[string]fieldCodec{
+ "O": {index: 0},
+ },
+ complete: true,
+ }
+ pStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "P"},
+ {name: "Q"},
+ },
+ byName: map[string]fieldCodec{
+ "P": {index: 0},
+ "Q": {index: 1},
+ },
+ complete: true,
+ }
+ rStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "R"},
+ {name: "S."},
+ {name: "T."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "R": {index: 0},
+ "S.P": {index: 1, substructCodec: pStructCodec},
+ "S.Q": {index: 1, substructCodec: pStructCodec},
+ "T.O": {index: 2, substructCodec: oStructCodec},
+ "O": {index: 3, substructCodec: oStructCodec},
+ },
+ complete: true,
+ }
+ uStructCodec := &structCodec{
+ byIndex: []structTag{
+ {name: "U"},
+ {name: "v"},
+ },
+ byName: map[string]fieldCodec{
+ "U": {index: 0},
+ "v": {index: 1},
+ },
+ complete: true,
+ }
+
+ testCases := []struct {
+ desc string
+ structValue interface{}
+ want *structCodec
+ }{
+ {
+ "oStruct",
+ oStruct{},
+ oStructCodec,
+ },
+ {
+ "pStruct",
+ pStruct{},
+ pStructCodec,
+ },
+ {
+ "rStruct",
+ rStruct{},
+ rStructCodec,
+ },
+ {
+ "uStruct",
+ uStruct{},
+ uStructCodec,
+ },
+ {
+ "non-basic fields",
+ struct {
+ B appengine.BlobKey
+ K *Key
+ T time.Time
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "B"},
+ {name: "K"},
+ {name: "T"},
+ },
+ byName: map[string]fieldCodec{
+ "B": {index: 0},
+ "K": {index: 1},
+ "T": {index: 2},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with ignored embed",
+ struct {
+ A int `datastore:"a,noindex"`
+ B int `datastore:"b"`
+ C int `datastore:",noindex"`
+ D int `datastore:""`
+ E int
+ I int `datastore:"-"`
+ J int `datastore:",noindex" json:"j"`
+ oStruct `datastore:"-"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "a", noIndex: true},
+ {name: "b", noIndex: false},
+ {name: "C", noIndex: true},
+ {name: "D", noIndex: false},
+ {name: "E", noIndex: false},
+ {name: "-", noIndex: false},
+ {name: "J", noIndex: true},
+ {name: "-", noIndex: false},
+ },
+ byName: map[string]fieldCodec{
+ "a": {index: 0},
+ "b": {index: 1},
+ "C": {index: 2},
+ "D": {index: 3},
+ "E": {index: 4},
+ "J": {index: 6},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported fields",
+ struct {
+ A int
+ b int
+ C int `datastore:"x"`
+ d int `datastore:"Y"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A"},
+ {name: "b"},
+ {name: "x"},
+ {name: "Y"},
+ },
+ byName: map[string]fieldCodec{
+ "A": {index: 0},
+ "b": {index: 1},
+ "x": {index: 2},
+ "Y": {index: 3},
+ },
+ complete: true,
+ },
+ },
+ {
+ "nested and embedded structs",
+ struct {
+ A int
+ B int
+ CC oStruct
+ DDD rStruct
+ oStruct
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A"},
+ {name: "B"},
+ {name: "CC."},
+ {name: "DDD."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "A": {index: 0},
+ "B": {index: 1},
+ "CC.O": {index: 2, substructCodec: oStructCodec},
+ "DDD.R": {index: 3, substructCodec: rStructCodec},
+ "DDD.S.P": {index: 3, substructCodec: rStructCodec},
+ "DDD.S.Q": {index: 3, substructCodec: rStructCodec},
+ "DDD.T.O": {index: 3, substructCodec: rStructCodec},
+ "DDD.O": {index: 3, substructCodec: rStructCodec},
+ "O": {index: 4, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "struct tags with nested and embedded structs",
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"w"`
+ C oStruct `datastore:"xx"`
+ D rStruct `datastore:"y"`
+ oStruct `datastore:"z"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "-"},
+ {name: "w"},
+ {name: "xx."},
+ {name: "y."},
+ {name: "z."},
+ },
+ byName: map[string]fieldCodec{
+ "w": {index: 1},
+ "xx.O": {index: 2, substructCodec: oStructCodec},
+ "y.R": {index: 3, substructCodec: rStructCodec},
+ "y.S.P": {index: 3, substructCodec: rStructCodec},
+ "y.S.Q": {index: 3, substructCodec: rStructCodec},
+ "y.T.O": {index: 3, substructCodec: rStructCodec},
+ "y.O": {index: 3, substructCodec: rStructCodec},
+ "z.O": {index: 4, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "unexported nested and embedded structs",
+ struct {
+ a int
+ B int
+ c uStruct
+ D uStruct
+ uStruct
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "a"},
+ {name: "B"},
+ {name: "c."},
+ {name: "D."},
+ {name: ""},
+ },
+ byName: map[string]fieldCodec{
+ "a": {index: 0},
+ "B": {index: 1},
+ "c.U": {index: 2, substructCodec: uStructCodec},
+ "c.v": {index: 2, substructCodec: uStructCodec},
+ "D.U": {index: 3, substructCodec: uStructCodec},
+ "D.v": {index: 3, substructCodec: uStructCodec},
+ "U": {index: 4, substructCodec: uStructCodec},
+ "v": {index: 4, substructCodec: uStructCodec},
+ },
+ complete: true,
+ },
+ },
+ {
+ "noindex nested struct",
+ struct {
+ A oStruct `datastore:",noindex"`
+ }{},
+ &structCodec{
+ byIndex: []structTag{
+ {name: "A.", noIndex: true},
+ },
+ byName: map[string]fieldCodec{
+ "A.O": {index: 0, substructCodec: oStructCodec},
+ },
+ complete: true,
+ },
+ },
+ }
+
+ for _, tc := range testCases {
+ got, err := getStructCodec(reflect.TypeOf(tc.structValue))
+ if err != nil {
+ t.Errorf("%s: getStructCodec: %v", tc.desc, err)
+ continue
+ }
+ if !reflect.DeepEqual(got, tc.want) {
+ t.Errorf("%s\ngot %v\nwant %v\n", tc.desc, got, tc.want)
+ continue
+ }
+ }
+}
+
+// TestRepeatedPropertyName checks that getStructCodec accepts structs whose
+// effective property names are distinct (including tag-renamed swaps) and
+// rejects structs where two fields map to the same property name.
+func TestRepeatedPropertyName(t *testing.T) {
+ good := []interface{}{
+ struct {
+ A int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"b"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"B"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"-"`
+ }{},
+ struct {
+ A int `datastore:"-"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"D"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ A int `datastore:"B"`
+ B int
+ }{},
+ struct {
+ A int
+ B int `datastore:"A"`
+ }{},
+ struct {
+ A int `datastore:"C"`
+ B int `datastore:"C"`
+ }{},
+ struct {
+ A int `datastore:"B"`
+ B int `datastore:"C"`
+ C int `datastore:"B"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+// TestFlatteningNestedStructs checks that getStructCodec rejects structs
+// whose flattening would yield a slice of slices or a repeated property name
+// via embedding, and accepts the legal nesting shapes.
+func TestFlatteningNestedStructs(t *testing.T) {
+ type deepGood struct {
+ A struct {
+ B []struct {
+ C struct {
+ D int
+ }
+ }
+ }
+ }
+ type deepBad struct {
+ A struct {
+ B []struct {
+ C struct {
+ D []int
+ }
+ }
+ }
+ }
+ type iSay struct {
+ Tomato int
+ }
+ type youSay struct {
+ Tomato int
+ }
+ type tweedledee struct {
+ Dee int `datastore:"D"`
+ }
+ type tweedledum struct {
+ Dum int `datastore:"D"`
+ }
+
+ good := []interface{}{
+ struct {
+ X []struct {
+ Y string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []byte
+ }
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ }{},
+ struct {
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ P []int
+ X struct {
+ Y []int
+ }
+ Q []int
+ }{},
+ struct {
+ deepGood
+ }{},
+ struct {
+ DG deepGood
+ }{},
+ struct {
+ Foo struct {
+ Z int `datastore:"X"`
+ } `datastore:"A"`
+ Bar struct {
+ Z int `datastore:"Y"`
+ } `datastore:"A"`
+ }{},
+ }
+ bad := []interface{}{
+ struct {
+ X []struct {
+ Y []string
+ }
+ }{},
+ struct {
+ X []struct {
+ Y []int
+ }
+ }{},
+ struct {
+ deepBad
+ }{},
+ struct {
+ DB deepBad
+ }{},
+ struct {
+ iSay
+ youSay
+ }{},
+ struct {
+ tweedledee
+ tweedledum
+ }{},
+ struct {
+ Foo struct {
+ Z int
+ } `datastore:"A"`
+ Bar struct {
+ Z int
+ } `datastore:"A"`
+ }{},
+ }
+ testGetStructCodec(t, good, bad)
+}
+
+// testGetStructCodec asserts that getStructCodec succeeds for every value in
+// good and fails for every value in bad.
+func testGetStructCodec(t *testing.T, good []interface{}, bad []interface{}) {
+ for _, x := range good {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err != nil {
+ t.Errorf("type %T: got non-nil error (%s), want nil", x, err)
+ }
+ }
+ for _, x := range bad {
+ if _, err := getStructCodec(reflect.TypeOf(x)); err == nil {
+ t.Errorf("type %T: got nil error, want non-nil", x)
+ }
+ }
+}
+
+// TestNilKeyIsStored round-trips a struct with a nil *Key field through
+// SaveStruct/LoadStruct and checks that loading resets both fields to their
+// zero values (i.e. a nil key is stored and restored, not skipped).
+func TestNilKeyIsStored(t *testing.T) {
+ x := struct {
+ K *Key
+ I int
+ }{}
+ p := PropertyList{}
+ // Save x as properties.
+ p1, _ := SaveStruct(&x)
+ p.Load(p1)
+ // Set x's fields to non-zero.
+ x.K = &Key{}
+ x.I = 2
+ // Load x from properties.
+ p2, _ := p.Save()
+ LoadStruct(&x, p2)
+ // Check that x's fields were set to zero.
+ if x.K != nil {
+ t.Errorf("K field was not zero")
+ }
+ if x.I != 0 {
+ t.Errorf("I field was not zero")
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/query.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/query.go
new file mode 100644
index 000000000000..0448002c5ae6
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/query.go
@@ -0,0 +1,712 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "encoding/base64"
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+// operator enumerates the comparison operators usable in a query filter.
+type operator int
+
+const (
+ lessThan operator = iota
+ lessEq
+ equal
+ greaterEq
+ greaterThan
+)
+
+// operatorToProto maps each operator to its datastore_v3 protobuf enum.
+var operatorToProto = map[operator]*pb.Query_Filter_Operator{
+ lessThan: pb.Query_Filter_LESS_THAN.Enum(),
+ lessEq: pb.Query_Filter_LESS_THAN_OR_EQUAL.Enum(),
+ equal: pb.Query_Filter_EQUAL.Enum(),
+ greaterEq: pb.Query_Filter_GREATER_THAN_OR_EQUAL.Enum(),
+ greaterThan: pb.Query_Filter_GREATER_THAN.Enum(),
+}
+
+// filter is a conditional filter on query results.
+type filter struct {
+ FieldName string
+ Op operator
+ Value interface{}
+}
+
+// sortDirection enumerates ascending/descending sort orders.
+type sortDirection int
+
+const (
+ ascending sortDirection = iota
+ descending
+)
+
+// sortDirectionToProto maps each sortDirection to its protobuf enum.
+var sortDirectionToProto = map[sortDirection]*pb.Query_Order_Direction{
+ ascending: pb.Query_Order_ASCENDING.Enum(),
+ descending: pb.Query_Order_DESCENDING.Enum(),
+}
+
+// order is a sort order on query results.
+type order struct {
+ FieldName string
+ Direction sortDirection
+}
+
+// NewQuery creates a new Query for a specific entity kind.
+//
+// An empty kind means to return all entities, including entities created and
+// managed by other App Engine features, and is called a kindless query.
+// Kindless queries cannot include filters or sort orders on property values.
+func NewQuery(kind string) *Query {
+ return &Query{
+ kind: kind,
+ // -1 means no limit; see Limit.
+ limit: -1,
+ }
+}
+
+// Query represents a datastore query. Queries are immutable from the
+// caller's perspective: each builder method clones before mutating.
+type Query struct {
+ kind string
+ ancestor *Key
+ filter []filter
+ order []order
+ projection []string
+
+ distinct bool
+ keysOnly bool
+ eventual bool
+ limit int32
+ offset int32
+ start *pb.CompiledCursor
+ end *pb.CompiledCursor
+
+ // err records the first construction error; checked before running.
+ err error
+}
+
+// clone returns a copy of q so that derivative queries do not mutate the
+// original. Slice fields get fresh backing arrays; cursor pointers and the
+// projection slice are shared (they are never mutated in place).
+func (q *Query) clone() *Query {
+ x := *q
+ // Copy the contents of the slice-typed fields to a new backing store.
+ if len(q.filter) > 0 {
+ x.filter = make([]filter, len(q.filter))
+ copy(x.filter, q.filter)
+ }
+ if len(q.order) > 0 {
+ x.order = make([]order, len(q.order))
+ copy(x.order, q.order)
+ }
+ return &x
+}
+
+// Ancestor returns a derivative query with an ancestor filter.
+// The ancestor should not be nil; a nil ancestor records an error that is
+// reported when the query runs.
+func (q *Query) Ancestor(ancestor *Key) *Query {
+ q = q.clone()
+ if ancestor == nil {
+ q.err = errors.New("datastore: nil query ancestor")
+ return q
+ }
+ q.ancestor = ancestor
+ return q
+}
+
+// EventualConsistency returns a derivative query that returns eventually
+// consistent results.
+// It only has an effect on ancestor queries.
+func (q *Query) EventualConsistency() *Query {
+ q = q.clone()
+ q.eventual = true
+ return q
+}
+
+// Filter returns a derivative query with a field-based filter.
+// The filterStr argument must be a field name followed by optional space,
+// followed by an operator, one of ">", "<", ">=", "<=", or "=".
+// Fields are compared against the provided value using the operator.
+// Multiple filters are AND'ed together.
+func (q *Query) Filter(filterStr string, value interface{}) *Query {
+ q = q.clone()
+ filterStr = strings.TrimSpace(filterStr)
+ if len(filterStr) < 1 {
+ q.err = errors.New("datastore: invalid filter: " + filterStr)
+ return q
+ }
+ f := filter{
+ // The field name is everything before the trailing operator
+ // characters; the cut set includes '!' so "!=" is caught below as
+ // an invalid operator rather than folded into the field name.
+ FieldName: strings.TrimRight(filterStr, " ><=!"),
+ Value: value,
+ }
+ switch op := strings.TrimSpace(filterStr[len(f.FieldName):]); op {
+ case "<=":
+ f.Op = lessEq
+ case ">=":
+ f.Op = greaterEq
+ case "<":
+ f.Op = lessThan
+ case ">":
+ f.Op = greaterThan
+ case "=":
+ f.Op = equal
+ default:
+ q.err = fmt.Errorf("datastore: invalid operator %q in filter %q", op, filterStr)
+ return q
+ }
+ q.filter = append(q.filter, f)
+ return q
+}
+
+// Order returns a derivative query with a field-based sort order. Orders are
+// applied in the order they are added. The default order is ascending; to sort
+// in descending order prefix the fieldName with a minus sign (-).
+// An explicit "+" prefix is rejected as invalid.
+func (q *Query) Order(fieldName string) *Query {
+ q = q.clone()
+ fieldName = strings.TrimSpace(fieldName)
+ o := order{
+ Direction: ascending,
+ FieldName: fieldName,
+ }
+ if strings.HasPrefix(fieldName, "-") {
+ o.Direction = descending
+ o.FieldName = strings.TrimSpace(fieldName[1:])
+ } else if strings.HasPrefix(fieldName, "+") {
+ q.err = fmt.Errorf("datastore: invalid order: %q", fieldName)
+ return q
+ }
+ if len(o.FieldName) == 0 {
+ q.err = errors.New("datastore: empty order")
+ return q
+ }
+ q.order = append(q.order, o)
+ return q
+}
+
+// Project returns a derivative query that yields only the given fields. It
+// cannot be used with KeysOnly.
+func (q *Query) Project(fieldNames ...string) *Query {
+ q = q.clone()
+ // Copy the variadic slice so the caller cannot mutate it afterwards.
+ q.projection = append([]string(nil), fieldNames...)
+ return q
+}
+
+// Distinct returns a derivative query that yields de-duplicated entities with
+// respect to the set of projected fields. It is only used for projection
+// queries.
+func (q *Query) Distinct() *Query {
+ q = q.clone()
+ q.distinct = true
+ return q
+}
+
+// KeysOnly returns a derivative query that yields only keys, not keys and
+// entities. It cannot be used with projection queries.
+func (q *Query) KeysOnly() *Query {
+ q = q.clone()
+ q.keysOnly = true
+ return q
+}
+
+// Limit returns a derivative query that has a limit on the number of results
+// returned. A negative value means unlimited.
+func (q *Query) Limit(limit int) *Query {
+ q = q.clone()
+ // Guard the int -> int32 narrowing on 64-bit platforms.
+ if limit < math.MinInt32 || limit > math.MaxInt32 {
+ q.err = errors.New("datastore: query limit overflow")
+ return q
+ }
+ q.limit = int32(limit)
+ return q
+}
+
+// Offset returns a derivative query that has an offset of how many keys to
+// skip over before returning results. A negative value is invalid.
+func (q *Query) Offset(offset int) *Query {
+ q = q.clone()
+ if offset < 0 {
+ q.err = errors.New("datastore: negative query offset")
+ return q
+ }
+ if offset > math.MaxInt32 {
+ q.err = errors.New("datastore: query offset overflow")
+ return q
+ }
+ q.offset = int32(offset)
+ return q
+}
+
+// Start returns a derivative query with the given start point.
+// A zero-valued Cursor (nil compiled cursor) records an error.
+func (q *Query) Start(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.start = c.cc
+ return q
+}
+
+// End returns a derivative query with the given end point.
+// A zero-valued Cursor (nil compiled cursor) records an error.
+func (q *Query) End(c Cursor) *Query {
+ q = q.clone()
+ if c.cc == nil {
+ q.err = errors.New("datastore: invalid cursor")
+ return q
+ }
+ q.end = c.cc
+ return q
+}
+
+// toProto converts the query to a protocol buffer. It resets dst first and
+// validates the query shape (projection vs keys-only, filter/order fields,
+// known operators/directions), returning a descriptive error on failure.
+func (q *Query) toProto(dst *pb.Query, appID string) error {
+ if len(q.projection) != 0 && q.keysOnly {
+ return errors.New("datastore: query cannot both project and be keys-only")
+ }
+ dst.Reset()
+ dst.App = proto.String(appID)
+ if q.kind != "" {
+ dst.Kind = proto.String(q.kind)
+ }
+ if q.ancestor != nil {
+ dst.Ancestor = keyToProto(appID, q.ancestor)
+ // Eventual consistency only applies to ancestor queries.
+ if q.eventual {
+ dst.Strong = proto.Bool(false)
+ }
+ }
+ if q.projection != nil {
+ dst.PropertyName = q.projection
+ if q.distinct {
+ dst.GroupByPropertyName = q.projection
+ }
+ }
+ if q.keysOnly {
+ dst.KeysOnly = proto.Bool(true)
+ dst.RequirePerfectPlan = proto.Bool(true)
+ }
+ for _, qf := range q.filter {
+ if qf.FieldName == "" {
+ return errors.New("datastore: empty query filter field name")
+ }
+ p, errStr := valueToProto(appID, qf.FieldName, reflect.ValueOf(qf.Value), false)
+ if errStr != "" {
+ return errors.New("datastore: bad query filter value type: " + errStr)
+ }
+ xf := &pb.Query_Filter{
+ Op: operatorToProto[qf.Op],
+ Property: []*pb.Property{p},
+ }
+ if xf.Op == nil {
+ return errors.New("datastore: unknown query filter operator")
+ }
+ dst.Filter = append(dst.Filter, xf)
+ }
+ for _, qo := range q.order {
+ if qo.FieldName == "" {
+ return errors.New("datastore: empty query order field name")
+ }
+ xo := &pb.Query_Order{
+ Property: proto.String(qo.FieldName),
+ Direction: sortDirectionToProto[qo.Direction],
+ }
+ if xo.Direction == nil {
+ return errors.New("datastore: unknown query order direction")
+ }
+ dst.Order = append(dst.Order, xo)
+ }
+ if q.limit >= 0 {
+ dst.Limit = proto.Int32(q.limit)
+ }
+ if q.offset != 0 {
+ dst.Offset = proto.Int32(q.offset)
+ }
+ dst.CompiledCursor = q.start
+ dst.EndCompiledCursor = q.end
+ dst.Compile = proto.Bool(true)
+ return nil
+}
+
+// Count returns the number of results for the query. It is implemented by
+// running a zero-limit copy of the query and summing the skipped-result
+// counts across the RunQuery/Next RPC sequence.
+func (q *Query) Count(c appengine.Context) (int, error) {
+ // Check that the query is well-formed.
+ if q.err != nil {
+ return 0, q.err
+ }
+
+ // Run a copy of the query, with keysOnly true (if we're not a projection,
+ // since the two are incompatible), and an adjusted offset. We also set the
+ // limit to zero, as we don't want any actual entity data, just the number
+ // of skipped results.
+ newQ := q.clone()
+ newQ.keysOnly = len(newQ.projection) == 0
+ newQ.limit = 0
+ if q.limit < 0 {
+ // If the original query was unlimited, set the new query's offset to maximum.
+ newQ.offset = math.MaxInt32
+ } else {
+ newQ.offset = q.offset + q.limit
+ if newQ.offset < 0 {
+ // Do the best we can, in the presence of overflow.
+ newQ.offset = math.MaxInt32
+ }
+ }
+ req := &pb.Query{}
+ if err := newQ.toProto(req, c.FullyQualifiedAppID()); err != nil {
+ return 0, err
+ }
+ res := &pb.QueryResult{}
+ if err := c.Call("datastore_v3", "RunQuery", req, res, nil); err != nil {
+ return 0, err
+ }
+
+ // n is the count we will return. For example, suppose that our original
+ // query had an offset of 4 and a limit of 2008: the count will be 2008,
+ // provided that there are at least 2012 matching entities. However, the
+ // RPCs will only skip 1000 results at a time. The RPC sequence is:
+ // call RunQuery with (offset, limit) = (2012, 0) // 2012 == newQ.offset
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 1000
+ // call Next with (offset, limit) = (1012, 0) // 1012 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (1000, true)
+ // n += 1000 // n == 2000
+ // call Next with (offset, limit) = (12, 0) // 12 == newQ.offset - n
+ // response has (skippedResults, moreResults) = (12, false)
+ // n += 12 // n == 2012
+ // // exit the loop
+ // n -= 4 // n == 2008
+ var n int32
+ for {
+ // The QueryResult should have no actual entity data, just skipped results.
+ if len(res.Result) != 0 {
+ return 0, errors.New("datastore: internal error: Count request returned too much data")
+ }
+ n += res.GetSkippedResults()
+ if !res.GetMoreResults() {
+ break
+ }
+ if err := callNext(c, res, newQ.offset-n, 0); err != nil {
+ return 0, err
+ }
+ }
+ n -= q.offset
+ if n < 0 {
+ // If the offset was greater than the number of matching entities,
+ // return 0 instead of negative.
+ n = 0
+ }
+ return int(n), nil
+}
+
+// callNext issues a datastore_v3/Next RPC to advance a cursor, such as that
+// returned by a query with more results. The response is written into res
+// in place (res is Reset first); a negative limit means "no count".
+func callNext(c appengine.Context, res *pb.QueryResult, offset, limit int32) error {
+ if res.Cursor == nil {
+ return errors.New("datastore: internal error: server did not return a cursor")
+ }
+ req := &pb.NextRequest{
+ Cursor: res.Cursor,
+ }
+ if limit >= 0 {
+ req.Count = proto.Int32(limit)
+ }
+ if offset != 0 {
+ req.Offset = proto.Int32(offset)
+ }
+ if res.CompiledCursor != nil {
+ // Keep asking for compiled cursors if the previous response had one.
+ req.Compile = proto.Bool(true)
+ }
+ res.Reset()
+ return c.Call("datastore_v3", "Next", req, res, nil)
+}
+
+// GetAll runs the query in the given context and returns all keys that match
+// that query, as well as appending the values to dst.
+//
+// dst must have type *[]S or *[]*S or *[]P, for some struct type S or some non-
+// interface, non-pointer type P such that P or *P implements PropertyLoadSaver.
+//
+// As a special case, *PropertyList is an invalid type for dst, even though a
+// PropertyList is a slice of structs. It is treated as invalid to avoid being
+// mistakenly passed when *[]PropertyList was intended.
+//
+// The keys returned by GetAll will be in a 1-1 correspondence with the entities
+// added to dst.
+//
+// If q is a ``keys-only'' query, GetAll ignores dst and only returns the keys.
+func (q *Query) GetAll(c appengine.Context, dst interface{}) ([]*Key, error) {
+ var (
+ dv reflect.Value
+ mat multiArgType
+ elemType reflect.Type
+ // errFieldMismatch holds the last ErrFieldMismatch seen, returned
+ // only if no harder error occurs.
+ errFieldMismatch error
+ )
+ if !q.keysOnly {
+ dv = reflect.ValueOf(dst)
+ if dv.Kind() != reflect.Ptr || dv.IsNil() {
+ return nil, ErrInvalidEntityType
+ }
+ dv = dv.Elem()
+ mat, elemType = checkMultiArg(dv)
+ if mat == multiArgTypeInvalid || mat == multiArgTypeInterface {
+ return nil, ErrInvalidEntityType
+ }
+ }
+
+ var keys []*Key
+ for t := q.Run(c); ; {
+ k, e, err := t.next()
+ if err == Done {
+ break
+ }
+ if err != nil {
+ return keys, err
+ }
+ if !q.keysOnly {
+ ev := reflect.New(elemType)
+ if elemType.Kind() == reflect.Map {
+ // This is a special case. The zero values of a map type are
+ // not immediately useful; they have to be make'd.
+ //
+ // Funcs and channels are similar, in that a zero value is not useful,
+ // but even a freshly make'd channel isn't useful: there's no fixed
+ // channel buffer size that is always going to be large enough, and
+ // there's no goroutine to drain the other end. Theoretically, these
+ // types could be supported, for example by sniffing for a constructor
+ // method or requiring prior registration, but for now it's not a
+ // frequent enough concern to be worth it. Programmers can work around
+ // it by explicitly using Iterator.Next instead of the Query.GetAll
+ // convenience method.
+ x := reflect.MakeMap(elemType)
+ ev.Elem().Set(x)
+ }
+ if err = loadEntity(ev.Interface(), e); err != nil {
+ if _, ok := err.(*ErrFieldMismatch); ok {
+ // We continue loading entities even in the face of field mismatch errors.
+ // If we encounter any other error, that other error is returned. Otherwise,
+ // an ErrFieldMismatch is returned.
+ errFieldMismatch = err
+ } else {
+ return keys, err
+ }
+ }
+ if mat != multiArgTypeStructPtr {
+ ev = ev.Elem()
+ }
+ dv.Set(reflect.Append(dv, ev))
+ }
+ keys = append(keys, k)
+ }
+ return keys, errFieldMismatch
+}
+
// Run runs the query in the given context.
//
// Any error (a malformed query, a failed RPC, or an internal inconsistency
// while consuming the requested offset) is recorded on the returned Iterator
// and surfaced by its Next method.
func (q *Query) Run(c appengine.Context) *Iterator {
	if q.err != nil {
		return &Iterator{err: q.err}
	}
	t := &Iterator{
		c:      c,
		limit:  q.limit,
		q:      q,
		prevCC: q.start,
	}
	var req pb.Query
	if err := q.toProto(&req, c.FullyQualifiedAppID()); err != nil {
		t.err = err
		return t
	}
	if err := c.Call("datastore_v3", "RunQuery", &req, &t.res, nil); err != nil {
		t.err = err
		return t
	}
	// The server may not have skipped the full requested offset in the first
	// batch; keep issuing Next RPCs until the remaining offset is consumed.
	offset := q.offset - t.res.GetSkippedResults()
	for offset > 0 && t.res.GetMoreResults() {
		t.prevCC = t.res.CompiledCursor
		if err := callNext(t.c, &t.res, offset, t.limit); err != nil {
			t.err = err
			break
		}
		skip := t.res.GetSkippedResults()
		if skip < 0 {
			t.err = errors.New("datastore: internal error: negative number of skipped_results")
			break
		}
		offset -= skip
	}
	if offset < 0 {
		// The server skipped more results than we asked for.
		t.err = errors.New("datastore: internal error: query offset was overshot")
	}
	return t
}
+
// Iterator is the result of running a query.
//
// An Iterator's error state is sticky: once an operation fails, the same
// error is returned by all subsequent calls.
type Iterator struct {
	// c is the context in which the query was run.
	c appengine.Context
	// err is the first error encountered, if any; it is returned by all
	// future calls.
	err error
	// res is the result of the most recent RunQuery or Next API call.
	res pb.QueryResult
	// i is how many elements of res.Result we have iterated over.
	i int
	// limit is the limit on the number of results this iterator should return.
	// A negative value means unlimited.
	limit int32
	// q is the original query which yielded this iterator.
	q *Query
	// prevCC is the compiled cursor that marks the end of the previous batch
	// of results.
	prevCC *pb.CompiledCursor
}
+
// Done is returned when a query iteration has completed.
// It is a sentinel value; callers should compare errors against it with ==.
var Done = errors.New("datastore: query has no more results")
+
+// Next returns the key of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// If the query is not keys only and dst is non-nil, it also loads the entity
+// stored for that key into the struct pointer or PropertyLoadSaver dst, with
+// the same semantics and possible errors as for the Get function.
+func (t *Iterator) Next(dst interface{}) (*Key, error) {
+ k, e, err := t.next()
+ if err != nil {
+ return nil, err
+ }
+ if dst != nil && !t.q.keysOnly {
+ err = loadEntity(dst, e)
+ }
+ return k, err
+}
+
// next returns the key and raw entity proto of the next result, fetching
// further batches from the server as needed. It returns Done when the
// iteration is complete. Errors are sticky: once t.err is set, every
// subsequent call returns it.
func (t *Iterator) next() (*Key, *pb.EntityProto, error) {
	if t.err != nil {
		return nil, nil, t.err
	}

	// Issue datastore_v3/Next RPCs as necessary.
	for t.i == len(t.res.Result) {
		if !t.res.GetMoreResults() {
			t.err = Done
			return nil, nil, t.err
		}
		t.prevCC = t.res.CompiledCursor
		if err := callNext(t.c, &t.res, 0, t.limit); err != nil {
			t.err = err
			return nil, nil, t.err
		}
		// Run already consumed the query's offset, so the server should not
		// skip anything here.
		if t.res.GetSkippedResults() != 0 {
			t.err = errors.New("datastore: internal error: iterator has skipped results")
			return nil, nil, t.err
		}
		t.i = 0
		if t.limit >= 0 {
			// Decrement the remaining limit by the size of this batch.
			t.limit -= int32(len(t.res.Result))
			if t.limit < 0 {
				t.err = errors.New("datastore: internal error: query returned more results than the limit")
				return nil, nil, t.err
			}
		}
	}

	// Extract the key from the t.i'th element of t.res.Result.
	e := t.res.Result[t.i]
	t.i++
	if e.Key == nil {
		return nil, nil, errors.New("datastore: internal error: server did not return a key")
	}
	k, err := protoToKey(e.Key)
	if err != nil || k.Incomplete() {
		return nil, nil, errors.New("datastore: internal error: server returned an invalid key")
	}
	return k, e, nil
}
+
// Cursor returns a cursor for the iterator's current location.
//
// When the iterator sits in the middle of a batch, a zero-limit query is
// re-run to obtain a compiled cursor at the exact position; that re-run is
// best-effort and may be inconsistent under concurrent modification.
func (t *Iterator) Cursor() (Cursor, error) {
	if t.err != nil && t.err != Done {
		return Cursor{}, t.err
	}
	// If we are at either end of the current batch of results,
	// return the compiled cursor at that end.
	skipped := t.res.GetSkippedResults()
	if t.i == 0 && skipped == 0 {
		if t.prevCC == nil {
			// A nil pointer (of type *pb.CompiledCursor) means no constraint:
			// passing it as the end cursor of a new query means unlimited results
			// (glossing over the integer limit parameter for now).
			// A non-nil pointer to an empty pb.CompiledCursor means the start:
			// passing it as the end cursor of a new query means 0 results.
			// If prevCC was nil, then the original query had no start cursor, but
			// Iterator.Cursor should return "the start" instead of unlimited.
			return Cursor{&zeroCC}, nil
		}
		return Cursor{t.prevCC}, nil
	}
	if t.i == len(t.res.Result) {
		return Cursor{t.res.CompiledCursor}, nil
	}
	// Otherwise, re-run the query offset to this iterator's position, starting from
	// the most recent compiled cursor. This is done on a best-effort basis, as it
	// is racy; if a concurrent process has added or removed entities, then the
	// cursor returned may be inconsistent.
	q := t.q.clone()
	q.start = t.prevCC
	q.offset = skipped + int32(t.i)
	// limit 0 means we only want the cursor, not any results.
	q.limit = 0
	// A keys-only query is cheaper, but is only valid when not projecting.
	q.keysOnly = len(q.projection) == 0
	t1 := q.Run(t.c)
	_, _, err := t1.next()
	if err != Done {
		if err == nil {
			err = fmt.Errorf("datastore: internal error: zero-limit query did not have zero results")
		}
		return Cursor{}, err
	}
	return Cursor{t1.res.CompiledCursor}, nil
}
+
// zeroCC is a non-nil, empty compiled cursor, used to represent the start of
// a query's results (see the comment in Iterator.Cursor).
var zeroCC pb.CompiledCursor
+
// Cursor is an iterator's position. It can be converted to and from an opaque
// string. A cursor can be used from different HTTP requests, but only with a
// query with the same kind, ancestor, filter and order constraints.
type Cursor struct {
	// cc is the wire-format compiled cursor; nil encodes to the empty string.
	cc *pb.CompiledCursor
}
+
+// String returns a base-64 string representation of a cursor.
+func (c Cursor) String() string {
+ if c.cc == nil {
+ return ""
+ }
+ b, err := proto.Marshal(c.cc)
+ if err != nil {
+ // The only way to construct a Cursor with a non-nil cc field is to
+ // unmarshal from the byte representation. We panic if the unmarshal
+ // succeeds but the marshaling of the unchanged protobuf value fails.
+ panic(fmt.Sprintf("datastore: internal error: malformed cursor: %v", err))
+ }
+ return strings.TrimRight(base64.URLEncoding.EncodeToString(b), "=")
+}
+
+// Decode decodes a cursor from its base-64 string representation.
+func DecodeCursor(s string) (Cursor, error) {
+ if s == "" {
+ return Cursor{&zeroCC}, nil
+ }
+ if n := len(s) % 4; n != 0 {
+ s += strings.Repeat("=", 4-n)
+ }
+ b, err := base64.URLEncoding.DecodeString(s)
+ if err != nil {
+ return Cursor{}, err
+ }
+ cc := &pb.CompiledCursor{}
+ if err := proto.Unmarshal(b, cc); err != nil {
+ return Cursor{}, err
+ }
+ return Cursor{cc}, nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/query_test.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/query_test.go
new file mode 100644
index 000000000000..b6a858b669a3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/query_test.go
@@ -0,0 +1,580 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
// path1 and path2 are the entity paths used by fakeRunQuery below:
// path1 is a root Gopher entity (id 6); path2 is a Gopher (id 8) whose
// ancestor is that root entity.
var (
	path1 = &pb.Path{
		Element: []*pb.Path_Element{
			{
				Type: proto.String("Gopher"),
				Id:   proto.Int64(6),
			},
		},
	}
	path2 = &pb.Path{
		Element: []*pb.Path_Element{
			{
				Type: proto.String("Gopher"),
				Id:   proto.Int64(6),
			},
			{
				Type: proto.String("Gopher"),
				Id:   proto.Int64(8),
			},
		},
	}
)
+
// fakeRunQuery stands in for the datastore_v3/RunQuery RPC in tests: it
// checks that in is the expected query for kind "Gopher" and fills out with
// two fake entities — George (who has a Height property) and Rufus (who
// does not).
func fakeRunQuery(in *pb.Query, out *pb.QueryResult) error {
	expectedIn := &pb.Query{
		App:     proto.String("dev~fake-app"),
		Kind:    proto.String("Gopher"),
		Compile: proto.Bool(true),
	}
	if !proto.Equal(in, expectedIn) {
		return fmt.Errorf("unsupported argument: got %v want %v", in, expectedIn)
	}
	*out = pb.QueryResult{
		Result: []*pb.EntityProto{
			{
				Key: &pb.Reference{
					App:  proto.String("s~test-app"),
					Path: path1,
				},
				EntityGroup: path1,
				Property: []*pb.Property{
					{
						Meaning: pb.Property_TEXT.Enum(),
						Name:    proto.String("Name"),
						Value: &pb.PropertyValue{
							StringValue: proto.String("George"),
						},
					},
					{
						Name: proto.String("Height"),
						Value: &pb.PropertyValue{
							Int64Value: proto.Int64(32),
						},
					},
				},
			},
			{
				Key: &pb.Reference{
					App:  proto.String("s~test-app"),
					Path: path2,
				},
				EntityGroup: path1, // ancestor is George
				Property: []*pb.Property{
					{
						Meaning: pb.Property_TEXT.Enum(),
						Name:    proto.String("Name"),
						Value: &pb.PropertyValue{
							StringValue: proto.String("Rufus"),
						},
					},
					// No height for Rufus.
				},
			},
		},
		MoreResults: proto.Bool(false),
	}
	return nil
}
+
// StructThatImplementsPLS is a struct whose value type implements
// PropertyLoadSaver with no-op Load and Save methods.
type StructThatImplementsPLS struct{}

func (StructThatImplementsPLS) Load(p []Property) error { return nil }
func (StructThatImplementsPLS) Save() ([]Property, error) { return nil, nil }

// Compile-time check that the value type satisfies PropertyLoadSaver.
var _ PropertyLoadSaver = StructThatImplementsPLS{}
+
// StructPtrThatImplementsPLS is a struct whose *pointer* type implements
// PropertyLoadSaver with no-op Load and Save methods.
type StructPtrThatImplementsPLS struct{}

func (*StructPtrThatImplementsPLS) Load(p []Property) error { return nil }
func (*StructPtrThatImplementsPLS) Save() ([]Property, error) { return nil, nil }

// Compile-time check that the pointer type satisfies PropertyLoadSaver.
var _ PropertyLoadSaver = &StructPtrThatImplementsPLS{}
+
+type PropertyMap map[string]Property
+
+func (m PropertyMap) Load(props []Property) error {
+ for _, p := range props {
+ if p.Multiple {
+ return errors.New("PropertyMap does not support multiple properties")
+ }
+ m[p.Name] = p
+ }
+ return nil
+}
+
+func (m PropertyMap) Save() ([]Property, error) {
+ props := make([]Property, 0, len(m))
+ for _, p := range m {
+ if p.Multiple {
+ return nil, errors.New("PropertyMap does not support multiple properties")
+ }
+ props = append(props, p)
+ }
+ return props, nil
+}
+
+var _ PropertyLoadSaver = PropertyMap{}
+
// Gopher is the concrete struct type used throughout the query tests; its
// fields correspond to the Name and Height properties that fakeRunQuery
// returns.
type Gopher struct {
	Name   string
	Height int
}
+
// typeOfEmptyInterface is the type of interface{}, but we can't use
// reflect.TypeOf((interface{})(nil)) directly because TypeOf takes an
// interface{}. Taking the Elem of a *interface{} type is the standard trick
// for obtaining the reflect.Type of an interface type.
var typeOfEmptyInterface = reflect.TypeOf((*interface{})(nil)).Elem()
+
// TestCheckMultiArg verifies checkMultiArg's classification of slice element
// types into the multiArgType categories, and its rejection of unsupported
// destination types.
func TestCheckMultiArg(t *testing.T) {
	testCases := []struct {
		v        interface{}
		mat      multiArgType
		elemType reflect.Type
	}{
		// Invalid cases.
		{nil, multiArgTypeInvalid, nil},
		{Gopher{}, multiArgTypeInvalid, nil},
		{&Gopher{}, multiArgTypeInvalid, nil},
		{PropertyList{}, multiArgTypeInvalid, nil}, // This is a special case.
		{PropertyMap{}, multiArgTypeInvalid, nil},
		{[]*PropertyList(nil), multiArgTypeInvalid, nil},
		{[]*PropertyMap(nil), multiArgTypeInvalid, nil},
		{[]**Gopher(nil), multiArgTypeInvalid, nil},
		{[]*interface{}(nil), multiArgTypeInvalid, nil},
		// Valid cases.
		{
			[]PropertyList(nil),
			multiArgTypePropertyLoadSaver,
			reflect.TypeOf(PropertyList{}),
		},
		{
			[]PropertyMap(nil),
			multiArgTypePropertyLoadSaver,
			reflect.TypeOf(PropertyMap{}),
		},
		{
			[]StructThatImplementsPLS(nil),
			multiArgTypePropertyLoadSaver,
			reflect.TypeOf(StructThatImplementsPLS{}),
		},
		{
			[]StructPtrThatImplementsPLS(nil),
			multiArgTypePropertyLoadSaver,
			reflect.TypeOf(StructPtrThatImplementsPLS{}),
		},
		{
			[]Gopher(nil),
			multiArgTypeStruct,
			reflect.TypeOf(Gopher{}),
		},
		{
			[]*Gopher(nil),
			multiArgTypeStructPtr,
			reflect.TypeOf(Gopher{}),
		},
		{
			[]interface{}(nil),
			multiArgTypeInterface,
			typeOfEmptyInterface,
		},
	}
	for _, tc := range testCases {
		mat, elemType := checkMultiArg(reflect.ValueOf(tc.v))
		if mat != tc.mat || elemType != tc.elemType {
			t.Errorf("checkMultiArg(%T): got %v, %v want %v, %v",
				tc.v, mat, elemType, tc.mat, tc.elemType)
		}
	}
}
+
// TestSimpleQuery runs GetAll with every supported (and many unsupported)
// destination types against a faked RunQuery RPC, checking the returned
// keys, the loaded entities, and that the RPC is not issued at all for
// invalid destinations.
func TestSimpleQuery(t *testing.T) {
	// The expected entities in each of the supported representations.
	struct1 := Gopher{Name: "George", Height: 32}
	struct2 := Gopher{Name: "Rufus"}
	pList1 := PropertyList{
		{
			Name:  "Name",
			Value: "George",
		},
		{
			Name:  "Height",
			Value: int64(32),
		},
	}
	pList2 := PropertyList{
		{
			Name:  "Name",
			Value: "Rufus",
		},
	}
	pMap1 := PropertyMap{
		"Name": Property{
			Name:  "Name",
			Value: "George",
		},
		"Height": Property{
			Name:  "Height",
			Value: int64(32),
		},
	}
	pMap2 := PropertyMap{
		"Name": Property{
			Name:  "Name",
			Value: "Rufus",
		},
	}

	testCases := []struct {
		dst  interface{}
		want interface{}
	}{
		// The destination must have type *[]P, *[]S or *[]*S, for some non-interface
		// type P such that *P implements PropertyLoadSaver, or for some struct type S.
		{new([]Gopher), &[]Gopher{struct1, struct2}},
		{new([]*Gopher), &[]*Gopher{&struct1, &struct2}},
		{new([]PropertyList), &[]PropertyList{pList1, pList2}},
		{new([]PropertyMap), &[]PropertyMap{pMap1, pMap2}},

		// Any other destination type is invalid.
		{0, nil},
		{Gopher{}, nil},
		{PropertyList{}, nil},
		{PropertyMap{}, nil},
		{[]int{}, nil},
		{[]Gopher{}, nil},
		{[]PropertyList{}, nil},
		{new(int), nil},
		{new(Gopher), nil},
		{new(PropertyList), nil}, // This is a special case.
		{new(PropertyMap), nil},
		{new([]int), nil},
		{new([]map[int]int), nil},
		{new([]map[string]Property), nil},
		{new([]map[string]interface{}), nil},
		{new([]*int), nil},
		{new([]*map[int]int), nil},
		{new([]*map[string]Property), nil},
		{new([]*map[string]interface{}), nil},
		{new([]**Gopher), nil},
		{new([]*PropertyList), nil},
		{new([]*PropertyMap), nil},
	}
	for _, tc := range testCases {
		// Count RPCs so we can assert that invalid destinations are rejected
		// before any call is made.
		nCall := 0
		c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
			nCall++
			return fakeRunQuery(in, out)
		})

		var (
			expectedErr   error
			expectedNCall int
		)
		if tc.want == nil {
			expectedErr = ErrInvalidEntityType
		} else {
			expectedNCall = 1
		}
		keys, err := NewQuery("Gopher").GetAll(c, tc.dst)
		if err != expectedErr {
			t.Errorf("dst type %T: got error %v, want %v", tc.dst, err, expectedErr)
			continue
		}
		if nCall != expectedNCall {
			t.Errorf("dst type %T: Context.Call was called an incorrect number of times: got %d want %d", tc.dst, nCall, expectedNCall)
			continue
		}
		if err != nil {
			continue
		}

		// The fake returns George (root, id 6) and Rufus (child, id 8).
		key1 := NewKey(c, "Gopher", "", 6, nil)
		expectedKeys := []*Key{
			key1,
			NewKey(c, "Gopher", "", 8, key1),
		}
		if l1, l2 := len(keys), len(expectedKeys); l1 != l2 {
			t.Errorf("dst type %T: got %d keys, want %d keys", tc.dst, l1, l2)
			continue
		}
		for i, key := range keys {
			if key.AppID() != "s~test-app" {
				t.Errorf(`dst type %T: Key #%d's AppID = %q, want "s~test-app"`, tc.dst, i, key.AppID())
				continue
			}
			if !keysEqual(key, expectedKeys[i]) {
				t.Errorf("dst type %T: got key #%d %v, want %v", tc.dst, i, key, expectedKeys[i])
				continue
			}
		}

		if !reflect.DeepEqual(tc.dst, tc.want) {
			t.Errorf("dst type %T: Entities got %+v, want %+v", tc.dst, tc.dst, tc.want)
			continue
		}
	}
}
+
+// keysEqual is like (*Key).Equal, but ignores the App ID.
+func keysEqual(a, b *Key) bool {
+ for a != nil && b != nil {
+ if a.Kind() != b.Kind() || a.StringID() != b.StringID() || a.IntID() != b.IntID() {
+ return false
+ }
+ a, b = a.Parent(), b.Parent()
+ }
+ return a == b
+}
+
// TestQueriesAreImmutable checks that the Query builder methods return
// derived copies rather than mutating their receiver.
func TestQueriesAreImmutable(t *testing.T) {
	// Test that deriving q2 from q1 does not modify q1.
	q0 := NewQuery("foo")
	q1 := NewQuery("foo")
	q2 := q1.Offset(2)
	if !reflect.DeepEqual(q0, q1) {
		t.Errorf("q0 and q1 were not equal")
	}
	if reflect.DeepEqual(q1, q2) {
		t.Errorf("q1 and q2 were equal")
	}

	// Test that deriving from q4 twice does not conflict, even though
	// q4 has a long list of order clauses. This tests that the arrays
	// backed by a query's slice of orders are not shared.
	f := func() *Query {
		q := NewQuery("bar")
		// 47 is an ugly number that is unlikely to be near a re-allocation
		// point in repeated append calls. For example, it's not near a power
		// of 2 or a multiple of 10.
		for i := 0; i < 47; i++ {
			q = q.Order(fmt.Sprintf("x%d", i))
		}
		return q
	}
	q3 := f().Order("y")
	q4 := f()
	q5 := q4.Order("y")
	q6 := q4.Order("z")
	if !reflect.DeepEqual(q3, q5) {
		t.Errorf("q3 and q5 were not equal")
	}
	if reflect.DeepEqual(q5, q6) {
		t.Errorf("q5 and q6 were equal")
	}
}
+
// TestFilterParser exercises Query.Filter's parsing of the "field op"
// filter string: supported operators, valid-but-unsupported ones, and
// outright invalid ones.
func TestFilterParser(t *testing.T) {
	testCases := []struct {
		filterStr     string
		wantOK        bool
		wantFieldName string
		wantOp        operator
	}{
		// Supported ops.
		{"x<", true, "x", lessThan},
		{"x <", true, "x", lessThan},
		{"x <", true, "x", lessThan},
		{" x < ", true, "x", lessThan},
		{"x <=", true, "x", lessEq},
		{"x =", true, "x", equal},
		{"x >=", true, "x", greaterEq},
		{"x >", true, "x", greaterThan},
		{"in >", true, "in", greaterThan},
		{"in>", true, "in", greaterThan},
		// Valid but (currently) unsupported ops.
		{"x!=", false, "", 0},
		{"x !=", false, "", 0},
		{" x != ", false, "", 0},
		{"x IN", false, "", 0},
		{"x in", false, "", 0},
		// Invalid ops.
		{"x EQ", false, "", 0},
		{"x lt", false, "", 0},
		{"x <>", false, "", 0},
		{"x >>", false, "", 0},
		{"x ==", false, "", 0},
		{"x =<", false, "", 0},
		{"x =>", false, "", 0},
		{"x !", false, "", 0},
		{"x ", false, "", 0},
		{"x", false, "", 0},
	}
	for _, tc := range testCases {
		q := NewQuery("foo").Filter(tc.filterStr, 42)
		// A parse failure is recorded on the query, not returned directly.
		if ok := q.err == nil; ok != tc.wantOK {
			t.Errorf("%q: ok=%t, want %t", tc.filterStr, ok, tc.wantOK)
			continue
		}
		if !tc.wantOK {
			continue
		}
		if len(q.filter) != 1 {
			t.Errorf("%q: len=%d, want %d", tc.filterStr, len(q.filter), 1)
			continue
		}
		got, want := q.filter[0], filter{tc.wantFieldName, tc.wantOp, 42}
		if got != want {
			t.Errorf("%q: got %v, want %v", tc.filterStr, got, want)
			continue
		}
	}
}
+
// TestQueryToProto checks the pb.Query wire representation produced for a
// variety of queries, and the error messages produced for malformed ones.
// Error expectations are matched with strings.Contains, so some are
// deliberately truncated substrings.
func TestQueryToProto(t *testing.T) {
	// The context is required to make Keys for the test cases.
	var got *pb.Query
	NoErr := errors.New("No error")
	c := aetesting.FakeSingleContext(t, "datastore_v3", "RunQuery", func(in *pb.Query, out *pb.QueryResult) error {
		got = in
		return NoErr // return a non-nil error so Run doesn't keep going.
	})

	testCases := []struct {
		desc  string
		query *Query
		want  *pb.Query
		err   string
	}{
		{
			desc:  "empty",
			query: NewQuery(""),
			want:  &pb.Query{},
		},
		{
			desc:  "standard query",
			query: NewQuery("kind").Order("-I").Filter("I >", 17).Filter("U =", "Dave").Limit(7).Offset(42),
			want: &pb.Query{
				Kind: proto.String("kind"),
				Filter: []*pb.Query_Filter{
					{
						Op: pb.Query_Filter_GREATER_THAN.Enum(),
						Property: []*pb.Property{
							{
								Name:     proto.String("I"),
								Value:    &pb.PropertyValue{Int64Value: proto.Int64(17)},
								Multiple: proto.Bool(false),
							},
						},
					},
					{
						Op: pb.Query_Filter_EQUAL.Enum(),
						Property: []*pb.Property{
							{
								Name:     proto.String("U"),
								Value:    &pb.PropertyValue{StringValue: proto.String("Dave")},
								Multiple: proto.Bool(false),
							},
						},
					},
				},
				Order: []*pb.Query_Order{
					{
						Property:  proto.String("I"),
						Direction: pb.Query_Order_DESCENDING.Enum(),
					},
				},
				Limit:  proto.Int32(7),
				Offset: proto.Int32(42),
			},
		},
		{
			desc:  "ancestor",
			query: NewQuery("").Ancestor(NewKey(c, "kind", "Mummy", 0, nil)),
			want: &pb.Query{
				Ancestor: &pb.Reference{
					App: proto.String("dev~fake-app"),
					Path: &pb.Path{
						Element: []*pb.Path_Element{{Type: proto.String("kind"), Name: proto.String("Mummy")}},
					},
				},
			},
		},
		{
			desc:  "projection",
			query: NewQuery("").Project("A", "B"),
			want: &pb.Query{
				PropertyName: []string{"A", "B"},
			},
		},
		{
			desc:  "projection with distinct",
			query: NewQuery("").Project("A", "B").Distinct(),
			want: &pb.Query{
				PropertyName:        []string{"A", "B"},
				GroupByPropertyName: []string{"A", "B"},
			},
		},
		{
			desc:  "keys only",
			query: NewQuery("").KeysOnly(),
			want: &pb.Query{
				KeysOnly:           proto.Bool(true),
				RequirePerfectPlan: proto.Bool(true),
			},
		},
		{
			desc:  "empty filter",
			query: NewQuery("kind").Filter("=", 17),
			// Deliberately truncated; matched as a substring below.
			err: "empty query filter field nam",
		},
		{
			desc:  "bad filter type",
			query: NewQuery("kind").Filter("M =", map[string]bool{}),
			err:   "bad query filter value type",
		},
		{
			desc:  "bad filter operator",
			query: NewQuery("kind").Filter("I <<=", 17),
			err:   `invalid operator "<<=" in filter "I <<="`,
		},
		{
			desc:  "empty order",
			query: NewQuery("kind").Order(""),
			err:   "empty order",
		},
		{
			desc:  "bad order direction",
			query: NewQuery("kind").Order("+I"),
			// Deliberately truncated; matched as a substring below.
			err: `invalid order: "+I`,
		},
	}

	for _, tt := range testCases {
		got = nil
		if _, err := tt.query.Run(c).Next(nil); err != NoErr {
			if tt.err == "" || !strings.Contains(err.Error(), tt.err) {
				t.Errorf("%s: error %v, want %q", tt.desc, err, tt.err)
			}
			continue
		}
		if tt.err != "" {
			t.Errorf("%s: no error, want %q", tt.desc, tt.err)
			continue
		}
		// Fields that are common to all protos.
		tt.want.App = proto.String("dev~fake-app")
		tt.want.Compile = proto.Bool(true)
		if !proto.Equal(got, tt.want) {
			t.Errorf("%s:\ngot %v\nwant %v", tt.desc, got, tt.want)
		}
	}
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/save.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/save.go
new file mode 100644
index 000000000000..6aeffb6316c4
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/save.go
@@ -0,0 +1,300 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+ "fmt"
+ "math"
+ "reflect"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func toUnixMicro(t time.Time) int64 {
+ // We cannot use t.UnixNano() / 1e3 because we want to handle times more than
+ // 2^63 nanoseconds (which is about 292 years) away from 1970, and those cannot
+ // be represented in the numerator of a single int64 divide.
+ return t.Unix()*1e6 + int64(t.Nanosecond()/1e3)
+}
+
+func fromUnixMicro(t int64) time.Time {
+ return time.Unix(t/1e6, (t%1e6)*1e3)
+}
+
// minTime and maxTime bound the times representable as a datastore
// timestamp, i.e. an int64 count of microseconds since the Unix epoch.
var (
	minTime = time.Unix(int64(math.MinInt64)/1e6, (int64(math.MinInt64)%1e6)*1e3)
	maxTime = time.Unix(int64(math.MaxInt64)/1e6, (int64(math.MaxInt64)%1e6)*1e3)
)
+
// valueToProto converts a named value to a newly allocated Property.
// The returned error string is empty on success.
//
// The value's reflect.Kind selects the wire representation; a second pass
// over the dynamic type sets the property's Meaning for the special
// datastore types ([]byte, ByteString, BlobKey, time.Time, GeoPoint).
func valueToProto(defaultAppID, name string, v reflect.Value, multiple bool) (p *pb.Property, errStr string) {
	var (
		pv          pb.PropertyValue
		unsupported bool
	)
	switch v.Kind() {
	case reflect.Invalid:
		// No-op.
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		pv.Int64Value = proto.Int64(v.Int())
	case reflect.Bool:
		pv.BooleanValue = proto.Bool(v.Bool())
	case reflect.String:
		pv.StringValue = proto.String(v.String())
	case reflect.Float32, reflect.Float64:
		pv.DoubleValue = proto.Float64(v.Float())
	case reflect.Ptr:
		// *Key is the only supported pointer type; a nil *Key is stored as
		// a property with no value.
		if k, ok := v.Interface().(*Key); ok {
			if k != nil {
				pv.Referencevalue = keyToReferenceValue(defaultAppID, k)
			}
		} else {
			unsupported = true
		}
	case reflect.Struct:
		switch t := v.Interface().(type) {
		case time.Time:
			if t.Before(minTime) || t.After(maxTime) {
				return nil, "time value out of range"
			}
			pv.Int64Value = proto.Int64(toUnixMicro(t))
		case appengine.GeoPoint:
			if !t.Valid() {
				return nil, "invalid GeoPoint value"
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			pv.Pointvalue = &pb.PropertyValue_PointValue{X: &t.Lat, Y: &t.Lng}
		default:
			unsupported = true
		}
	case reflect.Slice:
		if b, ok := v.Interface().([]byte); ok {
			pv.StringValue = proto.String(string(b))
		} else {
			// nvToProto should already catch slice values.
			// If we get here, we have a slice of slice values.
			unsupported = true
		}
	default:
		unsupported = true
	}
	if unsupported {
		return nil, "unsupported datastore value type: " + v.Type().String()
	}
	p = &pb.Property{
		Name:     proto.String(name),
		Value:    &pv,
		Multiple: proto.Bool(multiple),
	}
	if v.IsValid() {
		// Attach the Meaning annotation for special datastore types.
		switch v.Interface().(type) {
		case []byte:
			p.Meaning = pb.Property_BLOB.Enum()
		case ByteString:
			p.Meaning = pb.Property_BYTESTRING.Enum()
		case appengine.BlobKey:
			p.Meaning = pb.Property_BLOBKEY.Enum()
		case time.Time:
			p.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.GeoPoint:
			p.Meaning = pb.Property_GEORSS_POINT.Enum()
		}
	}
	return p, ""
}
+
+// saveEntity saves an EntityProto into a PropertyLoadSaver or struct pointer.
+func saveEntity(defaultAppID string, key *Key, src interface{}) (*pb.EntityProto, error) {
+ var err error
+ var props []Property
+ if e, ok := src.(PropertyLoadSaver); ok {
+ props, err = e.Save()
+ } else {
+ props, err = SaveStruct(src)
+ }
+ if err != nil {
+ return nil, err
+ }
+ return propertiesToProto(defaultAppID, key, props)
+}
+
// saveStructProperty appends the Property for one struct field value v to
// *props. name is the (possibly prefixed) property name; noIndex and
// multiple carry the field's indexing and multi-valued flags. Special
// datastore types are stored as-is; other values are dispatched on kind,
// and nested structs are flattened recursively via their structPLS.
func saveStructProperty(props *[]Property, name string, noIndex, multiple bool, v reflect.Value) error {
	p := Property{
		Name:     name,
		NoIndex:  noIndex,
		Multiple: multiple,
	}
	switch x := v.Interface().(type) {
	case *Key:
		p.Value = x
	case time.Time:
		p.Value = x
	case appengine.BlobKey:
		p.Value = x
	case appengine.GeoPoint:
		p.Value = x
	case ByteString:
		p.Value = x
	default:
		switch v.Kind() {
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			p.Value = v.Int()
		case reflect.Bool:
			p.Value = v.Bool()
		case reflect.String:
			p.Value = v.String()
		case reflect.Float32, reflect.Float64:
			p.Value = v.Float()
		case reflect.Slice:
			// Only []byte slices are handled here; they are never indexed.
			if v.Type().Elem().Kind() == reflect.Uint8 {
				p.NoIndex = true
				p.Value = v.Bytes()
			}
		case reflect.Struct:
			if !v.CanAddr() {
				return fmt.Errorf("datastore: unsupported struct field: value is unaddressable")
			}
			// Flatten the sub-struct's fields under this field's name prefix.
			sub, err := newStructPLS(v.Addr().Interface())
			if err != nil {
				return fmt.Errorf("datastore: unsupported struct field: %v", err)
			}
			return sub.(structPLS).save(props, name, noIndex, multiple)
		}
	}
	// A nil Value means no case above matched (e.g. a non-byte slice).
	if p.Value == nil {
		return fmt.Errorf("datastore: unsupported struct field type: %v", v.Type())
	}
	*props = append(*props, p)
	return nil
}
+
+func (s structPLS) Save() ([]Property, error) {
+ var props []Property
+ if err := s.save(&props, "", false, false); err != nil {
+ return nil, err
+ }
+ return props, nil
+}
+
// save appends a Property for each of the struct's codec-described fields to
// *props. prefix is prepended to each property name (used when flattening
// nested structs); noIndex and multiple are inherited flags from the
// enclosing context.
func (s structPLS) save(props *[]Property, prefix string, noIndex, multiple bool) error {
	for i, t := range s.codec.byIndex {
		// A "-" name means the field is explicitly skipped.
		if t.name == "-" {
			continue
		}
		name := t.name
		if prefix != "" {
			name = prefix + name
		}
		v := s.v.Field(i)
		// Skip invalid or unexported (unsettable) fields.
		if !v.IsValid() || !v.CanSet() {
			continue
		}
		noIndex1 := noIndex || t.noIndex
		// For slice fields that aren't []byte, save each element.
		if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 {
			for j := 0; j < v.Len(); j++ {
				if err := saveStructProperty(props, name, noIndex1, true, v.Index(j)); err != nil {
					return err
				}
			}
			continue
		}
		// Otherwise, save the field itself.
		if err := saveStructProperty(props, name, noIndex1, multiple, v); err != nil {
			return err
		}
	}
	return nil
}
+
// propertiesToProto assembles an EntityProto for key from a flat list of
// properties. It validates that repeated property names are consistently
// marked Multiple, routes unindexed properties to RawProperty, and enforces
// the indexed-property count limit.
func propertiesToProto(defaultAppID string, key *Key, props []Property) (*pb.EntityProto, error) {
	e := &pb.EntityProto{
		Key: keyToProto(defaultAppID, key),
	}
	// The entity group is the root of the key's ancestor path.
	if key.parent == nil {
		e.EntityGroup = &pb.Path{}
	} else {
		e.EntityGroup = keyToProto(defaultAppID, key.root()).Path
	}
	prevMultiple := make(map[string]bool)

	for _, p := range props {
		// A name may repeat only if every occurrence is marked Multiple.
		if pm, ok := prevMultiple[p.Name]; ok {
			if !pm || !p.Multiple {
				return nil, fmt.Errorf("datastore: multiple Properties with Name %q, but Multiple is false", p.Name)
			}
		} else {
			prevMultiple[p.Name] = p.Multiple
		}

		x := &pb.Property{
			Name:     proto.String(p.Name),
			Value:    new(pb.PropertyValue),
			Multiple: proto.Bool(p.Multiple),
		}
		switch v := p.Value.(type) {
		case int64:
			x.Value.Int64Value = proto.Int64(v)
		case bool:
			x.Value.BooleanValue = proto.Bool(v)
		case string:
			x.Value.StringValue = proto.String(v)
			if p.NoIndex {
				x.Meaning = pb.Property_TEXT.Enum()
			}
		case float64:
			x.Value.DoubleValue = proto.Float64(v)
		case *Key:
			// A nil *Key is stored as a property with no value.
			if v != nil {
				x.Value.Referencevalue = keyToReferenceValue(defaultAppID, v)
			}
		case time.Time:
			if v.Before(minTime) || v.After(maxTime) {
				return nil, fmt.Errorf("datastore: time value out of range")
			}
			x.Value.Int64Value = proto.Int64(toUnixMicro(v))
			x.Meaning = pb.Property_GD_WHEN.Enum()
		case appengine.BlobKey:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOBKEY.Enum()
		case appengine.GeoPoint:
			if !v.Valid() {
				return nil, fmt.Errorf("datastore: invalid GeoPoint value")
			}
			// NOTE: Strangely, latitude maps to X, longitude to Y.
			x.Value.Pointvalue = &pb.PropertyValue_PointValue{X: &v.Lat, Y: &v.Lng}
			x.Meaning = pb.Property_GEORSS_POINT.Enum()
		case []byte:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BLOB.Enum()
			if !p.NoIndex {
				return nil, fmt.Errorf("datastore: cannot index a []byte valued Property with Name %q", p.Name)
			}
		case ByteString:
			x.Value.StringValue = proto.String(string(v))
			x.Meaning = pb.Property_BYTESTRING.Enum()
		default:
			// nil is allowed (an empty value); anything else is rejected.
			if p.Value != nil {
				return nil, fmt.Errorf("datastore: invalid Value type for a Property with Name %q", p.Name)
			}
		}

		if p.NoIndex {
			e.RawProperty = append(e.RawProperty, x)
		} else {
			e.Property = append(e.Property, x)
			if len(e.Property) > maxIndexedProperties {
				return nil, errors.New("datastore: too many indexed properties")
			}
		}
	}
	return e, nil
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/time_test.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/time_test.go
new file mode 100644
index 000000000000..ba74b449eb1f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/time_test.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "testing"
+ "time"
+)
+
+func TestUnixMicro(t *testing.T) {
+ // Test that all these time.Time values survive a round trip to unix micros.
+ testCases := []time.Time{
+ {},
+ time.Date(2, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(23, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(234, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1600, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1700, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1800, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(1900, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Unix(-1e6, -1000),
+ time.Unix(-1e6, 0),
+ time.Unix(-1e6, +1000),
+ time.Unix(-60, -1000),
+ time.Unix(-60, 0),
+ time.Unix(-60, +1000),
+ time.Unix(-1, -1000),
+ time.Unix(-1, 0),
+ time.Unix(-1, +1000),
+ time.Unix(0, -3000),
+ time.Unix(0, -2000),
+ time.Unix(0, -1000),
+ time.Unix(0, 0),
+ time.Unix(0, +1000),
+ time.Unix(0, +2000),
+ time.Unix(+60, -1000),
+ time.Unix(+60, 0),
+ time.Unix(+60, +1000),
+ time.Unix(+1e6, -1000),
+ time.Unix(+1e6, 0),
+ time.Unix(+1e6, +1000),
+ time.Date(1999, 12, 31, 23, 59, 59, 999000, time.UTC),
+ time.Date(2000, 1, 1, 0, 0, 0, 0, time.UTC),
+ time.Date(2006, 1, 2, 15, 4, 5, 678000, time.UTC),
+ time.Date(2009, 11, 10, 23, 0, 0, 0, time.UTC),
+ time.Date(3456, 1, 1, 0, 0, 0, 0, time.UTC),
+ }
+ for _, tc := range testCases {
+ got := fromUnixMicro(toUnixMicro(tc))
+ if !got.Equal(tc) {
+ t.Errorf("got %q, want %q", got, tc)
+ }
+ }
+
+ // Test that a time.Time that isn't an integral number of microseconds
+ // is not perfectly reconstructed after a round trip.
+ t0 := time.Unix(0, 123)
+ t1 := fromUnixMicro(toUnixMicro(t0))
+ if t1.Nanosecond()%1000 != 0 || t0.Nanosecond()%1000 == 0 {
+ t.Errorf("quantization to µs: got %q with %d ns, started with %d ns", t1, t1.Nanosecond(), t0.Nanosecond())
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/datastore/transaction.go b/Godeps/_workspace/src/google.golang.org/appengine/datastore/transaction.go
new file mode 100644
index 000000000000..284037b6435d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/datastore/transaction.go
@@ -0,0 +1,138 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package datastore
+
+import (
+ "errors"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+func init() {
+ internal.RegisterTransactionSetter(func(x *pb.Query, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.GetRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.PutRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+ internal.RegisterTransactionSetter(func(x *pb.DeleteRequest, t *pb.Transaction) {
+ x.Transaction = t
+ })
+}
+
+// ErrConcurrentTransaction is returned when a transaction is rolled back due
+// to a conflict with a concurrent transaction.
+var ErrConcurrentTransaction = errors.New("datastore: concurrent transaction")
+
+type transaction struct {
+ appengine.Context
+ transaction pb.Transaction
+ finished bool
+}
+
+func (t *transaction) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
+ if t.finished {
+ return errors.New("datastore: transaction context has expired")
+ }
+ internal.ApplyTransaction(in, &t.transaction)
+ return t.Context.Call(service, method, in, out, opts)
+}
+
+func runOnce(c appengine.Context, f func(appengine.Context) error, opts *TransactionOptions) error {
+ // Begin the transaction.
+ t := &transaction{Context: c}
+ req := &pb.BeginTransactionRequest{
+ App: proto.String(c.FullyQualifiedAppID()),
+ }
+ if opts != nil && opts.XG {
+ req.AllowMultipleEg = proto.Bool(true)
+ }
+ if err := t.Context.Call("datastore_v3", "BeginTransaction", req, &t.transaction, nil); err != nil {
+ return err
+ }
+
+ // Call f, rolling back the transaction if f returns a non-nil error, or panics.
+ // The panic is not recovered.
+ defer func() {
+ if t.finished {
+ return
+ }
+ t.finished = true
+ // Ignore the error return value, since we are already returning a non-nil
+ // error (or we're panicking).
+ c.Call("datastore_v3", "Rollback", &t.transaction, &basepb.VoidProto{}, nil)
+ }()
+ if err := f(t); err != nil {
+ return err
+ }
+ t.finished = true
+
+ // Commit the transaction.
+ res := &pb.CommitResponse{}
+ err := c.Call("datastore_v3", "Commit", &t.transaction, res, nil)
+ if ae, ok := err.(*internal.APIError); ok {
+ if appengine.IsDevAppServer() {
+ // The Python Dev AppServer raises an ApplicationError with error code 2 (which is
+ // Error.CONCURRENT_TRANSACTION) and message "Concurrency exception.".
+ if ae.Code == int32(pb.Error_BAD_REQUEST) && ae.Detail == "ApplicationError: 2 Concurrency exception." {
+ return ErrConcurrentTransaction
+ }
+ }
+ if ae.Code == int32(pb.Error_CONCURRENT_TRANSACTION) {
+ return ErrConcurrentTransaction
+ }
+ }
+ return err
+}
+
+// RunInTransaction runs f in a transaction. It calls f with a transaction
+// context tc that f should use for all App Engine operations.
+//
+// If f returns nil, RunInTransaction attempts to commit the transaction,
+// returning nil if it succeeds. If the commit fails due to a conflicting
+// transaction, RunInTransaction retries f, each time with a new transaction
+// context. It gives up and returns ErrConcurrentTransaction after three
+// failed attempts.
+//
+// If f returns non-nil, then any datastore changes will not be applied and
+// RunInTransaction returns that same error. The function f is not retried.
+//
+// Note that when f returns, the transaction is not yet committed. Calling code
+// must be careful not to assume that any of f's changes have been committed
+// until RunInTransaction returns nil.
+//
+// Nested transactions are not supported; c may not be a transaction context.
+func RunInTransaction(c appengine.Context, f func(tc appengine.Context) error, opts *TransactionOptions) error {
+ if _, ok := c.(*transaction); ok {
+ return errors.New("datastore: nested transactions are not supported")
+ }
+ for i := 0; i < 3; i++ {
+ if err := runOnce(c, f, opts); err != ErrConcurrentTransaction {
+ return err
+ }
+ }
+ return ErrConcurrentTransaction
+}
+
+// TransactionOptions are the options for running a transaction.
+type TransactionOptions struct {
+ // XG is whether the transaction can cross multiple entity groups. In
+ // comparison, a single group transaction is one where all datastore keys
+ // used have the same root key. Note that cross group transactions do not
+ // have the same behavior as single group transactions. In particular, it
+ // is much more likely to see partially applied transactions in different
+ // entity groups, in global queries.
+ // It is valid to set XG to true even if the transaction is within a
+ // single entity group.
+ XG bool
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/delay/delay.go b/Godeps/_workspace/src/google.golang.org/appengine/delay/delay.go
new file mode 100644
index 000000000000..458be9d7aead
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/delay/delay.go
@@ -0,0 +1,275 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package delay provides a way to execute code outside the scope of a
+user request by using the taskqueue API.
+
+To declare a function that may be executed later, call Func
+in a top-level assignment context, passing it an arbitrary string key
+and a function whose first argument is of type appengine.Context.
+ var laterFunc = delay.Func("key", myFunc)
+It is also possible to use a function literal.
+ var laterFunc = delay.Func("key", func(c appengine.Context, x string) {
+ // ...
+ })
+
+To call a function, invoke its Call method.
+ laterFunc.Call(c, "something")
+A function may be called any number of times. If the function has any
+return arguments, and the last one is of type error, the function may
+return a non-nil error to signal that the function should be retried.
+
+The arguments to functions may be of any type that is encodable by the gob
+package. If an argument is of interface type, it is the client's responsibility
+to register with the gob package whatever concrete type may be passed for that
+argument; see http://golang.org/pkg/gob/#Register for details.
+
+Any errors during initialization or execution of a function will be
+logged to the application logs. Error logs that occur during initialization will
+be associated with the request that invoked the Call method.
+
+The state of a function invocation that has not yet successfully
+executed is preserved by combining the file name in which it is declared
+with the string key that was passed to the Func function. Updating an app
+with pending function invocations is safe as long as the relevant
+functions have the (filename, key) combination preserved.
+
+The delay package uses the Task Queue API to create tasks that call the
+reserved application path "/_ah/queue/go/delay".
+This path must not be marked as "login: required" in app.yaml;
+it must be marked as "login: admin" or have no access restriction.
+*/
+package delay // import "google.golang.org/appengine/delay"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "reflect"
+ "runtime"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/taskqueue"
+)
+
+// Function represents a function that may have a delayed invocation.
+type Function struct {
+ fv reflect.Value // Kind() == reflect.Func
+ key string
+ err error // any error during initialization
+}
+
+const (
+ // The HTTP path for invocations.
+ path = "/_ah/queue/go/delay"
+ // Use the default queue.
+ queue = ""
+)
+
+var (
+ // registry of all delayed functions
+ funcs = make(map[string]*Function)
+
+ // precomputed types
+ contextType = reflect.TypeOf((*appengine.Context)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+
+ // errors
+ errFirstArg = errors.New("first argument must be appengine.Context")
+)
+
+// Func declares a new Function. The second argument must be a function with a
+// first argument of type appengine.Context.
+// This function must be called at program initialization time. That means it
+// must be called in a global variable declaration or from an init function.
+// This restriction is necessary because the instance that delays a function
+// call may not be the one that executes it. Only the code executed at program
+// initialization time is guaranteed to have been run by an instance before it
+// receives a request.
+func Func(key string, i interface{}) *Function {
+ f := &Function{fv: reflect.ValueOf(i)}
+
+ // Derive unique, somewhat stable key for this func.
+ _, file, _, _ := runtime.Caller(1)
+ f.key = file + ":" + key
+
+ t := f.fv.Type()
+ if t.Kind() != reflect.Func {
+ f.err = errors.New("not a function")
+ return f
+ }
+ if t.NumIn() == 0 || t.In(0) != contextType {
+ f.err = errFirstArg
+ return f
+ }
+
+ // Register the function's arguments with the gob package.
+ // This is required because they are marshaled inside a []interface{}.
+ // gob.Register only expects to be called during initialization;
+ // that's fine because this function expects the same.
+ for i := 0; i < t.NumIn(); i++ {
+ // Only concrete types may be registered. If the argument has
+ // interface type, the client is resposible for registering the
+ // concrete types it will hold.
+ if t.In(i).Kind() == reflect.Interface {
+ continue
+ }
+ gob.Register(reflect.Zero(t.In(i)).Interface())
+ }
+
+ funcs[f.key] = f
+ return f
+}
+
+type invocation struct {
+ Key string
+ Args []interface{}
+}
+
+// Call invokes a delayed function.
+// f.Call(c, ...)
+// is equivalent to
+// t, _ := f.Task(...)
+// taskqueue.Add(c, t, "")
+func (f *Function) Call(c appengine.Context, args ...interface{}) {
+ t, err := f.Task(args...)
+ if err != nil {
+ c.Errorf("%v", err)
+ return
+ }
+ if _, err := taskqueueAdder(c, t, queue); err != nil {
+ c.Errorf("delay: taskqueue.Add failed: %v", err)
+ return
+ }
+}
+
+// Task creates a Task that will invoke the function.
+// Its parameters may be tweaked before adding it to a queue.
+// Users should not modify the Path or Payload fields of the returned Task.
+func (f *Function) Task(args ...interface{}) (*taskqueue.Task, error) {
+ if f.err != nil {
+ return nil, fmt.Errorf("delay: func is invalid: %v", f.err)
+ }
+
+ nArgs := len(args) + 1 // +1 for the appengine.Context
+ ft := f.fv.Type()
+ minArgs := ft.NumIn()
+ if ft.IsVariadic() {
+ minArgs--
+ }
+ if nArgs < minArgs {
+ return nil, fmt.Errorf("delay: too few arguments to func: %d < %d", nArgs, minArgs)
+ }
+ if !ft.IsVariadic() && nArgs > minArgs {
+ return nil, fmt.Errorf("delay: too many arguments to func: %d > %d", nArgs, minArgs)
+ }
+
+ // Check arg types.
+ for i := 1; i < nArgs; i++ {
+ at := reflect.TypeOf(args[i-1])
+ var dt reflect.Type
+ if i < minArgs {
+ // not a variadic arg
+ dt = ft.In(i)
+ } else {
+ // a variadic arg
+ dt = ft.In(minArgs).Elem()
+ }
+ // nil arguments won't have a type, so they need special handling.
+ if at == nil {
+ // nil interface
+ switch dt.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ continue // may be nil
+ }
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not nilable", i, dt)
+ }
+ switch at.Kind() {
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
+ av := reflect.ValueOf(args[i-1])
+ if av.IsNil() {
+ // nil value in interface; not supported by gob, so we replace it
+ // with a nil interface value
+ args[i-1] = nil
+ }
+ }
+ if !at.AssignableTo(dt) {
+ return nil, fmt.Errorf("delay: argument %d has wrong type: %v is not assignable to %v", i, at, dt)
+ }
+ }
+
+ inv := invocation{
+ Key: f.key,
+ Args: args,
+ }
+
+ buf := new(bytes.Buffer)
+ if err := gob.NewEncoder(buf).Encode(inv); err != nil {
+ return nil, fmt.Errorf("delay: gob encoding failed: %v", err)
+ }
+
+ return &taskqueue.Task{
+ Path: path,
+ Payload: buf.Bytes(),
+ }, nil
+}
+
+var taskqueueAdder = taskqueue.Add // for testing
+
+func init() {
+ http.HandleFunc(path, func(w http.ResponseWriter, req *http.Request) {
+ runFunc(appengine.NewContext(req), w, req)
+ })
+}
+
+func runFunc(c appengine.Context, w http.ResponseWriter, req *http.Request) {
+ defer req.Body.Close()
+
+ var inv invocation
+ if err := gob.NewDecoder(req.Body).Decode(&inv); err != nil {
+ c.Errorf("delay: failed decoding task payload: %v", err)
+ c.Warningf("delay: dropping task")
+ return
+ }
+
+ f := funcs[inv.Key]
+ if f == nil {
+ c.Errorf("delay: no func with key %q found", inv.Key)
+ c.Warningf("delay: dropping task")
+ return
+ }
+
+ ft := f.fv.Type()
+ in := []reflect.Value{reflect.ValueOf(c)}
+ for _, arg := range inv.Args {
+ var v reflect.Value
+ if arg != nil {
+ v = reflect.ValueOf(arg)
+ } else {
+ // Task was passed a nil argument, so we must construct
+ // the zero value for the argument here.
+ n := len(in) // we're constructing the nth argument
+ var at reflect.Type
+ if !ft.IsVariadic() || n < ft.NumIn()-1 {
+ at = ft.In(n)
+ } else {
+ at = ft.In(ft.NumIn() - 1).Elem()
+ }
+ v = reflect.Zero(at)
+ }
+ in = append(in, v)
+ }
+ out := f.fv.Call(in)
+
+ if n := ft.NumOut(); n > 0 && ft.Out(n-1) == errorType {
+ if errv := out[n-1]; !errv.IsNil() {
+ c.Errorf("delay: func failed (will retry): %v", errv.Interface())
+ w.WriteHeader(http.StatusInternalServerError)
+ return
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/delay/delay_test.go b/Godeps/_workspace/src/google.golang.org/appengine/delay/delay_test.go
new file mode 100644
index 000000000000..a25479257e61
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/delay/delay_test.go
@@ -0,0 +1,307 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package delay
+
+import (
+ "bytes"
+ "encoding/gob"
+ "errors"
+ "fmt"
+ "net/http"
+ "net/http/httptest"
+ "reflect"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/taskqueue"
+)
+
+type CustomType struct {
+ N int
+}
+
+type CustomInterface interface {
+ N() int
+}
+
+type CustomImpl int
+
+func (c CustomImpl) N() int { return int(c) }
+
+// CustomImpl needs to be registered with gob.
+func init() {
+ gob.Register(CustomImpl(0))
+}
+
+var (
+ invalidFunc = Func("invalid", func() {})
+
+ regFuncRuns = 0
+ regFuncMsg = ""
+ regFunc = Func("reg", func(c appengine.Context, arg string) {
+ regFuncRuns++
+ regFuncMsg = arg
+ })
+
+ custFuncTally = 0
+ custFunc = Func("cust", func(c appengine.Context, ct *CustomType, ci CustomInterface) {
+ a, b := 2, 3
+ if ct != nil {
+ a = ct.N
+ }
+ if ci != nil {
+ b = ci.N()
+ }
+ custFuncTally += a + b
+ })
+
+ anotherCustFunc = Func("cust2", func(c appengine.Context, n int, ct *CustomType, ci CustomInterface) {
+ })
+
+ varFuncMsg = ""
+ varFunc = Func("variadic", func(c appengine.Context, format string, args ...int) {
+ // convert []int to []interface{} for fmt.Sprintf.
+ as := make([]interface{}, len(args))
+ for i, a := range args {
+ as[i] = a
+ }
+ varFuncMsg = fmt.Sprintf(format, as...)
+ })
+
+ errFuncRuns = 0
+ errFuncErr = errors.New("error!")
+ errFunc = Func("err", func(c appengine.Context) error {
+ errFuncRuns++
+ if errFuncRuns == 1 {
+ return nil
+ }
+ return errFuncErr
+ })
+)
+
+type fakeContext struct {
+ appengine.Context
+ logging [][]interface{}
+}
+
+func (f *fakeContext) log(level, format string, args ...interface{}) {
+ f.logging = append(f.logging, append([]interface{}{level, format}, args...))
+}
+
+func (f *fakeContext) Infof(format string, args ...interface{}) { f.log("INFO", format, args...) }
+func (f *fakeContext) Errorf(format string, args ...interface{}) { f.log("ERROR", format, args...) }
+
+func TestInvalidFunction(t *testing.T) {
+ c := &fakeContext{}
+
+ invalidFunc.Call(c)
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "%v", fmt.Errorf("delay: func is invalid: %s", errFirstArg)},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+}
+
+func TestVariadicFunctionArguments(t *testing.T) {
+ // Check the argument type validation for variadic functions.
+
+ c := &fakeContext{}
+
+ calls := 0
+ taskqueueAdder = func(c appengine.Context, t *taskqueue.Task, _ string) (*taskqueue.Task, error) {
+ calls++
+ return t, nil
+ }
+
+ varFunc.Call(c, "hi")
+ varFunc.Call(c, "%d", 12)
+ varFunc.Call(c, "%d %d %d", 3, 1, 4)
+ if calls != 3 {
+ t.Errorf("Got %d calls to taskqueueAdder, want 3", calls)
+ }
+
+ varFunc.Call(c, "%d %s", 12, "a string is bad")
+ wantLogging := [][]interface{}{
+ {"ERROR", "%v", errors.New("delay: argument 3 has wrong type: string is not assignable to int")},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+}
+
+func TestBadArguments(t *testing.T) {
+ // Try running regFunc with different sets of inappropriate arguments.
+
+ c := &fakeContext{}
+
+ regFunc.Call(c)
+ regFunc.Call(c, "lala", 53)
+ regFunc.Call(c, 53)
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "%v", errors.New("delay: too few arguments to func: 1 < 2")},
+ {"ERROR", "%v", errors.New("delay: too many arguments to func: 3 > 2")},
+ {"ERROR", "%v", errors.New("delay: argument 1 has wrong type: int is not assignable to string")},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+}
+
+func TestRunningFunction(t *testing.T) {
+ c := &fakeContext{}
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ regFuncRuns, regFuncMsg = 0, "" // reset state
+ const msg = "Why, hello!"
+ regFunc.Call(c, msg)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c, rw, req)
+
+ if regFuncRuns != 1 {
+ t.Errorf("regFuncRuns: got %d, want 1", regFuncRuns)
+ }
+ if regFuncMsg != msg {
+ t.Errorf("regFuncMsg: got %q, want %q", regFuncMsg, msg)
+ }
+}
+
+func TestCustomType(t *testing.T) {
+ c := &fakeContext{}
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ custFuncTally = 0 // reset state
+ custFunc.Call(c, &CustomType{N: 11}, CustomImpl(13))
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c, rw, req)
+
+ if custFuncTally != 24 {
+ t.Errorf("custFuncTally = %d, want 24", custFuncTally)
+ }
+
+ // Try the same, but with nil values; one is a nil pointer (and thus a non-nil interface value),
+ // and the other is a nil interface value.
+ custFuncTally = 0 // reset state
+ custFunc.Call(c, (*CustomType)(nil), nil)
+
+ // Simulate the Task Queue service.
+ req, err = http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw = httptest.NewRecorder()
+ runFunc(c, rw, req)
+
+ if custFuncTally != 5 {
+ t.Errorf("custFuncTally = %d, want 5", custFuncTally)
+ }
+}
+
+func TestRunningVariadic(t *testing.T) {
+ c := &fakeContext{}
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ varFuncMsg = "" // reset state
+ varFunc.Call(c, "Amiga %d has %d KB RAM", 500, 512)
+
+ // Simulate the Task Queue service.
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c, rw, req)
+
+ const expected = "Amiga 500 has 512 KB RAM"
+ if varFuncMsg != expected {
+ t.Errorf("varFuncMsg = %q, want %q", varFuncMsg, expected)
+ }
+}
+
+func TestErrorFunction(t *testing.T) {
+ c := &fakeContext{}
+
+ // Fake out the adding of a task.
+ var task *taskqueue.Task
+ taskqueueAdder = func(_ appengine.Context, tk *taskqueue.Task, queue string) (*taskqueue.Task, error) {
+ if queue != "" {
+ t.Errorf(`Got queue %q, expected ""`, queue)
+ }
+ task = tk
+ return tk, nil
+ }
+
+ errFunc.Call(c)
+
+ // Simulate the Task Queue service.
+ // The first call should succeed; the second call should fail.
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c, rw, req)
+ }
+ {
+ req, err := http.NewRequest("POST", path, bytes.NewBuffer(task.Payload))
+ if err != nil {
+ t.Fatalf("Failed making http.Request: %v", err)
+ }
+ rw := httptest.NewRecorder()
+ runFunc(c, rw, req)
+ if rw.Code != http.StatusInternalServerError {
+ t.Errorf("Got status code %d, want %d", rw.Code, http.StatusInternalServerError)
+ }
+
+ wantLogging := [][]interface{}{
+ {"ERROR", "delay: func failed (will retry): %v", errFuncErr},
+ }
+ if !reflect.DeepEqual(c.logging, wantLogging) {
+ t.Errorf("Incorrect logging: got %+v, want %+v", c.logging, wantLogging)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/app.yaml b/Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/app.yaml
new file mode 100644
index 000000000000..7837139515f3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/app.yaml
@@ -0,0 +1,19 @@
+# Demo application for Managed VMs.
+application: vm-guestbook
+version: 1
+runtime: go
+vm: true
+api_version: go1
+
+manual_scaling:
+ instances: 1
+
+handlers:
+# Favicon. Without this, the browser hits this once per page view.
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+
+# Main app. All the real work is here.
+- url: /.*
+ script: _go_app
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/favicon.ico b/Godeps/_workspace/src/google.golang.org/appengine/demos/guestbook/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..1a71ea772e972df2e955b36261ae5d7f53b9c9b1
GIT binary patch
literal 1150
zcmd5)OKVd>6rNI{3l|0|#f50WO+XjL$3`~+!3T;Ix^p413yHRhmS9^&ywzgVMH)<-
zCQV34A4!wjylP%GkDGUz=QT;NG>gb*8n4`ye3#{^zkce45EvUvW9N8Y#yV5-i2?n|gRoZc<%s
zmh~rn+mM*?Ph4ge?;K&MO=5dH$Y(hhHh2y-K8|XULpI_@BFLhc^dYyZ;RQd6ULnX%
zY7XBrdX%kq;dvp(g8Ue4lb2A6TCi0~Be~{)e`OwVpB?PH2D#WOBIv*k9@h8svMjN%LB8=hT3X!a(GF&~^uI=HQRRDv3$W^b7s@-uyV
zh0r)6|MU>DZWSsYRM^NkQI4_jJUxMR7lX9x9lUlU?B*HdJ=56ZweCUP$ZoY9rFF+p
zujNrIgppL7LdhyaA;coEVs7#ao|(V$&G-5wg`mF4|60vrXX_&(76p9^7qVeblj~)T
zDEamE)_Ys!wZ}cExSr6rOJIAGMbZ`|
+
+
+ Guestbook Demo
+
+
+
+ {{with .Email}}You are currently logged in as {{.}}.{{end}}
+ {{with .Login}}Sign in{{end}}
+ {{with .Logout}}Sign out{{end}}
+
+
+ {{range .Greetings }}
+
+ {{with .Author}}{{.}}{{else}}An anonymous person{{end}}
+ on {{.Date.Format "3:04pm, Mon 2 Jan"}}
+ wrote
{{.Content}}
+
+ {{end}}
+
+
+
+
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/app.yaml b/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/app.yaml
new file mode 100644
index 000000000000..bac034b77a30
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/app.yaml
@@ -0,0 +1,15 @@
+application: helloworld
+version: 1
+runtime: go
+api_version: go1
+vm: true
+
+manual_scaling:
+ instances: 1
+
+handlers:
+- url: /favicon.ico
+ static_files: favicon.ico
+ upload: favicon.ico
+- url: /.*
+ script: _go_app
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/favicon.ico b/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..f19c04d270a3865384ce3db41412448692b8cba4
GIT binary patch
literal 1150
zcmchVT}YE*6vvO#Ozpx+V3I*aL_(n#kx)ooMSh^NcA;pHT?E=LQZz`!FSHCRr@prN
zwT3p)%=s-dmt~u}Ev?O|`zZYCq8qyiy0L=y-}5?0YR${e%Q?^Uob&&^@ADoGkso`+
zVq)QuG~pS#!VCV*}8%$~So~Xo7Z}fn#{=kyT1ep!Zb
zv1b!}`L%0%gZ-u8{86F};i`UY4wfg*lK=n!
literal 0
HcmV?d00001
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/helloworld.go b/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/helloworld.go
new file mode 100644
index 000000000000..efd61f8c378b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/demos/helloworld/helloworld.go
@@ -0,0 +1,45 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package helloworld
+
+import (
+ "html/template"
+ "net/http"
+ "time"
+
+ "google.golang.org/appengine"
+)
+
+var initTime = time.Now()
+
+func init() {
+ http.HandleFunc("/", handle)
+}
+
+func handle(w http.ResponseWriter, r *http.Request) {
+ if r.URL.Path != "/" {
+ http.NotFound(w, r)
+ return
+ }
+
+ c := appengine.NewContext(r)
+ c.Infof("Serving the front page.")
+
+ tmpl.Execute(w, time.Since(initTime))
+}
+
+var tmpl = template.Must(template.New("front").Parse(`
+
+
+
+Hello, World! 세상아 안녕!
+
+
+
+This instance has been running for {{.}}.
+
+
+
+`))
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/errors.go b/Godeps/_workspace/src/google.golang.org/appengine/errors.go
new file mode 100644
index 000000000000..16d0772e2a46
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/errors.go
@@ -0,0 +1,46 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// This file provides error functions for common API failure modes.
+
+package appengine
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsOverQuota reports whether err represents an API call failure
+// due to insufficient available quota.
+func IsOverQuota(err error) bool {
+ callErr, ok := err.(*internal.CallError)
+ return ok && callErr.Code == 4
+}
+
+// MultiError is returned by batch operations when there are errors with
+// particular elements. Errors will be in a one-to-one correspondence with
+// the input elements; successful elements will have a nil entry.
+type MultiError []error
+
+func (m MultiError) Error() string {
+ s, n := "", 0
+ for _, e := range m {
+ if e != nil {
+ if n == 0 {
+ s = e.Error()
+ }
+ n++
+ }
+ }
+ switch n {
+ case 0:
+ return "(0 errors)"
+ case 1:
+ return s
+ case 2:
+ return s + " (and 1 other error)"
+ }
+ return fmt.Sprintf("%s (and %d other errors)", s, n-1)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/file/file.go b/Godeps/_workspace/src/google.golang.org/appengine/file/file.go
new file mode 100644
index 000000000000..273a1c3f254b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/file/file.go
@@ -0,0 +1,26 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package file provides helper functions for using Google Cloud Storage.
+package file
+
+import (
+ "fmt"
+
+ "google.golang.org/appengine"
+ aipb "google.golang.org/appengine/internal/app_identity"
+)
+
+// DefaultBucketName returns the name of this application's
+// default Google Cloud Storage bucket.
+func DefaultBucketName(c appengine.Context) (string, error) {
+ req := &aipb.GetDefaultGcsBucketNameRequest{}
+ res := &aipb.GetDefaultGcsBucketNameResponse{}
+
+ err := c.Call("app_identity_service", "GetDefaultGcsBucketName", req, res, nil)
+ if err != nil {
+ return "", fmt.Errorf("file: no default bucket name returned in RPC response: %v", res)
+ }
+ return res.GetDefaultGcsBucketName(), nil
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/identity.go b/Godeps/_workspace/src/google.golang.org/appengine/identity.go
new file mode 100644
index 000000000000..3571956d02c7
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/identity.go
@@ -0,0 +1,141 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/app_identity"
+ modpb "google.golang.org/appengine/internal/modules"
+)
+
+// AppID returns the application ID for the current application.
+// The string will be a plain application ID (e.g. "appid"), with a
+// domain prefix for custom domain deployments (e.g. "example.com:appid").
+func AppID(c Context) string { return internal.AppID(c.FullyQualifiedAppID()) }
+
+// DefaultVersionHostname returns the standard hostname of the default version
+// of the current application (e.g. "my-app.appspot.com"). This is suitable for
+// use in constructing URLs.
+func DefaultVersionHostname(c Context) string {
+ return internal.DefaultVersionHostname(c.Request())
+}
+
+// ModuleName returns the module name of the current instance.
+func ModuleName(c Context) string {
+ return internal.ModuleName()
+}
+
+// ModuleHostname returns a hostname of a module instance.
+// If module is the empty string, it refers to the module of the current instance.
+// If version is empty, it refers to the version of the current instance if valid,
+// or the default version of the module of the current instance.
+// If instance is empty, ModuleHostname returns the load-balancing hostname.
+func ModuleHostname(c Context, module, version, instance string) (string, error) {
+ req := &modpb.GetHostnameRequest{}
+ // Only set the optional proto fields that were explicitly provided;
+ // unset fields let the service fall back to the current instance.
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ if instance != "" {
+ req.Instance = &instance
+ }
+ res := &modpb.GetHostnameResponse{}
+ if err := c.Call("modules", "GetHostname", req, res, nil); err != nil {
+ return "", err
+ }
+ // NOTE(review): assumes the modules service always sets Hostname on a
+ // successful call; a missing field would nil-deref here — confirm.
+ return *res.Hostname, nil
+}
+
+// VersionID returns the version ID for the current application.
+// It will be of the form "X.Y", where X is specified in app.yaml,
+// and Y is a number generated when each version of the app is uploaded.
+// It does not include a module name.
+func VersionID(c Context) string { return internal.VersionID() }
+
+// InstanceID returns a mostly-unique identifier for this instance.
+func InstanceID() string { return internal.InstanceID() }
+
+// Datacenter returns an identifier for the datacenter that the instance is running in.
+func Datacenter(c Context) string { return internal.Datacenter(c.Request()) }
+
+// ServerSoftware returns the App Engine release version.
+// In production, it looks like "Google App Engine/X.Y.Z".
+// In the development appserver, it looks like "Development/X.Y".
+func ServerSoftware() string { return internal.ServerSoftware() }
+
+// RequestID returns a string that uniquely identifies the request.
+func RequestID(c Context) string { return internal.RequestID(c.Request()) }
+
+// AccessToken generates an OAuth2 access token for the specified scopes on
+// behalf of service account of this application. This token will expire after
+// the returned time.
+func AccessToken(c Context, scopes ...string) (token string, expiry time.Time, err error) {
+ req := &pb.GetAccessTokenRequest{Scope: scopes}
+ res := &pb.GetAccessTokenResponse{}
+
+ err = c.Call("app_identity_service", "GetAccessToken", req, res, nil)
+ if err != nil {
+ return "", time.Time{}, err
+ }
+ // ExpirationTime is a Unix timestamp in seconds.
+ return res.GetAccessToken(), time.Unix(res.GetExpirationTime(), 0), nil
+}
+
+// Certificate represents a public certificate for the app.
+type Certificate struct {
+ KeyName string
+ Data []byte // PEM-encoded X.509 certificate
+}
+
+// PublicCertificates retrieves the public certificates for the app.
+// They can be used to verify a signature returned by SignBytes.
+func PublicCertificates(c Context) ([]Certificate, error) {
+ req := &pb.GetPublicCertificateForAppRequest{}
+ res := &pb.GetPublicCertificateForAppResponse{}
+ if err := c.Call("app_identity_service", "GetPublicCertificatesForApp", req, res, nil); err != nil {
+ return nil, err
+ }
+ // Copy into the exported Certificate type so callers do not need to
+ // depend on the internal proto message.
+ var cs []Certificate
+ for _, pc := range res.PublicCertificateList {
+ cs = append(cs, Certificate{
+ KeyName: pc.GetKeyName(),
+ Data: []byte(pc.GetX509CertificatePem()),
+ })
+ }
+ return cs, nil
+}
+
+// ServiceAccount returns a string representing the service account name, in
+// the form of an email address (typically app_id@appspot.gserviceaccount.com).
+func ServiceAccount(c Context) (string, error) {
+ req := &pb.GetServiceAccountNameRequest{}
+ res := &pb.GetServiceAccountNameResponse{}
+
+ err := c.Call("app_identity_service", "GetServiceAccountName", req, res, nil)
+ if err != nil {
+ return "", err
+ }
+ // err is necessarily nil here; return an explicit nil for clarity.
+ return res.GetServiceAccountName(), nil
+}
+
+// SignBytes signs bytes using a private key unique to your application.
+// It returns the name of the key used and the signature bytes.
+func SignBytes(c Context, bytes []byte) (string, []byte, error) {
+ req := &pb.SignForAppRequest{BytesToSign: bytes}
+ res := &pb.SignForAppResponse{}
+
+ err := c.Call("app_identity_service", "SignForApp", req, res, nil)
+ if err != nil {
+ return "", nil, err
+ }
+ // err is necessarily nil here; return an explicit nil for clarity.
+ return res.GetKeyName(), res.GetSignatureBytes(), nil
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("app_identity_service", pb.AppIdentityServiceError_ErrorCode_name)
+ internal.RegisterErrorCodeMap("modules", modpb.ModulesServiceError_ErrorCode_name)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/image/image.go b/Godeps/_workspace/src/google.golang.org/appengine/image/image.go
new file mode 100644
index 000000000000..e1558fe44f7f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/image/image.go
@@ -0,0 +1,65 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package image provides image services.
+package image // import "google.golang.org/appengine/image"
+
+import (
+ "fmt"
+ "net/url"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/image"
+)
+
+type ServingURLOptions struct {
+ Secure bool // whether the URL should use HTTPS
+
+ // Size must be between zero and 1600.
+ // If Size is non-zero, a resized version of the image is served,
+ // and Size is the served image's longest dimension. The aspect ratio is preserved.
+ // If Crop is true the image is cropped from the center instead of being resized.
+ Size int
+ Crop bool
+}
+
+// ServingURL returns a URL that will serve an image from Blobstore.
+// opts may be nil, in which case an insecure, full-size URL is returned.
+func ServingURL(c appengine.Context, key appengine.BlobKey, opts *ServingURLOptions) (*url.URL, error) {
+ req := &pb.ImagesGetUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ if opts != nil && opts.Secure {
+ req.CreateSecureUrl = &opts.Secure
+ }
+ res := &pb.ImagesGetUrlBaseResponse{}
+ if err := c.Call("images", "GetUrlBase", req, res, nil); err != nil {
+ return nil, err
+ }
+
+ // The URL may have suffixes added to dynamically resize or crop:
+ // - adding "=s32" will serve the image resized to 32 pixels, preserving the aspect ratio.
+ // - adding "=s32-c" is the same as "=s32" except it will be cropped.
+ u := *res.Url
+ if opts != nil && opts.Size > 0 {
+ u += fmt.Sprintf("=s%d", opts.Size)
+ if opts.Crop {
+ u += "-c"
+ }
+ }
+ return url.Parse(u)
+}
+
+// DeleteServingURL deletes the serving URL for an image.
+func DeleteServingURL(c appengine.Context, key appengine.BlobKey) error {
+ req := &pb.ImagesDeleteUrlBaseRequest{
+ BlobKey: (*string)(&key),
+ }
+ res := &pb.ImagesDeleteUrlBaseResponse{}
+ return c.Call("images", "DeleteUrlBase", req, res, nil)
+}
+
+func init() {
+ internal.RegisterErrorCodeMap("images", pb.ImagesServiceError_ErrorCode_name)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/aetesting/fake.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/aetesting/fake.go
new file mode 100644
index 000000000000..68925f297546
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/aetesting/fake.go
@@ -0,0 +1,88 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package aetesting provides utilities for testing App Engine packages.
+// This is not for testing user applications.
+package aetesting
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+)
+
+// FakeSingleContext returns a context whose Call invocations will be serviced
+// by f, which should be a function that has two arguments of the input and output
+// protocol buffer type, and one error return.
+func FakeSingleContext(t *testing.T, service, method string, f interface{}) appengine.Context {
+ fv := reflect.ValueOf(f)
+ if fv.Kind() != reflect.Func {
+ t.Fatal("not a function")
+ }
+ // Validate f's signature via reflection: exactly two proto.Message
+ // arguments and a single error result.
+ ft := fv.Type()
+ if ft.NumIn() != 2 || ft.NumOut() != 1 {
+ t.Fatalf("f has %d in and %d out, want 2 in and 1 out", ft.NumIn(), ft.NumOut())
+ }
+ for i := 0; i < 2; i++ {
+ at := ft.In(i)
+ if !at.Implements(protoMessageType) {
+ t.Fatalf("arg %d does not implement proto.Message", i)
+ }
+ }
+ if ft.Out(0) != errorType {
+ t.Fatalf("f's return is %v, want error", ft.Out(0))
+ }
+ return &single{
+ t: t,
+ service: service,
+ method: method,
+ f: fv,
+ }
+}
+
+var (
+ protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+ errorType = reflect.TypeOf((*error)(nil)).Elem()
+)
+
+type single struct {
+ t *testing.T
+ service, method string
+ f reflect.Value
+}
+
+func (s *single) logf(level, format string, args ...interface{}) {
+ s.t.Logf(level+": "+format, args...)
+}
+
+func (s *single) Debugf(format string, args ...interface{}) { s.logf("DEBUG", format, args...) }
+func (s *single) Infof(format string, args ...interface{}) { s.logf("INFO", format, args...) }
+func (s *single) Warningf(format string, args ...interface{}) { s.logf("WARNING", format, args...) }
+func (s *single) Errorf(format string, args ...interface{}) { s.logf("ERROR", format, args...) }
+func (s *single) Criticalf(format string, args ...interface{}) { s.logf("CRITICAL", format, args...) }
+func (*single) FullyQualifiedAppID() string { return "dev~fake-app" }
+func (*single) Request() interface{} { return nil }
+
+// Call dispatches the RPC to the fake's function f, after verifying that the
+// service and method match what FakeSingleContext was configured with.
+func (s *single) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
+ if service == "__go__" {
+ // Virtual __go__ methods are not serviced by the fake.
+ return fmt.Errorf("Unknown API call /%s.%s", service, method)
+ }
+ if service != s.service || method != s.method {
+ s.t.Fatalf("Unexpected call to /%s.%s", service, method)
+ }
+ ins := []reflect.Value{
+ reflect.ValueOf(in),
+ reflect.ValueOf(out),
+ }
+ outs := s.f.Call(ins)
+ if outs[0].IsNil() {
+ return nil
+ }
+ return outs[0].Interface().(error)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/api.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/api.go
new file mode 100644
index 000000000000..7c7b851c7e8a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/api.go
@@ -0,0 +1,589 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "runtime"
+ "strconv"
+ "strings"
+ "sync"
+ "sync/atomic"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ basepb "google.golang.org/appengine/internal/base"
+ logpb "google.golang.org/appengine/internal/log"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const (
+ apiPath = "/rpc_http"
+)
+
+var (
+ // Incoming headers.
+ ticketHeader = http.CanonicalHeaderKey("X-AppEngine-API-Ticket")
+ dapperHeader = http.CanonicalHeaderKey("X-Google-DapperTraceInfo")
+ defNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+ curNamespaceHeader = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+ userIPHeader = http.CanonicalHeaderKey("X-AppEngine-User-IP")
+ remoteAddrHeader = http.CanonicalHeaderKey("X-AppEngine-Remote-Addr")
+
+ // Outgoing headers.
+ apiEndpointHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Endpoint")
+ apiEndpointHeaderValue = []string{"app-engine-apis"}
+ apiMethodHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Method")
+ apiMethodHeaderValue = []string{"/VMRemoteAPI.CallRemoteAPI"}
+ apiDeadlineHeader = http.CanonicalHeaderKey("X-Google-RPC-Service-Deadline")
+ apiContentType = http.CanonicalHeaderKey("Content-Type")
+ apiContentTypeValue = []string{"application/octet-stream"}
+ logFlushHeader = http.CanonicalHeaderKey("X-AppEngine-Log-Flush-Count")
+
+ apiHTTPClient = &http.Client{
+ Transport: &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: limitDial,
+ },
+ }
+)
+
+func apiHost() string {
+ host, port := "appengine.googleapis.com", "10001"
+ if h := os.Getenv("API_HOST"); h != "" {
+ host = h
+ }
+ if p := os.Getenv("API_PORT"); p != "" {
+ port = p
+ }
+ return host + ":" + port
+}
+
+// handleHTTP services one in-flight request: it registers the request in
+// ctxs.m (so NewContext can find it), rewrites RemoteAddr into "IP:port"
+// form, runs the user's handlers via the default mux, and finally flushes
+// any pending app logs and writes the buffered response.
+func handleHTTP(w http.ResponseWriter, r *http.Request) {
+ c := &context{
+ req: r,
+ outHeader: w.Header(),
+ }
+ stopFlushing := make(chan int)
+
+ // Register c so NewContext(r) can look it up while the request is in
+ // flight; the deferred delete unregisters it on completion.
+ ctxs.Lock()
+ ctxs.m[r] = c
+ ctxs.Unlock()
+ defer func() {
+ ctxs.Lock()
+ delete(ctxs.m, r)
+ ctxs.Unlock()
+ }()
+
+ // Patch up RemoteAddr so it looks reasonable.
+ if addr := r.Header.Get(userIPHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else if addr = r.Header.Get(remoteAddrHeader); addr != "" {
+ r.RemoteAddr = addr
+ } else {
+ // Should not normally reach here, but pick a sensible default anyway.
+ r.RemoteAddr = "127.0.0.1"
+ }
+ // The address in the headers will most likely be of these forms:
+ // 123.123.123.123
+ // 2001:db8::1
+ // net/http.Request.RemoteAddr is specified to be in "IP:port" form.
+ if _, _, err := net.SplitHostPort(r.RemoteAddr); err != nil {
+ // Assume the remote address is only a host; add a default port.
+ r.RemoteAddr = net.JoinHostPort(r.RemoteAddr, "80")
+ }
+
+ // Start goroutine responsible for flushing app logs.
+ // This is done after adding c to ctx.m (and stopped before removing it)
+ // because flushing logs requires making an API call.
+ go c.logFlusher(stopFlushing)
+
+ executeRequestSafely(c, r)
+ c.outHeader = nil // make sure header changes aren't respected any more
+
+ stopFlushing <- 1 // any logging beyond this point will be dropped
+
+ // Flush any pending logs asynchronously.
+ c.pendingLogs.Lock()
+ flushes := c.pendingLogs.flushes
+ if len(c.pendingLogs.lines) > 0 {
+ flushes++
+ }
+ c.pendingLogs.Unlock()
+ go c.flushLog(false)
+ // Tell the appserver how many log flushes to expect for this request.
+ w.Header().Set(logFlushHeader, strconv.Itoa(flushes))
+
+ // Avoid nil Write call if c.Write is never called.
+ if c.outCode != 0 {
+ w.WriteHeader(c.outCode)
+ }
+ if c.outBody != nil {
+ w.Write(c.outBody)
+ }
+}
+
+func executeRequestSafely(c *context, r *http.Request) {
+ defer func() {
+ if x := recover(); x != nil {
+ c.logf(4, "%s", renderPanic(x)) // 4 == critical
+ }
+ }()
+
+ http.DefaultServeMux.ServeHTTP(c, r)
+}
+
+// renderPanic formats the panic value x together with the current stack
+// trace, trimming the frames belonging to the panic-handling machinery so
+// the trace is rooted at the site of the panic.
+func renderPanic(x interface{}) string {
+ buf := make([]byte, 16<<10) // 16 KB should be plenty
+ buf = buf[:runtime.Stack(buf, false)]
+
+ // Remove the first few stack frames:
+ // this func
+ // the recover closure in the caller
+ // That will root the stack trace at the site of the panic.
+ const (
+ skipStart = "internal.renderPanic"
+ skipFrames = 2
+ )
+ start := bytes.Index(buf, []byte(skipStart))
+ // Only trim if the marker frame was found; start == -1 would otherwise
+ // cause a slice-bounds panic in the copy below.
+ if start >= 0 {
+ p := start
+ for i := 0; i < skipFrames*2 && p+1 < len(buf); i++ {
+ p = bytes.IndexByte(buf[p+1:], '\n') + p + 1
+ if p < 0 {
+ break
+ }
+ }
+ if p >= 0 {
+ // buf[start:p+1] is the block to remove.
+ // Copy buf[p+1:] over buf[start:] and shrink buf.
+ copy(buf[start:], buf[p+1:])
+ buf = buf[:len(buf)-(p+1-start)]
+ }
+ }
+
+ // Add panic heading.
+ head := fmt.Sprintf("panic: %v\n\n", x)
+ if len(head) > len(buf) {
+ // Extremely unlikely to happen.
+ return head
+ }
+ copy(buf[len(head):], buf)
+ copy(buf, head)
+
+ return string(buf)
+}
+
+var ctxs = struct {
+ sync.Mutex
+ m map[*http.Request]*context
+ bg *context // background context, lazily initialized
+}{
+ m: make(map[*http.Request]*context),
+}
+
+// context represents the context of an in-flight HTTP request.
+// It implements the appengine.Context and http.ResponseWriter interfaces.
+type context struct {
+ req *http.Request
+
+ outCode int
+ outHeader http.Header
+ outBody []byte
+
+ pendingLogs struct {
+ sync.Mutex
+ lines []*logpb.UserAppLogLine
+ flushes int
+ }
+}
+
+func NewContext(req *http.Request) *context {
+ ctxs.Lock()
+ c := ctxs.m[req]
+ ctxs.Unlock()
+
+ if c == nil {
+ // Someone passed in an http.Request that is not in-flight.
+ // We panic here rather than panicking at a later point
+ // so that stack traces will be more sensible.
+ log.Panic("appengine: NewContext passed an unknown http.Request")
+ }
+ return c
+}
+
+// BackgroundContext returns a context that is not tied to an incoming HTTP
+// request. It is constructed lazily, cached in ctxs.bg, and carries a
+// synthetic security ticket derived from the app, module, version and
+// instance identifiers.
+func BackgroundContext() *context {
+ ctxs.Lock()
+ defer ctxs.Unlock()
+
+ if ctxs.bg != nil {
+ return ctxs.bg
+ }
+
+ // Compute background security ticket.
+ appID := partitionlessAppID()
+ // Replace ':' and '.' in the app ID before embedding it in the ticket.
+ escAppID := strings.Replace(strings.Replace(appID, ":", "_", -1), ".", "_", -1)
+ // Use only the major part of the version ID (the portion before '_').
+ majVersion := VersionID()
+ if i := strings.Index(majVersion, "_"); i >= 0 {
+ majVersion = majVersion[:i]
+ }
+ ticket := fmt.Sprintf("%s/%s.%s.%s", escAppID, ModuleName(), majVersion, InstanceID())
+
+ // The background context's fake request carries only the ticket header.
+ ctxs.bg = &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{ticket},
+ },
+ },
+ }
+
+ // TODO(dsymonds): Wire up the shutdown handler to do a final flush.
+ go ctxs.bg.logFlusher(make(chan int))
+
+ return ctxs.bg
+}
+
+var errTimeout = &CallError{
+ Detail: "Deadline exceeded",
+ Code: int32(remotepb.RpcError_CANCELLED),
+ Timeout: true,
+}
+
+func (c *context) Header() http.Header { return c.outHeader }
+
+// Copied from $GOROOT/src/pkg/net/http/transfer.go. Some response status
+// codes do not permit a response body (nor response entity headers such as
+// Content-Length, Content-Type, etc).
+func bodyAllowedForStatus(status int) bool {
+ switch {
+ case status >= 100 && status <= 199:
+ return false
+ case status == 204:
+ return false
+ case status == 304:
+ return false
+ }
+ return true
+}
+
+func (c *context) Write(b []byte) (int, error) {
+ if c.outCode == 0 {
+ c.WriteHeader(http.StatusOK)
+ }
+ if len(b) > 0 && !bodyAllowedForStatus(c.outCode) {
+ return 0, http.ErrBodyNotAllowed
+ }
+ c.outBody = append(c.outBody, b...)
+ return len(b), nil
+}
+
+func (c *context) WriteHeader(code int) {
+ if c.outCode != 0 {
+ c.Errorf("WriteHeader called multiple times on request.")
+ return
+ }
+ c.outCode = code
+}
+
+// post sends body to the API serving endpoint as a single HTTP POST and
+// returns the raw response bytes. If the request does not complete within
+// timeout it is cancelled and errTimeout is returned (via the deferred
+// check on the named return value err).
+func (c *context) post(body []byte, timeout time.Duration) (b []byte, err error) {
+ dst := apiHost()
+ hreq := &http.Request{
+ Method: "POST",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: dst,
+ Path: apiPath,
+ },
+ Header: http.Header{
+ apiEndpointHeader: apiEndpointHeaderValue,
+ apiMethodHeader: apiMethodHeaderValue,
+ apiContentType: apiContentTypeValue,
+ apiDeadlineHeader: []string{strconv.FormatFloat(timeout.Seconds(), 'f', -1, 64)},
+ },
+ Body: ioutil.NopCloser(bytes.NewReader(body)),
+ ContentLength: int64(len(body)),
+ Host: dst,
+ }
+ // Propagate trace information from the incoming request, if present.
+ if info := c.req.Header.Get(dapperHeader); info != "" {
+ hreq.Header.Set(dapperHeader, info)
+ }
+
+ tr := apiHTTPClient.Transport.(*http.Transport)
+
+ var timedOut int32 // atomic; set to 1 if timed out
+ t := time.AfterFunc(timeout, func() {
+ atomic.StoreInt32(&timedOut, 1)
+ tr.CancelRequest(hreq)
+ })
+ defer t.Stop()
+ defer func() {
+ // Check if timeout was exceeded.
+ if atomic.LoadInt32(&timedOut) != 0 {
+ err = errTimeout
+ }
+ }()
+
+ hresp, err := apiHTTPClient.Do(hreq)
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge HTTP failed: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ defer hresp.Body.Close()
+ hrespBody, err := ioutil.ReadAll(hresp.Body)
+ // The status check comes before the read-error check so that a non-200
+ // response can quote whatever body bytes were read.
+ if hresp.StatusCode != 200 {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge returned HTTP %d (%q)", hresp.StatusCode, hrespBody),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ if err != nil {
+ return nil, &CallError{
+ Detail: fmt.Sprintf("service bridge response bad: %v", err),
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return hrespBody, nil
+}
+
+var virtualMethodHeaders = map[string]string{
+ "GetNamespace": curNamespaceHeader,
+ "GetDefaultNamespace": defNamespaceHeader,
+
+ "user:Email": http.CanonicalHeaderKey("X-AppEngine-User-Email"),
+ "user:AuthDomain": http.CanonicalHeaderKey("X-AppEngine-Auth-Domain"),
+ "user:ID": http.CanonicalHeaderKey("X-AppEngine-User-Id"),
+ "user:IsAdmin": http.CanonicalHeaderKey("X-AppEngine-User-Is-Admin"),
+ "user:FederatedIdentity": http.CanonicalHeaderKey("X-AppEngine-Federated-Identity"),
+ "user:FederatedProvider": http.CanonicalHeaderKey("X-AppEngine-Federated-Provider"),
+}
+
+// Call makes an API call: it marshals in, posts it over the service bridge
+// with the request's security ticket, and unmarshals the reply into out.
+// RPC-level and application-level errors are translated into CallError and
+// APIError values respectively.
+func (c *context) Call(service, method string, in, out proto.Message, opts *CallOptions) error {
+ if service == "__go__" {
+ // Virtual methods are answered locally from the request headers,
+ // without a round trip to the API server.
+ if hdr, ok := virtualMethodHeaders[method]; ok {
+ out.(*basepb.StringProto).Value = proto.String(c.req.Header.Get(hdr))
+ return nil
+ }
+ }
+
+ // Default RPC timeout is 5s.
+ timeout := 5 * time.Second
+ if opts != nil && opts.Timeout > 0 {
+ timeout = opts.Timeout
+ }
+
+ data, err := proto.Marshal(in)
+ if err != nil {
+ return err
+ }
+
+ // The security ticket from the incoming request authenticates this call.
+ ticket := c.req.Header.Get(ticketHeader)
+ req := &remotepb.Request{
+ ServiceName: &service,
+ Method: &method,
+ Request: data,
+ RequestId: &ticket,
+ }
+ hreqBody, err := proto.Marshal(req)
+ if err != nil {
+ return err
+ }
+
+ hrespBody, err := c.post(hreqBody, timeout)
+ if err != nil {
+ return err
+ }
+
+ res := &remotepb.Response{}
+ if err := proto.Unmarshal(hrespBody, res); err != nil {
+ return err
+ }
+ if res.RpcError != nil {
+ ce := &CallError{
+ Detail: res.RpcError.GetDetail(),
+ Code: *res.RpcError.Code,
+ }
+ // Surface cancellation/deadline errors as timeouts to callers.
+ switch remotepb.RpcError_ErrorCode(ce.Code) {
+ case remotepb.RpcError_CANCELLED, remotepb.RpcError_DEADLINE_EXCEEDED:
+ ce.Timeout = true
+ }
+ return ce
+ }
+ if res.ApplicationError != nil {
+ return &APIError{
+ Service: *req.ServiceName,
+ Detail: res.ApplicationError.GetDetail(),
+ Code: *res.ApplicationError.Code,
+ }
+ }
+ if res.Exception != nil || res.JavaException != nil {
+ // This shouldn't happen, but let's be defensive.
+ return &CallError{
+ Detail: "service bridge returned exception",
+ Code: int32(remotepb.RpcError_UNKNOWN),
+ }
+ }
+ return proto.Unmarshal(res.Response, out)
+}
+
+func (c *context) Request() interface{} {
+ return c.req
+}
+
+func (c *context) addLogLine(ll *logpb.UserAppLogLine) {
+ // Truncate long log lines.
+ // TODO(dsymonds): Check if this is still necessary.
+ const lim = 8 << 10
+ if len(*ll.Message) > lim {
+ suffix := fmt.Sprintf("...(length %d)", len(*ll.Message))
+ ll.Message = proto.String((*ll.Message)[:lim-len(suffix)] + suffix)
+ }
+
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(c.pendingLogs.lines, ll)
+ c.pendingLogs.Unlock()
+}
+
+var logLevelName = map[int64]string{
+ 0: "DEBUG",
+ 1: "INFO",
+ 2: "WARNING",
+ 3: "ERROR",
+ 4: "CRITICAL",
+}
+
+func (c *context) logf(level int64, format string, args ...interface{}) {
+ s := fmt.Sprintf(format, args...)
+ s = strings.TrimRight(s, "\n") // Remove any trailing newline characters.
+ c.addLogLine(&logpb.UserAppLogLine{
+ TimestampUsec: proto.Int64(time.Now().UnixNano() / 1e3),
+ Level: &level,
+ Message: &s,
+ })
+ log.Print(logLevelName[level] + ": " + s)
+}
+
+func (c *context) Debugf(format string, args ...interface{}) { c.logf(0, format, args...) }
+func (c *context) Infof(format string, args ...interface{}) { c.logf(1, format, args...) }
+func (c *context) Warningf(format string, args ...interface{}) { c.logf(2, format, args...) }
+func (c *context) Errorf(format string, args ...interface{}) { c.logf(3, format, args...) }
+func (c *context) Criticalf(format string, args ...interface{}) { c.logf(4, format, args...) }
+
+// FullyQualifiedAppID returns the fully-qualified application ID.
+// This may contain a partition prefix (e.g. "s~" for High Replication apps),
+// or a domain prefix (e.g. "example.com:").
+func (c *context) FullyQualifiedAppID() string { return fullyQualifiedAppID() }
+
+// flushLog attempts to flush any pending logs to the appserver.
+// It should not be called concurrently.
+// It reports whether a flush RPC was attempted and succeeded.
+func (c *context) flushLog(force bool) (flushed bool) {
+ c.pendingLogs.Lock()
+ // Grab up to 30 MB. We can get away with up to 32 MB, but let's be cautious.
+ n, rem := 0, 30<<20
+ for ; n < len(c.pendingLogs.lines); n++ {
+ ll := c.pendingLogs.lines[n]
+ // Each log line will require about 3 bytes of overhead.
+ nb := proto.Size(ll) + 3
+ if nb > rem {
+ break
+ }
+ rem -= nb
+ }
+ lines := c.pendingLogs.lines[:n]
+ c.pendingLogs.lines = c.pendingLogs.lines[n:]
+ c.pendingLogs.Unlock()
+
+ if len(lines) == 0 && !force {
+ // Nothing to flush.
+ return false
+ }
+
+ // If the flush fails below, put the batched lines back at the front of
+ // the pending queue so they can be retried on a later flush.
+ rescueLogs := false
+ defer func() {
+ if rescueLogs {
+ c.pendingLogs.Lock()
+ c.pendingLogs.lines = append(lines, c.pendingLogs.lines...)
+ c.pendingLogs.Unlock()
+ }
+ }()
+
+ buf, err := proto.Marshal(&logpb.UserAppLogGroup{
+ LogLine: lines,
+ })
+ if err != nil {
+ log.Printf("internal.flushLog: marshaling UserAppLogGroup: %v", err)
+ rescueLogs = true
+ return false
+ }
+
+ req := &logpb.FlushRequest{
+ Logs: buf,
+ }
+ res := &basepb.VoidProto{}
+ // Count the flush before issuing it; handleHTTP reads this counter when
+ // reporting the expected flush count in the response header.
+ c.pendingLogs.Lock()
+ c.pendingLogs.flushes++
+ c.pendingLogs.Unlock()
+ if err := c.Call("logservice", "Flush", req, res, nil); err != nil {
+ log.Printf("internal.flushLog: Flush RPC: %v", err)
+ rescueLogs = true
+ return false
+ }
+ return true
+}
+
+const (
+ // Log flushing parameters.
+ flushInterval = 1 * time.Second
+ forceFlushInterval = 60 * time.Second
+)
+
+// logFlusher periodically flushes pending app logs until a value is
+// received on stop. A forced flush happens if more than forceFlushInterval
+// has elapsed since the last successful flush.
+func (c *context) logFlusher(stop <-chan int) {
+ lastFlush := time.Now()
+ tick := time.NewTicker(flushInterval)
+ for {
+ select {
+ case <-stop:
+ // Request finished.
+ tick.Stop()
+ return
+ case <-tick.C:
+ // time.Since is the idiomatic form of time.Now().Sub(t).
+ force := time.Since(lastFlush) > forceFlushInterval
+ if c.flushLog(force) {
+ lastFlush = time.Now()
+ }
+ }
+ }
+}
+
+func ContextForTesting(req *http.Request) *context {
+ return &context{req: req}
+}
+
+// caller is a subset of appengine.Context.
+type caller interface {
+ Call(service, method string, in, out proto.Message, opts *CallOptions) error
+}
+
+var virtualOpts = &CallOptions{
+ // Virtual API calls should happen nearly instantaneously.
+ Timeout: 1 * time.Millisecond,
+}
+
+// VirtAPI invokes a virtual API call for the __go__ service.
+// It is for methods that accept a VoidProto and return a StringProto.
+// It returns an empty string if the call fails.
+func VirtAPI(c caller, method string) string {
+ s := &basepb.StringProto{}
+ if err := c.Call("__go__", method, &basepb.VoidProto{}, s, virtualOpts); err != nil {
+ log.Printf("/__go__.%s failed: %v", method, err)
+ }
+ return s.GetValue()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/api_race_test.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/api_race_test.go
new file mode 100644
index 000000000000..d6977f192d2c
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/api_race_test.go
@@ -0,0 +1,5 @@
+// +build race
+
+package internal
+
+func init() { raceDetector = true }
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/api_test.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/api_test.go
new file mode 100644
index 000000000000..1339c019132b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/api_test.go
@@ -0,0 +1,412 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "bufio"
+ "bytes"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "net/url"
+ "os"
+ "os/exec"
+ "strings"
+ "sync/atomic"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ basepb "google.golang.org/appengine/internal/base"
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+const testTicketHeader = "X-Magic-Ticket-Header"
+
+func init() {
+ ticketHeader = testTicketHeader
+}
+
+type fakeAPIHandler struct {
+ hang chan int // used for RunSlowly RPC
+
+ LogFlushes int32 // atomic
+}
+
+func (f *fakeAPIHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+ writeResponse := func(res *remotepb.Response) {
+ hresBody, err := proto.Marshal(res)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding API response: %v", err), 500)
+ return
+ }
+ w.Write(hresBody)
+ }
+
+ if r.URL.Path != "/rpc_http" {
+ http.NotFound(w, r)
+ return
+ }
+ hreqBody, err := ioutil.ReadAll(r.Body)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Bad body: %v", err), 500)
+ return
+ }
+ apiReq := &remotepb.Request{}
+ if err := proto.Unmarshal(hreqBody, apiReq); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded API request: %v", err), 500)
+ return
+ }
+ if *apiReq.RequestId != "s3cr3t" {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_SECURITY_VIOLATION)),
+ Detail: proto.String("bad security ticket"),
+ },
+ })
+ return
+ }
+ if got, want := r.Header.Get(dapperHeader), "trace-001"; got != want {
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_BAD_REQUEST)),
+ Detail: proto.String(fmt.Sprintf("trace info = %q, want %q", got, want)),
+ },
+ })
+ return
+ }
+
+ service, method := *apiReq.ServiceName, *apiReq.Method
+ var resOut proto.Message
+ if service == "actordb" && method == "LookupActor" {
+ req := &basepb.StringProto{}
+ res := &basepb.StringProto{}
+ if err := proto.Unmarshal(apiReq.Request, req); err != nil {
+ http.Error(w, fmt.Sprintf("Bad encoded request: %v", err), 500)
+ return
+ }
+ if *req.Value == "Doctor Who" {
+ res.Value = proto.String("David Tennant")
+ }
+ resOut = res
+ }
+ if service == "errors" {
+ switch method {
+ case "Non200":
+ http.Error(w, "I'm a little teapot.", 418)
+ return
+ case "ShortResponse":
+ w.Header().Set("Content-Length", "100")
+ w.Write([]byte("way too short"))
+ return
+ case "OverQuota":
+ writeResponse(&remotepb.Response{
+ RpcError: &remotepb.RpcError{
+ Code: proto.Int32(int32(remotepb.RpcError_OVER_QUOTA)),
+ Detail: proto.String("you are hogging the resources!"),
+ },
+ })
+ return
+ case "RunSlowly":
+ // TestAPICallRPCFailure creates f.hang, but does not strobe it
+ // until c.Call returns with remotepb.RpcError_CANCELLED.
+ // This is here to force a happens-before relationship between
+ // the httptest server handler and shutdown.
+ <-f.hang
+ resOut = &basepb.VoidProto{}
+ }
+ }
+ if service == "logservice" && method == "Flush" {
+ // Pretend log flushing is slow.
+ time.Sleep(50 * time.Millisecond)
+ atomic.AddInt32(&f.LogFlushes, 1)
+ resOut = &basepb.VoidProto{}
+ }
+
+ encOut, err := proto.Marshal(resOut)
+ if err != nil {
+ http.Error(w, fmt.Sprintf("Failed encoding response: %v", err), 500)
+ return
+ }
+ writeResponse(&remotepb.Response{
+ Response: encOut,
+ })
+}
+
+func setup() (f *fakeAPIHandler, c *context, cleanup func()) {
+ f = &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ parts := strings.SplitN(strings.TrimPrefix(srv.URL, "http://"), ":", 2)
+ os.Setenv("API_HOST", parts[0])
+ os.Setenv("API_PORT", parts[1])
+ return f, &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ }, func() {
+ srv.Close()
+ os.Setenv("API_HOST", "")
+ os.Setenv("API_PORT", "")
+ }
+}
+
+func TestAPICall(t *testing.T) {
+ _, c, cleanup := setup()
+ defer cleanup()
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ err := c.Call("actordb", "LookupActor", req, res, nil)
+ if err != nil {
+ t.Fatalf("API call failed: %v", err)
+ }
+ if got, want := *res.Value, "David Tennant"; got != want {
+ t.Errorf("Response is %q, want %q", got, want)
+ }
+}
+
+func TestAPICallRPCFailure(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ testCases := []struct {
+ method string
+ code remotepb.RpcError_ErrorCode
+ }{
+ {"Non200", remotepb.RpcError_UNKNOWN},
+ {"ShortResponse", remotepb.RpcError_UNKNOWN},
+ {"OverQuota", remotepb.RpcError_OVER_QUOTA},
+ {"RunSlowly", remotepb.RpcError_CANCELLED},
+ }
+ f.hang = make(chan int) // only for RunSlowly
+ for _, tc := range testCases {
+ opts := &CallOptions{
+ Timeout: 100 * time.Millisecond,
+ }
+ err := c.Call("errors", tc.method, &basepb.VoidProto{}, &basepb.VoidProto{}, opts)
+ ce, ok := err.(*CallError)
+ if !ok {
+ t.Errorf("%s: API call error is %T (%v), want *CallError", tc.method, err, err)
+ continue
+ }
+ if ce.Code != int32(tc.code) {
+ t.Errorf("%s: ce.Code = %d, want %d", tc.method, ce.Code, tc.code)
+ }
+ if tc.method == "RunSlowly" {
+ f.hang <- 1 // release the HTTP handler
+ }
+ }
+}
+
+func TestAPICallDialFailure(t *testing.T) {
+ // See what happens if the API host is unresponsive.
+ // This should time out quickly, not hang forever.
+ _, c, cleanup := setup()
+ defer cleanup()
+ os.Setenv("API_HOST", "")
+ os.Setenv("API_PORT", "")
+
+ start := time.Now()
+ err := c.Call("foo", "bar", &basepb.VoidProto{}, &basepb.VoidProto{}, nil)
+ const max = 1 * time.Second
+ if taken := time.Since(start); taken > max {
+ t.Errorf("Dial hang took too long: %v > %v", taken, max)
+ }
+ if err == nil {
+ t.Error("Call did not fail")
+ }
+}
+
+func TestDelayedLogFlushing(t *testing.T) {
+ f, c, cleanup := setup()
+ defer cleanup()
+
+ http.HandleFunc("/quick_log", func(w http.ResponseWriter, r *http.Request) {
+ c := NewContext(r)
+ c.Infof("It's a lovely day.")
+ w.WriteHeader(200)
+ w.Write(make([]byte, 100<<10)) // write 100 KB to force HTTP flush
+ })
+
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Path: "/quick_log",
+ },
+ Header: c.req.Header,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ w := httptest.NewRecorder()
+
+ // Check that log flushing does not hold up the HTTP response.
+ start := time.Now()
+ handleHTTP(w, r)
+ if d := time.Since(start); d > 10*time.Millisecond {
+ t.Errorf("handleHTTP took %v, want under 10ms", d)
+ }
+ const hdr = "X-AppEngine-Log-Flush-Count"
+ if h := w.HeaderMap.Get(hdr); h != "1" {
+ t.Errorf("%s header = %q, want %q", hdr, h, "1")
+ }
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 0 {
+ t.Errorf("After HTTP response: f.LogFlushes = %d, want 0", f)
+ }
+
+ // Check that the log flush eventually comes in.
+ time.Sleep(100 * time.Millisecond)
+ if f := atomic.LoadInt32(&f.LogFlushes); f != 1 {
+ t.Errorf("After 100ms: f.LogFlushes = %d, want 1", f)
+ }
+}
+
+func TestRemoteAddr(t *testing.T) {
+ var addr string
+ http.HandleFunc("/remote_addr", func(w http.ResponseWriter, r *http.Request) {
+ addr = r.RemoteAddr
+ })
+
+ testCases := []struct {
+ headers http.Header
+ addr string
+ }{
+ {http.Header{"X-Appengine-User-Ip": []string{"10.5.2.1"}}, "10.5.2.1:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4"}}, "1.2.3.4:80"},
+ {http.Header{"X-Appengine-Remote-Addr": []string{"1.2.3.4:8080"}}, "1.2.3.4:8080"},
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"2401:fa00:9:1:7646:a0ff:fe90:ca66"}},
+ "[2401:fa00:9:1:7646:a0ff:fe90:ca66]:80",
+ },
+ {
+ http.Header{"X-Appengine-Remote-Addr": []string{"[::1]:http"}},
+ "[::1]:http",
+ },
+ {http.Header{}, "127.0.0.1:80"},
+ }
+
+ for _, tc := range testCases {
+ r := &http.Request{
+ Method: "GET",
+ URL: &url.URL{Scheme: "http", Path: "/remote_addr"},
+ Header: tc.headers,
+ Body: ioutil.NopCloser(bytes.NewReader(nil)),
+ }
+ handleHTTP(httptest.NewRecorder(), r)
+ if addr != tc.addr {
+ t.Errorf("Header %v, got %q, want %q", tc.headers, addr, tc.addr)
+ }
+ }
+}
+
+var raceDetector = false
+
+func TestAPICallAllocations(t *testing.T) {
+ if raceDetector {
+ t.Skip("not running under race detector")
+ }
+
+ // Run the test API server in a subprocess so we aren't counting its allocations.
+ cleanup := launchHelperProcess(t)
+ defer cleanup()
+ c := &context{
+ req: &http.Request{
+ Header: http.Header{
+ ticketHeader: []string{"s3cr3t"},
+ dapperHeader: []string{"trace-001"},
+ },
+ },
+ }
+
+ req := &basepb.StringProto{
+ Value: proto.String("Doctor Who"),
+ }
+ res := &basepb.StringProto{}
+ opts := &CallOptions{
+ Timeout: 100 * time.Millisecond,
+ }
+ var apiErr error
+ avg := testing.AllocsPerRun(100, func() {
+ if err := c.Call("actordb", "LookupActor", req, res, opts); err != nil && apiErr == nil {
+ apiErr = err // get the first error only
+ }
+ })
+ if apiErr != nil {
+ t.Errorf("API call failed: %v", apiErr)
+ }
+
+ // Lots of room for improvement...
+ const min, max float64 = 75, 85
+ if avg < min || max < avg {
+ t.Errorf("Allocations per API call = %g, want in [%g,%g]", avg, min, max)
+ }
+}
+
+func launchHelperProcess(t *testing.T) (cleanup func()) {
+ cmd := exec.Command(os.Args[0], "-test.run=TestHelperProcess")
+ cmd.Env = []string{"GO_WANT_HELPER_PROCESS=1"}
+ stdin, err := cmd.StdinPipe()
+ if err != nil {
+ t.Fatalf("StdinPipe: %v", err)
+ }
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatalf("StdoutPipe: %v", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatalf("Starting helper process: %v", err)
+ }
+
+ scan := bufio.NewScanner(stdout)
+ ok := false
+ for scan.Scan() {
+ line := scan.Text()
+ if hp := strings.TrimPrefix(line, helperProcessMagic); hp != line {
+ parts := strings.SplitN(hp, ":", 2)
+ os.Setenv("API_HOST", parts[0])
+ os.Setenv("API_PORT", parts[1])
+ ok = true
+ break
+ }
+ }
+ if err := scan.Err(); err != nil {
+ t.Fatalf("Scanning helper process stdout: %v", err)
+ }
+ if !ok {
+ t.Fatal("Helper process never reported")
+ }
+
+ return func() {
+ stdin.Close()
+ if err := cmd.Wait(); err != nil {
+ t.Errorf("Helper process did not exit cleanly: %v", err)
+ }
+ }
+}
+
+const helperProcessMagic = "A lovely helper process is listening at "
+
+// This isn't a real test. It's used as a helper process.
+func TestHelperProcess(*testing.T) {
+ if os.Getenv("GO_WANT_HELPER_PROCESS") != "1" {
+ return
+ }
+ defer os.Exit(0)
+
+ f := &fakeAPIHandler{}
+ srv := httptest.NewServer(f)
+ defer srv.Close()
+ fmt.Println(helperProcessMagic + strings.TrimPrefix(srv.URL, "http://"))
+
+ // Wait for stdin to be closed.
+ io.Copy(ioutil.Discard, os.Stdin)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id.go
new file mode 100644
index 000000000000..11df8c07b538
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id.go
@@ -0,0 +1,28 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "strings"
+)
+
+func parseFullAppID(appid string) (partition, domain, displayID string) {
+ if i := strings.Index(appid, "~"); i != -1 {
+ partition, appid = appid[:i], appid[i+1:]
+ }
+ if i := strings.Index(appid, ":"); i != -1 {
+ domain, appid = appid[:i], appid[i+1:]
+ }
+ return partition, domain, appid
+}
+
+// appID returns "appid" or "domain.com:appid".
+func appID(fullAppID string) string {
+ _, dom, dis := parseFullAppID(fullAppID)
+ if dom != "" {
+ return dom + ":" + dis
+ }
+ return dis
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id_test.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id_test.go
new file mode 100644
index 000000000000..e69195cd4055
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_id_test.go
@@ -0,0 +1,34 @@
+// Copyright 2011 Google Inc. All Rights Reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "testing"
+)
+
+func TestAppIDParsing(t *testing.T) {
+ testCases := []struct {
+ in string
+ partition, domain, displayID string
+ }{
+ {"simple-app-id", "", "", "simple-app-id"},
+ {"domain.com:domain-app-id", "", "domain.com", "domain-app-id"},
+ {"part~partition-app-id", "part", "", "partition-app-id"},
+ {"part~domain.com:display", "part", "domain.com", "display"},
+ }
+
+ for _, tc := range testCases {
+ part, dom, dis := parseFullAppID(tc.in)
+ if part != tc.partition {
+ t.Errorf("partition of %q: got %q, want %q", tc.in, part, tc.partition)
+ }
+ if dom != tc.domain {
+ t.Errorf("domain of %q: got %q, want %q", tc.in, dom, tc.domain)
+ }
+ if dis != tc.displayID {
+ t.Errorf("displayID of %q: got %q, want %q", tc.in, dis, tc.displayID)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
new file mode 100644
index 000000000000..a5f3bafc216f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.pb.go
@@ -0,0 +1,295 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+// DO NOT EDIT!
+
+/*
+Package app_identity is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/app_identity/app_identity_service.proto
+
+It has these top-level messages:
+ AppIdentityServiceError
+ SignForAppRequest
+ SignForAppResponse
+ GetPublicCertificateForAppRequest
+ PublicCertificate
+ GetPublicCertificateForAppResponse
+ GetServiceAccountNameRequest
+ GetServiceAccountNameResponse
+ GetAccessTokenRequest
+ GetAccessTokenResponse
+ GetDefaultGcsBucketNameRequest
+ GetDefaultGcsBucketNameResponse
+*/
+package app_identity
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type AppIdentityServiceError_ErrorCode int32
+
+const (
+ AppIdentityServiceError_SUCCESS AppIdentityServiceError_ErrorCode = 0
+ AppIdentityServiceError_UNKNOWN_SCOPE AppIdentityServiceError_ErrorCode = 9
+ AppIdentityServiceError_BLOB_TOO_LARGE AppIdentityServiceError_ErrorCode = 1000
+ AppIdentityServiceError_DEADLINE_EXCEEDED AppIdentityServiceError_ErrorCode = 1001
+ AppIdentityServiceError_NOT_A_VALID_APP AppIdentityServiceError_ErrorCode = 1002
+ AppIdentityServiceError_UNKNOWN_ERROR AppIdentityServiceError_ErrorCode = 1003
+ AppIdentityServiceError_NOT_ALLOWED AppIdentityServiceError_ErrorCode = 1005
+ AppIdentityServiceError_NOT_IMPLEMENTED AppIdentityServiceError_ErrorCode = 1006
+)
+
+var AppIdentityServiceError_ErrorCode_name = map[int32]string{
+ 0: "SUCCESS",
+ 9: "UNKNOWN_SCOPE",
+ 1000: "BLOB_TOO_LARGE",
+ 1001: "DEADLINE_EXCEEDED",
+ 1002: "NOT_A_VALID_APP",
+ 1003: "UNKNOWN_ERROR",
+ 1005: "NOT_ALLOWED",
+ 1006: "NOT_IMPLEMENTED",
+}
+var AppIdentityServiceError_ErrorCode_value = map[string]int32{
+ "SUCCESS": 0,
+ "UNKNOWN_SCOPE": 9,
+ "BLOB_TOO_LARGE": 1000,
+ "DEADLINE_EXCEEDED": 1001,
+ "NOT_A_VALID_APP": 1002,
+ "UNKNOWN_ERROR": 1003,
+ "NOT_ALLOWED": 1005,
+ "NOT_IMPLEMENTED": 1006,
+}
+
+func (x AppIdentityServiceError_ErrorCode) Enum() *AppIdentityServiceError_ErrorCode {
+ p := new(AppIdentityServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x AppIdentityServiceError_ErrorCode) String() string {
+ return proto.EnumName(AppIdentityServiceError_ErrorCode_name, int32(x))
+}
+func (x *AppIdentityServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(AppIdentityServiceError_ErrorCode_value, data, "AppIdentityServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = AppIdentityServiceError_ErrorCode(value)
+ return nil
+}
+
+type AppIdentityServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppIdentityServiceError) Reset() { *m = AppIdentityServiceError{} }
+func (m *AppIdentityServiceError) String() string { return proto.CompactTextString(m) }
+func (*AppIdentityServiceError) ProtoMessage() {}
+
+type SignForAppRequest struct {
+ BytesToSign []byte `protobuf:"bytes,1,opt,name=bytes_to_sign" json:"bytes_to_sign,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppRequest) Reset() { *m = SignForAppRequest{} }
+func (m *SignForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*SignForAppRequest) ProtoMessage() {}
+
+func (m *SignForAppRequest) GetBytesToSign() []byte {
+ if m != nil {
+ return m.BytesToSign
+ }
+ return nil
+}
+
+type SignForAppResponse struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ SignatureBytes []byte `protobuf:"bytes,2,opt,name=signature_bytes" json:"signature_bytes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SignForAppResponse) Reset() { *m = SignForAppResponse{} }
+func (m *SignForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*SignForAppResponse) ProtoMessage() {}
+
+func (m *SignForAppResponse) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *SignForAppResponse) GetSignatureBytes() []byte {
+ if m != nil {
+ return m.SignatureBytes
+ }
+ return nil
+}
+
+type GetPublicCertificateForAppRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppRequest) Reset() { *m = GetPublicCertificateForAppRequest{} }
+func (m *GetPublicCertificateForAppRequest) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppRequest) ProtoMessage() {}
+
+type PublicCertificate struct {
+ KeyName *string `protobuf:"bytes,1,opt,name=key_name" json:"key_name,omitempty"`
+ X509CertificatePem *string `protobuf:"bytes,2,opt,name=x509_certificate_pem" json:"x509_certificate_pem,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PublicCertificate) Reset() { *m = PublicCertificate{} }
+func (m *PublicCertificate) String() string { return proto.CompactTextString(m) }
+func (*PublicCertificate) ProtoMessage() {}
+
+func (m *PublicCertificate) GetKeyName() string {
+ if m != nil && m.KeyName != nil {
+ return *m.KeyName
+ }
+ return ""
+}
+
+func (m *PublicCertificate) GetX509CertificatePem() string {
+ if m != nil && m.X509CertificatePem != nil {
+ return *m.X509CertificatePem
+ }
+ return ""
+}
+
+type GetPublicCertificateForAppResponse struct {
+ PublicCertificateList []*PublicCertificate `protobuf:"bytes,1,rep,name=public_certificate_list" json:"public_certificate_list,omitempty"`
+ MaxClientCacheTimeInSecond *int64 `protobuf:"varint,2,opt,name=max_client_cache_time_in_second" json:"max_client_cache_time_in_second,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetPublicCertificateForAppResponse) Reset() { *m = GetPublicCertificateForAppResponse{} }
+func (m *GetPublicCertificateForAppResponse) String() string { return proto.CompactTextString(m) }
+func (*GetPublicCertificateForAppResponse) ProtoMessage() {}
+
+func (m *GetPublicCertificateForAppResponse) GetPublicCertificateList() []*PublicCertificate {
+ if m != nil {
+ return m.PublicCertificateList
+ }
+ return nil
+}
+
+func (m *GetPublicCertificateForAppResponse) GetMaxClientCacheTimeInSecond() int64 {
+ if m != nil && m.MaxClientCacheTimeInSecond != nil {
+ return *m.MaxClientCacheTimeInSecond
+ }
+ return 0
+}
+
+type GetServiceAccountNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameRequest) Reset() { *m = GetServiceAccountNameRequest{} }
+func (m *GetServiceAccountNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameRequest) ProtoMessage() {}
+
+type GetServiceAccountNameResponse struct {
+ ServiceAccountName *string `protobuf:"bytes,1,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetServiceAccountNameResponse) Reset() { *m = GetServiceAccountNameResponse{} }
+func (m *GetServiceAccountNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetServiceAccountNameResponse) ProtoMessage() {}
+
+func (m *GetServiceAccountNameResponse) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenRequest struct {
+ Scope []string `protobuf:"bytes,1,rep,name=scope" json:"scope,omitempty"`
+ ServiceAccountId *int64 `protobuf:"varint,2,opt,name=service_account_id" json:"service_account_id,omitempty"`
+ ServiceAccountName *string `protobuf:"bytes,3,opt,name=service_account_name" json:"service_account_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenRequest) Reset() { *m = GetAccessTokenRequest{} }
+func (m *GetAccessTokenRequest) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenRequest) ProtoMessage() {}
+
+func (m *GetAccessTokenRequest) GetScope() []string {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountId() int64 {
+ if m != nil && m.ServiceAccountId != nil {
+ return *m.ServiceAccountId
+ }
+ return 0
+}
+
+func (m *GetAccessTokenRequest) GetServiceAccountName() string {
+ if m != nil && m.ServiceAccountName != nil {
+ return *m.ServiceAccountName
+ }
+ return ""
+}
+
+type GetAccessTokenResponse struct {
+ AccessToken *string `protobuf:"bytes,1,opt,name=access_token" json:"access_token,omitempty"`
+ ExpirationTime *int64 `protobuf:"varint,2,opt,name=expiration_time" json:"expiration_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetAccessTokenResponse) Reset() { *m = GetAccessTokenResponse{} }
+func (m *GetAccessTokenResponse) String() string { return proto.CompactTextString(m) }
+func (*GetAccessTokenResponse) ProtoMessage() {}
+
+func (m *GetAccessTokenResponse) GetAccessToken() string {
+ if m != nil && m.AccessToken != nil {
+ return *m.AccessToken
+ }
+ return ""
+}
+
+func (m *GetAccessTokenResponse) GetExpirationTime() int64 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return 0
+}
+
+type GetDefaultGcsBucketNameRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameRequest) Reset() { *m = GetDefaultGcsBucketNameRequest{} }
+func (m *GetDefaultGcsBucketNameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameRequest) ProtoMessage() {}
+
+type GetDefaultGcsBucketNameResponse struct {
+ DefaultGcsBucketName *string `protobuf:"bytes,1,opt,name=default_gcs_bucket_name" json:"default_gcs_bucket_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultGcsBucketNameResponse) Reset() { *m = GetDefaultGcsBucketNameResponse{} }
+func (m *GetDefaultGcsBucketNameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultGcsBucketNameResponse) ProtoMessage() {}
+
+func (m *GetDefaultGcsBucketNameResponse) GetDefaultGcsBucketName() string {
+ if m != nil && m.DefaultGcsBucketName != nil {
+ return *m.DefaultGcsBucketName
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("appengine.AppIdentityServiceError_ErrorCode", AppIdentityServiceError_ErrorCode_name, AppIdentityServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
new file mode 100644
index 000000000000..19610ca5b753
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/app_identity/app_identity_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "app_identity";
+
+package appengine;
+
+message AppIdentityServiceError {
+ enum ErrorCode {
+ SUCCESS = 0;
+ UNKNOWN_SCOPE = 9;
+ BLOB_TOO_LARGE = 1000;
+ DEADLINE_EXCEEDED = 1001;
+ NOT_A_VALID_APP = 1002;
+ UNKNOWN_ERROR = 1003;
+ NOT_ALLOWED = 1005;
+ NOT_IMPLEMENTED = 1006;
+ }
+}
+
+message SignForAppRequest {
+ optional bytes bytes_to_sign = 1;
+}
+
+message SignForAppResponse {
+ optional string key_name = 1;
+ optional bytes signature_bytes = 2;
+}
+
+message GetPublicCertificateForAppRequest {
+}
+
+message PublicCertificate {
+ optional string key_name = 1;
+ optional string x509_certificate_pem = 2;
+}
+
+message GetPublicCertificateForAppResponse {
+ repeated PublicCertificate public_certificate_list = 1;
+ optional int64 max_client_cache_time_in_second = 2;
+}
+
+message GetServiceAccountNameRequest {
+}
+
+message GetServiceAccountNameResponse {
+ optional string service_account_name = 1;
+}
+
+message GetAccessTokenRequest {
+ repeated string scope = 1;
+ optional int64 service_account_id = 2;
+ optional string service_account_name = 3;
+}
+
+message GetAccessTokenResponse {
+ optional string access_token = 1;
+ optional int64 expiration_time = 2;
+}
+
+message GetDefaultGcsBucketNameRequest {
+}
+
+message GetDefaultGcsBucketNameResponse {
+ optional string default_gcs_bucket_name = 1;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.pb.go
new file mode 100644
index 000000000000..9ecc29b6361d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.pb.go
@@ -0,0 +1,134 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/base/api_base.proto
+// DO NOT EDIT!
+
+/*
+Package base is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/base/api_base.proto
+
+It has these top-level messages:
+ StringProto
+ Integer32Proto
+ Integer64Proto
+ BoolProto
+ DoubleProto
+ BytesProto
+ VoidProto
+*/
+package base
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type StringProto struct {
+ Value *string `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StringProto) Reset() { *m = StringProto{} }
+func (m *StringProto) String() string { return proto.CompactTextString(m) }
+func (*StringProto) ProtoMessage() {}
+
+func (m *StringProto) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Integer32Proto struct {
+ Value *int32 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer32Proto) Reset() { *m = Integer32Proto{} }
+func (m *Integer32Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer32Proto) ProtoMessage() {}
+
+func (m *Integer32Proto) GetValue() int32 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type Integer64Proto struct {
+ Value *int64 `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Integer64Proto) Reset() { *m = Integer64Proto{} }
+func (m *Integer64Proto) String() string { return proto.CompactTextString(m) }
+func (*Integer64Proto) ProtoMessage() {}
+
+func (m *Integer64Proto) GetValue() int64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BoolProto struct {
+ Value *bool `protobuf:"varint,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BoolProto) Reset() { *m = BoolProto{} }
+func (m *BoolProto) String() string { return proto.CompactTextString(m) }
+func (*BoolProto) ProtoMessage() {}
+
+func (m *BoolProto) GetValue() bool {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return false
+}
+
+type DoubleProto struct {
+ Value *float64 `protobuf:"fixed64,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DoubleProto) Reset() { *m = DoubleProto{} }
+func (m *DoubleProto) String() string { return proto.CompactTextString(m) }
+func (*DoubleProto) ProtoMessage() {}
+
+func (m *DoubleProto) GetValue() float64 {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return 0
+}
+
+type BytesProto struct {
+ Value []byte `protobuf:"bytes,1,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BytesProto) Reset() { *m = BytesProto{} }
+func (m *BytesProto) String() string { return proto.CompactTextString(m) }
+func (*BytesProto) ProtoMessage() {}
+
+func (m *BytesProto) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type VoidProto struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *VoidProto) Reset() { *m = VoidProto{} }
+func (m *VoidProto) String() string { return proto.CompactTextString(m) }
+func (*VoidProto) ProtoMessage() {}
+
+func init() {
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.proto
new file mode 100644
index 000000000000..56cd7a3cad05
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/base/api_base.proto
@@ -0,0 +1,33 @@
+// Built-in base types for API calls. Primarily useful as return types.
+
+syntax = "proto2";
+option go_package = "base";
+
+package appengine.base;
+
+message StringProto {
+ required string value = 1;
+}
+
+message Integer32Proto {
+ required int32 value = 1;
+}
+
+message Integer64Proto {
+ required int64 value = 1;
+}
+
+message BoolProto {
+ required bool value = 1;
+}
+
+message DoubleProto {
+ required double value = 1;
+}
+
+message BytesProto {
+ required bytes value = 1 [ctype=CORD];
+}
+
+message VoidProto {
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.pb.go
new file mode 100644
index 000000000000..1b5a6257123e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.pb.go
@@ -0,0 +1,153 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/channel/channel_service.proto
+// DO NOT EDIT!
+
+/*
+Package channel is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/channel/channel_service.proto
+
+It has these top-level messages:
+ ChannelServiceError
+ CreateChannelRequest
+ CreateChannelResponse
+ SendMessageRequest
+*/
+package channel
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ChannelServiceError_ErrorCode int32
+
+const (
+ ChannelServiceError_OK ChannelServiceError_ErrorCode = 0
+ ChannelServiceError_INTERNAL_ERROR ChannelServiceError_ErrorCode = 1
+ ChannelServiceError_INVALID_CHANNEL_KEY ChannelServiceError_ErrorCode = 2
+ ChannelServiceError_BAD_MESSAGE ChannelServiceError_ErrorCode = 3
+ ChannelServiceError_INVALID_CHANNEL_TOKEN_DURATION ChannelServiceError_ErrorCode = 4
+ ChannelServiceError_APPID_ALIAS_REQUIRED ChannelServiceError_ErrorCode = 5
+)
+
+var ChannelServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "INVALID_CHANNEL_KEY",
+ 3: "BAD_MESSAGE",
+ 4: "INVALID_CHANNEL_TOKEN_DURATION",
+ 5: "APPID_ALIAS_REQUIRED",
+}
+var ChannelServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "INVALID_CHANNEL_KEY": 2,
+ "BAD_MESSAGE": 3,
+ "INVALID_CHANNEL_TOKEN_DURATION": 4,
+ "APPID_ALIAS_REQUIRED": 5,
+}
+
+func (x ChannelServiceError_ErrorCode) Enum() *ChannelServiceError_ErrorCode {
+ p := new(ChannelServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ChannelServiceError_ErrorCode) String() string {
+ return proto.EnumName(ChannelServiceError_ErrorCode_name, int32(x))
+}
+func (x *ChannelServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ChannelServiceError_ErrorCode_value, data, "ChannelServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ChannelServiceError_ErrorCode(value)
+ return nil
+}
+
+type ChannelServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ChannelServiceError) Reset() { *m = ChannelServiceError{} }
+func (m *ChannelServiceError) String() string { return proto.CompactTextString(m) }
+func (*ChannelServiceError) ProtoMessage() {}
+
+type CreateChannelRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,2,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelRequest) Reset() { *m = CreateChannelRequest{} }
+func (m *CreateChannelRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelRequest) ProtoMessage() {}
+
+func (m *CreateChannelRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *CreateChannelRequest) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type CreateChannelResponse struct {
+ Token *string `protobuf:"bytes,2,opt,name=token" json:"token,omitempty"`
+ DurationMinutes *int32 `protobuf:"varint,3,opt,name=duration_minutes" json:"duration_minutes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateChannelResponse) Reset() { *m = CreateChannelResponse{} }
+func (m *CreateChannelResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateChannelResponse) ProtoMessage() {}
+
+func (m *CreateChannelResponse) GetToken() string {
+ if m != nil && m.Token != nil {
+ return *m.Token
+ }
+ return ""
+}
+
+func (m *CreateChannelResponse) GetDurationMinutes() int32 {
+ if m != nil && m.DurationMinutes != nil {
+ return *m.DurationMinutes
+ }
+ return 0
+}
+
+type SendMessageRequest struct {
+ ApplicationKey *string `protobuf:"bytes,1,req,name=application_key" json:"application_key,omitempty"`
+ Message *string `protobuf:"bytes,2,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SendMessageRequest) Reset() { *m = SendMessageRequest{} }
+func (m *SendMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*SendMessageRequest) ProtoMessage() {}
+
+func (m *SendMessageRequest) GetApplicationKey() string {
+ if m != nil && m.ApplicationKey != nil {
+ return *m.ApplicationKey
+ }
+ return ""
+}
+
+func (m *SendMessageRequest) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("appengine.ChannelServiceError_ErrorCode", ChannelServiceError_ErrorCode_name, ChannelServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.proto
new file mode 100644
index 000000000000..2b5a918ca652
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/channel/channel_service.proto
@@ -0,0 +1,30 @@
+syntax = "proto2";
+option go_package = "channel";
+
+package appengine;
+
+message ChannelServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ INVALID_CHANNEL_KEY = 2;
+ BAD_MESSAGE = 3;
+ INVALID_CHANNEL_TOKEN_DURATION = 4;
+ APPID_ALIAS_REQUIRED = 5;
+ }
+}
+
+message CreateChannelRequest {
+ required string application_key = 1;
+ optional int32 duration_minutes = 2;
+}
+
+message CreateChannelResponse {
+ optional string token = 2;
+ optional int32 duration_minutes = 3;
+}
+
+message SendMessageRequest {
+ required string application_key = 1;
+ required string message = 2;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
new file mode 100644
index 000000000000..f4fef0de0f9e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.pb.go
@@ -0,0 +1,2787 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/datastore/datastore_v3.proto
+// DO NOT EDIT!
+
+/*
+Package datastore is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/datastore/datastore_v3.proto
+
+It has these top-level messages:
+ Action
+ PropertyValue
+ Property
+ Path
+ Reference
+ User
+ EntityProto
+ CompositeProperty
+ Index
+ CompositeIndex
+ IndexPostfix
+ IndexPosition
+ Snapshot
+ InternalHeader
+ Transaction
+ Query
+ CompiledQuery
+ CompiledCursor
+ Cursor
+ Error
+ Cost
+ GetRequest
+ GetResponse
+ PutRequest
+ PutResponse
+ TouchRequest
+ TouchResponse
+ DeleteRequest
+ DeleteResponse
+ NextRequest
+ QueryResult
+ AllocateIdsRequest
+ AllocateIdsResponse
+ CompositeIndices
+ AddActionsRequest
+ AddActionsResponse
+ BeginTransactionRequest
+ CommitResponse
+*/
+package datastore
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type Property_Meaning int32
+
+const (
+ Property_NO_MEANING Property_Meaning = 0
+ Property_BLOB Property_Meaning = 14
+ Property_TEXT Property_Meaning = 15
+ Property_BYTESTRING Property_Meaning = 16
+ Property_ATOM_CATEGORY Property_Meaning = 1
+ Property_ATOM_LINK Property_Meaning = 2
+ Property_ATOM_TITLE Property_Meaning = 3
+ Property_ATOM_CONTENT Property_Meaning = 4
+ Property_ATOM_SUMMARY Property_Meaning = 5
+ Property_ATOM_AUTHOR Property_Meaning = 6
+ Property_GD_WHEN Property_Meaning = 7
+ Property_GD_EMAIL Property_Meaning = 8
+ Property_GEORSS_POINT Property_Meaning = 9
+ Property_GD_IM Property_Meaning = 10
+ Property_GD_PHONENUMBER Property_Meaning = 11
+ Property_GD_POSTALADDRESS Property_Meaning = 12
+ Property_GD_RATING Property_Meaning = 13
+ Property_BLOBKEY Property_Meaning = 17
+ Property_ENTITY_PROTO Property_Meaning = 19
+ Property_INDEX_VALUE Property_Meaning = 18
+)
+
+var Property_Meaning_name = map[int32]string{
+ 0: "NO_MEANING",
+ 14: "BLOB",
+ 15: "TEXT",
+ 16: "BYTESTRING",
+ 1: "ATOM_CATEGORY",
+ 2: "ATOM_LINK",
+ 3: "ATOM_TITLE",
+ 4: "ATOM_CONTENT",
+ 5: "ATOM_SUMMARY",
+ 6: "ATOM_AUTHOR",
+ 7: "GD_WHEN",
+ 8: "GD_EMAIL",
+ 9: "GEORSS_POINT",
+ 10: "GD_IM",
+ 11: "GD_PHONENUMBER",
+ 12: "GD_POSTALADDRESS",
+ 13: "GD_RATING",
+ 17: "BLOBKEY",
+ 19: "ENTITY_PROTO",
+ 18: "INDEX_VALUE",
+}
+var Property_Meaning_value = map[string]int32{
+ "NO_MEANING": 0,
+ "BLOB": 14,
+ "TEXT": 15,
+ "BYTESTRING": 16,
+ "ATOM_CATEGORY": 1,
+ "ATOM_LINK": 2,
+ "ATOM_TITLE": 3,
+ "ATOM_CONTENT": 4,
+ "ATOM_SUMMARY": 5,
+ "ATOM_AUTHOR": 6,
+ "GD_WHEN": 7,
+ "GD_EMAIL": 8,
+ "GEORSS_POINT": 9,
+ "GD_IM": 10,
+ "GD_PHONENUMBER": 11,
+ "GD_POSTALADDRESS": 12,
+ "GD_RATING": 13,
+ "BLOBKEY": 17,
+ "ENTITY_PROTO": 19,
+ "INDEX_VALUE": 18,
+}
+
+func (x Property_Meaning) Enum() *Property_Meaning {
+ p := new(Property_Meaning)
+ *p = x
+ return p
+}
+func (x Property_Meaning) String() string {
+ return proto.EnumName(Property_Meaning_name, int32(x))
+}
+func (x *Property_Meaning) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_Meaning_value, data, "Property_Meaning")
+ if err != nil {
+ return err
+ }
+ *x = Property_Meaning(value)
+ return nil
+}
+
+type Property_FtsTokenizationOption int32
+
+const (
+ Property_HTML Property_FtsTokenizationOption = 1
+ Property_ATOM Property_FtsTokenizationOption = 2
+)
+
+var Property_FtsTokenizationOption_name = map[int32]string{
+ 1: "HTML",
+ 2: "ATOM",
+}
+var Property_FtsTokenizationOption_value = map[string]int32{
+ "HTML": 1,
+ "ATOM": 2,
+}
+
+func (x Property_FtsTokenizationOption) Enum() *Property_FtsTokenizationOption {
+ p := new(Property_FtsTokenizationOption)
+ *p = x
+ return p
+}
+func (x Property_FtsTokenizationOption) String() string {
+ return proto.EnumName(Property_FtsTokenizationOption_name, int32(x))
+}
+func (x *Property_FtsTokenizationOption) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Property_FtsTokenizationOption_value, data, "Property_FtsTokenizationOption")
+ if err != nil {
+ return err
+ }
+ *x = Property_FtsTokenizationOption(value)
+ return nil
+}
+
+type EntityProto_Kind int32
+
+const (
+ EntityProto_GD_CONTACT EntityProto_Kind = 1
+ EntityProto_GD_EVENT EntityProto_Kind = 2
+ EntityProto_GD_MESSAGE EntityProto_Kind = 3
+)
+
+var EntityProto_Kind_name = map[int32]string{
+ 1: "GD_CONTACT",
+ 2: "GD_EVENT",
+ 3: "GD_MESSAGE",
+}
+var EntityProto_Kind_value = map[string]int32{
+ "GD_CONTACT": 1,
+ "GD_EVENT": 2,
+ "GD_MESSAGE": 3,
+}
+
+func (x EntityProto_Kind) Enum() *EntityProto_Kind {
+ p := new(EntityProto_Kind)
+ *p = x
+ return p
+}
+func (x EntityProto_Kind) String() string {
+ return proto.EnumName(EntityProto_Kind_name, int32(x))
+}
+func (x *EntityProto_Kind) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityProto_Kind_value, data, "EntityProto_Kind")
+ if err != nil {
+ return err
+ }
+ *x = EntityProto_Kind(value)
+ return nil
+}
+
+type Index_Property_Direction int32
+
+const (
+ Index_Property_ASCENDING Index_Property_Direction = 1
+ Index_Property_DESCENDING Index_Property_Direction = 2
+)
+
+var Index_Property_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Index_Property_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Index_Property_Direction) Enum() *Index_Property_Direction {
+ p := new(Index_Property_Direction)
+ *p = x
+ return p
+}
+func (x Index_Property_Direction) String() string {
+ return proto.EnumName(Index_Property_Direction_name, int32(x))
+}
+func (x *Index_Property_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Index_Property_Direction_value, data, "Index_Property_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Index_Property_Direction(value)
+ return nil
+}
+
+type CompositeIndex_State int32
+
+const (
+ CompositeIndex_WRITE_ONLY CompositeIndex_State = 1
+ CompositeIndex_READ_WRITE CompositeIndex_State = 2
+ CompositeIndex_DELETED CompositeIndex_State = 3
+ CompositeIndex_ERROR CompositeIndex_State = 4
+)
+
+var CompositeIndex_State_name = map[int32]string{
+ 1: "WRITE_ONLY",
+ 2: "READ_WRITE",
+ 3: "DELETED",
+ 4: "ERROR",
+}
+var CompositeIndex_State_value = map[string]int32{
+ "WRITE_ONLY": 1,
+ "READ_WRITE": 2,
+ "DELETED": 3,
+ "ERROR": 4,
+}
+
+func (x CompositeIndex_State) Enum() *CompositeIndex_State {
+ p := new(CompositeIndex_State)
+ *p = x
+ return p
+}
+func (x CompositeIndex_State) String() string {
+ return proto.EnumName(CompositeIndex_State_name, int32(x))
+}
+func (x *CompositeIndex_State) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeIndex_State_value, data, "CompositeIndex_State")
+ if err != nil {
+ return err
+ }
+ *x = CompositeIndex_State(value)
+ return nil
+}
+
+type Snapshot_Status int32
+
+const (
+ Snapshot_INACTIVE Snapshot_Status = 0
+ Snapshot_ACTIVE Snapshot_Status = 1
+)
+
+var Snapshot_Status_name = map[int32]string{
+ 0: "INACTIVE",
+ 1: "ACTIVE",
+}
+var Snapshot_Status_value = map[string]int32{
+ "INACTIVE": 0,
+ "ACTIVE": 1,
+}
+
+func (x Snapshot_Status) Enum() *Snapshot_Status {
+ p := new(Snapshot_Status)
+ *p = x
+ return p
+}
+func (x Snapshot_Status) String() string {
+ return proto.EnumName(Snapshot_Status_name, int32(x))
+}
+func (x *Snapshot_Status) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Snapshot_Status_value, data, "Snapshot_Status")
+ if err != nil {
+ return err
+ }
+ *x = Snapshot_Status(value)
+ return nil
+}
+
+type Query_Hint int32
+
+const (
+ Query_ORDER_FIRST Query_Hint = 1
+ Query_ANCESTOR_FIRST Query_Hint = 2
+ Query_FILTER_FIRST Query_Hint = 3
+)
+
+var Query_Hint_name = map[int32]string{
+ 1: "ORDER_FIRST",
+ 2: "ANCESTOR_FIRST",
+ 3: "FILTER_FIRST",
+}
+var Query_Hint_value = map[string]int32{
+ "ORDER_FIRST": 1,
+ "ANCESTOR_FIRST": 2,
+ "FILTER_FIRST": 3,
+}
+
+func (x Query_Hint) Enum() *Query_Hint {
+ p := new(Query_Hint)
+ *p = x
+ return p
+}
+func (x Query_Hint) String() string {
+ return proto.EnumName(Query_Hint_name, int32(x))
+}
+func (x *Query_Hint) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Hint_value, data, "Query_Hint")
+ if err != nil {
+ return err
+ }
+ *x = Query_Hint(value)
+ return nil
+}
+
+type Query_Filter_Operator int32
+
+const (
+ Query_Filter_LESS_THAN Query_Filter_Operator = 1
+ Query_Filter_LESS_THAN_OR_EQUAL Query_Filter_Operator = 2
+ Query_Filter_GREATER_THAN Query_Filter_Operator = 3
+ Query_Filter_GREATER_THAN_OR_EQUAL Query_Filter_Operator = 4
+ Query_Filter_EQUAL Query_Filter_Operator = 5
+ Query_Filter_IN Query_Filter_Operator = 6
+ Query_Filter_EXISTS Query_Filter_Operator = 7
+)
+
+var Query_Filter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 6: "IN",
+ 7: "EXISTS",
+}
+var Query_Filter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "IN": 6,
+ "EXISTS": 7,
+}
+
+func (x Query_Filter_Operator) Enum() *Query_Filter_Operator {
+ p := new(Query_Filter_Operator)
+ *p = x
+ return p
+}
+func (x Query_Filter_Operator) String() string {
+ return proto.EnumName(Query_Filter_Operator_name, int32(x))
+}
+func (x *Query_Filter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Filter_Operator_value, data, "Query_Filter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = Query_Filter_Operator(value)
+ return nil
+}
+
+type Query_Order_Direction int32
+
+const (
+ Query_Order_ASCENDING Query_Order_Direction = 1
+ Query_Order_DESCENDING Query_Order_Direction = 2
+)
+
+var Query_Order_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var Query_Order_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x Query_Order_Direction) Enum() *Query_Order_Direction {
+ p := new(Query_Order_Direction)
+ *p = x
+ return p
+}
+func (x Query_Order_Direction) String() string {
+ return proto.EnumName(Query_Order_Direction_name, int32(x))
+}
+func (x *Query_Order_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Query_Order_Direction_value, data, "Query_Order_Direction")
+ if err != nil {
+ return err
+ }
+ *x = Query_Order_Direction(value)
+ return nil
+}
+
+type Error_ErrorCode int32
+
+const (
+ Error_BAD_REQUEST Error_ErrorCode = 1
+ Error_CONCURRENT_TRANSACTION Error_ErrorCode = 2
+ Error_INTERNAL_ERROR Error_ErrorCode = 3
+ Error_NEED_INDEX Error_ErrorCode = 4
+ Error_TIMEOUT Error_ErrorCode = 5
+ Error_PERMISSION_DENIED Error_ErrorCode = 6
+ Error_BIGTABLE_ERROR Error_ErrorCode = 7
+ Error_COMMITTED_BUT_STILL_APPLYING Error_ErrorCode = 8
+ Error_CAPABILITY_DISABLED Error_ErrorCode = 9
+ Error_TRY_ALTERNATE_BACKEND Error_ErrorCode = 10
+ Error_SAFE_TIME_TOO_OLD Error_ErrorCode = 11
+)
+
+var Error_ErrorCode_name = map[int32]string{
+ 1: "BAD_REQUEST",
+ 2: "CONCURRENT_TRANSACTION",
+ 3: "INTERNAL_ERROR",
+ 4: "NEED_INDEX",
+ 5: "TIMEOUT",
+ 6: "PERMISSION_DENIED",
+ 7: "BIGTABLE_ERROR",
+ 8: "COMMITTED_BUT_STILL_APPLYING",
+ 9: "CAPABILITY_DISABLED",
+ 10: "TRY_ALTERNATE_BACKEND",
+ 11: "SAFE_TIME_TOO_OLD",
+}
+var Error_ErrorCode_value = map[string]int32{
+ "BAD_REQUEST": 1,
+ "CONCURRENT_TRANSACTION": 2,
+ "INTERNAL_ERROR": 3,
+ "NEED_INDEX": 4,
+ "TIMEOUT": 5,
+ "PERMISSION_DENIED": 6,
+ "BIGTABLE_ERROR": 7,
+ "COMMITTED_BUT_STILL_APPLYING": 8,
+ "CAPABILITY_DISABLED": 9,
+ "TRY_ALTERNATE_BACKEND": 10,
+ "SAFE_TIME_TOO_OLD": 11,
+}
+
+func (x Error_ErrorCode) Enum() *Error_ErrorCode {
+ p := new(Error_ErrorCode)
+ *p = x
+ return p
+}
+func (x Error_ErrorCode) String() string {
+ return proto.EnumName(Error_ErrorCode_name, int32(x))
+}
+func (x *Error_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Error_ErrorCode_value, data, "Error_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = Error_ErrorCode(value)
+ return nil
+}
+
+type PutRequest_AutoIdPolicy int32
+
+const (
+ PutRequest_CURRENT PutRequest_AutoIdPolicy = 0
+ PutRequest_SEQUENTIAL PutRequest_AutoIdPolicy = 1
+)
+
+var PutRequest_AutoIdPolicy_name = map[int32]string{
+ 0: "CURRENT",
+ 1: "SEQUENTIAL",
+}
+var PutRequest_AutoIdPolicy_value = map[string]int32{
+ "CURRENT": 0,
+ "SEQUENTIAL": 1,
+}
+
+func (x PutRequest_AutoIdPolicy) Enum() *PutRequest_AutoIdPolicy {
+ p := new(PutRequest_AutoIdPolicy)
+ *p = x
+ return p
+}
+func (x PutRequest_AutoIdPolicy) String() string {
+ return proto.EnumName(PutRequest_AutoIdPolicy_name, int32(x))
+}
+func (x *PutRequest_AutoIdPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PutRequest_AutoIdPolicy_value, data, "PutRequest_AutoIdPolicy")
+ if err != nil {
+ return err
+ }
+ *x = PutRequest_AutoIdPolicy(value)
+ return nil
+}
+
+type Action struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Action) Reset() { *m = Action{} }
+func (m *Action) String() string { return proto.CompactTextString(m) }
+func (*Action) ProtoMessage() {}
+
+type PropertyValue struct {
+ Int64Value *int64 `protobuf:"varint,1,opt,name=int64Value" json:"int64Value,omitempty"`
+ BooleanValue *bool `protobuf:"varint,2,opt,name=booleanValue" json:"booleanValue,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=stringValue" json:"stringValue,omitempty"`
+ DoubleValue *float64 `protobuf:"fixed64,4,opt,name=doubleValue" json:"doubleValue,omitempty"`
+ Pointvalue *PropertyValue_PointValue `protobuf:"group,5,opt,name=PointValue" json:"pointvalue,omitempty"`
+ Uservalue *PropertyValue_UserValue `protobuf:"group,8,opt,name=UserValue" json:"uservalue,omitempty"`
+ Referencevalue *PropertyValue_ReferenceValue `protobuf:"group,12,opt,name=ReferenceValue" json:"referencevalue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue) Reset() { *m = PropertyValue{} }
+func (m *PropertyValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue) ProtoMessage() {}
+
+func (m *PropertyValue) GetInt64Value() int64 {
+ if m != nil && m.Int64Value != nil {
+ return *m.Int64Value
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *PropertyValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *PropertyValue) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *PropertyValue) GetPointvalue() *PropertyValue_PointValue {
+ if m != nil {
+ return m.Pointvalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetUservalue() *PropertyValue_UserValue {
+ if m != nil {
+ return m.Uservalue
+ }
+ return nil
+}
+
+func (m *PropertyValue) GetReferencevalue() *PropertyValue_ReferenceValue {
+ if m != nil {
+ return m.Referencevalue
+ }
+ return nil
+}
+
+type PropertyValue_PointValue struct {
+ X *float64 `protobuf:"fixed64,6,req,name=x" json:"x,omitempty"`
+ Y *float64 `protobuf:"fixed64,7,req,name=y" json:"y,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_PointValue) Reset() { *m = PropertyValue_PointValue{} }
+func (m *PropertyValue_PointValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_PointValue) ProtoMessage() {}
+
+func (m *PropertyValue_PointValue) GetX() float64 {
+ if m != nil && m.X != nil {
+ return *m.X
+ }
+ return 0
+}
+
+func (m *PropertyValue_PointValue) GetY() float64 {
+ if m != nil && m.Y != nil {
+ return *m.Y
+ }
+ return 0
+}
+
+type PropertyValue_UserValue struct {
+ Email *string `protobuf:"bytes,9,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,10,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,11,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,21,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,22,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_UserValue) Reset() { *m = PropertyValue_UserValue{} }
+func (m *PropertyValue_UserValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_UserValue) ProtoMessage() {}
+
+func (m *PropertyValue_UserValue) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *PropertyValue_UserValue) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type PropertyValue_ReferenceValue struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Pathelement []*PropertyValue_ReferenceValue_PathElement `protobuf:"group,14,rep,name=PathElement" json:"pathelement,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue) Reset() { *m = PropertyValue_ReferenceValue{} }
+func (m *PropertyValue_ReferenceValue) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue) GetPathelement() []*PropertyValue_ReferenceValue_PathElement {
+ if m != nil {
+ return m.Pathelement
+ }
+ return nil
+}
+
+type PropertyValue_ReferenceValue_PathElement struct {
+ Type *string `protobuf:"bytes,15,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,16,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,17,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) Reset() {
+ *m = PropertyValue_ReferenceValue_PathElement{}
+}
+func (m *PropertyValue_ReferenceValue_PathElement) String() string { return proto.CompactTextString(m) }
+func (*PropertyValue_ReferenceValue_PathElement) ProtoMessage() {}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *PropertyValue_ReferenceValue_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Property struct {
+ Meaning *Property_Meaning `protobuf:"varint,1,opt,name=meaning,enum=appengine.Property_Meaning,def=0" json:"meaning,omitempty"`
+ MeaningUri *string `protobuf:"bytes,2,opt,name=meaning_uri" json:"meaning_uri,omitempty"`
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,5,req,name=value" json:"value,omitempty"`
+ Multiple *bool `protobuf:"varint,4,req,name=multiple" json:"multiple,omitempty"`
+ Searchable *bool `protobuf:"varint,6,opt,name=searchable,def=0" json:"searchable,omitempty"`
+ FtsTokenizationOption *Property_FtsTokenizationOption `protobuf:"varint,8,opt,name=fts_tokenization_option,enum=appengine.Property_FtsTokenizationOption" json:"fts_tokenization_option,omitempty"`
+ Locale *string `protobuf:"bytes,9,opt,name=locale,def=en" json:"locale,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+const Default_Property_Meaning Property_Meaning = Property_NO_MEANING
+const Default_Property_Searchable bool = false
+const Default_Property_Locale string = "en"
+
+func (m *Property) GetMeaning() Property_Meaning {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return Default_Property_Meaning
+}
+
+func (m *Property) GetMeaningUri() string {
+ if m != nil && m.MeaningUri != nil {
+ return *m.MeaningUri
+ }
+ return ""
+}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *Property) GetMultiple() bool {
+ if m != nil && m.Multiple != nil {
+ return *m.Multiple
+ }
+ return false
+}
+
+func (m *Property) GetSearchable() bool {
+ if m != nil && m.Searchable != nil {
+ return *m.Searchable
+ }
+ return Default_Property_Searchable
+}
+
+func (m *Property) GetFtsTokenizationOption() Property_FtsTokenizationOption {
+ if m != nil && m.FtsTokenizationOption != nil {
+ return *m.FtsTokenizationOption
+ }
+ return Property_HTML
+}
+
+func (m *Property) GetLocale() string {
+ if m != nil && m.Locale != nil {
+ return *m.Locale
+ }
+ return Default_Property_Locale
+}
+
+type Path struct {
+ Element []*Path_Element `protobuf:"group,1,rep" json:"element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path) Reset() { *m = Path{} }
+func (m *Path) String() string { return proto.CompactTextString(m) }
+func (*Path) ProtoMessage() {}
+
+func (m *Path) GetElement() []*Path_Element {
+ if m != nil {
+ return m.Element
+ }
+ return nil
+}
+
+type Path_Element struct {
+ Type *string `protobuf:"bytes,2,req,name=type" json:"type,omitempty"`
+ Id *int64 `protobuf:"varint,3,opt,name=id" json:"id,omitempty"`
+ Name *string `protobuf:"bytes,4,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Path_Element) Reset() { *m = Path_Element{} }
+func (m *Path_Element) String() string { return proto.CompactTextString(m) }
+func (*Path_Element) ProtoMessage() {}
+
+func (m *Path_Element) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *Path_Element) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Path_Element) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+type Reference struct {
+ App *string `protobuf:"bytes,13,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,20,opt,name=name_space" json:"name_space,omitempty"`
+ Path *Path `protobuf:"bytes,14,req,name=path" json:"path,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Reference) Reset() { *m = Reference{} }
+func (m *Reference) String() string { return proto.CompactTextString(m) }
+func (*Reference) ProtoMessage() {}
+
+func (m *Reference) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Reference) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Reference) GetPath() *Path {
+ if m != nil {
+ return m.Path
+ }
+ return nil
+}
+
+type User struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,req,name=auth_domain" json:"auth_domain,omitempty"`
+ Nickname *string `protobuf:"bytes,3,opt,name=nickname" json:"nickname,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,6,opt,name=federated_identity" json:"federated_identity,omitempty"`
+ FederatedProvider *string `protobuf:"bytes,7,opt,name=federated_provider" json:"federated_provider,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *User) Reset() { *m = User{} }
+func (m *User) String() string { return proto.CompactTextString(m) }
+func (*User) ProtoMessage() {}
+
+func (m *User) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *User) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *User) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *User) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+func (m *User) GetFederatedProvider() string {
+ if m != nil && m.FederatedProvider != nil {
+ return *m.FederatedProvider
+ }
+ return ""
+}
+
+type EntityProto struct {
+ Key *Reference `protobuf:"bytes,13,req,name=key" json:"key,omitempty"`
+ EntityGroup *Path `protobuf:"bytes,16,req,name=entity_group" json:"entity_group,omitempty"`
+ Owner *User `protobuf:"bytes,17,opt,name=owner" json:"owner,omitempty"`
+ Kind *EntityProto_Kind `protobuf:"varint,4,opt,name=kind,enum=appengine.EntityProto_Kind" json:"kind,omitempty"`
+ KindUri *string `protobuf:"bytes,5,opt,name=kind_uri" json:"kind_uri,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ RawProperty []*Property `protobuf:"bytes,15,rep,name=raw_property" json:"raw_property,omitempty"`
+ Rank *int32 `protobuf:"varint,18,opt,name=rank" json:"rank,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityProto) Reset() { *m = EntityProto{} }
+func (m *EntityProto) String() string { return proto.CompactTextString(m) }
+func (*EntityProto) ProtoMessage() {}
+
+func (m *EntityProto) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *EntityProto) GetEntityGroup() *Path {
+ if m != nil {
+ return m.EntityGroup
+ }
+ return nil
+}
+
+func (m *EntityProto) GetOwner() *User {
+ if m != nil {
+ return m.Owner
+ }
+ return nil
+}
+
+func (m *EntityProto) GetKind() EntityProto_Kind {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return EntityProto_GD_CONTACT
+}
+
+func (m *EntityProto) GetKindUri() string {
+ if m != nil && m.KindUri != nil {
+ return *m.KindUri
+ }
+ return ""
+}
+
+func (m *EntityProto) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRawProperty() []*Property {
+ if m != nil {
+ return m.RawProperty
+ }
+ return nil
+}
+
+func (m *EntityProto) GetRank() int32 {
+ if m != nil && m.Rank != nil {
+ return *m.Rank
+ }
+ return 0
+}
+
+type CompositeProperty struct {
+ IndexId *int64 `protobuf:"varint,1,req,name=index_id" json:"index_id,omitempty"`
+ Value []string `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeProperty) Reset() { *m = CompositeProperty{} }
+func (m *CompositeProperty) String() string { return proto.CompactTextString(m) }
+func (*CompositeProperty) ProtoMessage() {}
+
+func (m *CompositeProperty) GetIndexId() int64 {
+ if m != nil && m.IndexId != nil {
+ return *m.IndexId
+ }
+ return 0
+}
+
+func (m *CompositeProperty) GetValue() []string {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Index struct {
+ EntityType *string `protobuf:"bytes,1,req,name=entity_type" json:"entity_type,omitempty"`
+ Ancestor *bool `protobuf:"varint,5,req,name=ancestor" json:"ancestor,omitempty"`
+ Property []*Index_Property `protobuf:"group,2,rep" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index) Reset() { *m = Index{} }
+func (m *Index) String() string { return proto.CompactTextString(m) }
+func (*Index) ProtoMessage() {}
+
+func (m *Index) GetEntityType() string {
+ if m != nil && m.EntityType != nil {
+ return *m.EntityType
+ }
+ return ""
+}
+
+func (m *Index) GetAncestor() bool {
+ if m != nil && m.Ancestor != nil {
+ return *m.Ancestor
+ }
+ return false
+}
+
+func (m *Index) GetProperty() []*Index_Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Index_Property struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Direction *Index_Property_Direction `protobuf:"varint,4,opt,name=direction,enum=appengine.Index_Property_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Index_Property) Reset() { *m = Index_Property{} }
+func (m *Index_Property) String() string { return proto.CompactTextString(m) }
+func (*Index_Property) ProtoMessage() {}
+
+const Default_Index_Property_Direction Index_Property_Direction = Index_Property_ASCENDING
+
+func (m *Index_Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Index_Property) GetDirection() Index_Property_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Index_Property_Direction
+}
+
+type CompositeIndex struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Id *int64 `protobuf:"varint,2,req,name=id" json:"id,omitempty"`
+ Definition *Index `protobuf:"bytes,3,req,name=definition" json:"definition,omitempty"`
+ State *CompositeIndex_State `protobuf:"varint,4,req,name=state,enum=appengine.CompositeIndex_State" json:"state,omitempty"`
+ OnlyUseIfRequired *bool `protobuf:"varint,6,opt,name=only_use_if_required,def=0" json:"only_use_if_required,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndex) Reset() { *m = CompositeIndex{} }
+func (m *CompositeIndex) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndex) ProtoMessage() {}
+
+const Default_CompositeIndex_OnlyUseIfRequired bool = false
+
+func (m *CompositeIndex) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *CompositeIndex) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *CompositeIndex) GetDefinition() *Index {
+ if m != nil {
+ return m.Definition
+ }
+ return nil
+}
+
+func (m *CompositeIndex) GetState() CompositeIndex_State {
+ if m != nil && m.State != nil {
+ return *m.State
+ }
+ return CompositeIndex_WRITE_ONLY
+}
+
+func (m *CompositeIndex) GetOnlyUseIfRequired() bool {
+ if m != nil && m.OnlyUseIfRequired != nil {
+ return *m.OnlyUseIfRequired
+ }
+ return Default_CompositeIndex_OnlyUseIfRequired
+}
+
+type IndexPostfix struct {
+ IndexValue []*IndexPostfix_IndexValue `protobuf:"bytes,1,rep,name=index_value" json:"index_value,omitempty"`
+ Key *Reference `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,3,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix) Reset() { *m = IndexPostfix{} }
+func (m *IndexPostfix) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix) ProtoMessage() {}
+
+const Default_IndexPostfix_Before bool = true
+
+func (m *IndexPostfix) GetIndexValue() []*IndexPostfix_IndexValue {
+ if m != nil {
+ return m.IndexValue
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *IndexPostfix) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPostfix_Before
+}
+
+type IndexPostfix_IndexValue struct {
+ PropertyName *string `protobuf:"bytes,1,req,name=property_name" json:"property_name,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPostfix_IndexValue) Reset() { *m = IndexPostfix_IndexValue{} }
+func (m *IndexPostfix_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*IndexPostfix_IndexValue) ProtoMessage() {}
+
+func (m *IndexPostfix_IndexValue) GetPropertyName() string {
+ if m != nil && m.PropertyName != nil {
+ return *m.PropertyName
+ }
+ return ""
+}
+
+func (m *IndexPostfix_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type IndexPosition struct {
+ Key *string `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ Before *bool `protobuf:"varint,2,opt,name=before,def=1" json:"before,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexPosition) Reset() { *m = IndexPosition{} }
+func (m *IndexPosition) String() string { return proto.CompactTextString(m) }
+func (*IndexPosition) ProtoMessage() {}
+
+const Default_IndexPosition_Before bool = true
+
+func (m *IndexPosition) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *IndexPosition) GetBefore() bool {
+ if m != nil && m.Before != nil {
+ return *m.Before
+ }
+ return Default_IndexPosition_Before
+}
+
+type Snapshot struct {
+ Ts *int64 `protobuf:"varint,1,req,name=ts" json:"ts,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Snapshot) Reset() { *m = Snapshot{} }
+func (m *Snapshot) String() string { return proto.CompactTextString(m) }
+func (*Snapshot) ProtoMessage() {}
+
+func (m *Snapshot) GetTs() int64 {
+ if m != nil && m.Ts != nil {
+ return *m.Ts
+ }
+ return 0
+}
+
+type InternalHeader struct {
+ Qos *string `protobuf:"bytes,1,opt,name=qos" json:"qos,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InternalHeader) Reset() { *m = InternalHeader{} }
+func (m *InternalHeader) String() string { return proto.CompactTextString(m) }
+func (*InternalHeader) ProtoMessage() {}
+
+func (m *InternalHeader) GetQos() string {
+ if m != nil && m.Qos != nil {
+ return *m.Qos
+ }
+ return ""
+}
+
+type Transaction struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ Handle *uint64 `protobuf:"fixed64,1,req,name=handle" json:"handle,omitempty"`
+ App *string `protobuf:"bytes,2,req,name=app" json:"app,omitempty"`
+ MarkChanges *bool `protobuf:"varint,3,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transaction) Reset() { *m = Transaction{} }
+func (m *Transaction) String() string { return proto.CompactTextString(m) }
+func (*Transaction) ProtoMessage() {}
+
+const Default_Transaction_MarkChanges bool = false
+
+func (m *Transaction) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Transaction) GetHandle() uint64 {
+ if m != nil && m.Handle != nil {
+ return *m.Handle
+ }
+ return 0
+}
+
+func (m *Transaction) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Transaction) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_Transaction_MarkChanges
+}
+
+type Query struct {
+ Header *InternalHeader `protobuf:"bytes,39,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ NameSpace *string `protobuf:"bytes,29,opt,name=name_space" json:"name_space,omitempty"`
+ Kind *string `protobuf:"bytes,3,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,17,opt,name=ancestor" json:"ancestor,omitempty"`
+ Filter []*Query_Filter `protobuf:"group,4,rep" json:"filter,omitempty"`
+ SearchQuery *string `protobuf:"bytes,8,opt,name=search_query" json:"search_query,omitempty"`
+ Order []*Query_Order `protobuf:"group,9,rep" json:"order,omitempty"`
+ Hint *Query_Hint `protobuf:"varint,18,opt,name=hint,enum=appengine.Query_Hint" json:"hint,omitempty"`
+ Count *int32 `protobuf:"varint,23,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,12,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,16,opt,name=limit" json:"limit,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,30,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ EndCompiledCursor *CompiledCursor `protobuf:"bytes,31,opt,name=end_compiled_cursor" json:"end_compiled_cursor,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,19,rep,name=composite_index" json:"composite_index,omitempty"`
+ RequirePerfectPlan *bool `protobuf:"varint,20,opt,name=require_perfect_plan,def=0" json:"require_perfect_plan,omitempty"`
+ KeysOnly *bool `protobuf:"varint,21,opt,name=keys_only,def=0" json:"keys_only,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,22,opt,name=transaction" json:"transaction,omitempty"`
+ Compile *bool `protobuf:"varint,25,opt,name=compile,def=0" json:"compile,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,26,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,32,opt,name=strong" json:"strong,omitempty"`
+ PropertyName []string `protobuf:"bytes,33,rep,name=property_name" json:"property_name,omitempty"`
+ GroupByPropertyName []string `protobuf:"bytes,34,rep,name=group_by_property_name" json:"group_by_property_name,omitempty"`
+ Distinct *bool `protobuf:"varint,24,opt,name=distinct" json:"distinct,omitempty"`
+ MinSafeTimeSeconds *int64 `protobuf:"varint,35,opt,name=min_safe_time_seconds" json:"min_safe_time_seconds,omitempty"`
+ SafeReplicaName []string `protobuf:"bytes,36,rep,name=safe_replica_name" json:"safe_replica_name,omitempty"`
+ PersistOffset *bool `protobuf:"varint,37,opt,name=persist_offset,def=0" json:"persist_offset,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+const Default_Query_RequirePerfectPlan bool = false
+const Default_Query_KeysOnly bool = false
+const Default_Query_Compile bool = false
+const Default_Query_PersistOffset bool = false
+
+func (m *Query) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *Query) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *Query) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *Query) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Query) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() []*Query_Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetSearchQuery() string {
+ if m != nil && m.SearchQuery != nil {
+ return *m.SearchQuery
+ }
+ return ""
+}
+
+func (m *Query) GetOrder() []*Query_Order {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetHint() Query_Hint {
+ if m != nil && m.Hint != nil {
+ return *m.Hint
+ }
+ return Query_ORDER_FIRST
+}
+
+func (m *Query) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *Query) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.EndCompiledCursor
+ }
+ return nil
+}
+
+func (m *Query) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *Query) GetRequirePerfectPlan() bool {
+ if m != nil && m.RequirePerfectPlan != nil {
+ return *m.RequirePerfectPlan
+ }
+ return Default_Query_RequirePerfectPlan
+}
+
+func (m *Query) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return Default_Query_KeysOnly
+}
+
+func (m *Query) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *Query) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_Query_Compile
+}
+
+func (m *Query) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *Query) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *Query) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetGroupByPropertyName() []string {
+ if m != nil {
+ return m.GroupByPropertyName
+ }
+ return nil
+}
+
+func (m *Query) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return false
+}
+
+func (m *Query) GetMinSafeTimeSeconds() int64 {
+ if m != nil && m.MinSafeTimeSeconds != nil {
+ return *m.MinSafeTimeSeconds
+ }
+ return 0
+}
+
+func (m *Query) GetSafeReplicaName() []string {
+ if m != nil {
+ return m.SafeReplicaName
+ }
+ return nil
+}
+
+func (m *Query) GetPersistOffset() bool {
+ if m != nil && m.PersistOffset != nil {
+ return *m.PersistOffset
+ }
+ return Default_Query_PersistOffset
+}
+
+type Query_Filter struct {
+ Op *Query_Filter_Operator `protobuf:"varint,6,req,name=op,enum=appengine.Query_Filter_Operator" json:"op,omitempty"`
+ Property []*Property `protobuf:"bytes,14,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Filter) Reset() { *m = Query_Filter{} }
+func (m *Query_Filter) String() string { return proto.CompactTextString(m) }
+func (*Query_Filter) ProtoMessage() {}
+
+func (m *Query_Filter) GetOp() Query_Filter_Operator {
+ if m != nil && m.Op != nil {
+ return *m.Op
+ }
+ return Query_Filter_LESS_THAN
+}
+
+func (m *Query_Filter) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+type Query_Order struct {
+ Property *string `protobuf:"bytes,10,req,name=property" json:"property,omitempty"`
+ Direction *Query_Order_Direction `protobuf:"varint,11,opt,name=direction,enum=appengine.Query_Order_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query_Order) Reset() { *m = Query_Order{} }
+func (m *Query_Order) String() string { return proto.CompactTextString(m) }
+func (*Query_Order) ProtoMessage() {}
+
+const Default_Query_Order_Direction Query_Order_Direction = Query_Order_ASCENDING
+
+func (m *Query_Order) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *Query_Order) GetDirection() Query_Order_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_Query_Order_Direction
+}
+
+type CompiledQuery struct {
+ Primaryscan *CompiledQuery_PrimaryScan `protobuf:"group,1,req,name=PrimaryScan" json:"primaryscan,omitempty"`
+ Mergejoinscan []*CompiledQuery_MergeJoinScan `protobuf:"group,7,rep,name=MergeJoinScan" json:"mergejoinscan,omitempty"`
+ IndexDef *Index `protobuf:"bytes,21,opt,name=index_def" json:"index_def,omitempty"`
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,req,name=keys_only" json:"keys_only,omitempty"`
+ PropertyName []string `protobuf:"bytes,24,rep,name=property_name" json:"property_name,omitempty"`
+ DistinctInfixSize *int32 `protobuf:"varint,25,opt,name=distinct_infix_size" json:"distinct_infix_size,omitempty"`
+ Entityfilter *CompiledQuery_EntityFilter `protobuf:"group,13,opt,name=EntityFilter" json:"entityfilter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery) Reset() { *m = CompiledQuery{} }
+func (m *CompiledQuery) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery) ProtoMessage() {}
+
+const Default_CompiledQuery_Offset int32 = 0
+
+func (m *CompiledQuery) GetPrimaryscan() *CompiledQuery_PrimaryScan {
+ if m != nil {
+ return m.Primaryscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetMergejoinscan() []*CompiledQuery_MergeJoinScan {
+ if m != nil {
+ return m.Mergejoinscan
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetIndexDef() *Index {
+ if m != nil {
+ return m.IndexDef
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_CompiledQuery_Offset
+}
+
+func (m *CompiledQuery) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *CompiledQuery) GetPropertyName() []string {
+ if m != nil {
+ return m.PropertyName
+ }
+ return nil
+}
+
+func (m *CompiledQuery) GetDistinctInfixSize() int32 {
+ if m != nil && m.DistinctInfixSize != nil {
+ return *m.DistinctInfixSize
+ }
+ return 0
+}
+
+func (m *CompiledQuery) GetEntityfilter() *CompiledQuery_EntityFilter {
+ if m != nil {
+ return m.Entityfilter
+ }
+ return nil
+}
+
+type CompiledQuery_PrimaryScan struct {
+ IndexName *string `protobuf:"bytes,2,opt,name=index_name" json:"index_name,omitempty"`
+ StartKey *string `protobuf:"bytes,3,opt,name=start_key" json:"start_key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,4,opt,name=start_inclusive" json:"start_inclusive,omitempty"`
+ EndKey *string `protobuf:"bytes,5,opt,name=end_key" json:"end_key,omitempty"`
+ EndInclusive *bool `protobuf:"varint,6,opt,name=end_inclusive" json:"end_inclusive,omitempty"`
+ StartPostfixValue []string `protobuf:"bytes,22,rep,name=start_postfix_value" json:"start_postfix_value,omitempty"`
+ EndPostfixValue []string `protobuf:"bytes,23,rep,name=end_postfix_value" json:"end_postfix_value,omitempty"`
+ EndUnappliedLogTimestampUs *int64 `protobuf:"varint,19,opt,name=end_unapplied_log_timestamp_us" json:"end_unapplied_log_timestamp_us,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_PrimaryScan) Reset() { *m = CompiledQuery_PrimaryScan{} }
+func (m *CompiledQuery_PrimaryScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_PrimaryScan) ProtoMessage() {}
+
+func (m *CompiledQuery_PrimaryScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndKey() string {
+ if m != nil && m.EndKey != nil {
+ return *m.EndKey
+ }
+ return ""
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndInclusive() bool {
+ if m != nil && m.EndInclusive != nil {
+ return *m.EndInclusive
+ }
+ return false
+}
+
+func (m *CompiledQuery_PrimaryScan) GetStartPostfixValue() []string {
+ if m != nil {
+ return m.StartPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndPostfixValue() []string {
+ if m != nil {
+ return m.EndPostfixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_PrimaryScan) GetEndUnappliedLogTimestampUs() int64 {
+ if m != nil && m.EndUnappliedLogTimestampUs != nil {
+ return *m.EndUnappliedLogTimestampUs
+ }
+ return 0
+}
+
+type CompiledQuery_MergeJoinScan struct {
+ IndexName *string `protobuf:"bytes,8,req,name=index_name" json:"index_name,omitempty"`
+ PrefixValue []string `protobuf:"bytes,9,rep,name=prefix_value" json:"prefix_value,omitempty"`
+ ValuePrefix *bool `protobuf:"varint,20,opt,name=value_prefix,def=0" json:"value_prefix,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_MergeJoinScan) Reset() { *m = CompiledQuery_MergeJoinScan{} }
+func (m *CompiledQuery_MergeJoinScan) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_MergeJoinScan) ProtoMessage() {}
+
+const Default_CompiledQuery_MergeJoinScan_ValuePrefix bool = false
+
+func (m *CompiledQuery_MergeJoinScan) GetIndexName() string {
+ if m != nil && m.IndexName != nil {
+ return *m.IndexName
+ }
+ return ""
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetPrefixValue() []string {
+ if m != nil {
+ return m.PrefixValue
+ }
+ return nil
+}
+
+func (m *CompiledQuery_MergeJoinScan) GetValuePrefix() bool {
+ if m != nil && m.ValuePrefix != nil {
+ return *m.ValuePrefix
+ }
+ return Default_CompiledQuery_MergeJoinScan_ValuePrefix
+}
+
+type CompiledQuery_EntityFilter struct {
+ Distinct *bool `protobuf:"varint,14,opt,name=distinct,def=0" json:"distinct,omitempty"`
+ Kind *string `protobuf:"bytes,17,opt,name=kind" json:"kind,omitempty"`
+ Ancestor *Reference `protobuf:"bytes,18,opt,name=ancestor" json:"ancestor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledQuery_EntityFilter) Reset() { *m = CompiledQuery_EntityFilter{} }
+func (m *CompiledQuery_EntityFilter) String() string { return proto.CompactTextString(m) }
+func (*CompiledQuery_EntityFilter) ProtoMessage() {}
+
+const Default_CompiledQuery_EntityFilter_Distinct bool = false
+
+func (m *CompiledQuery_EntityFilter) GetDistinct() bool {
+ if m != nil && m.Distinct != nil {
+ return *m.Distinct
+ }
+ return Default_CompiledQuery_EntityFilter_Distinct
+}
+
+func (m *CompiledQuery_EntityFilter) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *CompiledQuery_EntityFilter) GetAncestor() *Reference {
+ if m != nil {
+ return m.Ancestor
+ }
+ return nil
+}
+
+type CompiledCursor struct {
+ Position *CompiledCursor_Position `protobuf:"group,2,opt" json:"position,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor) Reset() { *m = CompiledCursor{} }
+func (m *CompiledCursor) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor) ProtoMessage() {}
+
+func (m *CompiledCursor) GetPosition() *CompiledCursor_Position {
+ if m != nil {
+ return m.Position
+ }
+ return nil
+}
+
+type CompiledCursor_Position struct {
+ StartKey *string `protobuf:"bytes,27,opt,name=start_key" json:"start_key,omitempty"`
+ Indexvalue []*CompiledCursor_Position_IndexValue `protobuf:"group,29,rep,name=IndexValue" json:"indexvalue,omitempty"`
+ Key *Reference `protobuf:"bytes,32,opt,name=key" json:"key,omitempty"`
+ StartInclusive *bool `protobuf:"varint,28,opt,name=start_inclusive,def=1" json:"start_inclusive,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position) Reset() { *m = CompiledCursor_Position{} }
+func (m *CompiledCursor_Position) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position) ProtoMessage() {}
+
+const Default_CompiledCursor_Position_StartInclusive bool = true
+
+func (m *CompiledCursor_Position) GetStartKey() string {
+ if m != nil && m.StartKey != nil {
+ return *m.StartKey
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position) GetIndexvalue() []*CompiledCursor_Position_IndexValue {
+ if m != nil {
+ return m.Indexvalue
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *CompiledCursor_Position) GetStartInclusive() bool {
+ if m != nil && m.StartInclusive != nil {
+ return *m.StartInclusive
+ }
+ return Default_CompiledCursor_Position_StartInclusive
+}
+
+type CompiledCursor_Position_IndexValue struct {
+ Property *string `protobuf:"bytes,30,opt,name=property" json:"property,omitempty"`
+ Value *PropertyValue `protobuf:"bytes,31,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompiledCursor_Position_IndexValue) Reset() { *m = CompiledCursor_Position_IndexValue{} }
+func (m *CompiledCursor_Position_IndexValue) String() string { return proto.CompactTextString(m) }
+func (*CompiledCursor_Position_IndexValue) ProtoMessage() {}
+
+func (m *CompiledCursor_Position_IndexValue) GetProperty() string {
+ if m != nil && m.Property != nil {
+ return *m.Property
+ }
+ return ""
+}
+
+func (m *CompiledCursor_Position_IndexValue) GetValue() *PropertyValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Cursor struct {
+ Cursor *uint64 `protobuf:"fixed64,1,req,name=cursor" json:"cursor,omitempty"`
+ App *string `protobuf:"bytes,2,opt,name=app" json:"app,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cursor) Reset() { *m = Cursor{} }
+func (m *Cursor) String() string { return proto.CompactTextString(m) }
+func (*Cursor) ProtoMessage() {}
+
+func (m *Cursor) GetCursor() uint64 {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return 0
+}
+
+func (m *Cursor) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+type Error struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Error) Reset() { *m = Error{} }
+func (m *Error) String() string { return proto.CompactTextString(m) }
+func (*Error) ProtoMessage() {}
+
+type Cost struct {
+ IndexWrites *int32 `protobuf:"varint,1,opt,name=index_writes" json:"index_writes,omitempty"`
+ IndexWriteBytes *int32 `protobuf:"varint,2,opt,name=index_write_bytes" json:"index_write_bytes,omitempty"`
+ EntityWrites *int32 `protobuf:"varint,3,opt,name=entity_writes" json:"entity_writes,omitempty"`
+ EntityWriteBytes *int32 `protobuf:"varint,4,opt,name=entity_write_bytes" json:"entity_write_bytes,omitempty"`
+ Commitcost *Cost_CommitCost `protobuf:"group,5,opt,name=CommitCost" json:"commitcost,omitempty"`
+ ApproximateStorageDelta *int32 `protobuf:"varint,8,opt,name=approximate_storage_delta" json:"approximate_storage_delta,omitempty"`
+ IdSequenceUpdates *int32 `protobuf:"varint,9,opt,name=id_sequence_updates" json:"id_sequence_updates,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost) Reset() { *m = Cost{} }
+func (m *Cost) String() string { return proto.CompactTextString(m) }
+func (*Cost) ProtoMessage() {}
+
+func (m *Cost) GetIndexWrites() int32 {
+ if m != nil && m.IndexWrites != nil {
+ return *m.IndexWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetIndexWriteBytes() int32 {
+ if m != nil && m.IndexWriteBytes != nil {
+ return *m.IndexWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWrites() int32 {
+ if m != nil && m.EntityWrites != nil {
+ return *m.EntityWrites
+ }
+ return 0
+}
+
+func (m *Cost) GetEntityWriteBytes() int32 {
+ if m != nil && m.EntityWriteBytes != nil {
+ return *m.EntityWriteBytes
+ }
+ return 0
+}
+
+func (m *Cost) GetCommitcost() *Cost_CommitCost {
+ if m != nil {
+ return m.Commitcost
+ }
+ return nil
+}
+
+func (m *Cost) GetApproximateStorageDelta() int32 {
+ if m != nil && m.ApproximateStorageDelta != nil {
+ return *m.ApproximateStorageDelta
+ }
+ return 0
+}
+
+func (m *Cost) GetIdSequenceUpdates() int32 {
+ if m != nil && m.IdSequenceUpdates != nil {
+ return *m.IdSequenceUpdates
+ }
+ return 0
+}
+
+type Cost_CommitCost struct {
+ RequestedEntityPuts *int32 `protobuf:"varint,6,opt,name=requested_entity_puts" json:"requested_entity_puts,omitempty"`
+ RequestedEntityDeletes *int32 `protobuf:"varint,7,opt,name=requested_entity_deletes" json:"requested_entity_deletes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Cost_CommitCost) Reset() { *m = Cost_CommitCost{} }
+func (m *Cost_CommitCost) String() string { return proto.CompactTextString(m) }
+func (*Cost_CommitCost) ProtoMessage() {}
+
+func (m *Cost_CommitCost) GetRequestedEntityPuts() int32 {
+ if m != nil && m.RequestedEntityPuts != nil {
+ return *m.RequestedEntityPuts
+ }
+ return 0
+}
+
+func (m *Cost_CommitCost) GetRequestedEntityDeletes() int32 {
+ if m != nil && m.RequestedEntityDeletes != nil {
+ return *m.RequestedEntityDeletes
+ }
+ return 0
+}
+
+type GetRequest struct {
+ Header *InternalHeader `protobuf:"bytes,6,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ FailoverMs *int64 `protobuf:"varint,3,opt,name=failover_ms" json:"failover_ms,omitempty"`
+ Strong *bool `protobuf:"varint,4,opt,name=strong" json:"strong,omitempty"`
+ AllowDeferred *bool `protobuf:"varint,5,opt,name=allow_deferred,def=0" json:"allow_deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetRequest) Reset() { *m = GetRequest{} }
+func (m *GetRequest) String() string { return proto.CompactTextString(m) }
+func (*GetRequest) ProtoMessage() {}
+
+const Default_GetRequest_AllowDeferred bool = false
+
+func (m *GetRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *GetRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *GetRequest) GetFailoverMs() int64 {
+ if m != nil && m.FailoverMs != nil {
+ return *m.FailoverMs
+ }
+ return 0
+}
+
+func (m *GetRequest) GetStrong() bool {
+ if m != nil && m.Strong != nil {
+ return *m.Strong
+ }
+ return false
+}
+
+func (m *GetRequest) GetAllowDeferred() bool {
+ if m != nil && m.AllowDeferred != nil {
+ return *m.AllowDeferred
+ }
+ return Default_GetRequest_AllowDeferred
+}
+
+type GetResponse struct {
+ Entity []*GetResponse_Entity `protobuf:"group,1,rep" json:"entity,omitempty"`
+ Deferred []*Reference `protobuf:"bytes,5,rep,name=deferred" json:"deferred,omitempty"`
+ InOrder *bool `protobuf:"varint,6,opt,name=in_order,def=1" json:"in_order,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse) Reset() { *m = GetResponse{} }
+func (m *GetResponse) String() string { return proto.CompactTextString(m) }
+func (*GetResponse) ProtoMessage() {}
+
+const Default_GetResponse_InOrder bool = true
+
+func (m *GetResponse) GetEntity() []*GetResponse_Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse) GetDeferred() []*Reference {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+func (m *GetResponse) GetInOrder() bool {
+ if m != nil && m.InOrder != nil {
+ return *m.InOrder
+ }
+ return Default_GetResponse_InOrder
+}
+
+type GetResponse_Entity struct {
+ Entity *EntityProto `protobuf:"bytes,2,opt,name=entity" json:"entity,omitempty"`
+ Key *Reference `protobuf:"bytes,4,opt,name=key" json:"key,omitempty"`
+ Version *int64 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetResponse_Entity) Reset() { *m = GetResponse_Entity{} }
+func (m *GetResponse_Entity) String() string { return proto.CompactTextString(m) }
+func (*GetResponse_Entity) ProtoMessage() {}
+
+func (m *GetResponse_Entity) GetEntity() *EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetKey() *Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *GetResponse_Entity) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+type PutRequest struct {
+ Header *InternalHeader `protobuf:"bytes,11,opt,name=header" json:"header,omitempty"`
+ Entity []*EntityProto `protobuf:"bytes,1,rep,name=entity" json:"entity,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,3,rep,name=composite_index" json:"composite_index,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ AutoIdPolicy *PutRequest_AutoIdPolicy `protobuf:"varint,10,opt,name=auto_id_policy,enum=appengine.PutRequest_AutoIdPolicy,def=0" json:"auto_id_policy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutRequest) Reset() { *m = PutRequest{} }
+func (m *PutRequest) String() string { return proto.CompactTextString(m) }
+func (*PutRequest) ProtoMessage() {}
+
+const Default_PutRequest_Trusted bool = false
+const Default_PutRequest_Force bool = false
+const Default_PutRequest_MarkChanges bool = false
+const Default_PutRequest_AutoIdPolicy PutRequest_AutoIdPolicy = PutRequest_CURRENT
+
+func (m *PutRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *PutRequest) GetEntity() []*EntityProto {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *PutRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *PutRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_PutRequest_Trusted
+}
+
+func (m *PutRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_PutRequest_Force
+}
+
+func (m *PutRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_PutRequest_MarkChanges
+}
+
+func (m *PutRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+func (m *PutRequest) GetAutoIdPolicy() PutRequest_AutoIdPolicy {
+ if m != nil && m.AutoIdPolicy != nil {
+ return *m.AutoIdPolicy
+ }
+ return Default_PutRequest_AutoIdPolicy
+}
+
+type PutResponse struct {
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ Cost *Cost `protobuf:"bytes,2,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PutResponse) Reset() { *m = PutResponse{} }
+func (m *PutResponse) String() string { return proto.CompactTextString(m) }
+func (*PutResponse) ProtoMessage() {}
+
+func (m *PutResponse) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *PutResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *PutResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type TouchRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ CompositeIndex []*CompositeIndex `protobuf:"bytes,2,rep,name=composite_index" json:"composite_index,omitempty"`
+ Force *bool `protobuf:"varint,3,opt,name=force,def=0" json:"force,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchRequest) Reset() { *m = TouchRequest{} }
+func (m *TouchRequest) String() string { return proto.CompactTextString(m) }
+func (*TouchRequest) ProtoMessage() {}
+
+const Default_TouchRequest_Force bool = false
+
+func (m *TouchRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetCompositeIndex() []*CompositeIndex {
+ if m != nil {
+ return m.CompositeIndex
+ }
+ return nil
+}
+
+func (m *TouchRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_TouchRequest_Force
+}
+
+func (m *TouchRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type TouchResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TouchResponse) Reset() { *m = TouchResponse{} }
+func (m *TouchResponse) String() string { return proto.CompactTextString(m) }
+func (*TouchResponse) ProtoMessage() {}
+
+func (m *TouchResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type DeleteRequest struct {
+ Header *InternalHeader `protobuf:"bytes,10,opt,name=header" json:"header,omitempty"`
+ Key []*Reference `protobuf:"bytes,6,rep,name=key" json:"key,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,5,opt,name=transaction" json:"transaction,omitempty"`
+ Trusted *bool `protobuf:"varint,4,opt,name=trusted,def=0" json:"trusted,omitempty"`
+ Force *bool `protobuf:"varint,7,opt,name=force,def=0" json:"force,omitempty"`
+ MarkChanges *bool `protobuf:"varint,8,opt,name=mark_changes,def=0" json:"mark_changes,omitempty"`
+ Snapshot []*Snapshot `protobuf:"bytes,9,rep,name=snapshot" json:"snapshot,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteRequest) Reset() { *m = DeleteRequest{} }
+func (m *DeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteRequest) ProtoMessage() {}
+
+const Default_DeleteRequest_Trusted bool = false
+const Default_DeleteRequest_Force bool = false
+const Default_DeleteRequest_MarkChanges bool = false
+
+func (m *DeleteRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetKey() []*Reference {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *DeleteRequest) GetTrusted() bool {
+ if m != nil && m.Trusted != nil {
+ return *m.Trusted
+ }
+ return Default_DeleteRequest_Trusted
+}
+
+func (m *DeleteRequest) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return Default_DeleteRequest_Force
+}
+
+func (m *DeleteRequest) GetMarkChanges() bool {
+ if m != nil && m.MarkChanges != nil {
+ return *m.MarkChanges
+ }
+ return Default_DeleteRequest_MarkChanges
+}
+
+func (m *DeleteRequest) GetSnapshot() []*Snapshot {
+ if m != nil {
+ return m.Snapshot
+ }
+ return nil
+}
+
+type DeleteResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []int64 `protobuf:"varint,3,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteResponse) Reset() { *m = DeleteResponse{} }
+func (m *DeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteResponse) ProtoMessage() {}
+
+func (m *DeleteResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *DeleteResponse) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type NextRequest struct {
+ Header *InternalHeader `protobuf:"bytes,5,opt,name=header" json:"header,omitempty"`
+ Cursor *Cursor `protobuf:"bytes,1,req,name=cursor" json:"cursor,omitempty"`
+ Count *int32 `protobuf:"varint,2,opt,name=count" json:"count,omitempty"`
+ Offset *int32 `protobuf:"varint,4,opt,name=offset,def=0" json:"offset,omitempty"`
+ Compile *bool `protobuf:"varint,3,opt,name=compile,def=0" json:"compile,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *NextRequest) Reset() { *m = NextRequest{} }
+func (m *NextRequest) String() string { return proto.CompactTextString(m) }
+func (*NextRequest) ProtoMessage() {}
+
+const Default_NextRequest_Offset int32 = 0
+const Default_NextRequest_Compile bool = false
+
+func (m *NextRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *NextRequest) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *NextRequest) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_NextRequest_Offset
+}
+
+func (m *NextRequest) GetCompile() bool {
+ if m != nil && m.Compile != nil {
+ return *m.Compile
+ }
+ return Default_NextRequest_Compile
+}
+
+type QueryResult struct {
+ Cursor *Cursor `protobuf:"bytes,1,opt,name=cursor" json:"cursor,omitempty"`
+ Result []*EntityProto `protobuf:"bytes,2,rep,name=result" json:"result,omitempty"`
+ SkippedResults *int32 `protobuf:"varint,7,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ MoreResults *bool `protobuf:"varint,3,req,name=more_results" json:"more_results,omitempty"`
+ KeysOnly *bool `protobuf:"varint,4,opt,name=keys_only" json:"keys_only,omitempty"`
+ IndexOnly *bool `protobuf:"varint,9,opt,name=index_only" json:"index_only,omitempty"`
+ SmallOps *bool `protobuf:"varint,10,opt,name=small_ops" json:"small_ops,omitempty"`
+ CompiledQuery *CompiledQuery `protobuf:"bytes,5,opt,name=compiled_query" json:"compiled_query,omitempty"`
+ CompiledCursor *CompiledCursor `protobuf:"bytes,6,opt,name=compiled_cursor" json:"compiled_cursor,omitempty"`
+ Index []*CompositeIndex `protobuf:"bytes,8,rep,name=index" json:"index,omitempty"`
+ Version []int64 `protobuf:"varint,11,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResult) Reset() { *m = QueryResult{} }
+func (m *QueryResult) String() string { return proto.CompactTextString(m) }
+func (*QueryResult) ProtoMessage() {}
+
+func (m *QueryResult) GetCursor() *Cursor {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetResult() []*EntityProto {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *QueryResult) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+func (m *QueryResult) GetMoreResults() bool {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return false
+}
+
+func (m *QueryResult) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetIndexOnly() bool {
+ if m != nil && m.IndexOnly != nil {
+ return *m.IndexOnly
+ }
+ return false
+}
+
+func (m *QueryResult) GetSmallOps() bool {
+ if m != nil && m.SmallOps != nil {
+ return *m.SmallOps
+ }
+ return false
+}
+
+func (m *QueryResult) GetCompiledQuery() *CompiledQuery {
+ if m != nil {
+ return m.CompiledQuery
+ }
+ return nil
+}
+
+func (m *QueryResult) GetCompiledCursor() *CompiledCursor {
+ if m != nil {
+ return m.CompiledCursor
+ }
+ return nil
+}
+
+func (m *QueryResult) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+func (m *QueryResult) GetVersion() []int64 {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type AllocateIdsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,4,opt,name=header" json:"header,omitempty"`
+ ModelKey *Reference `protobuf:"bytes,1,opt,name=model_key" json:"model_key,omitempty"`
+ Size *int64 `protobuf:"varint,2,opt,name=size" json:"size,omitempty"`
+ Max *int64 `protobuf:"varint,3,opt,name=max" json:"max,omitempty"`
+ Reserve []*Reference `protobuf:"bytes,5,rep,name=reserve" json:"reserve,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetModelKey() *Reference {
+ if m != nil {
+ return m.ModelKey
+ }
+ return nil
+}
+
+func (m *AllocateIdsRequest) GetSize() int64 {
+ if m != nil && m.Size != nil {
+ return *m.Size
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetMax() int64 {
+ if m != nil && m.Max != nil {
+ return *m.Max
+ }
+ return 0
+}
+
+func (m *AllocateIdsRequest) GetReserve() []*Reference {
+ if m != nil {
+ return m.Reserve
+ }
+ return nil
+}
+
+type AllocateIdsResponse struct {
+ Start *int64 `protobuf:"varint,1,req,name=start" json:"start,omitempty"`
+ End *int64 `protobuf:"varint,2,req,name=end" json:"end,omitempty"`
+ Cost *Cost `protobuf:"bytes,3,opt,name=cost" json:"cost,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetStart() int64 {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetEnd() int64 {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return 0
+}
+
+func (m *AllocateIdsResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+type CompositeIndices struct {
+ Index []*CompositeIndex `protobuf:"bytes,1,rep,name=index" json:"index,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeIndices) Reset() { *m = CompositeIndices{} }
+func (m *CompositeIndices) String() string { return proto.CompactTextString(m) }
+func (*CompositeIndices) ProtoMessage() {}
+
+func (m *CompositeIndices) GetIndex() []*CompositeIndex {
+ if m != nil {
+ return m.Index
+ }
+ return nil
+}
+
+type AddActionsRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ Transaction *Transaction `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ Action []*Action `protobuf:"bytes,2,rep,name=action" json:"action,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsRequest) Reset() { *m = AddActionsRequest{} }
+func (m *AddActionsRequest) String() string { return proto.CompactTextString(m) }
+func (*AddActionsRequest) ProtoMessage() {}
+
+func (m *AddActionsRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetTransaction() *Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *AddActionsRequest) GetAction() []*Action {
+ if m != nil {
+ return m.Action
+ }
+ return nil
+}
+
+type AddActionsResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AddActionsResponse) Reset() { *m = AddActionsResponse{} }
+func (m *AddActionsResponse) String() string { return proto.CompactTextString(m) }
+func (*AddActionsResponse) ProtoMessage() {}
+
+type BeginTransactionRequest struct {
+ Header *InternalHeader `protobuf:"bytes,3,opt,name=header" json:"header,omitempty"`
+ App *string `protobuf:"bytes,1,req,name=app" json:"app,omitempty"`
+ AllowMultipleEg *bool `protobuf:"varint,2,opt,name=allow_multiple_eg,def=0" json:"allow_multiple_eg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_AllowMultipleEg bool = false
+
+func (m *BeginTransactionRequest) GetHeader() *InternalHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *BeginTransactionRequest) GetApp() string {
+ if m != nil && m.App != nil {
+ return *m.App
+ }
+ return ""
+}
+
+func (m *BeginTransactionRequest) GetAllowMultipleEg() bool {
+ if m != nil && m.AllowMultipleEg != nil {
+ return *m.AllowMultipleEg
+ }
+ return Default_BeginTransactionRequest_AllowMultipleEg
+}
+
+type CommitResponse struct {
+ Cost *Cost `protobuf:"bytes,1,opt,name=cost" json:"cost,omitempty"`
+ Version []*CommitResponse_Version `protobuf:"group,3,rep" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetCost() *Cost {
+ if m != nil {
+ return m.Cost
+ }
+ return nil
+}
+
+func (m *CommitResponse) GetVersion() []*CommitResponse_Version {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type CommitResponse_Version struct {
+ RootEntityKey *Reference `protobuf:"bytes,4,req,name=root_entity_key" json:"root_entity_key,omitempty"`
+ Version *int64 `protobuf:"varint,5,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse_Version) Reset() { *m = CommitResponse_Version{} }
+func (m *CommitResponse_Version) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse_Version) ProtoMessage() {}
+
+func (m *CommitResponse_Version) GetRootEntityKey() *Reference {
+ if m != nil {
+ return m.RootEntityKey
+ }
+ return nil
+}
+
+func (m *CommitResponse_Version) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("appengine.Property_Meaning", Property_Meaning_name, Property_Meaning_value)
+ proto.RegisterEnum("appengine.Property_FtsTokenizationOption", Property_FtsTokenizationOption_name, Property_FtsTokenizationOption_value)
+ proto.RegisterEnum("appengine.EntityProto_Kind", EntityProto_Kind_name, EntityProto_Kind_value)
+ proto.RegisterEnum("appengine.Index_Property_Direction", Index_Property_Direction_name, Index_Property_Direction_value)
+ proto.RegisterEnum("appengine.CompositeIndex_State", CompositeIndex_State_name, CompositeIndex_State_value)
+ proto.RegisterEnum("appengine.Snapshot_Status", Snapshot_Status_name, Snapshot_Status_value)
+ proto.RegisterEnum("appengine.Query_Hint", Query_Hint_name, Query_Hint_value)
+ proto.RegisterEnum("appengine.Query_Filter_Operator", Query_Filter_Operator_name, Query_Filter_Operator_value)
+ proto.RegisterEnum("appengine.Query_Order_Direction", Query_Order_Direction_name, Query_Order_Direction_value)
+ proto.RegisterEnum("appengine.Error_ErrorCode", Error_ErrorCode_name, Error_ErrorCode_value)
+ proto.RegisterEnum("appengine.PutRequest_AutoIdPolicy", PutRequest_AutoIdPolicy_name, PutRequest_AutoIdPolicy_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.proto
new file mode 100644
index 000000000000..e76f126ff7c6
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/datastore/datastore_v3.proto
@@ -0,0 +1,541 @@
+syntax = "proto2";
+option go_package = "datastore";
+
+package appengine;
+
+message Action{}
+
+message PropertyValue {
+ optional int64 int64Value = 1;
+ optional bool booleanValue = 2;
+ optional string stringValue = 3;
+ optional double doubleValue = 4;
+
+ optional group PointValue = 5 {
+ required double x = 6;
+ required double y = 7;
+ }
+
+ optional group UserValue = 8 {
+ required string email = 9;
+ required string auth_domain = 10;
+ optional string nickname = 11;
+ optional string federated_identity = 21;
+ optional string federated_provider = 22;
+ }
+
+ optional group ReferenceValue = 12 {
+ required string app = 13;
+ optional string name_space = 20;
+ repeated group PathElement = 14 {
+ required string type = 15;
+ optional int64 id = 16;
+ optional string name = 17;
+ }
+ }
+}
+
+message Property {
+ enum Meaning {
+ NO_MEANING = 0;
+ BLOB = 14;
+ TEXT = 15;
+ BYTESTRING = 16;
+
+ ATOM_CATEGORY = 1;
+ ATOM_LINK = 2;
+ ATOM_TITLE = 3;
+ ATOM_CONTENT = 4;
+ ATOM_SUMMARY = 5;
+ ATOM_AUTHOR = 6;
+
+ GD_WHEN = 7;
+ GD_EMAIL = 8;
+ GEORSS_POINT = 9;
+ GD_IM = 10;
+
+ GD_PHONENUMBER = 11;
+ GD_POSTALADDRESS = 12;
+
+ GD_RATING = 13;
+
+ BLOBKEY = 17;
+ ENTITY_PROTO = 19;
+
+ INDEX_VALUE = 18;
+ };
+
+ optional Meaning meaning = 1 [default = NO_MEANING];
+ optional string meaning_uri = 2;
+
+ required string name = 3;
+
+ required PropertyValue value = 5;
+
+ required bool multiple = 4;
+
+ optional bool searchable = 6 [default=false];
+
+ enum FtsTokenizationOption {
+ HTML = 1;
+ ATOM = 2;
+ }
+
+ optional FtsTokenizationOption fts_tokenization_option = 8;
+
+ optional string locale = 9 [default = "en"];
+}
+
+message Path {
+ repeated group Element = 1 {
+ required string type = 2;
+ optional int64 id = 3;
+ optional string name = 4;
+ }
+}
+
+message Reference {
+ required string app = 13;
+ optional string name_space = 20;
+ required Path path = 14;
+}
+
+message User {
+ required string email = 1;
+ required string auth_domain = 2;
+ optional string nickname = 3;
+ optional string federated_identity = 6;
+ optional string federated_provider = 7;
+}
+
+message EntityProto {
+ required Reference key = 13;
+ required Path entity_group = 16;
+ optional User owner = 17;
+
+ enum Kind {
+ GD_CONTACT = 1;
+ GD_EVENT = 2;
+ GD_MESSAGE = 3;
+ }
+ optional Kind kind = 4;
+ optional string kind_uri = 5;
+
+ repeated Property property = 14;
+ repeated Property raw_property = 15;
+
+ optional int32 rank = 18;
+}
+
+message CompositeProperty {
+ required int64 index_id = 1;
+ repeated string value = 2;
+}
+
+message Index {
+ required string entity_type = 1;
+ required bool ancestor = 5;
+ repeated group Property = 2 {
+ required string name = 3;
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ optional Direction direction = 4 [default = ASCENDING];
+ }
+}
+
+message CompositeIndex {
+ required string app_id = 1;
+ required int64 id = 2;
+ required Index definition = 3;
+
+ enum State {
+ WRITE_ONLY = 1;
+ READ_WRITE = 2;
+ DELETED = 3;
+ ERROR = 4;
+ }
+ required State state = 4;
+
+ optional bool only_use_if_required = 6 [default = false];
+}
+
+message IndexPostfix {
+ message IndexValue {
+ required string property_name = 1;
+ required PropertyValue value = 2;
+ }
+
+ repeated IndexValue index_value = 1;
+
+ optional Reference key = 2;
+
+ optional bool before = 3 [default=true];
+}
+
+message IndexPosition {
+ optional string key = 1;
+
+ optional bool before = 2 [default=true];
+}
+
+message Snapshot {
+ enum Status {
+ INACTIVE = 0;
+ ACTIVE = 1;
+ }
+
+ required int64 ts = 1;
+}
+
+message InternalHeader {
+ optional string qos = 1;
+}
+
+message Transaction {
+ optional InternalHeader header = 4;
+ required fixed64 handle = 1;
+ required string app = 2;
+ optional bool mark_changes = 3 [default = false];
+}
+
+message Query {
+ optional InternalHeader header = 39;
+
+ required string app = 1;
+ optional string name_space = 29;
+
+ optional string kind = 3;
+ optional Reference ancestor = 17;
+
+ repeated group Filter = 4 {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+ IN = 6;
+ EXISTS = 7;
+ }
+
+ required Operator op = 6;
+ repeated Property property = 14;
+ }
+
+ optional string search_query = 8;
+
+ repeated group Order = 9 {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+
+ required string property = 10;
+ optional Direction direction = 11 [default = ASCENDING];
+ }
+
+ enum Hint {
+ ORDER_FIRST = 1;
+ ANCESTOR_FIRST = 2;
+ FILTER_FIRST = 3;
+ }
+ optional Hint hint = 18;
+
+ optional int32 count = 23;
+
+ optional int32 offset = 12 [default = 0];
+
+ optional int32 limit = 16;
+
+ optional CompiledCursor compiled_cursor = 30;
+ optional CompiledCursor end_compiled_cursor = 31;
+
+ repeated CompositeIndex composite_index = 19;
+
+ optional bool require_perfect_plan = 20 [default = false];
+
+ optional bool keys_only = 21 [default = false];
+
+ optional Transaction transaction = 22;
+
+ optional bool compile = 25 [default = false];
+
+ optional int64 failover_ms = 26;
+
+ optional bool strong = 32;
+
+ repeated string property_name = 33;
+
+ repeated string group_by_property_name = 34;
+
+ optional bool distinct = 24;
+
+ optional int64 min_safe_time_seconds = 35;
+
+ repeated string safe_replica_name = 36;
+
+ optional bool persist_offset = 37 [default=false];
+}
+
+message CompiledQuery {
+ required group PrimaryScan = 1 {
+ optional string index_name = 2;
+
+ optional string start_key = 3;
+ optional bool start_inclusive = 4;
+ optional string end_key = 5;
+ optional bool end_inclusive = 6;
+
+ repeated string start_postfix_value = 22;
+ repeated string end_postfix_value = 23;
+
+ optional int64 end_unapplied_log_timestamp_us = 19;
+ }
+
+ repeated group MergeJoinScan = 7 {
+ required string index_name = 8;
+
+ repeated string prefix_value = 9;
+
+ optional bool value_prefix = 20 [default=false];
+ }
+
+ optional Index index_def = 21;
+
+ optional int32 offset = 10 [default = 0];
+
+ optional int32 limit = 11;
+
+ required bool keys_only = 12;
+
+ repeated string property_name = 24;
+
+ optional int32 distinct_infix_size = 25;
+
+ optional group EntityFilter = 13 {
+ optional bool distinct = 14 [default=false];
+
+ optional string kind = 17;
+ optional Reference ancestor = 18;
+ }
+}
+
+message CompiledCursor {
+ optional group Position = 2 {
+ optional string start_key = 27;
+
+ repeated group IndexValue = 29 {
+ optional string property = 30;
+ required PropertyValue value = 31;
+ }
+
+ optional Reference key = 32;
+
+ optional bool start_inclusive = 28 [default=true];
+ }
+}
+
+message Cursor {
+ required fixed64 cursor = 1;
+
+ optional string app = 2;
+}
+
+message Error {
+ enum ErrorCode {
+ BAD_REQUEST = 1;
+ CONCURRENT_TRANSACTION = 2;
+ INTERNAL_ERROR = 3;
+ NEED_INDEX = 4;
+ TIMEOUT = 5;
+ PERMISSION_DENIED = 6;
+ BIGTABLE_ERROR = 7;
+ COMMITTED_BUT_STILL_APPLYING = 8;
+ CAPABILITY_DISABLED = 9;
+ TRY_ALTERNATE_BACKEND = 10;
+ SAFE_TIME_TOO_OLD = 11;
+ }
+}
+
+message Cost {
+ optional int32 index_writes = 1;
+ optional int32 index_write_bytes = 2;
+ optional int32 entity_writes = 3;
+ optional int32 entity_write_bytes = 4;
+ optional group CommitCost = 5 {
+ optional int32 requested_entity_puts = 6;
+ optional int32 requested_entity_deletes = 7;
+ };
+ optional int32 approximate_storage_delta = 8;
+ optional int32 id_sequence_updates = 9;
+}
+
+message GetRequest {
+ optional InternalHeader header = 6;
+
+ repeated Reference key = 1;
+ optional Transaction transaction = 2;
+
+ optional int64 failover_ms = 3;
+
+ optional bool strong = 4;
+
+ optional bool allow_deferred = 5 [default=false];
+}
+
+message GetResponse {
+ repeated group Entity = 1 {
+ optional EntityProto entity = 2;
+ optional Reference key = 4;
+
+ optional int64 version = 3;
+ }
+
+ repeated Reference deferred = 5;
+
+ optional bool in_order = 6 [default=true];
+}
+
+message PutRequest {
+ optional InternalHeader header = 11;
+
+ repeated EntityProto entity = 1;
+ optional Transaction transaction = 2;
+ repeated CompositeIndex composite_index = 3;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+
+ enum AutoIdPolicy {
+ CURRENT = 0;
+ SEQUENTIAL = 1;
+ }
+ optional AutoIdPolicy auto_id_policy = 10 [default = CURRENT];
+}
+
+message PutResponse {
+ repeated Reference key = 1;
+ optional Cost cost = 2;
+ repeated int64 version = 3;
+}
+
+message TouchRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 1;
+ repeated CompositeIndex composite_index = 2;
+ optional bool force = 3 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message TouchResponse {
+ optional Cost cost = 1;
+}
+
+message DeleteRequest {
+ optional InternalHeader header = 10;
+
+ repeated Reference key = 6;
+ optional Transaction transaction = 5;
+
+ optional bool trusted = 4 [default = false];
+
+ optional bool force = 7 [default = false];
+
+ optional bool mark_changes = 8 [default = false];
+ repeated Snapshot snapshot = 9;
+}
+
+message DeleteResponse {
+ optional Cost cost = 1;
+ repeated int64 version = 3;
+}
+
+message NextRequest {
+ optional InternalHeader header = 5;
+
+ required Cursor cursor = 1;
+ optional int32 count = 2;
+
+ optional int32 offset = 4 [default = 0];
+
+ optional bool compile = 3 [default = false];
+}
+
+message QueryResult {
+ optional Cursor cursor = 1;
+
+ repeated EntityProto result = 2;
+
+ optional int32 skipped_results = 7;
+
+ required bool more_results = 3;
+
+ optional bool keys_only = 4;
+
+ optional bool index_only = 9;
+
+ optional bool small_ops = 10;
+
+ optional CompiledQuery compiled_query = 5;
+
+ optional CompiledCursor compiled_cursor = 6;
+
+ repeated CompositeIndex index = 8;
+
+ repeated int64 version = 11;
+}
+
+message AllocateIdsRequest {
+ optional InternalHeader header = 4;
+
+ optional Reference model_key = 1;
+
+ optional int64 size = 2;
+
+ optional int64 max = 3;
+
+ repeated Reference reserve = 5;
+}
+
+message AllocateIdsResponse {
+ required int64 start = 1;
+ required int64 end = 2;
+ optional Cost cost = 3;
+}
+
+message CompositeIndices {
+ repeated CompositeIndex index = 1;
+}
+
+message AddActionsRequest {
+ optional InternalHeader header = 3;
+
+ required Transaction transaction = 1;
+ repeated Action action = 2;
+}
+
+message AddActionsResponse {
+}
+
+message BeginTransactionRequest {
+ optional InternalHeader header = 3;
+
+ required string app = 1;
+ optional bool allow_multiple_eg = 2 [default = false];
+}
+
+message CommitResponse {
+ optional Cost cost = 1;
+
+ repeated group Version = 3 {
+ required Reference root_entity_key = 4;
+ required int64 version = 5;
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/identity.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/identity.go
new file mode 100644
index 000000000000..e26347c88234
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/identity.go
@@ -0,0 +1,12 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+func AppID(fqai string) string {
+ return appID(fqai)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/identity_vm.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/identity_vm.go
new file mode 100644
index 000000000000..2a649010e4f3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/identity_vm.go
@@ -0,0 +1,85 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "net/http"
+ "os"
+)
+
+// These functions are implementations of the wrapper functions
+// in ../appengine/identity.go. See that file for commentary.
+
+const (
+ hDefaultVersionHostname = "X-AppEngine-Default-Version-Hostname"
+ hRequestLogId = "X-AppEngine-Request-Log-Id"
+ hDatacenter = "X-AppEngine-Datacenter"
+)
+
+func DefaultVersionHostname(req interface{}) string {
+ return req.(*http.Request).Header.Get(hDefaultVersionHostname)
+}
+
+func RequestID(req interface{}) string {
+ return req.(*http.Request).Header.Get(hRequestLogId)
+}
+
+func Datacenter(req interface{}) string {
+ return req.(*http.Request).Header.Get(hDatacenter)
+}
+
+func ServerSoftware() string {
+ // TODO(dsymonds): Remove fallback when we've verified this.
+ if s := os.Getenv("SERVER_SOFTWARE"); s != "" {
+ return s
+ }
+ return "Google App Engine/1.x.x"
+}
+
+// TODO(dsymonds): Remove the metadata fetches.
+
+func ModuleName() string {
+ if s := os.Getenv("GAE_MODULE_NAME"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_name"))
+}
+
+func VersionID() string {
+ if s := os.Getenv("GAE_MODULE_VERSION"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_version"))
+}
+
+func InstanceID() string {
+ if s := os.Getenv("GAE_MODULE_INSTANCE"); s != "" {
+ return s
+ }
+ return string(mustGetMetadata("instance/attributes/gae_backend_instance"))
+}
+
+func partitionlessAppID() string {
+ // gae_project has everything except the partition prefix.
+ appID := os.Getenv("GAE_LONG_APP_ID")
+ if appID == "" {
+ appID = string(mustGetMetadata("instance/attributes/gae_project"))
+ }
+ return appID
+}
+
+func fullyQualifiedAppID() string {
+ appID := partitionlessAppID()
+
+ part := os.Getenv("GAE_PARTITION")
+ if part == "" {
+ part = string(mustGetMetadata("instance/attributes/gae_partition"))
+ }
+
+ if part != "" {
+ appID = part + "~" + appID
+ }
+ return appID
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.pb.go
new file mode 100644
index 000000000000..b66097e4612b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.pb.go
@@ -0,0 +1,848 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/image/images_service.proto
+// DO NOT EDIT!
+
+/*
+Package image is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/image/images_service.proto
+
+It has these top-level messages:
+ ImagesServiceError
+ ImagesServiceTransform
+ Transform
+ ImageData
+ InputSettings
+ OutputSettings
+ ImagesTransformRequest
+ ImagesTransformResponse
+ CompositeImageOptions
+ ImagesCanvas
+ ImagesCompositeRequest
+ ImagesCompositeResponse
+ ImagesHistogramRequest
+ ImagesHistogram
+ ImagesHistogramResponse
+ ImagesGetUrlBaseRequest
+ ImagesGetUrlBaseResponse
+ ImagesDeleteUrlBaseRequest
+ ImagesDeleteUrlBaseResponse
+*/
+package image
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ImagesServiceError_ErrorCode int32
+
+const (
+ ImagesServiceError_UNSPECIFIED_ERROR ImagesServiceError_ErrorCode = 1
+ ImagesServiceError_BAD_TRANSFORM_DATA ImagesServiceError_ErrorCode = 2
+ ImagesServiceError_NOT_IMAGE ImagesServiceError_ErrorCode = 3
+ ImagesServiceError_BAD_IMAGE_DATA ImagesServiceError_ErrorCode = 4
+ ImagesServiceError_IMAGE_TOO_LARGE ImagesServiceError_ErrorCode = 5
+ ImagesServiceError_INVALID_BLOB_KEY ImagesServiceError_ErrorCode = 6
+ ImagesServiceError_ACCESS_DENIED ImagesServiceError_ErrorCode = 7
+ ImagesServiceError_OBJECT_NOT_FOUND ImagesServiceError_ErrorCode = 8
+)
+
+var ImagesServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "BAD_TRANSFORM_DATA",
+ 3: "NOT_IMAGE",
+ 4: "BAD_IMAGE_DATA",
+ 5: "IMAGE_TOO_LARGE",
+ 6: "INVALID_BLOB_KEY",
+ 7: "ACCESS_DENIED",
+ 8: "OBJECT_NOT_FOUND",
+}
+var ImagesServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "BAD_TRANSFORM_DATA": 2,
+ "NOT_IMAGE": 3,
+ "BAD_IMAGE_DATA": 4,
+ "IMAGE_TOO_LARGE": 5,
+ "INVALID_BLOB_KEY": 6,
+ "ACCESS_DENIED": 7,
+ "OBJECT_NOT_FOUND": 8,
+}
+
+func (x ImagesServiceError_ErrorCode) Enum() *ImagesServiceError_ErrorCode {
+ p := new(ImagesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ImagesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ImagesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ImagesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceError_ErrorCode_value, data, "ImagesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ImagesServiceTransform_Type int32
+
+const (
+ ImagesServiceTransform_RESIZE ImagesServiceTransform_Type = 1
+ ImagesServiceTransform_ROTATE ImagesServiceTransform_Type = 2
+ ImagesServiceTransform_HORIZONTAL_FLIP ImagesServiceTransform_Type = 3
+ ImagesServiceTransform_VERTICAL_FLIP ImagesServiceTransform_Type = 4
+ ImagesServiceTransform_CROP ImagesServiceTransform_Type = 5
+ ImagesServiceTransform_IM_FEELING_LUCKY ImagesServiceTransform_Type = 6
+)
+
+var ImagesServiceTransform_Type_name = map[int32]string{
+ 1: "RESIZE",
+ 2: "ROTATE",
+ 3: "HORIZONTAL_FLIP",
+ 4: "VERTICAL_FLIP",
+ 5: "CROP",
+ 6: "IM_FEELING_LUCKY",
+}
+var ImagesServiceTransform_Type_value = map[string]int32{
+ "RESIZE": 1,
+ "ROTATE": 2,
+ "HORIZONTAL_FLIP": 3,
+ "VERTICAL_FLIP": 4,
+ "CROP": 5,
+ "IM_FEELING_LUCKY": 6,
+}
+
+func (x ImagesServiceTransform_Type) Enum() *ImagesServiceTransform_Type {
+ p := new(ImagesServiceTransform_Type)
+ *p = x
+ return p
+}
+func (x ImagesServiceTransform_Type) String() string {
+ return proto.EnumName(ImagesServiceTransform_Type_name, int32(x))
+}
+func (x *ImagesServiceTransform_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ImagesServiceTransform_Type_value, data, "ImagesServiceTransform_Type")
+ if err != nil {
+ return err
+ }
+ *x = ImagesServiceTransform_Type(value)
+ return nil
+}
+
+type InputSettings_ORIENTATION_CORRECTION_TYPE int32
+
+const (
+ InputSettings_UNCHANGED_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 0
+ InputSettings_CORRECT_ORIENTATION InputSettings_ORIENTATION_CORRECTION_TYPE = 1
+)
+
+var InputSettings_ORIENTATION_CORRECTION_TYPE_name = map[int32]string{
+ 0: "UNCHANGED_ORIENTATION",
+ 1: "CORRECT_ORIENTATION",
+}
+var InputSettings_ORIENTATION_CORRECTION_TYPE_value = map[string]int32{
+ "UNCHANGED_ORIENTATION": 0,
+ "CORRECT_ORIENTATION": 1,
+}
+
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) Enum() *InputSettings_ORIENTATION_CORRECTION_TYPE {
+ p := new(InputSettings_ORIENTATION_CORRECTION_TYPE)
+ *p = x
+ return p
+}
+func (x InputSettings_ORIENTATION_CORRECTION_TYPE) String() string {
+ return proto.EnumName(InputSettings_ORIENTATION_CORRECTION_TYPE_name, int32(x))
+}
+func (x *InputSettings_ORIENTATION_CORRECTION_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(InputSettings_ORIENTATION_CORRECTION_TYPE_value, data, "InputSettings_ORIENTATION_CORRECTION_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = InputSettings_ORIENTATION_CORRECTION_TYPE(value)
+ return nil
+}
+
+type OutputSettings_MIME_TYPE int32
+
+const (
+ OutputSettings_PNG OutputSettings_MIME_TYPE = 0
+ OutputSettings_JPEG OutputSettings_MIME_TYPE = 1
+ OutputSettings_WEBP OutputSettings_MIME_TYPE = 2
+)
+
+var OutputSettings_MIME_TYPE_name = map[int32]string{
+ 0: "PNG",
+ 1: "JPEG",
+ 2: "WEBP",
+}
+var OutputSettings_MIME_TYPE_value = map[string]int32{
+ "PNG": 0,
+ "JPEG": 1,
+ "WEBP": 2,
+}
+
+func (x OutputSettings_MIME_TYPE) Enum() *OutputSettings_MIME_TYPE {
+ p := new(OutputSettings_MIME_TYPE)
+ *p = x
+ return p
+}
+func (x OutputSettings_MIME_TYPE) String() string {
+ return proto.EnumName(OutputSettings_MIME_TYPE_name, int32(x))
+}
+func (x *OutputSettings_MIME_TYPE) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(OutputSettings_MIME_TYPE_value, data, "OutputSettings_MIME_TYPE")
+ if err != nil {
+ return err
+ }
+ *x = OutputSettings_MIME_TYPE(value)
+ return nil
+}
+
+type CompositeImageOptions_ANCHOR int32
+
+const (
+ CompositeImageOptions_TOP_LEFT CompositeImageOptions_ANCHOR = 0
+ CompositeImageOptions_TOP CompositeImageOptions_ANCHOR = 1
+ CompositeImageOptions_TOP_RIGHT CompositeImageOptions_ANCHOR = 2
+ CompositeImageOptions_LEFT CompositeImageOptions_ANCHOR = 3
+ CompositeImageOptions_CENTER CompositeImageOptions_ANCHOR = 4
+ CompositeImageOptions_RIGHT CompositeImageOptions_ANCHOR = 5
+ CompositeImageOptions_BOTTOM_LEFT CompositeImageOptions_ANCHOR = 6
+ CompositeImageOptions_BOTTOM CompositeImageOptions_ANCHOR = 7
+ CompositeImageOptions_BOTTOM_RIGHT CompositeImageOptions_ANCHOR = 8
+)
+
+var CompositeImageOptions_ANCHOR_name = map[int32]string{
+ 0: "TOP_LEFT",
+ 1: "TOP",
+ 2: "TOP_RIGHT",
+ 3: "LEFT",
+ 4: "CENTER",
+ 5: "RIGHT",
+ 6: "BOTTOM_LEFT",
+ 7: "BOTTOM",
+ 8: "BOTTOM_RIGHT",
+}
+var CompositeImageOptions_ANCHOR_value = map[string]int32{
+ "TOP_LEFT": 0,
+ "TOP": 1,
+ "TOP_RIGHT": 2,
+ "LEFT": 3,
+ "CENTER": 4,
+ "RIGHT": 5,
+ "BOTTOM_LEFT": 6,
+ "BOTTOM": 7,
+ "BOTTOM_RIGHT": 8,
+}
+
+func (x CompositeImageOptions_ANCHOR) Enum() *CompositeImageOptions_ANCHOR {
+ p := new(CompositeImageOptions_ANCHOR)
+ *p = x
+ return p
+}
+func (x CompositeImageOptions_ANCHOR) String() string {
+ return proto.EnumName(CompositeImageOptions_ANCHOR_name, int32(x))
+}
+func (x *CompositeImageOptions_ANCHOR) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeImageOptions_ANCHOR_value, data, "CompositeImageOptions_ANCHOR")
+ if err != nil {
+ return err
+ }
+ *x = CompositeImageOptions_ANCHOR(value)
+ return nil
+}
+
+type ImagesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceError) Reset() { *m = ImagesServiceError{} }
+func (m *ImagesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceError) ProtoMessage() {}
+
+type ImagesServiceTransform struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesServiceTransform) Reset() { *m = ImagesServiceTransform{} }
+func (m *ImagesServiceTransform) String() string { return proto.CompactTextString(m) }
+func (*ImagesServiceTransform) ProtoMessage() {}
+
+type Transform struct {
+ Width *int32 `protobuf:"varint,1,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,opt,name=height" json:"height,omitempty"`
+ CropToFit *bool `protobuf:"varint,11,opt,name=crop_to_fit,def=0" json:"crop_to_fit,omitempty"`
+ CropOffsetX *float32 `protobuf:"fixed32,12,opt,name=crop_offset_x,def=0.5" json:"crop_offset_x,omitempty"`
+ CropOffsetY *float32 `protobuf:"fixed32,13,opt,name=crop_offset_y,def=0.5" json:"crop_offset_y,omitempty"`
+ Rotate *int32 `protobuf:"varint,3,opt,name=rotate,def=0" json:"rotate,omitempty"`
+ HorizontalFlip *bool `protobuf:"varint,4,opt,name=horizontal_flip,def=0" json:"horizontal_flip,omitempty"`
+ VerticalFlip *bool `protobuf:"varint,5,opt,name=vertical_flip,def=0" json:"vertical_flip,omitempty"`
+ CropLeftX *float32 `protobuf:"fixed32,6,opt,name=crop_left_x,def=0" json:"crop_left_x,omitempty"`
+ CropTopY *float32 `protobuf:"fixed32,7,opt,name=crop_top_y,def=0" json:"crop_top_y,omitempty"`
+ CropRightX *float32 `protobuf:"fixed32,8,opt,name=crop_right_x,def=1" json:"crop_right_x,omitempty"`
+ CropBottomY *float32 `protobuf:"fixed32,9,opt,name=crop_bottom_y,def=1" json:"crop_bottom_y,omitempty"`
+ Autolevels *bool `protobuf:"varint,10,opt,name=autolevels,def=0" json:"autolevels,omitempty"`
+ AllowStretch *bool `protobuf:"varint,14,opt,name=allow_stretch,def=0" json:"allow_stretch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Transform) Reset() { *m = Transform{} }
+func (m *Transform) String() string { return proto.CompactTextString(m) }
+func (*Transform) ProtoMessage() {}
+
+const Default_Transform_CropToFit bool = false
+const Default_Transform_CropOffsetX float32 = 0.5
+const Default_Transform_CropOffsetY float32 = 0.5
+const Default_Transform_Rotate int32 = 0
+const Default_Transform_HorizontalFlip bool = false
+const Default_Transform_VerticalFlip bool = false
+const Default_Transform_CropLeftX float32 = 0
+const Default_Transform_CropTopY float32 = 0
+const Default_Transform_CropRightX float32 = 1
+const Default_Transform_CropBottomY float32 = 1
+const Default_Transform_Autolevels bool = false
+const Default_Transform_AllowStretch bool = false
+
+func (m *Transform) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *Transform) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *Transform) GetCropToFit() bool {
+ if m != nil && m.CropToFit != nil {
+ return *m.CropToFit
+ }
+ return Default_Transform_CropToFit
+}
+
+func (m *Transform) GetCropOffsetX() float32 {
+ if m != nil && m.CropOffsetX != nil {
+ return *m.CropOffsetX
+ }
+ return Default_Transform_CropOffsetX
+}
+
+func (m *Transform) GetCropOffsetY() float32 {
+ if m != nil && m.CropOffsetY != nil {
+ return *m.CropOffsetY
+ }
+ return Default_Transform_CropOffsetY
+}
+
+func (m *Transform) GetRotate() int32 {
+ if m != nil && m.Rotate != nil {
+ return *m.Rotate
+ }
+ return Default_Transform_Rotate
+}
+
+func (m *Transform) GetHorizontalFlip() bool {
+ if m != nil && m.HorizontalFlip != nil {
+ return *m.HorizontalFlip
+ }
+ return Default_Transform_HorizontalFlip
+}
+
+func (m *Transform) GetVerticalFlip() bool {
+ if m != nil && m.VerticalFlip != nil {
+ return *m.VerticalFlip
+ }
+ return Default_Transform_VerticalFlip
+}
+
+func (m *Transform) GetCropLeftX() float32 {
+ if m != nil && m.CropLeftX != nil {
+ return *m.CropLeftX
+ }
+ return Default_Transform_CropLeftX
+}
+
+func (m *Transform) GetCropTopY() float32 {
+ if m != nil && m.CropTopY != nil {
+ return *m.CropTopY
+ }
+ return Default_Transform_CropTopY
+}
+
+func (m *Transform) GetCropRightX() float32 {
+ if m != nil && m.CropRightX != nil {
+ return *m.CropRightX
+ }
+ return Default_Transform_CropRightX
+}
+
+func (m *Transform) GetCropBottomY() float32 {
+ if m != nil && m.CropBottomY != nil {
+ return *m.CropBottomY
+ }
+ return Default_Transform_CropBottomY
+}
+
+func (m *Transform) GetAutolevels() bool {
+ if m != nil && m.Autolevels != nil {
+ return *m.Autolevels
+ }
+ return Default_Transform_Autolevels
+}
+
+func (m *Transform) GetAllowStretch() bool {
+ if m != nil && m.AllowStretch != nil {
+ return *m.AllowStretch
+ }
+ return Default_Transform_AllowStretch
+}
+
+type ImageData struct {
+ Content []byte `protobuf:"bytes,1,req,name=content" json:"content,omitempty"`
+ BlobKey *string `protobuf:"bytes,2,opt,name=blob_key" json:"blob_key,omitempty"`
+ Width *int32 `protobuf:"varint,3,opt,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,4,opt,name=height" json:"height,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImageData) Reset() { *m = ImageData{} }
+func (m *ImageData) String() string { return proto.CompactTextString(m) }
+func (*ImageData) ProtoMessage() {}
+
+func (m *ImageData) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *ImageData) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImageData) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImageData) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+type InputSettings struct {
+ CorrectExifOrientation *InputSettings_ORIENTATION_CORRECTION_TYPE `protobuf:"varint,1,opt,name=correct_exif_orientation,enum=appengine.InputSettings_ORIENTATION_CORRECTION_TYPE,def=0" json:"correct_exif_orientation,omitempty"`
+ ParseMetadata *bool `protobuf:"varint,2,opt,name=parse_metadata,def=0" json:"parse_metadata,omitempty"`
+ TransparentSubstitutionRgb *int32 `protobuf:"varint,3,opt,name=transparent_substitution_rgb" json:"transparent_substitution_rgb,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *InputSettings) Reset() { *m = InputSettings{} }
+func (m *InputSettings) String() string { return proto.CompactTextString(m) }
+func (*InputSettings) ProtoMessage() {}
+
+const Default_InputSettings_CorrectExifOrientation InputSettings_ORIENTATION_CORRECTION_TYPE = InputSettings_UNCHANGED_ORIENTATION
+const Default_InputSettings_ParseMetadata bool = false
+
+func (m *InputSettings) GetCorrectExifOrientation() InputSettings_ORIENTATION_CORRECTION_TYPE {
+ if m != nil && m.CorrectExifOrientation != nil {
+ return *m.CorrectExifOrientation
+ }
+ return Default_InputSettings_CorrectExifOrientation
+}
+
+func (m *InputSettings) GetParseMetadata() bool {
+ if m != nil && m.ParseMetadata != nil {
+ return *m.ParseMetadata
+ }
+ return Default_InputSettings_ParseMetadata
+}
+
+func (m *InputSettings) GetTransparentSubstitutionRgb() int32 {
+ if m != nil && m.TransparentSubstitutionRgb != nil {
+ return *m.TransparentSubstitutionRgb
+ }
+ return 0
+}
+
+type OutputSettings struct {
+ MimeType *OutputSettings_MIME_TYPE `protobuf:"varint,1,opt,name=mime_type,enum=appengine.OutputSettings_MIME_TYPE,def=0" json:"mime_type,omitempty"`
+ Quality *int32 `protobuf:"varint,2,opt,name=quality" json:"quality,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *OutputSettings) Reset() { *m = OutputSettings{} }
+func (m *OutputSettings) String() string { return proto.CompactTextString(m) }
+func (*OutputSettings) ProtoMessage() {}
+
+const Default_OutputSettings_MimeType OutputSettings_MIME_TYPE = OutputSettings_PNG
+
+func (m *OutputSettings) GetMimeType() OutputSettings_MIME_TYPE {
+ if m != nil && m.MimeType != nil {
+ return *m.MimeType
+ }
+ return Default_OutputSettings_MimeType
+}
+
+func (m *OutputSettings) GetQuality() int32 {
+ if m != nil && m.Quality != nil {
+ return *m.Quality
+ }
+ return 0
+}
+
+type ImagesTransformRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ Transform []*Transform `protobuf:"bytes,2,rep,name=transform" json:"transform,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Input *InputSettings `protobuf:"bytes,4,opt,name=input" json:"input,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformRequest) Reset() { *m = ImagesTransformRequest{} }
+func (m *ImagesTransformRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformRequest) ProtoMessage() {}
+
+func (m *ImagesTransformRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetTransform() []*Transform {
+ if m != nil {
+ return m.Transform
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesTransformRequest) GetInput() *InputSettings {
+ if m != nil {
+ return m.Input
+ }
+ return nil
+}
+
+type ImagesTransformResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ SourceMetadata *string `protobuf:"bytes,2,opt,name=source_metadata" json:"source_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesTransformResponse) Reset() { *m = ImagesTransformResponse{} }
+func (m *ImagesTransformResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesTransformResponse) ProtoMessage() {}
+
+func (m *ImagesTransformResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesTransformResponse) GetSourceMetadata() string {
+ if m != nil && m.SourceMetadata != nil {
+ return *m.SourceMetadata
+ }
+ return ""
+}
+
+type CompositeImageOptions struct {
+ SourceIndex *int32 `protobuf:"varint,1,req,name=source_index" json:"source_index,omitempty"`
+ XOffset *int32 `protobuf:"varint,2,req,name=x_offset" json:"x_offset,omitempty"`
+ YOffset *int32 `protobuf:"varint,3,req,name=y_offset" json:"y_offset,omitempty"`
+ Opacity *float32 `protobuf:"fixed32,4,req,name=opacity" json:"opacity,omitempty"`
+ Anchor *CompositeImageOptions_ANCHOR `protobuf:"varint,5,req,name=anchor,enum=appengine.CompositeImageOptions_ANCHOR" json:"anchor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeImageOptions) Reset() { *m = CompositeImageOptions{} }
+func (m *CompositeImageOptions) String() string { return proto.CompactTextString(m) }
+func (*CompositeImageOptions) ProtoMessage() {}
+
+func (m *CompositeImageOptions) GetSourceIndex() int32 {
+ if m != nil && m.SourceIndex != nil {
+ return *m.SourceIndex
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetXOffset() int32 {
+ if m != nil && m.XOffset != nil {
+ return *m.XOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetYOffset() int32 {
+ if m != nil && m.YOffset != nil {
+ return *m.YOffset
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetOpacity() float32 {
+ if m != nil && m.Opacity != nil {
+ return *m.Opacity
+ }
+ return 0
+}
+
+func (m *CompositeImageOptions) GetAnchor() CompositeImageOptions_ANCHOR {
+ if m != nil && m.Anchor != nil {
+ return *m.Anchor
+ }
+ return CompositeImageOptions_TOP_LEFT
+}
+
+type ImagesCanvas struct {
+ Width *int32 `protobuf:"varint,1,req,name=width" json:"width,omitempty"`
+ Height *int32 `protobuf:"varint,2,req,name=height" json:"height,omitempty"`
+ Output *OutputSettings `protobuf:"bytes,3,req,name=output" json:"output,omitempty"`
+ Color *int32 `protobuf:"varint,4,opt,name=color,def=-1" json:"color,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCanvas) Reset() { *m = ImagesCanvas{} }
+func (m *ImagesCanvas) String() string { return proto.CompactTextString(m) }
+func (*ImagesCanvas) ProtoMessage() {}
+
+const Default_ImagesCanvas_Color int32 = -1
+
+func (m *ImagesCanvas) GetWidth() int32 {
+ if m != nil && m.Width != nil {
+ return *m.Width
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetHeight() int32 {
+ if m != nil && m.Height != nil {
+ return *m.Height
+ }
+ return 0
+}
+
+func (m *ImagesCanvas) GetOutput() *OutputSettings {
+ if m != nil {
+ return m.Output
+ }
+ return nil
+}
+
+func (m *ImagesCanvas) GetColor() int32 {
+ if m != nil && m.Color != nil {
+ return *m.Color
+ }
+ return Default_ImagesCanvas_Color
+}
+
+type ImagesCompositeRequest struct {
+ Image []*ImageData `protobuf:"bytes,1,rep,name=image" json:"image,omitempty"`
+ Options []*CompositeImageOptions `protobuf:"bytes,2,rep,name=options" json:"options,omitempty"`
+ Canvas *ImagesCanvas `protobuf:"bytes,3,req,name=canvas" json:"canvas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeRequest) Reset() { *m = ImagesCompositeRequest{} }
+func (m *ImagesCompositeRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeRequest) ProtoMessage() {}
+
+func (m *ImagesCompositeRequest) GetImage() []*ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetOptions() []*CompositeImageOptions {
+ if m != nil {
+ return m.Options
+ }
+ return nil
+}
+
+func (m *ImagesCompositeRequest) GetCanvas() *ImagesCanvas {
+ if m != nil {
+ return m.Canvas
+ }
+ return nil
+}
+
+type ImagesCompositeResponse struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesCompositeResponse) Reset() { *m = ImagesCompositeResponse{} }
+func (m *ImagesCompositeResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesCompositeResponse) ProtoMessage() {}
+
+func (m *ImagesCompositeResponse) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogramRequest struct {
+ Image *ImageData `protobuf:"bytes,1,req,name=image" json:"image,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramRequest) Reset() { *m = ImagesHistogramRequest{} }
+func (m *ImagesHistogramRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramRequest) ProtoMessage() {}
+
+func (m *ImagesHistogramRequest) GetImage() *ImageData {
+ if m != nil {
+ return m.Image
+ }
+ return nil
+}
+
+type ImagesHistogram struct {
+ Red []int32 `protobuf:"varint,1,rep,name=red" json:"red,omitempty"`
+ Green []int32 `protobuf:"varint,2,rep,name=green" json:"green,omitempty"`
+ Blue []int32 `protobuf:"varint,3,rep,name=blue" json:"blue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogram) Reset() { *m = ImagesHistogram{} }
+func (m *ImagesHistogram) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogram) ProtoMessage() {}
+
+func (m *ImagesHistogram) GetRed() []int32 {
+ if m != nil {
+ return m.Red
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetGreen() []int32 {
+ if m != nil {
+ return m.Green
+ }
+ return nil
+}
+
+func (m *ImagesHistogram) GetBlue() []int32 {
+ if m != nil {
+ return m.Blue
+ }
+ return nil
+}
+
+type ImagesHistogramResponse struct {
+ Histogram *ImagesHistogram `protobuf:"bytes,1,req,name=histogram" json:"histogram,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesHistogramResponse) Reset() { *m = ImagesHistogramResponse{} }
+func (m *ImagesHistogramResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesHistogramResponse) ProtoMessage() {}
+
+func (m *ImagesHistogramResponse) GetHistogram() *ImagesHistogram {
+ if m != nil {
+ return m.Histogram
+ }
+ return nil
+}
+
+type ImagesGetUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ CreateSecureUrl *bool `protobuf:"varint,2,opt,name=create_secure_url,def=0" json:"create_secure_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseRequest) Reset() { *m = ImagesGetUrlBaseRequest{} }
+func (m *ImagesGetUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseRequest) ProtoMessage() {}
+
+const Default_ImagesGetUrlBaseRequest_CreateSecureUrl bool = false
+
+func (m *ImagesGetUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+func (m *ImagesGetUrlBaseRequest) GetCreateSecureUrl() bool {
+ if m != nil && m.CreateSecureUrl != nil {
+ return *m.CreateSecureUrl
+ }
+ return Default_ImagesGetUrlBaseRequest_CreateSecureUrl
+}
+
+type ImagesGetUrlBaseResponse struct {
+ Url *string `protobuf:"bytes,1,req,name=url" json:"url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesGetUrlBaseResponse) Reset() { *m = ImagesGetUrlBaseResponse{} }
+func (m *ImagesGetUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesGetUrlBaseResponse) ProtoMessage() {}
+
+func (m *ImagesGetUrlBaseResponse) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseRequest struct {
+ BlobKey *string `protobuf:"bytes,1,req,name=blob_key" json:"blob_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseRequest) Reset() { *m = ImagesDeleteUrlBaseRequest{} }
+func (m *ImagesDeleteUrlBaseRequest) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseRequest) ProtoMessage() {}
+
+func (m *ImagesDeleteUrlBaseRequest) GetBlobKey() string {
+ if m != nil && m.BlobKey != nil {
+ return *m.BlobKey
+ }
+ return ""
+}
+
+type ImagesDeleteUrlBaseResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ImagesDeleteUrlBaseResponse) Reset() { *m = ImagesDeleteUrlBaseResponse{} }
+func (m *ImagesDeleteUrlBaseResponse) String() string { return proto.CompactTextString(m) }
+func (*ImagesDeleteUrlBaseResponse) ProtoMessage() {}
+
+func init() {
+ proto.RegisterEnum("appengine.ImagesServiceError_ErrorCode", ImagesServiceError_ErrorCode_name, ImagesServiceError_ErrorCode_value)
+ proto.RegisterEnum("appengine.ImagesServiceTransform_Type", ImagesServiceTransform_Type_name, ImagesServiceTransform_Type_value)
+ proto.RegisterEnum("appengine.InputSettings_ORIENTATION_CORRECTION_TYPE", InputSettings_ORIENTATION_CORRECTION_TYPE_name, InputSettings_ORIENTATION_CORRECTION_TYPE_value)
+ proto.RegisterEnum("appengine.OutputSettings_MIME_TYPE", OutputSettings_MIME_TYPE_name, OutputSettings_MIME_TYPE_value)
+ proto.RegisterEnum("appengine.CompositeImageOptions_ANCHOR", CompositeImageOptions_ANCHOR_name, CompositeImageOptions_ANCHOR_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.proto
new file mode 100644
index 000000000000..f0d2ed5d362a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/image/images_service.proto
@@ -0,0 +1,162 @@
+syntax = "proto2";
+option go_package = "image";
+
+package appengine;
+
+message ImagesServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ BAD_TRANSFORM_DATA = 2;
+ NOT_IMAGE = 3;
+ BAD_IMAGE_DATA = 4;
+ IMAGE_TOO_LARGE = 5;
+ INVALID_BLOB_KEY = 6;
+ ACCESS_DENIED = 7;
+ OBJECT_NOT_FOUND = 8;
+ }
+}
+
+message ImagesServiceTransform {
+ enum Type {
+ RESIZE = 1;
+ ROTATE = 2;
+ HORIZONTAL_FLIP = 3;
+ VERTICAL_FLIP = 4;
+ CROP = 5;
+ IM_FEELING_LUCKY = 6;
+ }
+}
+
+message Transform {
+ optional int32 width = 1;
+ optional int32 height = 2;
+ optional bool crop_to_fit = 11 [default = false];
+ optional float crop_offset_x = 12 [default = 0.5];
+ optional float crop_offset_y = 13 [default = 0.5];
+
+ optional int32 rotate = 3 [default = 0];
+
+ optional bool horizontal_flip = 4 [default = false];
+
+ optional bool vertical_flip = 5 [default = false];
+
+ optional float crop_left_x = 6 [default = 0.0];
+ optional float crop_top_y = 7 [default = 0.0];
+ optional float crop_right_x = 8 [default = 1.0];
+ optional float crop_bottom_y = 9 [default = 1.0];
+
+ optional bool autolevels = 10 [default = false];
+
+ optional bool allow_stretch = 14 [default = false];
+}
+
+message ImageData {
+ required bytes content = 1 [ctype=CORD];
+ optional string blob_key = 2;
+
+ optional int32 width = 3;
+ optional int32 height = 4;
+}
+
+message InputSettings {
+ enum ORIENTATION_CORRECTION_TYPE {
+ UNCHANGED_ORIENTATION = 0;
+ CORRECT_ORIENTATION = 1;
+ }
+ optional ORIENTATION_CORRECTION_TYPE correct_exif_orientation = 1
+ [default=UNCHANGED_ORIENTATION];
+ optional bool parse_metadata = 2 [default=false];
+ optional int32 transparent_substitution_rgb = 3;
+}
+
+message OutputSettings {
+ enum MIME_TYPE {
+ PNG = 0;
+ JPEG = 1;
+ WEBP = 2;
+ }
+
+ optional MIME_TYPE mime_type = 1 [default=PNG];
+ optional int32 quality = 2;
+}
+
+message ImagesTransformRequest {
+ required ImageData image = 1;
+ repeated Transform transform = 2;
+ required OutputSettings output = 3;
+ optional InputSettings input = 4;
+}
+
+message ImagesTransformResponse {
+ required ImageData image = 1;
+ optional string source_metadata = 2;
+}
+
+message CompositeImageOptions {
+ required int32 source_index = 1;
+ required int32 x_offset = 2;
+ required int32 y_offset = 3;
+ required float opacity = 4;
+
+ enum ANCHOR {
+ TOP_LEFT = 0;
+ TOP = 1;
+ TOP_RIGHT = 2;
+ LEFT = 3;
+ CENTER = 4;
+ RIGHT = 5;
+ BOTTOM_LEFT = 6;
+ BOTTOM = 7;
+ BOTTOM_RIGHT = 8;
+ }
+
+ required ANCHOR anchor = 5;
+}
+
+message ImagesCanvas {
+ required int32 width = 1;
+ required int32 height = 2;
+ required OutputSettings output = 3;
+ optional int32 color = 4 [default=-1];
+}
+
+message ImagesCompositeRequest {
+ repeated ImageData image = 1;
+ repeated CompositeImageOptions options = 2;
+ required ImagesCanvas canvas = 3;
+}
+
+message ImagesCompositeResponse {
+ required ImageData image = 1;
+}
+
+message ImagesHistogramRequest {
+ required ImageData image = 1;
+}
+
+message ImagesHistogram {
+ repeated int32 red = 1;
+ repeated int32 green = 2;
+ repeated int32 blue = 3;
+}
+
+message ImagesHistogramResponse {
+ required ImagesHistogram histogram = 1;
+}
+
+message ImagesGetUrlBaseRequest {
+ required string blob_key = 1;
+
+ optional bool create_secure_url = 2 [default = false];
+}
+
+message ImagesGetUrlBaseResponse {
+ required string url = 1;
+}
+
+message ImagesDeleteUrlBaseRequest {
+ required string blob_key = 1;
+}
+
+message ImagesDeleteUrlBaseResponse {
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/internal.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/internal.go
new file mode 100644
index 000000000000..6c637209a67b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/internal.go
@@ -0,0 +1,165 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package internal provides support for package appengine.
+//
+// Programs should not use this package directly. Its API is not stable.
+// Use packages appengine and appengine/* instead.
+package internal
+
+import (
+ "fmt"
+ "io"
+ "log"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ remotepb "google.golang.org/appengine/internal/remote_api"
+)
+
+type CallOptions struct {
+ Timeout time.Duration // if non-zero, overrides RPC default
+}
+
+// errorCodeMaps is a map of service name to the error code map for the service.
+var errorCodeMaps = make(map[string]map[int32]string)
+
+// RegisterErrorCodeMap is called from API implementations to register their
+// error code map. This should only be called from init functions.
+func RegisterErrorCodeMap(service string, m map[int32]string) {
+ errorCodeMaps[service] = m
+}
+
+type timeoutCodeKey struct {
+ service string
+ code int32
+}
+
+// timeoutCodes is the set of service+code pairs that represent timeouts.
+var timeoutCodes = make(map[timeoutCodeKey]bool)
+
+func RegisterTimeoutErrorCode(service string, code int32) {
+ timeoutCodes[timeoutCodeKey{service, code}] = true
+}
+
+// APIError is the type returned by appengine.Context's Call method
+// when an API call fails in an API-specific way. This may be, for instance,
+// a taskqueue API call failing with TaskQueueServiceError::UNKNOWN_QUEUE.
+type APIError struct {
+ Service string
+ Detail string
+ Code int32 // API-specific error code
+}
+
+func (e *APIError) Error() string {
+ if e.Code == 0 {
+ if e.Detail == "" {
+ return "APIError "
+ }
+ return e.Detail
+ }
+ s := fmt.Sprintf("API error %d", e.Code)
+ if m, ok := errorCodeMaps[e.Service]; ok {
+ s += " (" + e.Service + ": " + m[e.Code] + ")"
+ } else {
+ // Shouldn't happen, but provide a bit more detail if it does.
+ s = e.Service + " " + s
+ }
+ if e.Detail != "" {
+ s += ": " + e.Detail
+ }
+ return s
+}
+
+func (e *APIError) IsTimeout() bool {
+ return timeoutCodes[timeoutCodeKey{e.Service, e.Code}]
+}
+
+// CallError is the type returned by appengine.Context's Call method when an
+// API call fails in a generic way, such as RpcError::CAPABILITY_DISABLED.
+type CallError struct {
+ Detail string
+ Code int32
+ // TODO: Remove this if we get a distinguishable error code.
+ Timeout bool
+}
+
+func (e *CallError) Error() string {
+ var msg string
+ switch remotepb.RpcError_ErrorCode(e.Code) {
+ case remotepb.RpcError_UNKNOWN:
+ return e.Detail
+ case remotepb.RpcError_OVER_QUOTA:
+ msg = "Over quota"
+ case remotepb.RpcError_CAPABILITY_DISABLED:
+ msg = "Capability disabled"
+ case remotepb.RpcError_CANCELLED:
+ msg = "Canceled"
+ default:
+ msg = fmt.Sprintf("Call error %d", e.Code)
+ }
+ s := msg + ": " + e.Detail
+ if e.Timeout {
+ s += " (timeout)"
+ }
+ return s
+}
+
+func (e *CallError) IsTimeout() bool {
+ return e.Timeout
+}
+
+// The comment below must not be changed.
+// It is used by go-app-builder to recognise that this package has
+// the internal.Main function to use in the synthetic main.
+// The gophers party all night; the rabbits provide the beats.
+
+// Main is designed so that the complete generated main package is:
+//
+// package main
+//
+// import (
+// "google.golang.org/appengine/internal"
+//
+// _ "myapp/package0"
+// _ "myapp/package1"
+// )
+//
+// func main() {
+// internal.Main()
+// }
+//
+// The "myapp/packageX" packages are expected to register HTTP handlers
+// in their init functions.
+func Main() {
+ installHealthChecker(http.DefaultServeMux)
+
+ if err := http.ListenAndServe(":8080", http.HandlerFunc(handleHTTP)); err != nil {
+ log.Fatalf("http.ListenAndServe: %v", err)
+ }
+}
+
+func installHealthChecker(mux *http.ServeMux) {
+ // If no health check handler has been installed by this point, add a trivial one.
+ const healthPath = "/_ah/health"
+ hreq := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Path: healthPath,
+ },
+ }
+ if _, pat := mux.Handler(hreq); pat != healthPath {
+ mux.HandleFunc(healthPath, func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "ok")
+ })
+ }
+}
+
+// NamespaceMods is a map from API service to a function that will mutate an RPC request to attach a namespace.
+// The function should be prepared to be called on the same message more than once; it should only modify the
+// RPC request the first time.
+var NamespaceMods = make(map[string]func(m proto.Message, namespace string))
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/internal_test.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/internal_test.go
new file mode 100644
index 000000000000..226028d810e1
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/internal_test.go
@@ -0,0 +1,54 @@
+package internal
+
+import (
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/http/httptest"
+ "testing"
+)
+
+func TestInstallingHealthChecker(t *testing.T) {
+ try := func(desc string, mux *http.ServeMux, wantCode int, wantBody string) {
+ installHealthChecker(mux)
+ srv := httptest.NewServer(mux)
+ defer srv.Close()
+
+ resp, err := http.Get(srv.URL + "/_ah/health")
+ if err != nil {
+ t.Errorf("%s: http.Get: %v", desc, err)
+ return
+ }
+ defer resp.Body.Close()
+ body, err := ioutil.ReadAll(resp.Body)
+ if err != nil {
+ t.Errorf("%s: reading body: %v", desc, err)
+ return
+ }
+
+ if resp.StatusCode != wantCode {
+ t.Errorf("%s: got HTTP %d, want %d", desc, resp.StatusCode, wantCode)
+ return
+ }
+ if wantBody != "" && string(body) != wantBody {
+ t.Errorf("%s: got HTTP body %q, want %q", desc, body, wantBody)
+ return
+ }
+ }
+
+ // If there's no handlers, or only a root handler, a health checker should be installed.
+ try("empty mux", http.NewServeMux(), 200, "ok")
+ mux := http.NewServeMux()
+ mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
+ io.WriteString(w, "root handler")
+ })
+ try("mux with root handler", mux, 200, "ok")
+
+ // If there's a custom health check handler, one should not be installed.
+ mux = http.NewServeMux()
+ mux.HandleFunc("/_ah/health", func(w http.ResponseWriter, r *http.Request) {
+ w.WriteHeader(418)
+ io.WriteString(w, "I'm short and stout!")
+ })
+ try("mux with custom health checker", mux, 418, "I'm short and stout!")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.pb.go
new file mode 100644
index 000000000000..5aae0a76a916
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.pb.go
@@ -0,0 +1,898 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/log/log_service.proto
+// DO NOT EDIT!
+
+/*
+Package log is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/log/log_service.proto
+
+It has these top-level messages:
+ LogServiceError
+ UserAppLogLine
+ UserAppLogGroup
+ FlushRequest
+ SetStatusRequest
+ LogOffset
+ LogLine
+ RequestLog
+ LogModuleVersion
+ LogReadRequest
+ LogReadResponse
+ LogUsageRecord
+ LogUsageRequest
+ LogUsageResponse
+*/
+package log
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type LogServiceError_ErrorCode int32
+
+const (
+ LogServiceError_OK LogServiceError_ErrorCode = 0
+ LogServiceError_INVALID_REQUEST LogServiceError_ErrorCode = 1
+ LogServiceError_STORAGE_ERROR LogServiceError_ErrorCode = 2
+)
+
+var LogServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "STORAGE_ERROR",
+}
+var LogServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "STORAGE_ERROR": 2,
+}
+
+func (x LogServiceError_ErrorCode) Enum() *LogServiceError_ErrorCode {
+ p := new(LogServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x LogServiceError_ErrorCode) String() string {
+ return proto.EnumName(LogServiceError_ErrorCode_name, int32(x))
+}
+func (x *LogServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(LogServiceError_ErrorCode_value, data, "LogServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = LogServiceError_ErrorCode(value)
+ return nil
+}
+
+type LogServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogServiceError) Reset() { *m = LogServiceError{} }
+func (m *LogServiceError) String() string { return proto.CompactTextString(m) }
+func (*LogServiceError) ProtoMessage() {}
+
+type UserAppLogLine struct {
+ TimestampUsec *int64 `protobuf:"varint,1,req,name=timestamp_usec" json:"timestamp_usec,omitempty"`
+ Level *int64 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ Message *string `protobuf:"bytes,3,req,name=message" json:"message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogLine) Reset() { *m = UserAppLogLine{} }
+func (m *UserAppLogLine) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogLine) ProtoMessage() {}
+
+func (m *UserAppLogLine) GetTimestampUsec() int64 {
+ if m != nil && m.TimestampUsec != nil {
+ return *m.TimestampUsec
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetLevel() int64 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *UserAppLogLine) GetMessage() string {
+ if m != nil && m.Message != nil {
+ return *m.Message
+ }
+ return ""
+}
+
+type UserAppLogGroup struct {
+ LogLine []*UserAppLogLine `protobuf:"bytes,2,rep,name=log_line" json:"log_line,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserAppLogGroup) Reset() { *m = UserAppLogGroup{} }
+func (m *UserAppLogGroup) String() string { return proto.CompactTextString(m) }
+func (*UserAppLogGroup) ProtoMessage() {}
+
+func (m *UserAppLogGroup) GetLogLine() []*UserAppLogLine {
+ if m != nil {
+ return m.LogLine
+ }
+ return nil
+}
+
+type FlushRequest struct {
+ Logs []byte `protobuf:"bytes,1,opt,name=logs" json:"logs,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FlushRequest) Reset() { *m = FlushRequest{} }
+func (m *FlushRequest) String() string { return proto.CompactTextString(m) }
+func (*FlushRequest) ProtoMessage() {}
+
+func (m *FlushRequest) GetLogs() []byte {
+ if m != nil {
+ return m.Logs
+ }
+ return nil
+}
+
+type SetStatusRequest struct {
+ Status *string `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetStatusRequest) Reset() { *m = SetStatusRequest{} }
+func (m *SetStatusRequest) String() string { return proto.CompactTextString(m) }
+func (*SetStatusRequest) ProtoMessage() {}
+
+func (m *SetStatusRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+type LogOffset struct {
+ RequestId []byte `protobuf:"bytes,1,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogOffset) Reset() { *m = LogOffset{} }
+func (m *LogOffset) String() string { return proto.CompactTextString(m) }
+func (*LogOffset) ProtoMessage() {}
+
+func (m *LogOffset) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+type LogLine struct {
+ Time *int64 `protobuf:"varint,1,req,name=time" json:"time,omitempty"`
+ Level *int32 `protobuf:"varint,2,req,name=level" json:"level,omitempty"`
+ LogMessage *string `protobuf:"bytes,3,req,name=log_message" json:"log_message,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogLine) Reset() { *m = LogLine{} }
+func (m *LogLine) String() string { return proto.CompactTextString(m) }
+func (*LogLine) ProtoMessage() {}
+
+func (m *LogLine) GetTime() int64 {
+ if m != nil && m.Time != nil {
+ return *m.Time
+ }
+ return 0
+}
+
+func (m *LogLine) GetLevel() int32 {
+ if m != nil && m.Level != nil {
+ return *m.Level
+ }
+ return 0
+}
+
+func (m *LogLine) GetLogMessage() string {
+ if m != nil && m.LogMessage != nil {
+ return *m.LogMessage
+ }
+ return ""
+}
+
+type RequestLog struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ ModuleId *string `protobuf:"bytes,37,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,req,name=version_id" json:"version_id,omitempty"`
+ RequestId []byte `protobuf:"bytes,3,req,name=request_id" json:"request_id,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,35,opt,name=offset" json:"offset,omitempty"`
+ Ip *string `protobuf:"bytes,4,req,name=ip" json:"ip,omitempty"`
+ Nickname *string `protobuf:"bytes,5,opt,name=nickname" json:"nickname,omitempty"`
+ StartTime *int64 `protobuf:"varint,6,req,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,7,req,name=end_time" json:"end_time,omitempty"`
+ Latency *int64 `protobuf:"varint,8,req,name=latency" json:"latency,omitempty"`
+ Mcycles *int64 `protobuf:"varint,9,req,name=mcycles" json:"mcycles,omitempty"`
+ Method *string `protobuf:"bytes,10,req,name=method" json:"method,omitempty"`
+ Resource *string `protobuf:"bytes,11,req,name=resource" json:"resource,omitempty"`
+ HttpVersion *string `protobuf:"bytes,12,req,name=http_version" json:"http_version,omitempty"`
+ Status *int32 `protobuf:"varint,13,req,name=status" json:"status,omitempty"`
+ ResponseSize *int64 `protobuf:"varint,14,req,name=response_size" json:"response_size,omitempty"`
+ Referrer *string `protobuf:"bytes,15,opt,name=referrer" json:"referrer,omitempty"`
+ UserAgent *string `protobuf:"bytes,16,opt,name=user_agent" json:"user_agent,omitempty"`
+ UrlMapEntry *string `protobuf:"bytes,17,req,name=url_map_entry" json:"url_map_entry,omitempty"`
+ Combined *string `protobuf:"bytes,18,req,name=combined" json:"combined,omitempty"`
+ ApiMcycles *int64 `protobuf:"varint,19,opt,name=api_mcycles" json:"api_mcycles,omitempty"`
+ Host *string `protobuf:"bytes,20,opt,name=host" json:"host,omitempty"`
+ Cost *float64 `protobuf:"fixed64,21,opt,name=cost" json:"cost,omitempty"`
+ TaskQueueName *string `protobuf:"bytes,22,opt,name=task_queue_name" json:"task_queue_name,omitempty"`
+ TaskName *string `protobuf:"bytes,23,opt,name=task_name" json:"task_name,omitempty"`
+ WasLoadingRequest *bool `protobuf:"varint,24,opt,name=was_loading_request" json:"was_loading_request,omitempty"`
+ PendingTime *int64 `protobuf:"varint,25,opt,name=pending_time" json:"pending_time,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,26,opt,name=replica_index,def=-1" json:"replica_index,omitempty"`
+ Finished *bool `protobuf:"varint,27,opt,name=finished,def=1" json:"finished,omitempty"`
+ CloneKey []byte `protobuf:"bytes,28,opt,name=clone_key" json:"clone_key,omitempty"`
+ Line []*LogLine `protobuf:"bytes,29,rep,name=line" json:"line,omitempty"`
+ LinesIncomplete *bool `protobuf:"varint,36,opt,name=lines_incomplete" json:"lines_incomplete,omitempty"`
+ AppEngineRelease []byte `protobuf:"bytes,38,opt,name=app_engine_release" json:"app_engine_release,omitempty"`
+ ExitReason *int32 `protobuf:"varint,30,opt,name=exit_reason" json:"exit_reason,omitempty"`
+ WasThrottledForTime *bool `protobuf:"varint,31,opt,name=was_throttled_for_time" json:"was_throttled_for_time,omitempty"`
+ WasThrottledForRequests *bool `protobuf:"varint,32,opt,name=was_throttled_for_requests" json:"was_throttled_for_requests,omitempty"`
+ ThrottledTime *int64 `protobuf:"varint,33,opt,name=throttled_time" json:"throttled_time,omitempty"`
+ ServerName []byte `protobuf:"bytes,34,opt,name=server_name" json:"server_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestLog) Reset() { *m = RequestLog{} }
+func (m *RequestLog) String() string { return proto.CompactTextString(m) }
+func (*RequestLog) ProtoMessage() {}
+
+const Default_RequestLog_ModuleId string = "default"
+const Default_RequestLog_ReplicaIndex int32 = -1
+const Default_RequestLog_Finished bool = true
+
+func (m *RequestLog) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_RequestLog_ModuleId
+}
+
+func (m *RequestLog) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *RequestLog) GetRequestId() []byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *RequestLog) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *RequestLog) GetIp() string {
+ if m != nil && m.Ip != nil {
+ return *m.Ip
+ }
+ return ""
+}
+
+func (m *RequestLog) GetNickname() string {
+ if m != nil && m.Nickname != nil {
+ return *m.Nickname
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetLatency() int64 {
+ if m != nil && m.Latency != nil {
+ return *m.Latency
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMcycles() int64 {
+ if m != nil && m.Mcycles != nil {
+ return *m.Mcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *RequestLog) GetResource() string {
+ if m != nil && m.Resource != nil {
+ return *m.Resource
+ }
+ return ""
+}
+
+func (m *RequestLog) GetHttpVersion() string {
+ if m != nil && m.HttpVersion != nil {
+ return *m.HttpVersion
+ }
+ return ""
+}
+
+func (m *RequestLog) GetStatus() int32 {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return 0
+}
+
+func (m *RequestLog) GetResponseSize() int64 {
+ if m != nil && m.ResponseSize != nil {
+ return *m.ResponseSize
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReferrer() string {
+ if m != nil && m.Referrer != nil {
+ return *m.Referrer
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUserAgent() string {
+ if m != nil && m.UserAgent != nil {
+ return *m.UserAgent
+ }
+ return ""
+}
+
+func (m *RequestLog) GetUrlMapEntry() string {
+ if m != nil && m.UrlMapEntry != nil {
+ return *m.UrlMapEntry
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCombined() string {
+ if m != nil && m.Combined != nil {
+ return *m.Combined
+ }
+ return ""
+}
+
+func (m *RequestLog) GetApiMcycles() int64 {
+ if m != nil && m.ApiMcycles != nil {
+ return *m.ApiMcycles
+ }
+ return 0
+}
+
+func (m *RequestLog) GetHost() string {
+ if m != nil && m.Host != nil {
+ return *m.Host
+ }
+ return ""
+}
+
+func (m *RequestLog) GetCost() float64 {
+ if m != nil && m.Cost != nil {
+ return *m.Cost
+ }
+ return 0
+}
+
+func (m *RequestLog) GetTaskQueueName() string {
+ if m != nil && m.TaskQueueName != nil {
+ return *m.TaskQueueName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetTaskName() string {
+ if m != nil && m.TaskName != nil {
+ return *m.TaskName
+ }
+ return ""
+}
+
+func (m *RequestLog) GetWasLoadingRequest() bool {
+ if m != nil && m.WasLoadingRequest != nil {
+ return *m.WasLoadingRequest
+ }
+ return false
+}
+
+func (m *RequestLog) GetPendingTime() int64 {
+ if m != nil && m.PendingTime != nil {
+ return *m.PendingTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return Default_RequestLog_ReplicaIndex
+}
+
+func (m *RequestLog) GetFinished() bool {
+ if m != nil && m.Finished != nil {
+ return *m.Finished
+ }
+ return Default_RequestLog_Finished
+}
+
+func (m *RequestLog) GetCloneKey() []byte {
+ if m != nil {
+ return m.CloneKey
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLine() []*LogLine {
+ if m != nil {
+ return m.Line
+ }
+ return nil
+}
+
+func (m *RequestLog) GetLinesIncomplete() bool {
+ if m != nil && m.LinesIncomplete != nil {
+ return *m.LinesIncomplete
+ }
+ return false
+}
+
+func (m *RequestLog) GetAppEngineRelease() []byte {
+ if m != nil {
+ return m.AppEngineRelease
+ }
+ return nil
+}
+
+func (m *RequestLog) GetExitReason() int32 {
+ if m != nil && m.ExitReason != nil {
+ return *m.ExitReason
+ }
+ return 0
+}
+
+func (m *RequestLog) GetWasThrottledForTime() bool {
+ if m != nil && m.WasThrottledForTime != nil {
+ return *m.WasThrottledForTime
+ }
+ return false
+}
+
+func (m *RequestLog) GetWasThrottledForRequests() bool {
+ if m != nil && m.WasThrottledForRequests != nil {
+ return *m.WasThrottledForRequests
+ }
+ return false
+}
+
+func (m *RequestLog) GetThrottledTime() int64 {
+ if m != nil && m.ThrottledTime != nil {
+ return *m.ThrottledTime
+ }
+ return 0
+}
+
+func (m *RequestLog) GetServerName() []byte {
+ if m != nil {
+ return m.ServerName
+ }
+ return nil
+}
+
+type LogModuleVersion struct {
+ ModuleId *string `protobuf:"bytes,1,opt,name=module_id,def=default" json:"module_id,omitempty"`
+ VersionId *string `protobuf:"bytes,2,opt,name=version_id" json:"version_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogModuleVersion) Reset() { *m = LogModuleVersion{} }
+func (m *LogModuleVersion) String() string { return proto.CompactTextString(m) }
+func (*LogModuleVersion) ProtoMessage() {}
+
+const Default_LogModuleVersion_ModuleId string = "default"
+
+func (m *LogModuleVersion) GetModuleId() string {
+ if m != nil && m.ModuleId != nil {
+ return *m.ModuleId
+ }
+ return Default_LogModuleVersion_ModuleId
+}
+
+func (m *LogModuleVersion) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+type LogReadRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ ModuleVersion []*LogModuleVersion `protobuf:"bytes,19,rep,name=module_version" json:"module_version,omitempty"`
+ StartTime *int64 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int64 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,5,opt,name=offset" json:"offset,omitempty"`
+ RequestId [][]byte `protobuf:"bytes,6,rep,name=request_id" json:"request_id,omitempty"`
+ MinimumLogLevel *int32 `protobuf:"varint,7,opt,name=minimum_log_level" json:"minimum_log_level,omitempty"`
+ IncludeIncomplete *bool `protobuf:"varint,8,opt,name=include_incomplete" json:"include_incomplete,omitempty"`
+ Count *int64 `protobuf:"varint,9,opt,name=count" json:"count,omitempty"`
+ CombinedLogRegex *string `protobuf:"bytes,14,opt,name=combined_log_regex" json:"combined_log_regex,omitempty"`
+ HostRegex *string `protobuf:"bytes,15,opt,name=host_regex" json:"host_regex,omitempty"`
+ ReplicaIndex *int32 `protobuf:"varint,16,opt,name=replica_index" json:"replica_index,omitempty"`
+ IncludeAppLogs *bool `protobuf:"varint,10,opt,name=include_app_logs" json:"include_app_logs,omitempty"`
+ AppLogsPerRequest *int32 `protobuf:"varint,17,opt,name=app_logs_per_request" json:"app_logs_per_request,omitempty"`
+ IncludeHost *bool `protobuf:"varint,11,opt,name=include_host" json:"include_host,omitempty"`
+ IncludeAll *bool `protobuf:"varint,12,opt,name=include_all" json:"include_all,omitempty"`
+ CacheIterator *bool `protobuf:"varint,13,opt,name=cache_iterator" json:"cache_iterator,omitempty"`
+ NumShards *int32 `protobuf:"varint,18,opt,name=num_shards" json:"num_shards,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadRequest) Reset() { *m = LogReadRequest{} }
+func (m *LogReadRequest) String() string { return proto.CompactTextString(m) }
+func (*LogReadRequest) ProtoMessage() {}
+
+func (m *LogReadRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetModuleVersion() []*LogModuleVersion {
+ if m != nil {
+ return m.ModuleVersion
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetStartTime() int64 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetEndTime() int64 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetRequestId() [][]byte {
+ if m != nil {
+ return m.RequestId
+ }
+ return nil
+}
+
+func (m *LogReadRequest) GetMinimumLogLevel() int32 {
+ if m != nil && m.MinimumLogLevel != nil {
+ return *m.MinimumLogLevel
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeIncomplete() bool {
+ if m != nil && m.IncludeIncomplete != nil {
+ return *m.IncludeIncomplete
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetCombinedLogRegex() string {
+ if m != nil && m.CombinedLogRegex != nil {
+ return *m.CombinedLogRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetHostRegex() string {
+ if m != nil && m.HostRegex != nil {
+ return *m.HostRegex
+ }
+ return ""
+}
+
+func (m *LogReadRequest) GetReplicaIndex() int32 {
+ if m != nil && m.ReplicaIndex != nil {
+ return *m.ReplicaIndex
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeAppLogs() bool {
+ if m != nil && m.IncludeAppLogs != nil {
+ return *m.IncludeAppLogs
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetAppLogsPerRequest() int32 {
+ if m != nil && m.AppLogsPerRequest != nil {
+ return *m.AppLogsPerRequest
+ }
+ return 0
+}
+
+func (m *LogReadRequest) GetIncludeHost() bool {
+ if m != nil && m.IncludeHost != nil {
+ return *m.IncludeHost
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetIncludeAll() bool {
+ if m != nil && m.IncludeAll != nil {
+ return *m.IncludeAll
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetCacheIterator() bool {
+ if m != nil && m.CacheIterator != nil {
+ return *m.CacheIterator
+ }
+ return false
+}
+
+func (m *LogReadRequest) GetNumShards() int32 {
+ if m != nil && m.NumShards != nil {
+ return *m.NumShards
+ }
+ return 0
+}
+
+type LogReadResponse struct {
+ Log []*RequestLog `protobuf:"bytes,1,rep,name=log" json:"log,omitempty"`
+ Offset *LogOffset `protobuf:"bytes,2,opt,name=offset" json:"offset,omitempty"`
+ LastEndTime *int64 `protobuf:"varint,3,opt,name=last_end_time" json:"last_end_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogReadResponse) Reset() { *m = LogReadResponse{} }
+func (m *LogReadResponse) String() string { return proto.CompactTextString(m) }
+func (*LogReadResponse) ProtoMessage() {}
+
+func (m *LogReadResponse) GetLog() []*RequestLog {
+ if m != nil {
+ return m.Log
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetOffset() *LogOffset {
+ if m != nil {
+ return m.Offset
+ }
+ return nil
+}
+
+func (m *LogReadResponse) GetLastEndTime() int64 {
+ if m != nil && m.LastEndTime != nil {
+ return *m.LastEndTime
+ }
+ return 0
+}
+
+type LogUsageRecord struct {
+ VersionId *string `protobuf:"bytes,1,opt,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,2,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,3,opt,name=end_time" json:"end_time,omitempty"`
+ Count *int64 `protobuf:"varint,4,opt,name=count" json:"count,omitempty"`
+ TotalSize *int64 `protobuf:"varint,5,opt,name=total_size" json:"total_size,omitempty"`
+ Records *int32 `protobuf:"varint,6,opt,name=records" json:"records,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRecord) Reset() { *m = LogUsageRecord{} }
+func (m *LogUsageRecord) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRecord) ProtoMessage() {}
+
+func (m *LogUsageRecord) GetVersionId() string {
+ if m != nil && m.VersionId != nil {
+ return *m.VersionId
+ }
+ return ""
+}
+
+func (m *LogUsageRecord) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetCount() int64 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetTotalSize() int64 {
+ if m != nil && m.TotalSize != nil {
+ return *m.TotalSize
+ }
+ return 0
+}
+
+func (m *LogUsageRecord) GetRecords() int32 {
+ if m != nil && m.Records != nil {
+ return *m.Records
+ }
+ return 0
+}
+
+type LogUsageRequest struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ VersionId []string `protobuf:"bytes,2,rep,name=version_id" json:"version_id,omitempty"`
+ StartTime *int32 `protobuf:"varint,3,opt,name=start_time" json:"start_time,omitempty"`
+ EndTime *int32 `protobuf:"varint,4,opt,name=end_time" json:"end_time,omitempty"`
+ ResolutionHours *uint32 `protobuf:"varint,5,opt,name=resolution_hours,def=1" json:"resolution_hours,omitempty"`
+ CombineVersions *bool `protobuf:"varint,6,opt,name=combine_versions" json:"combine_versions,omitempty"`
+ UsageVersion *int32 `protobuf:"varint,7,opt,name=usage_version" json:"usage_version,omitempty"`
+ VersionsOnly *bool `protobuf:"varint,8,opt,name=versions_only" json:"versions_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageRequest) Reset() { *m = LogUsageRequest{} }
+func (m *LogUsageRequest) String() string { return proto.CompactTextString(m) }
+func (*LogUsageRequest) ProtoMessage() {}
+
+const Default_LogUsageRequest_ResolutionHours uint32 = 1
+
+func (m *LogUsageRequest) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *LogUsageRequest) GetVersionId() []string {
+ if m != nil {
+ return m.VersionId
+ }
+ return nil
+}
+
+func (m *LogUsageRequest) GetStartTime() int32 {
+ if m != nil && m.StartTime != nil {
+ return *m.StartTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetEndTime() int32 {
+ if m != nil && m.EndTime != nil {
+ return *m.EndTime
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetResolutionHours() uint32 {
+ if m != nil && m.ResolutionHours != nil {
+ return *m.ResolutionHours
+ }
+ return Default_LogUsageRequest_ResolutionHours
+}
+
+func (m *LogUsageRequest) GetCombineVersions() bool {
+ if m != nil && m.CombineVersions != nil {
+ return *m.CombineVersions
+ }
+ return false
+}
+
+func (m *LogUsageRequest) GetUsageVersion() int32 {
+ if m != nil && m.UsageVersion != nil {
+ return *m.UsageVersion
+ }
+ return 0
+}
+
+func (m *LogUsageRequest) GetVersionsOnly() bool {
+ if m != nil && m.VersionsOnly != nil {
+ return *m.VersionsOnly
+ }
+ return false
+}
+
+type LogUsageResponse struct {
+ Usage []*LogUsageRecord `protobuf:"bytes,1,rep,name=usage" json:"usage,omitempty"`
+ Summary *LogUsageRecord `protobuf:"bytes,2,opt,name=summary" json:"summary,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LogUsageResponse) Reset() { *m = LogUsageResponse{} }
+func (m *LogUsageResponse) String() string { return proto.CompactTextString(m) }
+func (*LogUsageResponse) ProtoMessage() {}
+
+func (m *LogUsageResponse) GetUsage() []*LogUsageRecord {
+ if m != nil {
+ return m.Usage
+ }
+ return nil
+}
+
+func (m *LogUsageResponse) GetSummary() *LogUsageRecord {
+ if m != nil {
+ return m.Summary
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("appengine.LogServiceError_ErrorCode", LogServiceError_ErrorCode_name, LogServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.proto
new file mode 100644
index 000000000000..8981dc47577c
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/log/log_service.proto
@@ -0,0 +1,150 @@
+syntax = "proto2";
+option go_package = "log";
+
+package appengine;
+
+message LogServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ STORAGE_ERROR = 2;
+ }
+}
+
+message UserAppLogLine {
+ required int64 timestamp_usec = 1;
+ required int64 level = 2;
+ required string message = 3;
+}
+
+message UserAppLogGroup {
+ repeated UserAppLogLine log_line = 2;
+}
+
+message FlushRequest {
+ optional bytes logs = 1;
+}
+
+message SetStatusRequest {
+ required string status = 1;
+}
+
+
+message LogOffset {
+ optional bytes request_id = 1;
+}
+
+message LogLine {
+ required int64 time = 1;
+ required int32 level = 2;
+ required string log_message = 3;
+}
+
+message RequestLog {
+ required string app_id = 1;
+ optional string module_id = 37 [default="default"];
+ required string version_id = 2;
+ required bytes request_id = 3;
+ optional LogOffset offset = 35;
+ required string ip = 4;
+ optional string nickname = 5;
+ required int64 start_time = 6;
+ required int64 end_time = 7;
+ required int64 latency = 8;
+ required int64 mcycles = 9;
+ required string method = 10;
+ required string resource = 11;
+ required string http_version = 12;
+ required int32 status = 13;
+ required int64 response_size = 14;
+ optional string referrer = 15;
+ optional string user_agent = 16;
+ required string url_map_entry = 17;
+ required string combined = 18;
+ optional int64 api_mcycles = 19;
+ optional string host = 20;
+ optional double cost = 21;
+
+ optional string task_queue_name = 22;
+ optional string task_name = 23;
+
+ optional bool was_loading_request = 24;
+ optional int64 pending_time = 25;
+ optional int32 replica_index = 26 [default = -1];
+ optional bool finished = 27 [default = true];
+ optional bytes clone_key = 28;
+
+ repeated LogLine line = 29;
+
+ optional bool lines_incomplete = 36;
+ optional bytes app_engine_release = 38;
+
+ optional int32 exit_reason = 30;
+ optional bool was_throttled_for_time = 31;
+ optional bool was_throttled_for_requests = 32;
+ optional int64 throttled_time = 33;
+
+ optional bytes server_name = 34;
+}
+
+message LogModuleVersion {
+ optional string module_id = 1 [default="default"];
+ optional string version_id = 2;
+}
+
+message LogReadRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ repeated LogModuleVersion module_version = 19;
+
+ optional int64 start_time = 3;
+ optional int64 end_time = 4;
+ optional LogOffset offset = 5;
+ repeated bytes request_id = 6;
+
+ optional int32 minimum_log_level = 7;
+ optional bool include_incomplete = 8;
+ optional int64 count = 9;
+
+ optional string combined_log_regex = 14;
+ optional string host_regex = 15;
+ optional int32 replica_index = 16;
+
+ optional bool include_app_logs = 10;
+ optional int32 app_logs_per_request = 17;
+ optional bool include_host = 11;
+ optional bool include_all = 12;
+ optional bool cache_iterator = 13;
+ optional int32 num_shards = 18;
+}
+
+message LogReadResponse {
+ repeated RequestLog log = 1;
+ optional LogOffset offset = 2;
+ optional int64 last_end_time = 3;
+}
+
+message LogUsageRecord {
+ optional string version_id = 1;
+ optional int32 start_time = 2;
+ optional int32 end_time = 3;
+ optional int64 count = 4;
+ optional int64 total_size = 5;
+ optional int32 records = 6;
+}
+
+message LogUsageRequest {
+ required string app_id = 1;
+ repeated string version_id = 2;
+ optional int32 start_time = 3;
+ optional int32 end_time = 4;
+ optional uint32 resolution_hours = 5 [default = 1];
+ optional bool combine_versions = 6;
+ optional int32 usage_version = 7;
+ optional bool versions_only = 8;
+}
+
+message LogUsageResponse {
+ repeated LogUsageRecord usage = 1;
+ optional LogUsageRecord summary = 2;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.pb.go
new file mode 100644
index 000000000000..dd2b33040589
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.pb.go
@@ -0,0 +1,228 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/mail/mail_service.proto
+// DO NOT EDIT!
+
+/*
+Package mail is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/mail/mail_service.proto
+
+It has these top-level messages:
+ MailServiceError
+ MailAttachment
+ MailHeader
+ MailMessage
+*/
+package mail
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MailServiceError_ErrorCode int32
+
+const (
+ MailServiceError_OK MailServiceError_ErrorCode = 0
+ MailServiceError_INTERNAL_ERROR MailServiceError_ErrorCode = 1
+ MailServiceError_BAD_REQUEST MailServiceError_ErrorCode = 2
+ MailServiceError_UNAUTHORIZED_SENDER MailServiceError_ErrorCode = 3
+ MailServiceError_INVALID_ATTACHMENT_TYPE MailServiceError_ErrorCode = 4
+ MailServiceError_INVALID_HEADER_NAME MailServiceError_ErrorCode = 5
+ MailServiceError_INVALID_CONTENT_ID MailServiceError_ErrorCode = 6
+)
+
+var MailServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INTERNAL_ERROR",
+ 2: "BAD_REQUEST",
+ 3: "UNAUTHORIZED_SENDER",
+ 4: "INVALID_ATTACHMENT_TYPE",
+ 5: "INVALID_HEADER_NAME",
+ 6: "INVALID_CONTENT_ID",
+}
+var MailServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INTERNAL_ERROR": 1,
+ "BAD_REQUEST": 2,
+ "UNAUTHORIZED_SENDER": 3,
+ "INVALID_ATTACHMENT_TYPE": 4,
+ "INVALID_HEADER_NAME": 5,
+ "INVALID_CONTENT_ID": 6,
+}
+
+func (x MailServiceError_ErrorCode) Enum() *MailServiceError_ErrorCode {
+ p := new(MailServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MailServiceError_ErrorCode) String() string {
+ return proto.EnumName(MailServiceError_ErrorCode_name, int32(x))
+}
+func (x *MailServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MailServiceError_ErrorCode_value, data, "MailServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MailServiceError_ErrorCode(value)
+ return nil
+}
+
+type MailServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailServiceError) Reset() { *m = MailServiceError{} }
+func (m *MailServiceError) String() string { return proto.CompactTextString(m) }
+func (*MailServiceError) ProtoMessage() {}
+
+type MailAttachment struct {
+ FileName *string `protobuf:"bytes,1,req" json:"FileName,omitempty"`
+ Data []byte `protobuf:"bytes,2,req" json:"Data,omitempty"`
+ ContentID *string `protobuf:"bytes,3,opt" json:"ContentID,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailAttachment) Reset() { *m = MailAttachment{} }
+func (m *MailAttachment) String() string { return proto.CompactTextString(m) }
+func (*MailAttachment) ProtoMessage() {}
+
+func (m *MailAttachment) GetFileName() string {
+ if m != nil && m.FileName != nil {
+ return *m.FileName
+ }
+ return ""
+}
+
+func (m *MailAttachment) GetData() []byte {
+ if m != nil {
+ return m.Data
+ }
+ return nil
+}
+
+func (m *MailAttachment) GetContentID() string {
+ if m != nil && m.ContentID != nil {
+ return *m.ContentID
+ }
+ return ""
+}
+
+type MailHeader struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *string `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailHeader) Reset() { *m = MailHeader{} }
+func (m *MailHeader) String() string { return proto.CompactTextString(m) }
+func (*MailHeader) ProtoMessage() {}
+
+func (m *MailHeader) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *MailHeader) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type MailMessage struct {
+ Sender *string `protobuf:"bytes,1,req" json:"Sender,omitempty"`
+ ReplyTo *string `protobuf:"bytes,2,opt" json:"ReplyTo,omitempty"`
+ To []string `protobuf:"bytes,3,rep" json:"To,omitempty"`
+ Cc []string `protobuf:"bytes,4,rep" json:"Cc,omitempty"`
+ Bcc []string `protobuf:"bytes,5,rep" json:"Bcc,omitempty"`
+ Subject *string `protobuf:"bytes,6,req" json:"Subject,omitempty"`
+ TextBody *string `protobuf:"bytes,7,opt" json:"TextBody,omitempty"`
+ HtmlBody *string `protobuf:"bytes,8,opt" json:"HtmlBody,omitempty"`
+ Attachment []*MailAttachment `protobuf:"bytes,9,rep" json:"Attachment,omitempty"`
+ Header []*MailHeader `protobuf:"bytes,10,rep" json:"Header,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MailMessage) Reset() { *m = MailMessage{} }
+func (m *MailMessage) String() string { return proto.CompactTextString(m) }
+func (*MailMessage) ProtoMessage() {}
+
+func (m *MailMessage) GetSender() string {
+ if m != nil && m.Sender != nil {
+ return *m.Sender
+ }
+ return ""
+}
+
+func (m *MailMessage) GetReplyTo() string {
+ if m != nil && m.ReplyTo != nil {
+ return *m.ReplyTo
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTo() []string {
+ if m != nil {
+ return m.To
+ }
+ return nil
+}
+
+func (m *MailMessage) GetCc() []string {
+ if m != nil {
+ return m.Cc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetBcc() []string {
+ if m != nil {
+ return m.Bcc
+ }
+ return nil
+}
+
+func (m *MailMessage) GetSubject() string {
+ if m != nil && m.Subject != nil {
+ return *m.Subject
+ }
+ return ""
+}
+
+func (m *MailMessage) GetTextBody() string {
+ if m != nil && m.TextBody != nil {
+ return *m.TextBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetHtmlBody() string {
+ if m != nil && m.HtmlBody != nil {
+ return *m.HtmlBody
+ }
+ return ""
+}
+
+func (m *MailMessage) GetAttachment() []*MailAttachment {
+ if m != nil {
+ return m.Attachment
+ }
+ return nil
+}
+
+func (m *MailMessage) GetHeader() []*MailHeader {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("appengine.MailServiceError_ErrorCode", MailServiceError_ErrorCode_name, MailServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.proto
new file mode 100644
index 000000000000..4e57b7aa51a9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/mail/mail_service.proto
@@ -0,0 +1,45 @@
+syntax = "proto2";
+option go_package = "mail";
+
+package appengine;
+
+message MailServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INTERNAL_ERROR = 1;
+ BAD_REQUEST = 2;
+ UNAUTHORIZED_SENDER = 3;
+ INVALID_ATTACHMENT_TYPE = 4;
+ INVALID_HEADER_NAME = 5;
+ INVALID_CONTENT_ID = 6;
+ }
+}
+
+message MailAttachment {
+ required string FileName = 1;
+ required bytes Data = 2;
+ optional string ContentID = 3;
+}
+
+message MailHeader {
+ required string name = 1;
+ required string value = 2;
+}
+
+message MailMessage {
+ required string Sender = 1;
+ optional string ReplyTo = 2;
+
+ repeated string To = 3;
+ repeated string Cc = 4;
+ repeated string Bcc = 5;
+
+ required string Subject = 6;
+
+ optional string TextBody = 7;
+ optional string HtmlBody = 8;
+
+ repeated MailAttachment Attachment = 9;
+
+ repeated MailHeader Header = 10;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
new file mode 100644
index 000000000000..301755e4e92b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.pb.go
@@ -0,0 +1,942 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/memcache/memcache_service.proto
+// DO NOT EDIT!
+
+/*
+Package memcache is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/memcache/memcache_service.proto
+
+It has these top-level messages:
+ MemcacheServiceError
+ AppOverride
+ MemcacheGetRequest
+ MemcacheGetResponse
+ MemcacheSetRequest
+ MemcacheSetResponse
+ MemcacheDeleteRequest
+ MemcacheDeleteResponse
+ MemcacheIncrementRequest
+ MemcacheIncrementResponse
+ MemcacheBatchIncrementRequest
+ MemcacheBatchIncrementResponse
+ MemcacheFlushRequest
+ MemcacheFlushResponse
+ MemcacheStatsRequest
+ MergedNamespaceStats
+ MemcacheStatsResponse
+ MemcacheGrabTailRequest
+ MemcacheGrabTailResponse
+*/
+package memcache
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type MemcacheServiceError_ErrorCode int32
+
+const (
+ MemcacheServiceError_OK MemcacheServiceError_ErrorCode = 0
+ MemcacheServiceError_UNSPECIFIED_ERROR MemcacheServiceError_ErrorCode = 1
+ MemcacheServiceError_NAMESPACE_NOT_SET MemcacheServiceError_ErrorCode = 2
+ MemcacheServiceError_PERMISSION_DENIED MemcacheServiceError_ErrorCode = 3
+ MemcacheServiceError_INVALID_VALUE MemcacheServiceError_ErrorCode = 6
+)
+
+var MemcacheServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNSPECIFIED_ERROR",
+ 2: "NAMESPACE_NOT_SET",
+ 3: "PERMISSION_DENIED",
+ 6: "INVALID_VALUE",
+}
+var MemcacheServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNSPECIFIED_ERROR": 1,
+ "NAMESPACE_NOT_SET": 2,
+ "PERMISSION_DENIED": 3,
+ "INVALID_VALUE": 6,
+}
+
+func (x MemcacheServiceError_ErrorCode) Enum() *MemcacheServiceError_ErrorCode {
+ p := new(MemcacheServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x MemcacheServiceError_ErrorCode) String() string {
+ return proto.EnumName(MemcacheServiceError_ErrorCode_name, int32(x))
+}
+func (x *MemcacheServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheServiceError_ErrorCode_value, data, "MemcacheServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheServiceError_ErrorCode(value)
+ return nil
+}
+
+type MemcacheSetRequest_SetPolicy int32
+
+const (
+ MemcacheSetRequest_SET MemcacheSetRequest_SetPolicy = 1
+ MemcacheSetRequest_ADD MemcacheSetRequest_SetPolicy = 2
+ MemcacheSetRequest_REPLACE MemcacheSetRequest_SetPolicy = 3
+ MemcacheSetRequest_CAS MemcacheSetRequest_SetPolicy = 4
+)
+
+var MemcacheSetRequest_SetPolicy_name = map[int32]string{
+ 1: "SET",
+ 2: "ADD",
+ 3: "REPLACE",
+ 4: "CAS",
+}
+var MemcacheSetRequest_SetPolicy_value = map[string]int32{
+ "SET": 1,
+ "ADD": 2,
+ "REPLACE": 3,
+ "CAS": 4,
+}
+
+func (x MemcacheSetRequest_SetPolicy) Enum() *MemcacheSetRequest_SetPolicy {
+ p := new(MemcacheSetRequest_SetPolicy)
+ *p = x
+ return p
+}
+func (x MemcacheSetRequest_SetPolicy) String() string {
+ return proto.EnumName(MemcacheSetRequest_SetPolicy_name, int32(x))
+}
+func (x *MemcacheSetRequest_SetPolicy) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetRequest_SetPolicy_value, data, "MemcacheSetRequest_SetPolicy")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetRequest_SetPolicy(value)
+ return nil
+}
+
+type MemcacheSetResponse_SetStatusCode int32
+
+const (
+ MemcacheSetResponse_STORED MemcacheSetResponse_SetStatusCode = 1
+ MemcacheSetResponse_NOT_STORED MemcacheSetResponse_SetStatusCode = 2
+ MemcacheSetResponse_ERROR MemcacheSetResponse_SetStatusCode = 3
+ MemcacheSetResponse_EXISTS MemcacheSetResponse_SetStatusCode = 4
+)
+
+var MemcacheSetResponse_SetStatusCode_name = map[int32]string{
+ 1: "STORED",
+ 2: "NOT_STORED",
+ 3: "ERROR",
+ 4: "EXISTS",
+}
+var MemcacheSetResponse_SetStatusCode_value = map[string]int32{
+ "STORED": 1,
+ "NOT_STORED": 2,
+ "ERROR": 3,
+ "EXISTS": 4,
+}
+
+func (x MemcacheSetResponse_SetStatusCode) Enum() *MemcacheSetResponse_SetStatusCode {
+ p := new(MemcacheSetResponse_SetStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheSetResponse_SetStatusCode) String() string {
+ return proto.EnumName(MemcacheSetResponse_SetStatusCode_name, int32(x))
+}
+func (x *MemcacheSetResponse_SetStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheSetResponse_SetStatusCode_value, data, "MemcacheSetResponse_SetStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheSetResponse_SetStatusCode(value)
+ return nil
+}
+
+type MemcacheDeleteResponse_DeleteStatusCode int32
+
+const (
+ MemcacheDeleteResponse_DELETED MemcacheDeleteResponse_DeleteStatusCode = 1
+ MemcacheDeleteResponse_NOT_FOUND MemcacheDeleteResponse_DeleteStatusCode = 2
+)
+
+var MemcacheDeleteResponse_DeleteStatusCode_name = map[int32]string{
+ 1: "DELETED",
+ 2: "NOT_FOUND",
+}
+var MemcacheDeleteResponse_DeleteStatusCode_value = map[string]int32{
+ "DELETED": 1,
+ "NOT_FOUND": 2,
+}
+
+func (x MemcacheDeleteResponse_DeleteStatusCode) Enum() *MemcacheDeleteResponse_DeleteStatusCode {
+ p := new(MemcacheDeleteResponse_DeleteStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheDeleteResponse_DeleteStatusCode) String() string {
+ return proto.EnumName(MemcacheDeleteResponse_DeleteStatusCode_name, int32(x))
+}
+func (x *MemcacheDeleteResponse_DeleteStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheDeleteResponse_DeleteStatusCode_value, data, "MemcacheDeleteResponse_DeleteStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheDeleteResponse_DeleteStatusCode(value)
+ return nil
+}
+
+type MemcacheIncrementRequest_Direction int32
+
+const (
+ MemcacheIncrementRequest_INCREMENT MemcacheIncrementRequest_Direction = 1
+ MemcacheIncrementRequest_DECREMENT MemcacheIncrementRequest_Direction = 2
+)
+
+var MemcacheIncrementRequest_Direction_name = map[int32]string{
+ 1: "INCREMENT",
+ 2: "DECREMENT",
+}
+var MemcacheIncrementRequest_Direction_value = map[string]int32{
+ "INCREMENT": 1,
+ "DECREMENT": 2,
+}
+
+func (x MemcacheIncrementRequest_Direction) Enum() *MemcacheIncrementRequest_Direction {
+ p := new(MemcacheIncrementRequest_Direction)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementRequest_Direction) String() string {
+ return proto.EnumName(MemcacheIncrementRequest_Direction_name, int32(x))
+}
+func (x *MemcacheIncrementRequest_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementRequest_Direction_value, data, "MemcacheIncrementRequest_Direction")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementRequest_Direction(value)
+ return nil
+}
+
+type MemcacheIncrementResponse_IncrementStatusCode int32
+
+const (
+ MemcacheIncrementResponse_OK MemcacheIncrementResponse_IncrementStatusCode = 1
+ MemcacheIncrementResponse_NOT_CHANGED MemcacheIncrementResponse_IncrementStatusCode = 2
+ MemcacheIncrementResponse_ERROR MemcacheIncrementResponse_IncrementStatusCode = 3
+)
+
+var MemcacheIncrementResponse_IncrementStatusCode_name = map[int32]string{
+ 1: "OK",
+ 2: "NOT_CHANGED",
+ 3: "ERROR",
+}
+var MemcacheIncrementResponse_IncrementStatusCode_value = map[string]int32{
+ "OK": 1,
+ "NOT_CHANGED": 2,
+ "ERROR": 3,
+}
+
+func (x MemcacheIncrementResponse_IncrementStatusCode) Enum() *MemcacheIncrementResponse_IncrementStatusCode {
+ p := new(MemcacheIncrementResponse_IncrementStatusCode)
+ *p = x
+ return p
+}
+func (x MemcacheIncrementResponse_IncrementStatusCode) String() string {
+ return proto.EnumName(MemcacheIncrementResponse_IncrementStatusCode_name, int32(x))
+}
+func (x *MemcacheIncrementResponse_IncrementStatusCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(MemcacheIncrementResponse_IncrementStatusCode_value, data, "MemcacheIncrementResponse_IncrementStatusCode")
+ if err != nil {
+ return err
+ }
+ *x = MemcacheIncrementResponse_IncrementStatusCode(value)
+ return nil
+}
+
+type MemcacheServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheServiceError) Reset() { *m = MemcacheServiceError{} }
+func (m *MemcacheServiceError) String() string { return proto.CompactTextString(m) }
+func (*MemcacheServiceError) ProtoMessage() {}
+
+type AppOverride struct {
+ AppId *string `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ NumMemcachegBackends *int32 `protobuf:"varint,2,opt,name=num_memcacheg_backends" json:"num_memcacheg_backends,omitempty"`
+ IgnoreShardlock *bool `protobuf:"varint,3,opt,name=ignore_shardlock" json:"ignore_shardlock,omitempty"`
+ MemcachePoolHint *string `protobuf:"bytes,4,opt,name=memcache_pool_hint" json:"memcache_pool_hint,omitempty"`
+ MemcacheShardingStrategy []byte `protobuf:"bytes,5,opt,name=memcache_sharding_strategy" json:"memcache_sharding_strategy,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AppOverride) Reset() { *m = AppOverride{} }
+func (m *AppOverride) String() string { return proto.CompactTextString(m) }
+func (*AppOverride) ProtoMessage() {}
+
+func (m *AppOverride) GetAppId() string {
+ if m != nil && m.AppId != nil {
+ return *m.AppId
+ }
+ return ""
+}
+
+func (m *AppOverride) GetNumMemcachegBackends() int32 {
+ if m != nil && m.NumMemcachegBackends != nil {
+ return *m.NumMemcachegBackends
+ }
+ return 0
+}
+
+func (m *AppOverride) GetIgnoreShardlock() bool {
+ if m != nil && m.IgnoreShardlock != nil {
+ return *m.IgnoreShardlock
+ }
+ return false
+}
+
+func (m *AppOverride) GetMemcachePoolHint() string {
+ if m != nil && m.MemcachePoolHint != nil {
+ return *m.MemcachePoolHint
+ }
+ return ""
+}
+
+func (m *AppOverride) GetMemcacheShardingStrategy() []byte {
+ if m != nil {
+ return m.MemcacheShardingStrategy
+ }
+ return nil
+}
+
+type MemcacheGetRequest struct {
+ Key [][]byte `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ ForCas *bool `protobuf:"varint,4,opt,name=for_cas" json:"for_cas,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetRequest) Reset() { *m = MemcacheGetRequest{} }
+func (m *MemcacheGetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetRequest) ProtoMessage() {}
+
+func (m *MemcacheGetRequest) GetKey() [][]byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGetRequest) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+func (m *MemcacheGetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGetResponse struct {
+ Item []*MemcacheGetResponse_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse) Reset() { *m = MemcacheGetResponse{} }
+func (m *MemcacheGetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse) ProtoMessage() {}
+
+func (m *MemcacheGetResponse) GetItem() []*MemcacheGetResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGetResponse_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,5,opt,name=cas_id" json:"cas_id,omitempty"`
+ ExpiresInSeconds *int32 `protobuf:"varint,6,opt,name=expires_in_seconds" json:"expires_in_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGetResponse_Item) Reset() { *m = MemcacheGetResponse_Item{} }
+func (m *MemcacheGetResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGetResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGetResponse_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGetResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheGetResponse_Item) GetExpiresInSeconds() int32 {
+ if m != nil && m.ExpiresInSeconds != nil {
+ return *m.ExpiresInSeconds
+ }
+ return 0
+}
+
+type MemcacheSetRequest struct {
+ Item []*MemcacheSetRequest_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,7,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,10,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest) Reset() { *m = MemcacheSetRequest{} }
+func (m *MemcacheSetRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest) ProtoMessage() {}
+
+func (m *MemcacheSetRequest) GetItem() []*MemcacheSetRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheSetRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheSetRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,4,opt,name=flags" json:"flags,omitempty"`
+ SetPolicy *MemcacheSetRequest_SetPolicy `protobuf:"varint,5,opt,name=set_policy,enum=appengine.MemcacheSetRequest_SetPolicy,def=1" json:"set_policy,omitempty"`
+ ExpirationTime *uint32 `protobuf:"fixed32,6,opt,name=expiration_time,def=0" json:"expiration_time,omitempty"`
+ CasId *uint64 `protobuf:"fixed64,8,opt,name=cas_id" json:"cas_id,omitempty"`
+ ForCas *bool `protobuf:"varint,9,opt,name=for_cas" json:"for_cas,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetRequest_Item) Reset() { *m = MemcacheSetRequest_Item{} }
+func (m *MemcacheSetRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheSetRequest_Item_SetPolicy MemcacheSetRequest_SetPolicy = MemcacheSetRequest_SET
+const Default_MemcacheSetRequest_Item_ExpirationTime uint32 = 0
+
+func (m *MemcacheSetRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheSetRequest_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetSetPolicy() MemcacheSetRequest_SetPolicy {
+ if m != nil && m.SetPolicy != nil {
+ return *m.SetPolicy
+ }
+ return Default_MemcacheSetRequest_Item_SetPolicy
+}
+
+func (m *MemcacheSetRequest_Item) GetExpirationTime() uint32 {
+ if m != nil && m.ExpirationTime != nil {
+ return *m.ExpirationTime
+ }
+ return Default_MemcacheSetRequest_Item_ExpirationTime
+}
+
+func (m *MemcacheSetRequest_Item) GetCasId() uint64 {
+ if m != nil && m.CasId != nil {
+ return *m.CasId
+ }
+ return 0
+}
+
+func (m *MemcacheSetRequest_Item) GetForCas() bool {
+ if m != nil && m.ForCas != nil {
+ return *m.ForCas
+ }
+ return false
+}
+
+type MemcacheSetResponse struct {
+ SetStatus []MemcacheSetResponse_SetStatusCode `protobuf:"varint,1,rep,name=set_status,enum=appengine.MemcacheSetResponse_SetStatusCode" json:"set_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheSetResponse) Reset() { *m = MemcacheSetResponse{} }
+func (m *MemcacheSetResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheSetResponse) ProtoMessage() {}
+
+func (m *MemcacheSetResponse) GetSetStatus() []MemcacheSetResponse_SetStatusCode {
+ if m != nil {
+ return m.SetStatus
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest struct {
+ Item []*MemcacheDeleteRequest_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,5,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest) Reset() { *m = MemcacheDeleteRequest{} }
+func (m *MemcacheDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest) ProtoMessage() {}
+
+func (m *MemcacheDeleteRequest) GetItem() []*MemcacheDeleteRequest_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheDeleteRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheDeleteRequest_Item struct {
+ Key []byte `protobuf:"bytes,2,req,name=key" json:"key,omitempty"`
+ DeleteTime *uint32 `protobuf:"fixed32,3,opt,name=delete_time,def=0" json:"delete_time,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteRequest_Item) Reset() { *m = MemcacheDeleteRequest_Item{} }
+func (m *MemcacheDeleteRequest_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteRequest_Item) ProtoMessage() {}
+
+const Default_MemcacheDeleteRequest_Item_DeleteTime uint32 = 0
+
+func (m *MemcacheDeleteRequest_Item) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheDeleteRequest_Item) GetDeleteTime() uint32 {
+ if m != nil && m.DeleteTime != nil {
+ return *m.DeleteTime
+ }
+ return Default_MemcacheDeleteRequest_Item_DeleteTime
+}
+
+type MemcacheDeleteResponse struct {
+ DeleteStatus []MemcacheDeleteResponse_DeleteStatusCode `protobuf:"varint,1,rep,name=delete_status,enum=appengine.MemcacheDeleteResponse_DeleteStatusCode" json:"delete_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheDeleteResponse) Reset() { *m = MemcacheDeleteResponse{} }
+func (m *MemcacheDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheDeleteResponse) ProtoMessage() {}
+
+func (m *MemcacheDeleteResponse) GetDeleteStatus() []MemcacheDeleteResponse_DeleteStatusCode {
+ if m != nil {
+ return m.DeleteStatus
+ }
+ return nil
+}
+
+type MemcacheIncrementRequest struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ NameSpace *string `protobuf:"bytes,4,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Delta *uint64 `protobuf:"varint,2,opt,name=delta,def=1" json:"delta,omitempty"`
+ Direction *MemcacheIncrementRequest_Direction `protobuf:"varint,3,opt,name=direction,enum=appengine.MemcacheIncrementRequest_Direction,def=1" json:"direction,omitempty"`
+ InitialValue *uint64 `protobuf:"varint,5,opt,name=initial_value" json:"initial_value,omitempty"`
+ InitialFlags *uint32 `protobuf:"fixed32,6,opt,name=initial_flags" json:"initial_flags,omitempty"`
+ Override *AppOverride `protobuf:"bytes,7,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementRequest) Reset() { *m = MemcacheIncrementRequest{} }
+func (m *MemcacheIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementRequest) ProtoMessage() {}
+
+const Default_MemcacheIncrementRequest_Delta uint64 = 1
+const Default_MemcacheIncrementRequest_Direction MemcacheIncrementRequest_Direction = MemcacheIncrementRequest_INCREMENT
+
+func (m *MemcacheIncrementRequest) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *MemcacheIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheIncrementRequest) GetDelta() uint64 {
+ if m != nil && m.Delta != nil {
+ return *m.Delta
+ }
+ return Default_MemcacheIncrementRequest_Delta
+}
+
+func (m *MemcacheIncrementRequest) GetDirection() MemcacheIncrementRequest_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_MemcacheIncrementRequest_Direction
+}
+
+func (m *MemcacheIncrementRequest) GetInitialValue() uint64 {
+ if m != nil && m.InitialValue != nil {
+ return *m.InitialValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetInitialFlags() uint32 {
+ if m != nil && m.InitialFlags != nil {
+ return *m.InitialFlags
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheIncrementResponse struct {
+ NewValue *uint64 `protobuf:"varint,1,opt,name=new_value" json:"new_value,omitempty"`
+ IncrementStatus *MemcacheIncrementResponse_IncrementStatusCode `protobuf:"varint,2,opt,name=increment_status,enum=appengine.MemcacheIncrementResponse_IncrementStatusCode" json:"increment_status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheIncrementResponse) Reset() { *m = MemcacheIncrementResponse{} }
+func (m *MemcacheIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheIncrementResponse) GetNewValue() uint64 {
+ if m != nil && m.NewValue != nil {
+ return *m.NewValue
+ }
+ return 0
+}
+
+func (m *MemcacheIncrementResponse) GetIncrementStatus() MemcacheIncrementResponse_IncrementStatusCode {
+ if m != nil && m.IncrementStatus != nil {
+ return *m.IncrementStatus
+ }
+ return MemcacheIncrementResponse_OK
+}
+
+type MemcacheBatchIncrementRequest struct {
+ NameSpace *string `protobuf:"bytes,1,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Item []*MemcacheIncrementRequest `protobuf:"bytes,2,rep,name=item" json:"item,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementRequest) Reset() { *m = MemcacheBatchIncrementRequest{} }
+func (m *MemcacheBatchIncrementRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementRequest) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheBatchIncrementRequest) GetItem() []*MemcacheIncrementRequest {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+func (m *MemcacheBatchIncrementRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheBatchIncrementResponse struct {
+ Item []*MemcacheIncrementResponse `protobuf:"bytes,1,rep,name=item" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheBatchIncrementResponse) Reset() { *m = MemcacheBatchIncrementResponse{} }
+func (m *MemcacheBatchIncrementResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheBatchIncrementResponse) ProtoMessage() {}
+
+func (m *MemcacheBatchIncrementResponse) GetItem() []*MemcacheIncrementResponse {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheFlushRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushRequest) Reset() { *m = MemcacheFlushRequest{} }
+func (m *MemcacheFlushRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushRequest) ProtoMessage() {}
+
+func (m *MemcacheFlushRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheFlushResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheFlushResponse) Reset() { *m = MemcacheFlushResponse{} }
+func (m *MemcacheFlushResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheFlushResponse) ProtoMessage() {}
+
+type MemcacheStatsRequest struct {
+ Override *AppOverride `protobuf:"bytes,1,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsRequest) Reset() { *m = MemcacheStatsRequest{} }
+func (m *MemcacheStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsRequest) ProtoMessage() {}
+
+func (m *MemcacheStatsRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MergedNamespaceStats struct {
+ Hits *uint64 `protobuf:"varint,1,req,name=hits" json:"hits,omitempty"`
+ Misses *uint64 `protobuf:"varint,2,req,name=misses" json:"misses,omitempty"`
+ ByteHits *uint64 `protobuf:"varint,3,req,name=byte_hits" json:"byte_hits,omitempty"`
+ Items *uint64 `protobuf:"varint,4,req,name=items" json:"items,omitempty"`
+ Bytes *uint64 `protobuf:"varint,5,req,name=bytes" json:"bytes,omitempty"`
+ OldestItemAge *uint32 `protobuf:"fixed32,6,req,name=oldest_item_age" json:"oldest_item_age,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MergedNamespaceStats) Reset() { *m = MergedNamespaceStats{} }
+func (m *MergedNamespaceStats) String() string { return proto.CompactTextString(m) }
+func (*MergedNamespaceStats) ProtoMessage() {}
+
+func (m *MergedNamespaceStats) GetHits() uint64 {
+ if m != nil && m.Hits != nil {
+ return *m.Hits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetMisses() uint64 {
+ if m != nil && m.Misses != nil {
+ return *m.Misses
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetByteHits() uint64 {
+ if m != nil && m.ByteHits != nil {
+ return *m.ByteHits
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetItems() uint64 {
+ if m != nil && m.Items != nil {
+ return *m.Items
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetBytes() uint64 {
+ if m != nil && m.Bytes != nil {
+ return *m.Bytes
+ }
+ return 0
+}
+
+func (m *MergedNamespaceStats) GetOldestItemAge() uint32 {
+ if m != nil && m.OldestItemAge != nil {
+ return *m.OldestItemAge
+ }
+ return 0
+}
+
+type MemcacheStatsResponse struct {
+ Stats *MergedNamespaceStats `protobuf:"bytes,1,opt,name=stats" json:"stats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheStatsResponse) Reset() { *m = MemcacheStatsResponse{} }
+func (m *MemcacheStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheStatsResponse) ProtoMessage() {}
+
+func (m *MemcacheStatsResponse) GetStats() *MergedNamespaceStats {
+ if m != nil {
+ return m.Stats
+ }
+ return nil
+}
+
+type MemcacheGrabTailRequest struct {
+ ItemCount *int32 `protobuf:"varint,1,req,name=item_count" json:"item_count,omitempty"`
+ NameSpace *string `protobuf:"bytes,2,opt,name=name_space,def=" json:"name_space,omitempty"`
+ Override *AppOverride `protobuf:"bytes,3,opt,name=override" json:"override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailRequest) Reset() { *m = MemcacheGrabTailRequest{} }
+func (m *MemcacheGrabTailRequest) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailRequest) ProtoMessage() {}
+
+func (m *MemcacheGrabTailRequest) GetItemCount() int32 {
+ if m != nil && m.ItemCount != nil {
+ return *m.ItemCount
+ }
+ return 0
+}
+
+func (m *MemcacheGrabTailRequest) GetNameSpace() string {
+ if m != nil && m.NameSpace != nil {
+ return *m.NameSpace
+ }
+ return ""
+}
+
+func (m *MemcacheGrabTailRequest) GetOverride() *AppOverride {
+ if m != nil {
+ return m.Override
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse struct {
+ Item []*MemcacheGrabTailResponse_Item `protobuf:"group,1,rep" json:"item,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse) Reset() { *m = MemcacheGrabTailResponse{} }
+func (m *MemcacheGrabTailResponse) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse) GetItem() []*MemcacheGrabTailResponse_Item {
+ if m != nil {
+ return m.Item
+ }
+ return nil
+}
+
+type MemcacheGrabTailResponse_Item struct {
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ Flags *uint32 `protobuf:"fixed32,3,opt,name=flags" json:"flags,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MemcacheGrabTailResponse_Item) Reset() { *m = MemcacheGrabTailResponse_Item{} }
+func (m *MemcacheGrabTailResponse_Item) String() string { return proto.CompactTextString(m) }
+func (*MemcacheGrabTailResponse_Item) ProtoMessage() {}
+
+func (m *MemcacheGrabTailResponse_Item) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *MemcacheGrabTailResponse_Item) GetFlags() uint32 {
+ if m != nil && m.Flags != nil {
+ return *m.Flags
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("appengine.MemcacheServiceError_ErrorCode", MemcacheServiceError_ErrorCode_name, MemcacheServiceError_ErrorCode_value)
+ proto.RegisterEnum("appengine.MemcacheSetRequest_SetPolicy", MemcacheSetRequest_SetPolicy_name, MemcacheSetRequest_SetPolicy_value)
+ proto.RegisterEnum("appengine.MemcacheSetResponse_SetStatusCode", MemcacheSetResponse_SetStatusCode_name, MemcacheSetResponse_SetStatusCode_value)
+ proto.RegisterEnum("appengine.MemcacheDeleteResponse_DeleteStatusCode", MemcacheDeleteResponse_DeleteStatusCode_name, MemcacheDeleteResponse_DeleteStatusCode_value)
+ proto.RegisterEnum("appengine.MemcacheIncrementRequest_Direction", MemcacheIncrementRequest_Direction_name, MemcacheIncrementRequest_Direction_value)
+ proto.RegisterEnum("appengine.MemcacheIncrementResponse_IncrementStatusCode", MemcacheIncrementResponse_IncrementStatusCode_name, MemcacheIncrementResponse_IncrementStatusCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.proto
new file mode 100644
index 000000000000..5f0edcdc7be3
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/memcache/memcache_service.proto
@@ -0,0 +1,165 @@
+syntax = "proto2";
+option go_package = "memcache";
+
+package appengine;
+
+message MemcacheServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNSPECIFIED_ERROR = 1;
+ NAMESPACE_NOT_SET = 2;
+ PERMISSION_DENIED = 3;
+ INVALID_VALUE = 6;
+ }
+}
+
+message AppOverride {
+ required string app_id = 1;
+
+ optional int32 num_memcacheg_backends = 2 [deprecated=true];
+ optional bool ignore_shardlock = 3 [deprecated=true];
+ optional string memcache_pool_hint = 4 [deprecated=true];
+ optional bytes memcache_sharding_strategy = 5 [deprecated=true];
+}
+
+message MemcacheGetRequest {
+ repeated bytes key = 1;
+ optional string name_space = 2 [default = ""];
+ optional bool for_cas = 4;
+ optional AppOverride override = 5;
+}
+
+message MemcacheGetResponse {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+ optional fixed32 flags = 4;
+ optional fixed64 cas_id = 5;
+ optional int32 expires_in_seconds = 6;
+ }
+}
+
+message MemcacheSetRequest {
+ enum SetPolicy {
+ SET = 1;
+ ADD = 2;
+ REPLACE = 3;
+ CAS = 4;
+ }
+ repeated group Item = 1 {
+ required bytes key = 2;
+ required bytes value = 3;
+
+ optional fixed32 flags = 4;
+ optional SetPolicy set_policy = 5 [default = SET];
+ optional fixed32 expiration_time = 6 [default = 0];
+
+ optional fixed64 cas_id = 8;
+ optional bool for_cas = 9;
+ }
+ optional string name_space = 7 [default = ""];
+ optional AppOverride override = 10;
+}
+
+message MemcacheSetResponse {
+ enum SetStatusCode {
+ STORED = 1;
+ NOT_STORED = 2;
+ ERROR = 3;
+ EXISTS = 4;
+ }
+ repeated SetStatusCode set_status = 1;
+}
+
+message MemcacheDeleteRequest {
+ repeated group Item = 1 {
+ required bytes key = 2;
+ optional fixed32 delete_time = 3 [default = 0];
+ }
+ optional string name_space = 4 [default = ""];
+ optional AppOverride override = 5;
+}
+
+message MemcacheDeleteResponse {
+ enum DeleteStatusCode {
+ DELETED = 1;
+ NOT_FOUND = 2;
+ }
+ repeated DeleteStatusCode delete_status = 1;
+}
+
+message MemcacheIncrementRequest {
+ enum Direction {
+ INCREMENT = 1;
+ DECREMENT = 2;
+ }
+ required bytes key = 1;
+ optional string name_space = 4 [default = ""];
+
+ optional uint64 delta = 2 [default = 1];
+ optional Direction direction = 3 [default = INCREMENT];
+
+ optional uint64 initial_value = 5;
+ optional fixed32 initial_flags = 6;
+ optional AppOverride override = 7;
+}
+
+message MemcacheIncrementResponse {
+ enum IncrementStatusCode {
+ OK = 1;
+ NOT_CHANGED = 2;
+ ERROR = 3;
+ }
+
+ optional uint64 new_value = 1;
+ optional IncrementStatusCode increment_status = 2;
+}
+
+message MemcacheBatchIncrementRequest {
+ optional string name_space = 1 [default = ""];
+ repeated MemcacheIncrementRequest item = 2;
+ optional AppOverride override = 3;
+}
+
+message MemcacheBatchIncrementResponse {
+ repeated MemcacheIncrementResponse item = 1;
+}
+
+message MemcacheFlushRequest {
+ optional AppOverride override = 1;
+}
+
+message MemcacheFlushResponse {
+}
+
+message MemcacheStatsRequest {
+ optional AppOverride override = 1;
+}
+
+message MergedNamespaceStats {
+ required uint64 hits = 1;
+ required uint64 misses = 2;
+ required uint64 byte_hits = 3;
+
+ required uint64 items = 4;
+ required uint64 bytes = 5;
+
+ required fixed32 oldest_item_age = 6;
+}
+
+message MemcacheStatsResponse {
+ optional MergedNamespaceStats stats = 1;
+}
+
+message MemcacheGrabTailRequest {
+ required int32 item_count = 1;
+ optional string name_space = 2 [default = ""];
+ optional AppOverride override = 3;
+}
+
+message MemcacheGrabTailResponse {
+ repeated group Item = 1 {
+ required bytes value = 2;
+ optional fixed32 flags = 3;
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/metadata.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/metadata.go
new file mode 100644
index 000000000000..b68fb753678b
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/metadata.go
@@ -0,0 +1,61 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file has code for accessing metadata.
+//
+// References:
+// https://cloud.google.com/compute/docs/metadata
+
+import (
+ "fmt"
+ "io/ioutil"
+ "log"
+ "net/http"
+ "net/url"
+)
+
+const (
+ metadataHost = "metadata"
+ metadataPath = "/computeMetadata/v1/"
+)
+
+var (
+ metadataRequestHeaders = http.Header{
+ "X-Google-Metadata-Request": []string{"True"},
+ }
+)
+
+// TODO(dsymonds): Do we need to support default values, like Python?
+func mustGetMetadata(key string) []byte {
+ b, err := getMetadata(key)
+ if err != nil {
+ log.Fatalf("Metadata fetch failed: %v", err)
+ }
+ return b
+}
+
+func getMetadata(key string) ([]byte, error) {
+ // TODO(dsymonds): May need to use url.Parse to support keys with query args.
+ req := &http.Request{
+ Method: "GET",
+ URL: &url.URL{
+ Scheme: "http",
+ Host: metadataHost,
+ Path: metadataPath + key,
+ },
+ Header: metadataRequestHeaders,
+ Host: metadataHost,
+ }
+ resp, err := http.DefaultClient.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode != 200 {
+ return nil, fmt.Errorf("metadata server returned HTTP %d", resp.StatusCode)
+ }
+ return ioutil.ReadAll(resp.Body)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.pb.go
new file mode 100644
index 000000000000..043b3faf00c7
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.pb.go
@@ -0,0 +1,374 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/modules/modules_service.proto
+// DO NOT EDIT!
+
+/*
+Package modules is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/modules/modules_service.proto
+
+It has these top-level messages:
+ ModulesServiceError
+ GetModulesRequest
+ GetModulesResponse
+ GetVersionsRequest
+ GetVersionsResponse
+ GetDefaultVersionRequest
+ GetDefaultVersionResponse
+ GetNumInstancesRequest
+ GetNumInstancesResponse
+ SetNumInstancesRequest
+ SetNumInstancesResponse
+ StartModuleRequest
+ StartModuleResponse
+ StopModuleRequest
+ StopModuleResponse
+ GetHostnameRequest
+ GetHostnameResponse
+*/
+package modules
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type ModulesServiceError_ErrorCode int32
+
+const (
+ ModulesServiceError_OK ModulesServiceError_ErrorCode = 0
+ ModulesServiceError_INVALID_MODULE ModulesServiceError_ErrorCode = 1
+ ModulesServiceError_INVALID_VERSION ModulesServiceError_ErrorCode = 2
+ ModulesServiceError_INVALID_INSTANCES ModulesServiceError_ErrorCode = 3
+ ModulesServiceError_TRANSIENT_ERROR ModulesServiceError_ErrorCode = 4
+ ModulesServiceError_UNEXPECTED_STATE ModulesServiceError_ErrorCode = 5
+)
+
+var ModulesServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_MODULE",
+ 2: "INVALID_VERSION",
+ 3: "INVALID_INSTANCES",
+ 4: "TRANSIENT_ERROR",
+ 5: "UNEXPECTED_STATE",
+}
+var ModulesServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_MODULE": 1,
+ "INVALID_VERSION": 2,
+ "INVALID_INSTANCES": 3,
+ "TRANSIENT_ERROR": 4,
+ "UNEXPECTED_STATE": 5,
+}
+
+func (x ModulesServiceError_ErrorCode) Enum() *ModulesServiceError_ErrorCode {
+ p := new(ModulesServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x ModulesServiceError_ErrorCode) String() string {
+ return proto.EnumName(ModulesServiceError_ErrorCode_name, int32(x))
+}
+func (x *ModulesServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ModulesServiceError_ErrorCode_value, data, "ModulesServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = ModulesServiceError_ErrorCode(value)
+ return nil
+}
+
+type ModulesServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ModulesServiceError) Reset() { *m = ModulesServiceError{} }
+func (m *ModulesServiceError) String() string { return proto.CompactTextString(m) }
+func (*ModulesServiceError) ProtoMessage() {}
+
+type GetModulesRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesRequest) Reset() { *m = GetModulesRequest{} }
+func (m *GetModulesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetModulesRequest) ProtoMessage() {}
+
+type GetModulesResponse struct {
+ Module []string `protobuf:"bytes,1,rep,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetModulesResponse) Reset() { *m = GetModulesResponse{} }
+func (m *GetModulesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetModulesResponse) ProtoMessage() {}
+
+func (m *GetModulesResponse) GetModule() []string {
+ if m != nil {
+ return m.Module
+ }
+ return nil
+}
+
+type GetVersionsRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsRequest) Reset() { *m = GetVersionsRequest{} }
+func (m *GetVersionsRequest) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsRequest) ProtoMessage() {}
+
+func (m *GetVersionsRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetVersionsResponse struct {
+ Version []string `protobuf:"bytes,1,rep,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetVersionsResponse) Reset() { *m = GetVersionsResponse{} }
+func (m *GetVersionsResponse) String() string { return proto.CompactTextString(m) }
+func (*GetVersionsResponse) ProtoMessage() {}
+
+func (m *GetVersionsResponse) GetVersion() []string {
+ if m != nil {
+ return m.Version
+ }
+ return nil
+}
+
+type GetDefaultVersionRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionRequest) Reset() { *m = GetDefaultVersionRequest{} }
+func (m *GetDefaultVersionRequest) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionRequest) ProtoMessage() {}
+
+func (m *GetDefaultVersionRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+type GetDefaultVersionResponse struct {
+ Version *string `protobuf:"bytes,1,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetDefaultVersionResponse) Reset() { *m = GetDefaultVersionResponse{} }
+func (m *GetDefaultVersionResponse) String() string { return proto.CompactTextString(m) }
+func (*GetDefaultVersionResponse) ProtoMessage() {}
+
+func (m *GetDefaultVersionResponse) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesRequest) Reset() { *m = GetNumInstancesRequest{} }
+func (m *GetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesRequest) ProtoMessage() {}
+
+func (m *GetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type GetNumInstancesResponse struct {
+ Instances *int64 `protobuf:"varint,1,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetNumInstancesResponse) Reset() { *m = GetNumInstancesResponse{} }
+func (m *GetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*GetNumInstancesResponse) ProtoMessage() {}
+
+func (m *GetNumInstancesResponse) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instances *int64 `protobuf:"varint,3,req,name=instances" json:"instances,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesRequest) Reset() { *m = SetNumInstancesRequest{} }
+func (m *SetNumInstancesRequest) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesRequest) ProtoMessage() {}
+
+func (m *SetNumInstancesRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *SetNumInstancesRequest) GetInstances() int64 {
+ if m != nil && m.Instances != nil {
+ return *m.Instances
+ }
+ return 0
+}
+
+type SetNumInstancesResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SetNumInstancesResponse) Reset() { *m = SetNumInstancesResponse{} }
+func (m *SetNumInstancesResponse) String() string { return proto.CompactTextString(m) }
+func (*SetNumInstancesResponse) ProtoMessage() {}
+
+type StartModuleRequest struct {
+ Module *string `protobuf:"bytes,1,req,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,req,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleRequest) Reset() { *m = StartModuleRequest{} }
+func (m *StartModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StartModuleRequest) ProtoMessage() {}
+
+func (m *StartModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StartModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StartModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StartModuleResponse) Reset() { *m = StartModuleResponse{} }
+func (m *StartModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StartModuleResponse) ProtoMessage() {}
+
+type StopModuleRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleRequest) Reset() { *m = StopModuleRequest{} }
+func (m *StopModuleRequest) String() string { return proto.CompactTextString(m) }
+func (*StopModuleRequest) ProtoMessage() {}
+
+func (m *StopModuleRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *StopModuleRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+type StopModuleResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *StopModuleResponse) Reset() { *m = StopModuleResponse{} }
+func (m *StopModuleResponse) String() string { return proto.CompactTextString(m) }
+func (*StopModuleResponse) ProtoMessage() {}
+
+type GetHostnameRequest struct {
+ Module *string `protobuf:"bytes,1,opt,name=module" json:"module,omitempty"`
+ Version *string `protobuf:"bytes,2,opt,name=version" json:"version,omitempty"`
+ Instance *string `protobuf:"bytes,3,opt,name=instance" json:"instance,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameRequest) Reset() { *m = GetHostnameRequest{} }
+func (m *GetHostnameRequest) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameRequest) ProtoMessage() {}
+
+func (m *GetHostnameRequest) GetModule() string {
+ if m != nil && m.Module != nil {
+ return *m.Module
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetVersion() string {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return ""
+}
+
+func (m *GetHostnameRequest) GetInstance() string {
+ if m != nil && m.Instance != nil {
+ return *m.Instance
+ }
+ return ""
+}
+
+type GetHostnameResponse struct {
+ Hostname *string `protobuf:"bytes,1,req,name=hostname" json:"hostname,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetHostnameResponse) Reset() { *m = GetHostnameResponse{} }
+func (m *GetHostnameResponse) String() string { return proto.CompactTextString(m) }
+func (*GetHostnameResponse) ProtoMessage() {}
+
+func (m *GetHostnameResponse) GetHostname() string {
+ if m != nil && m.Hostname != nil {
+ return *m.Hostname
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("appengine.ModulesServiceError_ErrorCode", ModulesServiceError_ErrorCode_name, ModulesServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.proto
new file mode 100644
index 000000000000..d29f0065a2f8
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/modules/modules_service.proto
@@ -0,0 +1,80 @@
+syntax = "proto2";
+option go_package = "modules";
+
+package appengine;
+
+message ModulesServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_MODULE = 1;
+ INVALID_VERSION = 2;
+ INVALID_INSTANCES = 3;
+ TRANSIENT_ERROR = 4;
+ UNEXPECTED_STATE = 5;
+ }
+}
+
+message GetModulesRequest {
+}
+
+message GetModulesResponse {
+ repeated string module = 1;
+}
+
+message GetVersionsRequest {
+ optional string module = 1;
+}
+
+message GetVersionsResponse {
+ repeated string version = 1;
+}
+
+message GetDefaultVersionRequest {
+ optional string module = 1;
+}
+
+message GetDefaultVersionResponse {
+ required string version = 1;
+}
+
+message GetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message GetNumInstancesResponse {
+ required int64 instances = 1;
+}
+
+message SetNumInstancesRequest {
+ optional string module = 1;
+ optional string version = 2;
+ required int64 instances = 3;
+}
+
+message SetNumInstancesResponse {}
+
+message StartModuleRequest {
+ required string module = 1;
+ required string version = 2;
+}
+
+message StartModuleResponse {}
+
+message StopModuleRequest {
+ optional string module = 1;
+ optional string version = 2;
+}
+
+message StopModuleResponse {}
+
+message GetHostnameRequest {
+ optional string module = 1;
+ optional string version = 2;
+ optional string instance = 3;
+}
+
+message GetHostnameResponse {
+ required string hostname = 1;
+}
+
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/net.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/net.go
new file mode 100644
index 000000000000..12ddfbf5905a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/net.go
@@ -0,0 +1,63 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+// This file implements a network dialer that limits the number of concurrent connections.
+// It is only used for API calls.
+
+import (
+ "log"
+ "net"
+ "runtime"
+ "sync"
+ "time"
+)
+
+var limitSem = make(chan int, 100) // TODO(dsymonds): Use environment variable.
+
+func limitRelease() {
+ // non-blocking
+ select {
+ case <-limitSem:
+ default:
+ // This should not normally happen.
+ log.Print("appengine: unbalanced limitSem release!")
+ }
+}
+
+func limitDial(network, addr string) (net.Conn, error) {
+ limitSem <- 1
+
+ // Dial with a timeout in case the API host is MIA.
+ // The connection should normally be very fast.
+ conn, err := net.DialTimeout(network, addr, 500*time.Millisecond)
+ if err != nil {
+ limitRelease()
+ return nil, err
+ }
+ lc := &limitConn{Conn: conn}
+ runtime.SetFinalizer(lc, (*limitConn).Close) // shouldn't usually be required
+ return lc, nil
+}
+
+type limitConn struct {
+ mu sync.Mutex // only for closing the net.Conn
+ net.Conn
+}
+
+func (lc *limitConn) Close() error {
+ lc.mu.Lock()
+ defer lc.mu.Unlock()
+
+ if lc.Conn == nil {
+ // Silently ignore double close.
+ return nil
+ }
+ limitRelease()
+ err := lc.Conn.Close()
+ lc.Conn = nil
+ runtime.SetFinalizer(lc, nil)
+ return err
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/net_test.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/net_test.go
new file mode 100644
index 000000000000..4bdbcc5caf00
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/net_test.go
@@ -0,0 +1,55 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package internal
+
+import (
+ "sync"
+ "testing"
+ "time"
+
+ basepb "google.golang.org/appengine/internal/base"
+)
+
+func TestDialLimit(t *testing.T) {
+ // Fill up semaphore with false acquisitions to permit only two TCP connections at a time.
+ // We don't replace limitSem because that results in a data race when net/http lazily closes connections.
+ nFake := cap(limitSem) - 2
+ for i := 0; i < nFake; i++ {
+ limitSem <- 1
+ }
+ defer func() {
+ for i := 0; i < nFake; i++ {
+ <-limitSem
+ }
+ }()
+
+ f, c, cleanup := setup() // setup is in api_test.go
+ defer cleanup()
+ f.hang = make(chan int)
+
+ // If we make two RunSlowly RPCs (which will wait for f.hang to be strobed),
+ // then the simple Non200 RPC should hang.
+ var wg sync.WaitGroup
+ wg.Add(2)
+ for i := 0; i < 2; i++ {
+ go func() {
+ defer wg.Done()
+ c.Call("errors", "RunSlowly", &basepb.VoidProto{}, &basepb.VoidProto{}, nil)
+ }()
+ }
+ time.Sleep(50 * time.Millisecond) // let those two RPCs start
+
+ err := c.Call("errors", "Non200", &basepb.VoidProto{}, &basepb.VoidProto{}, &CallOptions{
+ Timeout: 50 * time.Millisecond,
+ })
+ if err != errTimeout {
+ t.Errorf("Non200 RPC returned with err %v, want errTimeout", err)
+ }
+
+ // Drain the two RunSlowly calls.
+ f.hang <- 1
+ f.hang <- 1
+ wg.Wait()
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/regen.sh b/Godeps/_workspace/src/google.golang.org/appengine/internal/regen.sh
new file mode 100644
index 000000000000..1dc33054c09f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/regen.sh
@@ -0,0 +1,36 @@
+#!/bin/bash -e
+#
+# This script rebuilds the generated code for the protocol buffers.
+# To run this you will need protoc and goprotobuf installed;
+# see https://github.com/golang/protobuf for instructions.
+
+PKG=google.golang.org/appengine
+
+function die() {
+ echo 1>&2 $*
+ exit 1
+}
+
+# Sanity check that the right tools are accessible.
+for tool in go protoc protoc-gen-go; do
+ q=$(which $tool) || die "didn't find $tool"
+ echo 1>&2 "$tool: $q"
+done
+
+echo -n 1>&2 "finding package dir... "
+pkgdir=$(go list -f '{{.Dir}}' $PKG)
+echo 1>&2 $pkgdir
+base=$(echo $pkgdir | sed "s,/$PKG\$,,")
+echo 1>&2 "base: $base"
+cd $base
+for f in $(find $PKG/internal -name '*.proto'); do
+ echo 1>&2 "* $f"
+ protoc --go_out=. $f
+done
+
+# Fix up import lines.
+# This should be fixed upstream.
+# https://code.google.com/p/goprotobuf/issues/detail?id=32
+for f in $(find $PKG/internal -name '*.pb.go'); do
+ sed -i '/^import.*\.pb"$/s,/[^/]*\.pb"$,",' $f
+done
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
new file mode 100644
index 000000000000..2bc44b9758b9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.pb.go
@@ -0,0 +1,230 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/remote_api/remote_api.proto
+// DO NOT EDIT!
+
+/*
+Package remote_api is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/remote_api/remote_api.proto
+
+It has these top-level messages:
+ Request
+ ApplicationError
+ RpcError
+ Response
+*/
+package remote_api
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type RpcError_ErrorCode int32
+
+const (
+ RpcError_UNKNOWN RpcError_ErrorCode = 0
+ RpcError_CALL_NOT_FOUND RpcError_ErrorCode = 1
+ RpcError_PARSE_ERROR RpcError_ErrorCode = 2
+ RpcError_SECURITY_VIOLATION RpcError_ErrorCode = 3
+ RpcError_OVER_QUOTA RpcError_ErrorCode = 4
+ RpcError_REQUEST_TOO_LARGE RpcError_ErrorCode = 5
+ RpcError_CAPABILITY_DISABLED RpcError_ErrorCode = 6
+ RpcError_FEATURE_DISABLED RpcError_ErrorCode = 7
+ RpcError_BAD_REQUEST RpcError_ErrorCode = 8
+ RpcError_RESPONSE_TOO_LARGE RpcError_ErrorCode = 9
+ RpcError_CANCELLED RpcError_ErrorCode = 10
+ RpcError_REPLAY_ERROR RpcError_ErrorCode = 11
+ RpcError_DEADLINE_EXCEEDED RpcError_ErrorCode = 12
+)
+
+var RpcError_ErrorCode_name = map[int32]string{
+ 0: "UNKNOWN",
+ 1: "CALL_NOT_FOUND",
+ 2: "PARSE_ERROR",
+ 3: "SECURITY_VIOLATION",
+ 4: "OVER_QUOTA",
+ 5: "REQUEST_TOO_LARGE",
+ 6: "CAPABILITY_DISABLED",
+ 7: "FEATURE_DISABLED",
+ 8: "BAD_REQUEST",
+ 9: "RESPONSE_TOO_LARGE",
+ 10: "CANCELLED",
+ 11: "REPLAY_ERROR",
+ 12: "DEADLINE_EXCEEDED",
+}
+var RpcError_ErrorCode_value = map[string]int32{
+ "UNKNOWN": 0,
+ "CALL_NOT_FOUND": 1,
+ "PARSE_ERROR": 2,
+ "SECURITY_VIOLATION": 3,
+ "OVER_QUOTA": 4,
+ "REQUEST_TOO_LARGE": 5,
+ "CAPABILITY_DISABLED": 6,
+ "FEATURE_DISABLED": 7,
+ "BAD_REQUEST": 8,
+ "RESPONSE_TOO_LARGE": 9,
+ "CANCELLED": 10,
+ "REPLAY_ERROR": 11,
+ "DEADLINE_EXCEEDED": 12,
+}
+
+func (x RpcError_ErrorCode) Enum() *RpcError_ErrorCode {
+ p := new(RpcError_ErrorCode)
+ *p = x
+ return p
+}
+func (x RpcError_ErrorCode) String() string {
+ return proto.EnumName(RpcError_ErrorCode_name, int32(x))
+}
+func (x *RpcError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(RpcError_ErrorCode_value, data, "RpcError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = RpcError_ErrorCode(value)
+ return nil
+}
+
+type Request struct {
+ ServiceName *string `protobuf:"bytes,2,req,name=service_name" json:"service_name,omitempty"`
+ Method *string `protobuf:"bytes,3,req,name=method" json:"method,omitempty"`
+ Request []byte `protobuf:"bytes,4,req,name=request" json:"request,omitempty"`
+ RequestId *string `protobuf:"bytes,5,opt,name=request_id" json:"request_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Request) Reset() { *m = Request{} }
+func (m *Request) String() string { return proto.CompactTextString(m) }
+func (*Request) ProtoMessage() {}
+
+func (m *Request) GetServiceName() string {
+ if m != nil && m.ServiceName != nil {
+ return *m.ServiceName
+ }
+ return ""
+}
+
+func (m *Request) GetMethod() string {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return ""
+}
+
+func (m *Request) GetRequest() []byte {
+ if m != nil {
+ return m.Request
+ }
+ return nil
+}
+
+func (m *Request) GetRequestId() string {
+ if m != nil && m.RequestId != nil {
+ return *m.RequestId
+ }
+ return ""
+}
+
+type ApplicationError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,req,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ApplicationError) Reset() { *m = ApplicationError{} }
+func (m *ApplicationError) String() string { return proto.CompactTextString(m) }
+func (*ApplicationError) ProtoMessage() {}
+
+func (m *ApplicationError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *ApplicationError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type RpcError struct {
+ Code *int32 `protobuf:"varint,1,req,name=code" json:"code,omitempty"`
+ Detail *string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RpcError) Reset() { *m = RpcError{} }
+func (m *RpcError) String() string { return proto.CompactTextString(m) }
+func (*RpcError) ProtoMessage() {}
+
+func (m *RpcError) GetCode() int32 {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return 0
+}
+
+func (m *RpcError) GetDetail() string {
+ if m != nil && m.Detail != nil {
+ return *m.Detail
+ }
+ return ""
+}
+
+type Response struct {
+ Response []byte `protobuf:"bytes,1,opt,name=response" json:"response,omitempty"`
+ Exception []byte `protobuf:"bytes,2,opt,name=exception" json:"exception,omitempty"`
+ ApplicationError *ApplicationError `protobuf:"bytes,3,opt,name=application_error" json:"application_error,omitempty"`
+ JavaException []byte `protobuf:"bytes,4,opt,name=java_exception" json:"java_exception,omitempty"`
+ RpcError *RpcError `protobuf:"bytes,5,opt,name=rpc_error" json:"rpc_error,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Response) Reset() { *m = Response{} }
+func (m *Response) String() string { return proto.CompactTextString(m) }
+func (*Response) ProtoMessage() {}
+
+func (m *Response) GetResponse() []byte {
+ if m != nil {
+ return m.Response
+ }
+ return nil
+}
+
+func (m *Response) GetException() []byte {
+ if m != nil {
+ return m.Exception
+ }
+ return nil
+}
+
+func (m *Response) GetApplicationError() *ApplicationError {
+ if m != nil {
+ return m.ApplicationError
+ }
+ return nil
+}
+
+func (m *Response) GetJavaException() []byte {
+ if m != nil {
+ return m.JavaException
+ }
+ return nil
+}
+
+func (m *Response) GetRpcError() *RpcError {
+ if m != nil {
+ return m.RpcError
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("remote_api.RpcError_ErrorCode", RpcError_ErrorCode_name, RpcError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.proto
new file mode 100644
index 000000000000..f21763a4e239
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/remote_api/remote_api.proto
@@ -0,0 +1,44 @@
+syntax = "proto2";
+option go_package = "remote_api";
+
+package remote_api;
+
+message Request {
+ required string service_name = 2;
+ required string method = 3;
+ required bytes request = 4;
+ optional string request_id = 5;
+}
+
+message ApplicationError {
+ required int32 code = 1;
+ required string detail = 2;
+}
+
+message RpcError {
+ enum ErrorCode {
+ UNKNOWN = 0;
+ CALL_NOT_FOUND = 1;
+ PARSE_ERROR = 2;
+ SECURITY_VIOLATION = 3;
+ OVER_QUOTA = 4;
+ REQUEST_TOO_LARGE = 5;
+ CAPABILITY_DISABLED = 6;
+ FEATURE_DISABLED = 7;
+ BAD_REQUEST = 8;
+ RESPONSE_TOO_LARGE = 9;
+ CANCELLED = 10;
+ REPLAY_ERROR = 11;
+ DEADLINE_EXCEEDED = 12;
+ }
+ required int32 code = 1;
+ optional string detail = 2;
+}
+
+message Response {
+ optional bytes response = 1;
+ optional bytes exception = 2;
+ optional ApplicationError application_error = 3;
+ optional bytes java_exception = 4;
+ optional RpcError rpc_error = 5;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.pb.go
new file mode 100644
index 000000000000..3da87ce376bb
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.pb.go
@@ -0,0 +1,2072 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/search/search.proto
+// DO NOT EDIT!
+
+/*
+Package search is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/search/search.proto
+
+It has these top-level messages:
+ Scope
+ Entry
+ AccessControlList
+ FieldValue
+ Field
+ FieldTypes
+ FacetValue
+ Facet
+ Document
+ SearchServiceError
+ RequestStatus
+ IndexSpec
+ IndexMetadata
+ IndexDocumentParams
+ IndexDocumentRequest
+ IndexDocumentResponse
+ DeleteDocumentParams
+ DeleteDocumentRequest
+ DeleteDocumentResponse
+ ListDocumentsParams
+ ListDocumentsRequest
+ ListDocumentsResponse
+ ListIndexesParams
+ ListIndexesRequest
+ ListIndexesResponse
+ DeleteSchemaParams
+ DeleteSchemaRequest
+ DeleteSchemaResponse
+ SortSpec
+ ScorerSpec
+ FieldSpec
+ FacetRange
+ FacetRequestParam
+ FacetAutoDetectParam
+ FacetRequest
+ FacetRefine
+ SearchParams
+ SearchRequest
+ FacetResultValue
+ FacetResult
+ SearchResult
+ SearchResponse
+*/
+package search
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type Scope_Type int32
+
+const (
+ Scope_USER_BY_CANONICAL_ID Scope_Type = 1
+ Scope_USER_BY_EMAIL Scope_Type = 2
+ Scope_GROUP_BY_CANONICAL_ID Scope_Type = 3
+ Scope_GROUP_BY_EMAIL Scope_Type = 4
+ Scope_GROUP_BY_DOMAIN Scope_Type = 5
+ Scope_ALL_USERS Scope_Type = 6
+ Scope_ALL_AUTHENTICATED_USERS Scope_Type = 7
+)
+
+var Scope_Type_name = map[int32]string{
+ 1: "USER_BY_CANONICAL_ID",
+ 2: "USER_BY_EMAIL",
+ 3: "GROUP_BY_CANONICAL_ID",
+ 4: "GROUP_BY_EMAIL",
+ 5: "GROUP_BY_DOMAIN",
+ 6: "ALL_USERS",
+ 7: "ALL_AUTHENTICATED_USERS",
+}
+var Scope_Type_value = map[string]int32{
+ "USER_BY_CANONICAL_ID": 1,
+ "USER_BY_EMAIL": 2,
+ "GROUP_BY_CANONICAL_ID": 3,
+ "GROUP_BY_EMAIL": 4,
+ "GROUP_BY_DOMAIN": 5,
+ "ALL_USERS": 6,
+ "ALL_AUTHENTICATED_USERS": 7,
+}
+
+func (x Scope_Type) Enum() *Scope_Type {
+ p := new(Scope_Type)
+ *p = x
+ return p
+}
+func (x Scope_Type) String() string {
+ return proto.EnumName(Scope_Type_name, int32(x))
+}
+func (x *Scope_Type) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Scope_Type_value, data, "Scope_Type")
+ if err != nil {
+ return err
+ }
+ *x = Scope_Type(value)
+ return nil
+}
+
+type Entry_Permission int32
+
+const (
+ Entry_READ Entry_Permission = 1
+ Entry_WRITE Entry_Permission = 2
+ Entry_FULL_CONTROL Entry_Permission = 3
+)
+
+var Entry_Permission_name = map[int32]string{
+ 1: "READ",
+ 2: "WRITE",
+ 3: "FULL_CONTROL",
+}
+var Entry_Permission_value = map[string]int32{
+ "READ": 1,
+ "WRITE": 2,
+ "FULL_CONTROL": 3,
+}
+
+func (x Entry_Permission) Enum() *Entry_Permission {
+ p := new(Entry_Permission)
+ *p = x
+ return p
+}
+func (x Entry_Permission) String() string {
+ return proto.EnumName(Entry_Permission_name, int32(x))
+}
+func (x *Entry_Permission) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Entry_Permission_value, data, "Entry_Permission")
+ if err != nil {
+ return err
+ }
+ *x = Entry_Permission(value)
+ return nil
+}
+
+type FieldValue_ContentType int32
+
+const (
+ FieldValue_TEXT FieldValue_ContentType = 0
+ FieldValue_HTML FieldValue_ContentType = 1
+ FieldValue_ATOM FieldValue_ContentType = 2
+ FieldValue_DATE FieldValue_ContentType = 3
+ FieldValue_NUMBER FieldValue_ContentType = 4
+ FieldValue_GEO FieldValue_ContentType = 5
+)
+
+var FieldValue_ContentType_name = map[int32]string{
+ 0: "TEXT",
+ 1: "HTML",
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+ 5: "GEO",
+}
+var FieldValue_ContentType_value = map[string]int32{
+ "TEXT": 0,
+ "HTML": 1,
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+ "GEO": 5,
+}
+
+func (x FieldValue_ContentType) Enum() *FieldValue_ContentType {
+ p := new(FieldValue_ContentType)
+ *p = x
+ return p
+}
+func (x FieldValue_ContentType) String() string {
+ return proto.EnumName(FieldValue_ContentType_name, int32(x))
+}
+func (x *FieldValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FieldValue_ContentType_value, data, "FieldValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FieldValue_ContentType(value)
+ return nil
+}
+
+type FacetValue_ContentType int32
+
+const (
+ FacetValue_ATOM FacetValue_ContentType = 2
+ FacetValue_DATE FacetValue_ContentType = 3
+ FacetValue_NUMBER FacetValue_ContentType = 4
+)
+
+var FacetValue_ContentType_name = map[int32]string{
+ 2: "ATOM",
+ 3: "DATE",
+ 4: "NUMBER",
+}
+var FacetValue_ContentType_value = map[string]int32{
+ "ATOM": 2,
+ "DATE": 3,
+ "NUMBER": 4,
+}
+
+func (x FacetValue_ContentType) Enum() *FacetValue_ContentType {
+ p := new(FacetValue_ContentType)
+ *p = x
+ return p
+}
+func (x FacetValue_ContentType) String() string {
+ return proto.EnumName(FacetValue_ContentType_name, int32(x))
+}
+func (x *FacetValue_ContentType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(FacetValue_ContentType_value, data, "FacetValue_ContentType")
+ if err != nil {
+ return err
+ }
+ *x = FacetValue_ContentType(value)
+ return nil
+}
+
+type Document_Storage int32
+
+const (
+ Document_DISK Document_Storage = 0
+)
+
+var Document_Storage_name = map[int32]string{
+ 0: "DISK",
+}
+var Document_Storage_value = map[string]int32{
+ "DISK": 0,
+}
+
+func (x Document_Storage) Enum() *Document_Storage {
+ p := new(Document_Storage)
+ *p = x
+ return p
+}
+func (x Document_Storage) String() string {
+ return proto.EnumName(Document_Storage_name, int32(x))
+}
+func (x *Document_Storage) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(Document_Storage_value, data, "Document_Storage")
+ if err != nil {
+ return err
+ }
+ *x = Document_Storage(value)
+ return nil
+}
+
+type SearchServiceError_ErrorCode int32
+
+const (
+ SearchServiceError_OK SearchServiceError_ErrorCode = 0
+ SearchServiceError_INVALID_REQUEST SearchServiceError_ErrorCode = 1
+ SearchServiceError_TRANSIENT_ERROR SearchServiceError_ErrorCode = 2
+ SearchServiceError_INTERNAL_ERROR SearchServiceError_ErrorCode = 3
+ SearchServiceError_PERMISSION_DENIED SearchServiceError_ErrorCode = 4
+ SearchServiceError_TIMEOUT SearchServiceError_ErrorCode = 5
+ SearchServiceError_CONCURRENT_TRANSACTION SearchServiceError_ErrorCode = 6
+)
+
+var SearchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_REQUEST",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "PERMISSION_DENIED",
+ 5: "TIMEOUT",
+ 6: "CONCURRENT_TRANSACTION",
+}
+var SearchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_REQUEST": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "PERMISSION_DENIED": 4,
+ "TIMEOUT": 5,
+ "CONCURRENT_TRANSACTION": 6,
+}
+
+func (x SearchServiceError_ErrorCode) Enum() *SearchServiceError_ErrorCode {
+ p := new(SearchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x SearchServiceError_ErrorCode) String() string {
+ return proto.EnumName(SearchServiceError_ErrorCode_name, int32(x))
+}
+func (x *SearchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchServiceError_ErrorCode_value, data, "SearchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = SearchServiceError_ErrorCode(value)
+ return nil
+}
+
+type IndexSpec_Consistency int32
+
+const (
+ IndexSpec_GLOBAL IndexSpec_Consistency = 0
+ IndexSpec_PER_DOCUMENT IndexSpec_Consistency = 1
+)
+
+var IndexSpec_Consistency_name = map[int32]string{
+ 0: "GLOBAL",
+ 1: "PER_DOCUMENT",
+}
+var IndexSpec_Consistency_value = map[string]int32{
+ "GLOBAL": 0,
+ "PER_DOCUMENT": 1,
+}
+
+func (x IndexSpec_Consistency) Enum() *IndexSpec_Consistency {
+ p := new(IndexSpec_Consistency)
+ *p = x
+ return p
+}
+func (x IndexSpec_Consistency) String() string {
+ return proto.EnumName(IndexSpec_Consistency_name, int32(x))
+}
+func (x *IndexSpec_Consistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Consistency_value, data, "IndexSpec_Consistency")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Consistency(value)
+ return nil
+}
+
+type IndexSpec_Source int32
+
+const (
+ IndexSpec_SEARCH IndexSpec_Source = 0
+ IndexSpec_DATASTORE IndexSpec_Source = 1
+ IndexSpec_CLOUD_STORAGE IndexSpec_Source = 2
+)
+
+var IndexSpec_Source_name = map[int32]string{
+ 0: "SEARCH",
+ 1: "DATASTORE",
+ 2: "CLOUD_STORAGE",
+}
+var IndexSpec_Source_value = map[string]int32{
+ "SEARCH": 0,
+ "DATASTORE": 1,
+ "CLOUD_STORAGE": 2,
+}
+
+func (x IndexSpec_Source) Enum() *IndexSpec_Source {
+ p := new(IndexSpec_Source)
+ *p = x
+ return p
+}
+func (x IndexSpec_Source) String() string {
+ return proto.EnumName(IndexSpec_Source_name, int32(x))
+}
+func (x *IndexSpec_Source) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Source_value, data, "IndexSpec_Source")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Source(value)
+ return nil
+}
+
+type IndexSpec_Mode int32
+
+const (
+ IndexSpec_PRIORITY IndexSpec_Mode = 0
+ IndexSpec_BACKGROUND IndexSpec_Mode = 1
+)
+
+var IndexSpec_Mode_name = map[int32]string{
+ 0: "PRIORITY",
+ 1: "BACKGROUND",
+}
+var IndexSpec_Mode_value = map[string]int32{
+ "PRIORITY": 0,
+ "BACKGROUND": 1,
+}
+
+func (x IndexSpec_Mode) Enum() *IndexSpec_Mode {
+ p := new(IndexSpec_Mode)
+ *p = x
+ return p
+}
+func (x IndexSpec_Mode) String() string {
+ return proto.EnumName(IndexSpec_Mode_name, int32(x))
+}
+func (x *IndexSpec_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexSpec_Mode_value, data, "IndexSpec_Mode")
+ if err != nil {
+ return err
+ }
+ *x = IndexSpec_Mode(value)
+ return nil
+}
+
+type IndexDocumentParams_Freshness int32
+
+const (
+ IndexDocumentParams_SYNCHRONOUSLY IndexDocumentParams_Freshness = 0
+ IndexDocumentParams_WHEN_CONVENIENT IndexDocumentParams_Freshness = 1
+)
+
+var IndexDocumentParams_Freshness_name = map[int32]string{
+ 0: "SYNCHRONOUSLY",
+ 1: "WHEN_CONVENIENT",
+}
+var IndexDocumentParams_Freshness_value = map[string]int32{
+ "SYNCHRONOUSLY": 0,
+ "WHEN_CONVENIENT": 1,
+}
+
+func (x IndexDocumentParams_Freshness) Enum() *IndexDocumentParams_Freshness {
+ p := new(IndexDocumentParams_Freshness)
+ *p = x
+ return p
+}
+func (x IndexDocumentParams_Freshness) String() string {
+ return proto.EnumName(IndexDocumentParams_Freshness_name, int32(x))
+}
+func (x *IndexDocumentParams_Freshness) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(IndexDocumentParams_Freshness_value, data, "IndexDocumentParams_Freshness")
+ if err != nil {
+ return err
+ }
+ *x = IndexDocumentParams_Freshness(value)
+ return nil
+}
+
+type ScorerSpec_Scorer int32
+
+const (
+ ScorerSpec_RESCORING_MATCH_SCORER ScorerSpec_Scorer = 0
+ ScorerSpec_MATCH_SCORER ScorerSpec_Scorer = 2
+)
+
+var ScorerSpec_Scorer_name = map[int32]string{
+ 0: "RESCORING_MATCH_SCORER",
+ 2: "MATCH_SCORER",
+}
+var ScorerSpec_Scorer_value = map[string]int32{
+ "RESCORING_MATCH_SCORER": 0,
+ "MATCH_SCORER": 2,
+}
+
+func (x ScorerSpec_Scorer) Enum() *ScorerSpec_Scorer {
+ p := new(ScorerSpec_Scorer)
+ *p = x
+ return p
+}
+func (x ScorerSpec_Scorer) String() string {
+ return proto.EnumName(ScorerSpec_Scorer_name, int32(x))
+}
+func (x *ScorerSpec_Scorer) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ScorerSpec_Scorer_value, data, "ScorerSpec_Scorer")
+ if err != nil {
+ return err
+ }
+ *x = ScorerSpec_Scorer(value)
+ return nil
+}
+
+type SearchParams_CursorType int32
+
+const (
+ SearchParams_NONE SearchParams_CursorType = 0
+ SearchParams_SINGLE SearchParams_CursorType = 1
+ SearchParams_PER_RESULT SearchParams_CursorType = 2
+)
+
+var SearchParams_CursorType_name = map[int32]string{
+ 0: "NONE",
+ 1: "SINGLE",
+ 2: "PER_RESULT",
+}
+var SearchParams_CursorType_value = map[string]int32{
+ "NONE": 0,
+ "SINGLE": 1,
+ "PER_RESULT": 2,
+}
+
+func (x SearchParams_CursorType) Enum() *SearchParams_CursorType {
+ p := new(SearchParams_CursorType)
+ *p = x
+ return p
+}
+func (x SearchParams_CursorType) String() string {
+ return proto.EnumName(SearchParams_CursorType_name, int32(x))
+}
+func (x *SearchParams_CursorType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_CursorType_value, data, "SearchParams_CursorType")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_CursorType(value)
+ return nil
+}
+
+type SearchParams_ParsingMode int32
+
+const (
+ SearchParams_STRICT SearchParams_ParsingMode = 0
+ SearchParams_RELAXED SearchParams_ParsingMode = 1
+)
+
+var SearchParams_ParsingMode_name = map[int32]string{
+ 0: "STRICT",
+ 1: "RELAXED",
+}
+var SearchParams_ParsingMode_value = map[string]int32{
+ "STRICT": 0,
+ "RELAXED": 1,
+}
+
+func (x SearchParams_ParsingMode) Enum() *SearchParams_ParsingMode {
+ p := new(SearchParams_ParsingMode)
+ *p = x
+ return p
+}
+func (x SearchParams_ParsingMode) String() string {
+ return proto.EnumName(SearchParams_ParsingMode_name, int32(x))
+}
+func (x *SearchParams_ParsingMode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(SearchParams_ParsingMode_value, data, "SearchParams_ParsingMode")
+ if err != nil {
+ return err
+ }
+ *x = SearchParams_ParsingMode(value)
+ return nil
+}
+
+type Scope struct {
+ Type *Scope_Type `protobuf:"varint,1,opt,name=type,enum=search.Scope_Type" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Scope) Reset() { *m = Scope{} }
+func (m *Scope) String() string { return proto.CompactTextString(m) }
+func (*Scope) ProtoMessage() {}
+
+func (m *Scope) GetType() Scope_Type {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Scope_USER_BY_CANONICAL_ID
+}
+
+func (m *Scope) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type Entry struct {
+ Scope *Scope `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Permission *Entry_Permission `protobuf:"varint,2,opt,name=permission,enum=search.Entry_Permission" json:"permission,omitempty"`
+ DisplayName *string `protobuf:"bytes,3,opt,name=display_name" json:"display_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entry) Reset() { *m = Entry{} }
+func (m *Entry) String() string { return proto.CompactTextString(m) }
+func (*Entry) ProtoMessage() {}
+
+func (m *Entry) GetScope() *Scope {
+ if m != nil {
+ return m.Scope
+ }
+ return nil
+}
+
+func (m *Entry) GetPermission() Entry_Permission {
+ if m != nil && m.Permission != nil {
+ return *m.Permission
+ }
+ return Entry_READ
+}
+
+func (m *Entry) GetDisplayName() string {
+ if m != nil && m.DisplayName != nil {
+ return *m.DisplayName
+ }
+ return ""
+}
+
+type AccessControlList struct {
+ Owner *string `protobuf:"bytes,1,opt,name=owner" json:"owner,omitempty"`
+ Entries []*Entry `protobuf:"bytes,2,rep,name=entries" json:"entries,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AccessControlList) Reset() { *m = AccessControlList{} }
+func (m *AccessControlList) String() string { return proto.CompactTextString(m) }
+func (*AccessControlList) ProtoMessage() {}
+
+func (m *AccessControlList) GetOwner() string {
+ if m != nil && m.Owner != nil {
+ return *m.Owner
+ }
+ return ""
+}
+
+func (m *AccessControlList) GetEntries() []*Entry {
+ if m != nil {
+ return m.Entries
+ }
+ return nil
+}
+
+type FieldValue struct {
+ Type *FieldValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FieldValue_ContentType,def=0" json:"type,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ Geo *FieldValue_Geo `protobuf:"group,4,opt" json:"geo,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue) Reset() { *m = FieldValue{} }
+func (m *FieldValue) String() string { return proto.CompactTextString(m) }
+func (*FieldValue) ProtoMessage() {}
+
+const Default_FieldValue_Type FieldValue_ContentType = FieldValue_TEXT
+const Default_FieldValue_Language string = "en"
+
+func (m *FieldValue) GetType() FieldValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FieldValue_Type
+}
+
+func (m *FieldValue) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_FieldValue_Language
+}
+
+func (m *FieldValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *FieldValue) GetGeo() *FieldValue_Geo {
+ if m != nil {
+ return m.Geo
+ }
+ return nil
+}
+
+type FieldValue_Geo struct {
+ Lat *float64 `protobuf:"fixed64,5,req,name=lat" json:"lat,omitempty"`
+ Lng *float64 `protobuf:"fixed64,6,req,name=lng" json:"lng,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldValue_Geo) Reset() { *m = FieldValue_Geo{} }
+func (m *FieldValue_Geo) String() string { return proto.CompactTextString(m) }
+func (*FieldValue_Geo) ProtoMessage() {}
+
+func (m *FieldValue_Geo) GetLat() float64 {
+ if m != nil && m.Lat != nil {
+ return *m.Lat
+ }
+ return 0
+}
+
+func (m *FieldValue_Geo) GetLng() float64 {
+ if m != nil && m.Lng != nil {
+ return *m.Lng
+ }
+ return 0
+}
+
+type Field struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FieldValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Field) Reset() { *m = Field{} }
+func (m *Field) String() string { return proto.CompactTextString(m) }
+func (*Field) ProtoMessage() {}
+
+func (m *Field) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Field) GetValue() *FieldValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type FieldTypes struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type []FieldValue_ContentType `protobuf:"varint,2,rep,name=type,enum=search.FieldValue_ContentType" json:"type,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldTypes) Reset() { *m = FieldTypes{} }
+func (m *FieldTypes) String() string { return proto.CompactTextString(m) }
+func (*FieldTypes) ProtoMessage() {}
+
+func (m *FieldTypes) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldTypes) GetType() []FieldValue_ContentType {
+ if m != nil {
+ return m.Type
+ }
+ return nil
+}
+
+type FacetValue struct {
+ Type *FacetValue_ContentType `protobuf:"varint,1,opt,name=type,enum=search.FacetValue_ContentType,def=2" json:"type,omitempty"`
+ StringValue *string `protobuf:"bytes,3,opt,name=string_value" json:"string_value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetValue) Reset() { *m = FacetValue{} }
+func (m *FacetValue) String() string { return proto.CompactTextString(m) }
+func (*FacetValue) ProtoMessage() {}
+
+const Default_FacetValue_Type FacetValue_ContentType = FacetValue_ATOM
+
+func (m *FacetValue) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_FacetValue_Type
+}
+
+func (m *FacetValue) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+type Facet struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Value *FacetValue `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Facet) Reset() { *m = Facet{} }
+func (m *Facet) String() string { return proto.CompactTextString(m) }
+func (*Facet) ProtoMessage() {}
+
+func (m *Facet) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Facet) GetValue() *FacetValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type Document struct {
+ Id *string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"`
+ Language *string `protobuf:"bytes,2,opt,name=language,def=en" json:"language,omitempty"`
+ Field []*Field `protobuf:"bytes,3,rep,name=field" json:"field,omitempty"`
+ OrderId *int32 `protobuf:"varint,4,opt,name=order_id" json:"order_id,omitempty"`
+ Storage *Document_Storage `protobuf:"varint,5,opt,name=storage,enum=search.Document_Storage,def=0" json:"storage,omitempty"`
+ Acl *AccessControlList `protobuf:"bytes,6,opt,name=acl" json:"acl,omitempty"`
+ Version *int64 `protobuf:"varint,7,opt,name=version" json:"version,omitempty"`
+ Facet []*Facet `protobuf:"bytes,8,rep,name=facet" json:"facet,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Document) Reset() { *m = Document{} }
+func (m *Document) String() string { return proto.CompactTextString(m) }
+func (*Document) ProtoMessage() {}
+
+const Default_Document_Language string = "en"
+const Default_Document_Storage Document_Storage = Document_DISK
+
+func (m *Document) GetId() string {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return ""
+}
+
+func (m *Document) GetLanguage() string {
+ if m != nil && m.Language != nil {
+ return *m.Language
+ }
+ return Default_Document_Language
+}
+
+func (m *Document) GetField() []*Field {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *Document) GetOrderId() int32 {
+ if m != nil && m.OrderId != nil {
+ return *m.OrderId
+ }
+ return 0
+}
+
+func (m *Document) GetStorage() Document_Storage {
+ if m != nil && m.Storage != nil {
+ return *m.Storage
+ }
+ return Default_Document_Storage
+}
+
+func (m *Document) GetAcl() *AccessControlList {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *Document) GetVersion() int64 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *Document) GetFacet() []*Facet {
+ if m != nil {
+ return m.Facet
+ }
+ return nil
+}
+
+// SearchServiceError carries no fields of its own; it appears to exist
+// only as a container for the nested SearchServiceError_ErrorCode enum
+// referenced by RequestStatus below (generated-code convention).
+type SearchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchServiceError) Reset() { *m = SearchServiceError{} }
+func (m *SearchServiceError) String() string { return proto.CompactTextString(m) }
+func (*SearchServiceError) ProtoMessage() {}
+
+// RequestStatus reports the outcome of one search-service operation:
+// a required error code plus an optional detail string.
+type RequestStatus struct {
+ Code *SearchServiceError_ErrorCode `protobuf:"varint,1,req,name=code,enum=search.SearchServiceError_ErrorCode" json:"code,omitempty"`
+ ErrorDetail *string `protobuf:"bytes,2,opt,name=error_detail" json:"error_detail,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RequestStatus) Reset() { *m = RequestStatus{} }
+func (m *RequestStatus) String() string { return proto.CompactTextString(m) }
+func (*RequestStatus) ProtoMessage() {}
+
+// GetCode returns SearchServiceError_OK when the field is unset.
+func (m *RequestStatus) GetCode() SearchServiceError_ErrorCode {
+ if m != nil && m.Code != nil {
+ return *m.Code
+ }
+ return SearchServiceError_OK
+}
+
+func (m *RequestStatus) GetErrorDetail() string {
+ if m != nil && m.ErrorDetail != nil {
+ return *m.ErrorDetail
+ }
+ return ""
+}
+
+// IndexSpec identifies a search index: required name, plus optional
+// namespace, version, and consistency/source/mode enums (each with a
+// proto-declared default, see the Default_IndexSpec_* constants below).
+type IndexSpec struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Consistency *IndexSpec_Consistency `protobuf:"varint,2,opt,name=consistency,enum=search.IndexSpec_Consistency,def=1" json:"consistency,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ Version *int32 `protobuf:"varint,4,opt,name=version" json:"version,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,5,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ Mode *IndexSpec_Mode `protobuf:"varint,6,opt,name=mode,enum=search.IndexSpec_Mode,def=0" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexSpec) Reset() { *m = IndexSpec{} }
+func (m *IndexSpec) String() string { return proto.CompactTextString(m) }
+func (*IndexSpec) ProtoMessage() {}
+
+// Proto-declared defaults for the optional enum fields above.
+const Default_IndexSpec_Consistency IndexSpec_Consistency = IndexSpec_PER_DOCUMENT
+const Default_IndexSpec_Source IndexSpec_Source = IndexSpec_SEARCH
+const Default_IndexSpec_Mode IndexSpec_Mode = IndexSpec_PRIORITY
+
+// Nil-safe accessors; getters for defaulted fields return the
+// corresponding Default_IndexSpec_* constant when unset.
+func (m *IndexSpec) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetConsistency() IndexSpec_Consistency {
+ if m != nil && m.Consistency != nil {
+ return *m.Consistency
+ }
+ return Default_IndexSpec_Consistency
+}
+
+func (m *IndexSpec) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *IndexSpec) GetVersion() int32 {
+ if m != nil && m.Version != nil {
+ return *m.Version
+ }
+ return 0
+}
+
+func (m *IndexSpec) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_IndexSpec_Source
+}
+
+func (m *IndexSpec) GetMode() IndexSpec_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_IndexSpec_Mode
+}
+
+// IndexMetadata describes one index: its spec, the field types it
+// contains, and (optionally) storage usage figures.
+type IndexMetadata struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Field []*FieldTypes `protobuf:"bytes,2,rep,name=field" json:"field,omitempty"`
+ Storage *IndexMetadata_Storage `protobuf:"bytes,3,opt,name=storage" json:"storage,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata) Reset() { *m = IndexMetadata{} }
+func (m *IndexMetadata) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata) ProtoMessage() {}
+
+func (m *IndexMetadata) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetField() []*FieldTypes {
+ if m != nil {
+ return m.Field
+ }
+ return nil
+}
+
+func (m *IndexMetadata) GetStorage() *IndexMetadata_Storage {
+ if m != nil {
+ return m.Storage
+ }
+ return nil
+}
+
+// IndexMetadata_Storage reports storage consumed versus the limit.
+// Units are not stated here — presumably bytes; confirm against the
+// search service documentation.
+type IndexMetadata_Storage struct {
+ AmountUsed *int64 `protobuf:"varint,1,opt,name=amount_used" json:"amount_used,omitempty"`
+ Limit *int64 `protobuf:"varint,2,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexMetadata_Storage) Reset() { *m = IndexMetadata_Storage{} }
+func (m *IndexMetadata_Storage) String() string { return proto.CompactTextString(m) }
+func (*IndexMetadata_Storage) ProtoMessage() {}
+
+func (m *IndexMetadata_Storage) GetAmountUsed() int64 {
+ if m != nil && m.AmountUsed != nil {
+ return *m.AmountUsed
+ }
+ return 0
+}
+
+func (m *IndexMetadata_Storage) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+// IndexDocumentParams is the payload of an IndexDocument call: the
+// documents to index, the target index, and how synchronously the
+// write should be applied (default SYNCHRONOUSLY).
+type IndexDocumentParams struct {
+ Document []*Document `protobuf:"bytes,1,rep,name=document" json:"document,omitempty"`
+ Freshness *IndexDocumentParams_Freshness `protobuf:"varint,2,opt,name=freshness,enum=search.IndexDocumentParams_Freshness,def=0" json:"freshness,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,3,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentParams) Reset() { *m = IndexDocumentParams{} }
+func (m *IndexDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentParams) ProtoMessage() {}
+
+const Default_IndexDocumentParams_Freshness IndexDocumentParams_Freshness = IndexDocumentParams_SYNCHRONOUSLY
+
+func (m *IndexDocumentParams) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *IndexDocumentParams) GetFreshness() IndexDocumentParams_Freshness {
+ if m != nil && m.Freshness != nil {
+ return *m.Freshness
+ }
+ return Default_IndexDocumentParams_Freshness
+}
+
+func (m *IndexDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+// IndexDocumentRequest wraps IndexDocumentParams with an optional
+// app id (field 3); note field 2 is unused in this message.
+type IndexDocumentRequest struct {
+ Params *IndexDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentRequest) Reset() { *m = IndexDocumentRequest{} }
+func (m *IndexDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentRequest) ProtoMessage() {}
+
+func (m *IndexDocumentRequest) GetParams() *IndexDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *IndexDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// IndexDocumentResponse carries one status per indexed document plus
+// the (possibly server-assigned) document ids.
+type IndexDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ DocId []string `protobuf:"bytes,2,rep,name=doc_id" json:"doc_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *IndexDocumentResponse) Reset() { *m = IndexDocumentResponse{} }
+func (m *IndexDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*IndexDocumentResponse) ProtoMessage() {}
+
+func (m *IndexDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *IndexDocumentResponse) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+// DeleteDocumentParams names the documents to delete (by id) and the
+// index to delete them from.
+type DeleteDocumentParams struct {
+ DocId []string `protobuf:"bytes,1,rep,name=doc_id" json:"doc_id,omitempty"`
+ IndexSpec *IndexSpec `protobuf:"bytes,2,req,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentParams) Reset() { *m = DeleteDocumentParams{} }
+func (m *DeleteDocumentParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentParams) ProtoMessage() {}
+
+func (m *DeleteDocumentParams) GetDocId() []string {
+ if m != nil {
+ return m.DocId
+ }
+ return nil
+}
+
+func (m *DeleteDocumentParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+// DeleteDocumentRequest wraps DeleteDocumentParams with an optional
+// app id (field 3, matching IndexDocumentRequest's layout).
+type DeleteDocumentRequest struct {
+ Params *DeleteDocumentParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentRequest) Reset() { *m = DeleteDocumentRequest{} }
+func (m *DeleteDocumentRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentRequest) ProtoMessage() {}
+
+func (m *DeleteDocumentRequest) GetParams() *DeleteDocumentParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteDocumentRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// DeleteDocumentResponse carries one status per requested deletion.
+type DeleteDocumentResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteDocumentResponse) Reset() { *m = DeleteDocumentResponse{} }
+func (m *DeleteDocumentResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteDocumentResponse) ProtoMessage() {}
+
+func (m *DeleteDocumentResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// ListDocumentsParams pages through an index's documents: start after
+// (or at, when include_start_doc is true — the default) start_doc_id,
+// returning up to limit documents (default 100). keys_only suppresses
+// document content.
+type ListDocumentsParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ StartDocId *string `protobuf:"bytes,2,opt,name=start_doc_id" json:"start_doc_id,omitempty"`
+ IncludeStartDoc *bool `protobuf:"varint,3,opt,name=include_start_doc,def=1" json:"include_start_doc,omitempty"`
+ Limit *int32 `protobuf:"varint,4,opt,name=limit,def=100" json:"limit,omitempty"`
+ KeysOnly *bool `protobuf:"varint,5,opt,name=keys_only" json:"keys_only,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsParams) Reset() { *m = ListDocumentsParams{} }
+func (m *ListDocumentsParams) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsParams) ProtoMessage() {}
+
+const Default_ListDocumentsParams_IncludeStartDoc bool = true
+const Default_ListDocumentsParams_Limit int32 = 100
+
+func (m *ListDocumentsParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *ListDocumentsParams) GetStartDocId() string {
+ if m != nil && m.StartDocId != nil {
+ return *m.StartDocId
+ }
+ return ""
+}
+
+func (m *ListDocumentsParams) GetIncludeStartDoc() bool {
+ if m != nil && m.IncludeStartDoc != nil {
+ return *m.IncludeStartDoc
+ }
+ return Default_ListDocumentsParams_IncludeStartDoc
+}
+
+func (m *ListDocumentsParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListDocumentsParams_Limit
+}
+
+func (m *ListDocumentsParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+// ListDocumentsRequest wraps ListDocumentsParams with an optional
+// app id (field 2 here, unlike the other request wrappers' field 3).
+type ListDocumentsRequest struct {
+ Params *ListDocumentsParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,2,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsRequest) Reset() { *m = ListDocumentsRequest{} }
+func (m *ListDocumentsRequest) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsRequest) ProtoMessage() {}
+
+func (m *ListDocumentsRequest) GetParams() *ListDocumentsParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListDocumentsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// ListDocumentsResponse: overall status plus the page of documents.
+type ListDocumentsResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ Document []*Document `protobuf:"bytes,2,rep,name=document" json:"document,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListDocumentsResponse) Reset() { *m = ListDocumentsResponse{} }
+func (m *ListDocumentsResponse) String() string { return proto.CompactTextString(m) }
+func (*ListDocumentsResponse) ProtoMessage() {}
+
+func (m *ListDocumentsResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListDocumentsResponse) GetDocument() []*Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+// ListIndexesParams pages through an app's indexes, optionally
+// filtered by namespace and name prefix; fetch_schema additionally
+// requests each index's field schema. Defaults: limit 20,
+// include_start_index true, source SEARCH.
+type ListIndexesParams struct {
+ FetchSchema *bool `protobuf:"varint,1,opt,name=fetch_schema" json:"fetch_schema,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=20" json:"limit,omitempty"`
+ Namespace *string `protobuf:"bytes,3,opt,name=namespace" json:"namespace,omitempty"`
+ StartIndexName *string `protobuf:"bytes,4,opt,name=start_index_name" json:"start_index_name,omitempty"`
+ IncludeStartIndex *bool `protobuf:"varint,5,opt,name=include_start_index,def=1" json:"include_start_index,omitempty"`
+ IndexNamePrefix *string `protobuf:"bytes,6,opt,name=index_name_prefix" json:"index_name_prefix,omitempty"`
+ Offset *int32 `protobuf:"varint,7,opt,name=offset" json:"offset,omitempty"`
+ Source *IndexSpec_Source `protobuf:"varint,8,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesParams) Reset() { *m = ListIndexesParams{} }
+func (m *ListIndexesParams) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesParams) ProtoMessage() {}
+
+const Default_ListIndexesParams_Limit int32 = 20
+const Default_ListIndexesParams_IncludeStartIndex bool = true
+const Default_ListIndexesParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *ListIndexesParams) GetFetchSchema() bool {
+ if m != nil && m.FetchSchema != nil {
+ return *m.FetchSchema
+ }
+ return false
+}
+
+func (m *ListIndexesParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ListIndexesParams_Limit
+}
+
+func (m *ListIndexesParams) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetStartIndexName() string {
+ if m != nil && m.StartIndexName != nil {
+ return *m.StartIndexName
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetIncludeStartIndex() bool {
+ if m != nil && m.IncludeStartIndex != nil {
+ return *m.IncludeStartIndex
+ }
+ return Default_ListIndexesParams_IncludeStartIndex
+}
+
+func (m *ListIndexesParams) GetIndexNamePrefix() string {
+ if m != nil && m.IndexNamePrefix != nil {
+ return *m.IndexNamePrefix
+ }
+ return ""
+}
+
+func (m *ListIndexesParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *ListIndexesParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_ListIndexesParams_Source
+}
+
+// ListIndexesRequest wraps ListIndexesParams with an optional app id.
+type ListIndexesRequest struct {
+ Params *ListIndexesParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesRequest) Reset() { *m = ListIndexesRequest{} }
+func (m *ListIndexesRequest) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesRequest) ProtoMessage() {}
+
+func (m *ListIndexesRequest) GetParams() *ListIndexesParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *ListIndexesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// ListIndexesResponse: overall status plus metadata for each index.
+type ListIndexesResponse struct {
+ Status *RequestStatus `protobuf:"bytes,1,req,name=status" json:"status,omitempty"`
+ IndexMetadata []*IndexMetadata `protobuf:"bytes,2,rep,name=index_metadata" json:"index_metadata,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ListIndexesResponse) Reset() { *m = ListIndexesResponse{} }
+func (m *ListIndexesResponse) String() string { return proto.CompactTextString(m) }
+func (*ListIndexesResponse) ProtoMessage() {}
+
+func (m *ListIndexesResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *ListIndexesResponse) GetIndexMetadata() []*IndexMetadata {
+ if m != nil {
+ return m.IndexMetadata
+ }
+ return nil
+}
+
+// DeleteSchemaParams names the indexes whose schemas should be
+// deleted; source defaults to SEARCH.
+type DeleteSchemaParams struct {
+ Source *IndexSpec_Source `protobuf:"varint,1,opt,name=source,enum=search.IndexSpec_Source,def=0" json:"source,omitempty"`
+ IndexSpec []*IndexSpec `protobuf:"bytes,2,rep,name=index_spec" json:"index_spec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaParams) Reset() { *m = DeleteSchemaParams{} }
+func (m *DeleteSchemaParams) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaParams) ProtoMessage() {}
+
+const Default_DeleteSchemaParams_Source IndexSpec_Source = IndexSpec_SEARCH
+
+func (m *DeleteSchemaParams) GetSource() IndexSpec_Source {
+ if m != nil && m.Source != nil {
+ return *m.Source
+ }
+ return Default_DeleteSchemaParams_Source
+}
+
+func (m *DeleteSchemaParams) GetIndexSpec() []*IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+// DeleteSchemaRequest wraps DeleteSchemaParams with an optional app id.
+type DeleteSchemaRequest struct {
+ Params *DeleteSchemaParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaRequest) Reset() { *m = DeleteSchemaRequest{} }
+func (m *DeleteSchemaRequest) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaRequest) ProtoMessage() {}
+
+func (m *DeleteSchemaRequest) GetParams() *DeleteSchemaParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *DeleteSchemaRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// DeleteSchemaResponse carries one status per schema deletion.
+type DeleteSchemaResponse struct {
+ Status []*RequestStatus `protobuf:"bytes,1,rep,name=status" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *DeleteSchemaResponse) Reset() { *m = DeleteSchemaResponse{} }
+func (m *DeleteSchemaResponse) String() string { return proto.CompactTextString(m) }
+func (*DeleteSchemaResponse) ProtoMessage() {}
+
+func (m *DeleteSchemaResponse) GetStatus() []*RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+// SortSpec orders search results by an expression, descending by
+// default, with per-type fallback values for documents missing the
+// sorted field. (Field number 3 is skipped in this message.)
+type SortSpec struct {
+ SortExpression *string `protobuf:"bytes,1,req,name=sort_expression" json:"sort_expression,omitempty"`
+ SortDescending *bool `protobuf:"varint,2,opt,name=sort_descending,def=1" json:"sort_descending,omitempty"`
+ DefaultValueText *string `protobuf:"bytes,4,opt,name=default_value_text" json:"default_value_text,omitempty"`
+ DefaultValueNumeric *float64 `protobuf:"fixed64,5,opt,name=default_value_numeric" json:"default_value_numeric,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SortSpec) Reset() { *m = SortSpec{} }
+func (m *SortSpec) String() string { return proto.CompactTextString(m) }
+func (*SortSpec) ProtoMessage() {}
+
+const Default_SortSpec_SortDescending bool = true
+
+func (m *SortSpec) GetSortExpression() string {
+ if m != nil && m.SortExpression != nil {
+ return *m.SortExpression
+ }
+ return ""
+}
+
+func (m *SortSpec) GetSortDescending() bool {
+ if m != nil && m.SortDescending != nil {
+ return *m.SortDescending
+ }
+ return Default_SortSpec_SortDescending
+}
+
+func (m *SortSpec) GetDefaultValueText() string {
+ if m != nil && m.DefaultValueText != nil {
+ return *m.DefaultValueText
+ }
+ return ""
+}
+
+func (m *SortSpec) GetDefaultValueNumeric() float64 {
+ if m != nil && m.DefaultValueNumeric != nil {
+ return *m.DefaultValueNumeric
+ }
+ return 0
+}
+
+// ScorerSpec selects the scoring algorithm (default MATCH_SCORER) and
+// caps how many results are scored (default 1000).
+type ScorerSpec struct {
+ Scorer *ScorerSpec_Scorer `protobuf:"varint,1,opt,name=scorer,enum=search.ScorerSpec_Scorer,def=2" json:"scorer,omitempty"`
+ Limit *int32 `protobuf:"varint,2,opt,name=limit,def=1000" json:"limit,omitempty"`
+ MatchScorerParameters *string `protobuf:"bytes,9,opt,name=match_scorer_parameters" json:"match_scorer_parameters,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ScorerSpec) Reset() { *m = ScorerSpec{} }
+func (m *ScorerSpec) String() string { return proto.CompactTextString(m) }
+func (*ScorerSpec) ProtoMessage() {}
+
+const Default_ScorerSpec_Scorer ScorerSpec_Scorer = ScorerSpec_MATCH_SCORER
+const Default_ScorerSpec_Limit int32 = 1000
+
+func (m *ScorerSpec) GetScorer() ScorerSpec_Scorer {
+ if m != nil && m.Scorer != nil {
+ return *m.Scorer
+ }
+ return Default_ScorerSpec_Scorer
+}
+
+func (m *ScorerSpec) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_ScorerSpec_Limit
+}
+
+func (m *ScorerSpec) GetMatchScorerParameters() string {
+ if m != nil && m.MatchScorerParameters != nil {
+ return *m.MatchScorerParameters
+ }
+ return ""
+}
+
+// FieldSpec selects which stored fields to return and which computed
+// expressions to evaluate per result. Expression is a proto2 "group"
+// (note the group,2,rep tag), hence the nested FieldSpec_Expression
+// type below with its own field numbers 3 and 4.
+type FieldSpec struct {
+ Name []string `protobuf:"bytes,1,rep,name=name" json:"name,omitempty"`
+ Expression []*FieldSpec_Expression `protobuf:"group,2,rep" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec) Reset() { *m = FieldSpec{} }
+func (m *FieldSpec) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec) ProtoMessage() {}
+
+func (m *FieldSpec) GetName() []string {
+ if m != nil {
+ return m.Name
+ }
+ return nil
+}
+
+func (m *FieldSpec) GetExpression() []*FieldSpec_Expression {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+// FieldSpec_Expression is one named computed expression.
+type FieldSpec_Expression struct {
+ Name *string `protobuf:"bytes,3,req,name=name" json:"name,omitempty"`
+ Expression *string `protobuf:"bytes,4,req,name=expression" json:"expression,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FieldSpec_Expression) Reset() { *m = FieldSpec_Expression{} }
+func (m *FieldSpec_Expression) String() string { return proto.CompactTextString(m) }
+func (*FieldSpec_Expression) ProtoMessage() {}
+
+func (m *FieldSpec_Expression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FieldSpec_Expression) GetExpression() string {
+ if m != nil && m.Expression != nil {
+ return *m.Expression
+ }
+ return ""
+}
+
+// FacetRange is a named [start, end) bucket for facet aggregation;
+// start/end are string-encoded values.
+type FacetRange struct {
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Start *string `protobuf:"bytes,2,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,3,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRange) Reset() { *m = FacetRange{} }
+func (m *FacetRange) String() string { return proto.CompactTextString(m) }
+func (*FacetRange) ProtoMessage() {}
+
+func (m *FacetRange) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRange) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRange) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+// FacetRequestParam tunes a single facet request: cap on distinct
+// values, explicit ranges, or an explicit value whitelist.
+type FacetRequestParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit" json:"value_limit,omitempty"`
+ Range []*FacetRange `protobuf:"bytes,2,rep,name=range" json:"range,omitempty"`
+ ValueConstraint []string `protobuf:"bytes,3,rep,name=value_constraint" json:"value_constraint,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequestParam) Reset() { *m = FacetRequestParam{} }
+func (m *FacetRequestParam) String() string { return proto.CompactTextString(m) }
+func (*FacetRequestParam) ProtoMessage() {}
+
+func (m *FacetRequestParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return 0
+}
+
+func (m *FacetRequestParam) GetRange() []*FacetRange {
+ if m != nil {
+ return m.Range
+ }
+ return nil
+}
+
+func (m *FacetRequestParam) GetValueConstraint() []string {
+ if m != nil {
+ return m.ValueConstraint
+ }
+ return nil
+}
+
+// FacetAutoDetectParam caps values per auto-discovered facet
+// (default 10).
+type FacetAutoDetectParam struct {
+ ValueLimit *int32 `protobuf:"varint,1,opt,name=value_limit,def=10" json:"value_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetAutoDetectParam) Reset() { *m = FacetAutoDetectParam{} }
+func (m *FacetAutoDetectParam) String() string { return proto.CompactTextString(m) }
+func (*FacetAutoDetectParam) ProtoMessage() {}
+
+const Default_FacetAutoDetectParam_ValueLimit int32 = 10
+
+func (m *FacetAutoDetectParam) GetValueLimit() int32 {
+ if m != nil && m.ValueLimit != nil {
+ return *m.ValueLimit
+ }
+ return Default_FacetAutoDetectParam_ValueLimit
+}
+
+// FacetRequest asks for aggregation over one named facet of the given
+// content type, with optional tuning parameters.
+type FacetRequest struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Params *FacetRequestParam `protobuf:"bytes,3,opt,name=params" json:"params,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRequest) Reset() { *m = FacetRequest{} }
+func (m *FacetRequest) String() string { return proto.CompactTextString(m) }
+func (*FacetRequest) ProtoMessage() {}
+
+func (m *FacetRequest) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// GetType returns FacetValue_ATOM when the required field is unset.
+func (m *FacetRequest) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetRequest) GetParams() *FacetRequestParam {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+// FacetRefine narrows a search to one facet value, either an exact
+// value or a start/end range.
+type FacetRefine struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Value *string `protobuf:"bytes,3,opt,name=value" json:"value,omitempty"`
+ Start *string `protobuf:"bytes,4,opt,name=start" json:"start,omitempty"`
+ End *string `protobuf:"bytes,5,opt,name=end" json:"end,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetRefine) Reset() { *m = FacetRefine{} }
+func (m *FacetRefine) String() string { return proto.CompactTextString(m) }
+func (*FacetRefine) ProtoMessage() {}
+
+func (m *FacetRefine) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetRefine) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetStart() string {
+ if m != nil && m.Start != nil {
+ return *m.Start
+ }
+ return ""
+}
+
+func (m *FacetRefine) GetEnd() string {
+ if m != nil && m.End != nil {
+ return *m.End
+ }
+ return ""
+}
+
+// SearchParams is the full specification of a Search call: the target
+// index, the query string, paging (cursor/offset/limit), sorting,
+// scoring, field selection, and faceting. Proto defaults: cursor_type
+// NONE, limit 20, parsing_mode STRICT, auto_discover_facet_count 0.
+type SearchParams struct {
+ IndexSpec *IndexSpec `protobuf:"bytes,1,req,name=index_spec" json:"index_spec,omitempty"`
+ Query *string `protobuf:"bytes,2,req,name=query" json:"query,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ Offset *int32 `protobuf:"varint,11,opt,name=offset" json:"offset,omitempty"`
+ CursorType *SearchParams_CursorType `protobuf:"varint,5,opt,name=cursor_type,enum=search.SearchParams_CursorType,def=0" json:"cursor_type,omitempty"`
+ Limit *int32 `protobuf:"varint,6,opt,name=limit,def=20" json:"limit,omitempty"`
+ MatchedCountAccuracy *int32 `protobuf:"varint,7,opt,name=matched_count_accuracy" json:"matched_count_accuracy,omitempty"`
+ SortSpec []*SortSpec `protobuf:"bytes,8,rep,name=sort_spec" json:"sort_spec,omitempty"`
+ ScorerSpec *ScorerSpec `protobuf:"bytes,9,opt,name=scorer_spec" json:"scorer_spec,omitempty"`
+ FieldSpec *FieldSpec `protobuf:"bytes,10,opt,name=field_spec" json:"field_spec,omitempty"`
+ KeysOnly *bool `protobuf:"varint,12,opt,name=keys_only" json:"keys_only,omitempty"`
+ ParsingMode *SearchParams_ParsingMode `protobuf:"varint,13,opt,name=parsing_mode,enum=search.SearchParams_ParsingMode,def=0" json:"parsing_mode,omitempty"`
+ AutoDiscoverFacetCount *int32 `protobuf:"varint,15,opt,name=auto_discover_facet_count,def=0" json:"auto_discover_facet_count,omitempty"`
+ IncludeFacet []*FacetRequest `protobuf:"bytes,16,rep,name=include_facet" json:"include_facet,omitempty"`
+ FacetRefine []*FacetRefine `protobuf:"bytes,17,rep,name=facet_refine" json:"facet_refine,omitempty"`
+ FacetAutoDetectParam *FacetAutoDetectParam `protobuf:"bytes,18,opt,name=facet_auto_detect_param" json:"facet_auto_detect_param,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchParams) Reset() { *m = SearchParams{} }
+func (m *SearchParams) String() string { return proto.CompactTextString(m) }
+func (*SearchParams) ProtoMessage() {}
+
+const Default_SearchParams_CursorType SearchParams_CursorType = SearchParams_NONE
+const Default_SearchParams_Limit int32 = 20
+const Default_SearchParams_ParsingMode SearchParams_ParsingMode = SearchParams_STRICT
+const Default_SearchParams_AutoDiscoverFacetCount int32 = 0
+
+// Standard nil-safe generated accessors follow.
+func (m *SearchParams) GetIndexSpec() *IndexSpec {
+ if m != nil {
+ return m.IndexSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetQuery() string {
+ if m != nil && m.Query != nil {
+ return *m.Query
+ }
+ return ""
+}
+
+func (m *SearchParams) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchParams) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return 0
+}
+
+func (m *SearchParams) GetCursorType() SearchParams_CursorType {
+ if m != nil && m.CursorType != nil {
+ return *m.CursorType
+ }
+ return Default_SearchParams_CursorType
+}
+
+func (m *SearchParams) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return Default_SearchParams_Limit
+}
+
+func (m *SearchParams) GetMatchedCountAccuracy() int32 {
+ if m != nil && m.MatchedCountAccuracy != nil {
+ return *m.MatchedCountAccuracy
+ }
+ return 0
+}
+
+func (m *SearchParams) GetSortSpec() []*SortSpec {
+ if m != nil {
+ return m.SortSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetScorerSpec() *ScorerSpec {
+ if m != nil {
+ return m.ScorerSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFieldSpec() *FieldSpec {
+ if m != nil {
+ return m.FieldSpec
+ }
+ return nil
+}
+
+func (m *SearchParams) GetKeysOnly() bool {
+ if m != nil && m.KeysOnly != nil {
+ return *m.KeysOnly
+ }
+ return false
+}
+
+func (m *SearchParams) GetParsingMode() SearchParams_ParsingMode {
+ if m != nil && m.ParsingMode != nil {
+ return *m.ParsingMode
+ }
+ return Default_SearchParams_ParsingMode
+}
+
+func (m *SearchParams) GetAutoDiscoverFacetCount() int32 {
+ if m != nil && m.AutoDiscoverFacetCount != nil {
+ return *m.AutoDiscoverFacetCount
+ }
+ return Default_SearchParams_AutoDiscoverFacetCount
+}
+
+func (m *SearchParams) GetIncludeFacet() []*FacetRequest {
+ if m != nil {
+ return m.IncludeFacet
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetRefine() []*FacetRefine {
+ if m != nil {
+ return m.FacetRefine
+ }
+ return nil
+}
+
+func (m *SearchParams) GetFacetAutoDetectParam() *FacetAutoDetectParam {
+ if m != nil {
+ return m.FacetAutoDetectParam
+ }
+ return nil
+}
+
+// SearchRequest wraps SearchParams with an optional app id (field 3).
+type SearchRequest struct {
+ Params *SearchParams `protobuf:"bytes,1,req,name=params" json:"params,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchRequest) Reset() { *m = SearchRequest{} }
+func (m *SearchRequest) String() string { return proto.CompactTextString(m) }
+func (*SearchRequest) ProtoMessage() {}
+
+func (m *SearchRequest) GetParams() *SearchParams {
+ if m != nil {
+ return m.Params
+ }
+ return nil
+}
+
+func (m *SearchRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+// FacetResultValue is one facet value and its occurrence count.
+type FacetResultValue struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Count *int32 `protobuf:"varint,2,req,name=count" json:"count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResultValue) Reset() { *m = FacetResultValue{} }
+func (m *FacetResultValue) String() string { return proto.CompactTextString(m) }
+func (*FacetResultValue) ProtoMessage() {}
+
+func (m *FacetResultValue) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *FacetResultValue) GetCount() int32 {
+ if m != nil && m.Count != nil {
+ return *m.Count
+ }
+ return 0
+}
+
+// FacetResult aggregates the values observed for one named facet.
+type FacetResult struct {
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ Type *FacetValue_ContentType `protobuf:"varint,2,req,name=type,enum=search.FacetValue_ContentType" json:"type,omitempty"`
+ Value []*FacetResultValue `protobuf:"bytes,3,rep,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *FacetResult) Reset() { *m = FacetResult{} }
+func (m *FacetResult) String() string { return proto.CompactTextString(m) }
+func (*FacetResult) ProtoMessage() {}
+
+func (m *FacetResult) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// GetType returns FacetValue_ATOM when the required field is unset.
+func (m *FacetResult) GetType() FacetValue_ContentType {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return FacetValue_ATOM
+}
+
+func (m *FacetResult) GetValue() []*FacetResultValue {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// SearchResult is one matching document plus its computed expression
+// fields, sort scores, and a cursor for resuming after this result.
+type SearchResult struct {
+ Document *Document `protobuf:"bytes,1,req,name=document" json:"document,omitempty"`
+ Expression []*Field `protobuf:"bytes,4,rep,name=expression" json:"expression,omitempty"`
+ Score []float64 `protobuf:"fixed64,2,rep,name=score" json:"score,omitempty"`
+ Cursor *string `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResult) Reset() { *m = SearchResult{} }
+func (m *SearchResult) String() string { return proto.CompactTextString(m) }
+func (*SearchResult) ProtoMessage() {}
+
+func (m *SearchResult) GetDocument() *Document {
+ if m != nil {
+ return m.Document
+ }
+ return nil
+}
+
+func (m *SearchResult) GetExpression() []*Field {
+ if m != nil {
+ return m.Expression
+ }
+ return nil
+}
+
+func (m *SearchResult) GetScore() []float64 {
+ if m != nil {
+ return m.Score
+ }
+ return nil
+}
+
+func (m *SearchResult) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+// SearchResponse is the result of a Search call: the matching results,
+// total matched count, status, continuation cursor, and facet
+// aggregations. It is extendable (proto2 extensions, tags 1000-9999),
+// hence the XXX_extensions map and the extension plumbing below.
+type SearchResponse struct {
+ Result []*SearchResult `protobuf:"bytes,1,rep,name=result" json:"result,omitempty"`
+ MatchedCount *int64 `protobuf:"varint,2,req,name=matched_count" json:"matched_count,omitempty"`
+ Status *RequestStatus `protobuf:"bytes,3,req,name=status" json:"status,omitempty"`
+ Cursor *string `protobuf:"bytes,4,opt,name=cursor" json:"cursor,omitempty"`
+ FacetResult []*FacetResult `protobuf:"bytes,5,rep,name=facet_result" json:"facet_result,omitempty"`
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *SearchResponse) Reset() { *m = SearchResponse{} }
+func (m *SearchResponse) String() string { return proto.CompactTextString(m) }
+func (*SearchResponse) ProtoMessage() {}
+
+var extRange_SearchResponse = []proto.ExtensionRange{
+ {1000, 9999},
+}
+
+func (*SearchResponse) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_SearchResponse
+}
+
+// ExtensionMap lazily allocates the extension map on first use.
+func (m *SearchResponse) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+func (m *SearchResponse) GetResult() []*SearchResult {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetMatchedCount() int64 {
+ if m != nil && m.MatchedCount != nil {
+ return *m.MatchedCount
+ }
+ return 0
+}
+
+func (m *SearchResponse) GetStatus() *RequestStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+func (m *SearchResponse) GetCursor() string {
+ if m != nil && m.Cursor != nil {
+ return *m.Cursor
+ }
+ return ""
+}
+
+func (m *SearchResponse) GetFacetResult() []*FacetResult {
+ if m != nil {
+ return m.FacetResult
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("search.Scope_Type", Scope_Type_name, Scope_Type_value)
+ proto.RegisterEnum("search.Entry_Permission", Entry_Permission_name, Entry_Permission_value)
+ proto.RegisterEnum("search.FieldValue_ContentType", FieldValue_ContentType_name, FieldValue_ContentType_value)
+ proto.RegisterEnum("search.FacetValue_ContentType", FacetValue_ContentType_name, FacetValue_ContentType_value)
+ proto.RegisterEnum("search.Document_Storage", Document_Storage_name, Document_Storage_value)
+ proto.RegisterEnum("search.SearchServiceError_ErrorCode", SearchServiceError_ErrorCode_name, SearchServiceError_ErrorCode_value)
+ proto.RegisterEnum("search.IndexSpec_Consistency", IndexSpec_Consistency_name, IndexSpec_Consistency_value)
+ proto.RegisterEnum("search.IndexSpec_Source", IndexSpec_Source_name, IndexSpec_Source_value)
+ proto.RegisterEnum("search.IndexSpec_Mode", IndexSpec_Mode_name, IndexSpec_Mode_value)
+ proto.RegisterEnum("search.IndexDocumentParams_Freshness", IndexDocumentParams_Freshness_name, IndexDocumentParams_Freshness_value)
+ proto.RegisterEnum("search.ScorerSpec_Scorer", ScorerSpec_Scorer_name, ScorerSpec_Scorer_value)
+ proto.RegisterEnum("search.SearchParams_CursorType", SearchParams_CursorType_name, SearchParams_CursorType_value)
+ proto.RegisterEnum("search.SearchParams_ParsingMode", SearchParams_ParsingMode_name, SearchParams_ParsingMode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.proto
new file mode 100644
index 000000000000..629984137208
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/search/search.proto
@@ -0,0 +1,376 @@
+syntax = "proto2";
+option go_package = "search";
+
+package search;
+
+message Scope {
+ enum Type {
+ USER_BY_CANONICAL_ID = 1;
+ USER_BY_EMAIL = 2;
+ GROUP_BY_CANONICAL_ID = 3;
+ GROUP_BY_EMAIL = 4;
+ GROUP_BY_DOMAIN = 5;
+ ALL_USERS = 6;
+ ALL_AUTHENTICATED_USERS = 7;
+ }
+
+ optional Type type = 1;
+ optional string value = 2;
+}
+
+message Entry {
+ enum Permission {
+ READ = 1;
+ WRITE = 2;
+ FULL_CONTROL = 3;
+ }
+
+ optional Scope scope = 1;
+ optional Permission permission = 2;
+ optional string display_name = 3;
+}
+
+message AccessControlList {
+ optional string owner = 1;
+ repeated Entry entries = 2;
+}
+
+message FieldValue {
+ enum ContentType {
+ TEXT = 0;
+ HTML = 1;
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ GEO = 5;
+ }
+
+ optional ContentType type = 1 [default = TEXT];
+
+ optional string language = 2 [default = "en"];
+
+ optional string string_value = 3;
+
+ optional group Geo = 4 {
+ required double lat = 5;
+ required double lng = 6;
+ }
+}
+
+message Field {
+ required string name = 1;
+ required FieldValue value = 2;
+}
+
+message FieldTypes {
+ required string name = 1;
+ repeated FieldValue.ContentType type = 2;
+}
+
+message FacetValue {
+ enum ContentType {
+ ATOM = 2;
+ DATE = 3;
+ NUMBER = 4;
+ }
+
+ optional ContentType type = 1 [default = ATOM];
+ optional string string_value = 3;
+}
+
+message Facet {
+ required string name = 1;
+ required FacetValue value = 2;
+}
+
+
+message Document {
+ optional string id = 1;
+ optional string language = 2 [default = "en"];
+ repeated Field field = 3;
+ optional int32 order_id = 4;
+
+ enum Storage {
+ DISK = 0;
+ }
+
+ optional Storage storage = 5 [default = DISK];
+ optional AccessControlList acl = 6;
+ optional int64 version = 7;
+ repeated Facet facet = 8;
+}
+
+message SearchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_REQUEST = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ PERMISSION_DENIED = 4;
+ TIMEOUT = 5;
+ CONCURRENT_TRANSACTION = 6;
+ }
+}
+
+message RequestStatus {
+ required SearchServiceError.ErrorCode code = 1;
+ optional string error_detail = 2;
+}
+
+message IndexSpec {
+ required string name = 1;
+
+ enum Consistency {
+ GLOBAL = 0;
+ PER_DOCUMENT = 1;
+ }
+ optional Consistency consistency = 2 [default = PER_DOCUMENT];
+
+ optional string namespace = 3;
+ optional int32 version = 4;
+
+ enum Source {
+ SEARCH = 0;
+ DATASTORE = 1;
+ CLOUD_STORAGE = 2;
+ }
+ optional Source source = 5 [default = SEARCH];
+
+ enum Mode {
+ PRIORITY = 0;
+ BACKGROUND = 1;
+ }
+ optional Mode mode = 6 [default = PRIORITY];
+}
+
+message IndexMetadata {
+ required IndexSpec index_spec = 1;
+
+ repeated FieldTypes field = 2;
+
+ message Storage {
+ optional int64 amount_used = 1;
+ optional int64 limit = 2;
+ }
+ optional Storage storage = 3;
+}
+
+message IndexDocumentParams {
+ repeated Document document = 1;
+
+ enum Freshness {
+ SYNCHRONOUSLY = 0;
+ WHEN_CONVENIENT = 1;
+ }
+ optional Freshness freshness = 2 [default = SYNCHRONOUSLY, deprecated=true];
+
+ required IndexSpec index_spec = 3;
+}
+
+message IndexDocumentRequest {
+ required IndexDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message IndexDocumentResponse {
+ repeated RequestStatus status = 1;
+
+ repeated string doc_id = 2;
+}
+
+message DeleteDocumentParams {
+ repeated string doc_id = 1;
+
+ required IndexSpec index_spec = 2;
+}
+
+message DeleteDocumentRequest {
+ required DeleteDocumentParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteDocumentResponse {
+ repeated RequestStatus status = 1;
+}
+
+message ListDocumentsParams {
+ required IndexSpec index_spec = 1;
+ optional string start_doc_id = 2;
+ optional bool include_start_doc = 3 [default = true];
+ optional int32 limit = 4 [default = 100];
+ optional bool keys_only = 5;
+}
+
+message ListDocumentsRequest {
+ required ListDocumentsParams params = 1;
+
+ optional bytes app_id = 2;
+}
+
+message ListDocumentsResponse {
+ required RequestStatus status = 1;
+
+ repeated Document document = 2;
+}
+
+message ListIndexesParams {
+ optional bool fetch_schema = 1;
+ optional int32 limit = 2 [default = 20];
+ optional string namespace = 3;
+ optional string start_index_name = 4;
+ optional bool include_start_index = 5 [default = true];
+ optional string index_name_prefix = 6;
+ optional int32 offset = 7;
+ optional IndexSpec.Source source = 8 [default = SEARCH];
+}
+
+message ListIndexesRequest {
+ required ListIndexesParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message ListIndexesResponse {
+ required RequestStatus status = 1;
+ repeated IndexMetadata index_metadata = 2;
+}
+
+message DeleteSchemaParams {
+ optional IndexSpec.Source source = 1 [default = SEARCH];
+ repeated IndexSpec index_spec = 2;
+}
+
+message DeleteSchemaRequest {
+ required DeleteSchemaParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message DeleteSchemaResponse {
+ repeated RequestStatus status = 1;
+}
+
+message SortSpec {
+ required string sort_expression = 1;
+ optional bool sort_descending = 2 [default = true];
+ optional string default_value_text = 4;
+ optional double default_value_numeric = 5;
+}
+
+message ScorerSpec {
+ enum Scorer {
+ RESCORING_MATCH_SCORER = 0;
+ MATCH_SCORER = 2;
+ }
+ optional Scorer scorer = 1 [default = MATCH_SCORER];
+
+ optional int32 limit = 2 [default = 1000];
+ optional string match_scorer_parameters = 9;
+}
+
+message FieldSpec {
+ repeated string name = 1;
+
+ repeated group Expression = 2 {
+ required string name = 3;
+ required string expression = 4;
+ }
+}
+
+message FacetRange {
+ optional string name = 1;
+ optional string start = 2;
+ optional string end = 3;
+}
+
+message FacetRequestParam {
+ optional int32 value_limit = 1;
+ repeated FacetRange range = 2;
+ repeated string value_constraint = 3;
+}
+
+message FacetAutoDetectParam {
+ optional int32 value_limit = 1 [default = 10];
+}
+
+message FacetRequest {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ optional FacetRequestParam params = 3;
+}
+
+message FacetRefine {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ optional string value = 3;
+ optional string start = 4;
+ optional string end = 5;
+}
+
+message SearchParams {
+ required IndexSpec index_spec = 1;
+ required string query = 2;
+ optional string cursor = 4;
+ optional int32 offset = 11;
+
+ enum CursorType {
+ NONE = 0;
+ SINGLE = 1;
+ PER_RESULT = 2;
+ }
+ optional CursorType cursor_type = 5 [default = NONE];
+
+ optional int32 limit = 6 [default = 20];
+ optional int32 matched_count_accuracy = 7;
+ repeated SortSpec sort_spec = 8;
+ optional ScorerSpec scorer_spec = 9;
+ optional FieldSpec field_spec = 10;
+ optional bool keys_only = 12;
+
+ enum ParsingMode {
+ STRICT = 0;
+ RELAXED = 1;
+ }
+ optional ParsingMode parsing_mode = 13 [default = STRICT];
+
+ optional int32 auto_discover_facet_count = 15 [default = 0];
+ repeated FacetRequest include_facet = 16;
+ repeated FacetRefine facet_refine = 17;
+ optional FacetAutoDetectParam facet_auto_detect_param = 18;
+}
+
+message SearchRequest {
+ required SearchParams params = 1;
+
+ optional bytes app_id = 3;
+}
+
+message FacetResultValue {
+ required string name = 1;
+ required int32 count = 2;
+}
+
+message FacetResult {
+ required string name = 1;
+ required FacetValue.ContentType type = 2;
+ repeated FacetResultValue value = 3;
+}
+
+message SearchResult {
+ required Document document = 1;
+ repeated Field expression = 4;
+ repeated double score = 2;
+ optional string cursor = 3;
+}
+
+message SearchResponse {
+ repeated SearchResult result = 1;
+ required int64 matched_count = 2;
+ required RequestStatus status = 3;
+ optional string cursor = 4;
+ repeated FacetResult facet_result = 5;
+
+ extensions 1000 to 9999;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
new file mode 100644
index 000000000000..1da0ae3ae24d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.pb.go
@@ -0,0 +1,1890 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+// DO NOT EDIT!
+
+/*
+Package taskqueue is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
+
+It has these top-level messages:
+ TaskQueueServiceError
+ TaskPayload
+ TaskQueueRetryParameters
+ TaskQueueAcl
+ TaskQueueHttpHeader
+ TaskQueueMode
+ TaskQueueAddRequest
+ TaskQueueAddResponse
+ TaskQueueBulkAddRequest
+ TaskQueueBulkAddResponse
+ TaskQueueDeleteRequest
+ TaskQueueDeleteResponse
+ TaskQueueForceRunRequest
+ TaskQueueForceRunResponse
+ TaskQueueUpdateQueueRequest
+ TaskQueueUpdateQueueResponse
+ TaskQueueFetchQueuesRequest
+ TaskQueueFetchQueuesResponse
+ TaskQueueFetchQueueStatsRequest
+ TaskQueueScannerQueueInfo
+ TaskQueueFetchQueueStatsResponse
+ TaskQueuePauseQueueRequest
+ TaskQueuePauseQueueResponse
+ TaskQueuePurgeQueueRequest
+ TaskQueuePurgeQueueResponse
+ TaskQueueDeleteQueueRequest
+ TaskQueueDeleteQueueResponse
+ TaskQueueDeleteGroupRequest
+ TaskQueueDeleteGroupResponse
+ TaskQueueQueryTasksRequest
+ TaskQueueQueryTasksResponse
+ TaskQueueFetchTaskRequest
+ TaskQueueFetchTaskResponse
+ TaskQueueUpdateStorageLimitRequest
+ TaskQueueUpdateStorageLimitResponse
+ TaskQueueQueryAndOwnTasksRequest
+ TaskQueueQueryAndOwnTasksResponse
+ TaskQueueModifyTaskLeaseRequest
+ TaskQueueModifyTaskLeaseResponse
+*/
+package taskqueue
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+import appengine "google.golang.org/appengine/internal/datastore"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type TaskQueueServiceError_ErrorCode int32
+
+const (
+ TaskQueueServiceError_OK TaskQueueServiceError_ErrorCode = 0
+ TaskQueueServiceError_UNKNOWN_QUEUE TaskQueueServiceError_ErrorCode = 1
+ TaskQueueServiceError_TRANSIENT_ERROR TaskQueueServiceError_ErrorCode = 2
+ TaskQueueServiceError_INTERNAL_ERROR TaskQueueServiceError_ErrorCode = 3
+ TaskQueueServiceError_TASK_TOO_LARGE TaskQueueServiceError_ErrorCode = 4
+ TaskQueueServiceError_INVALID_TASK_NAME TaskQueueServiceError_ErrorCode = 5
+ TaskQueueServiceError_INVALID_QUEUE_NAME TaskQueueServiceError_ErrorCode = 6
+ TaskQueueServiceError_INVALID_URL TaskQueueServiceError_ErrorCode = 7
+ TaskQueueServiceError_INVALID_QUEUE_RATE TaskQueueServiceError_ErrorCode = 8
+ TaskQueueServiceError_PERMISSION_DENIED TaskQueueServiceError_ErrorCode = 9
+ TaskQueueServiceError_TASK_ALREADY_EXISTS TaskQueueServiceError_ErrorCode = 10
+ TaskQueueServiceError_TOMBSTONED_TASK TaskQueueServiceError_ErrorCode = 11
+ TaskQueueServiceError_INVALID_ETA TaskQueueServiceError_ErrorCode = 12
+ TaskQueueServiceError_INVALID_REQUEST TaskQueueServiceError_ErrorCode = 13
+ TaskQueueServiceError_UNKNOWN_TASK TaskQueueServiceError_ErrorCode = 14
+ TaskQueueServiceError_TOMBSTONED_QUEUE TaskQueueServiceError_ErrorCode = 15
+ TaskQueueServiceError_DUPLICATE_TASK_NAME TaskQueueServiceError_ErrorCode = 16
+ TaskQueueServiceError_SKIPPED TaskQueueServiceError_ErrorCode = 17
+ TaskQueueServiceError_TOO_MANY_TASKS TaskQueueServiceError_ErrorCode = 18
+ TaskQueueServiceError_INVALID_PAYLOAD TaskQueueServiceError_ErrorCode = 19
+ TaskQueueServiceError_INVALID_RETRY_PARAMETERS TaskQueueServiceError_ErrorCode = 20
+ TaskQueueServiceError_INVALID_QUEUE_MODE TaskQueueServiceError_ErrorCode = 21
+ TaskQueueServiceError_ACL_LOOKUP_ERROR TaskQueueServiceError_ErrorCode = 22
+ TaskQueueServiceError_TRANSACTIONAL_REQUEST_TOO_LARGE TaskQueueServiceError_ErrorCode = 23
+ TaskQueueServiceError_INCORRECT_CREATOR_NAME TaskQueueServiceError_ErrorCode = 24
+ TaskQueueServiceError_TASK_LEASE_EXPIRED TaskQueueServiceError_ErrorCode = 25
+ TaskQueueServiceError_QUEUE_PAUSED TaskQueueServiceError_ErrorCode = 26
+ TaskQueueServiceError_INVALID_TAG TaskQueueServiceError_ErrorCode = 27
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ TaskQueueServiceError_DATASTORE_ERROR TaskQueueServiceError_ErrorCode = 10000
+)
+
+var TaskQueueServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "UNKNOWN_QUEUE",
+ 2: "TRANSIENT_ERROR",
+ 3: "INTERNAL_ERROR",
+ 4: "TASK_TOO_LARGE",
+ 5: "INVALID_TASK_NAME",
+ 6: "INVALID_QUEUE_NAME",
+ 7: "INVALID_URL",
+ 8: "INVALID_QUEUE_RATE",
+ 9: "PERMISSION_DENIED",
+ 10: "TASK_ALREADY_EXISTS",
+ 11: "TOMBSTONED_TASK",
+ 12: "INVALID_ETA",
+ 13: "INVALID_REQUEST",
+ 14: "UNKNOWN_TASK",
+ 15: "TOMBSTONED_QUEUE",
+ 16: "DUPLICATE_TASK_NAME",
+ 17: "SKIPPED",
+ 18: "TOO_MANY_TASKS",
+ 19: "INVALID_PAYLOAD",
+ 20: "INVALID_RETRY_PARAMETERS",
+ 21: "INVALID_QUEUE_MODE",
+ 22: "ACL_LOOKUP_ERROR",
+ 23: "TRANSACTIONAL_REQUEST_TOO_LARGE",
+ 24: "INCORRECT_CREATOR_NAME",
+ 25: "TASK_LEASE_EXPIRED",
+ 26: "QUEUE_PAUSED",
+ 27: "INVALID_TAG",
+ 10000: "DATASTORE_ERROR",
+}
+var TaskQueueServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "UNKNOWN_QUEUE": 1,
+ "TRANSIENT_ERROR": 2,
+ "INTERNAL_ERROR": 3,
+ "TASK_TOO_LARGE": 4,
+ "INVALID_TASK_NAME": 5,
+ "INVALID_QUEUE_NAME": 6,
+ "INVALID_URL": 7,
+ "INVALID_QUEUE_RATE": 8,
+ "PERMISSION_DENIED": 9,
+ "TASK_ALREADY_EXISTS": 10,
+ "TOMBSTONED_TASK": 11,
+ "INVALID_ETA": 12,
+ "INVALID_REQUEST": 13,
+ "UNKNOWN_TASK": 14,
+ "TOMBSTONED_QUEUE": 15,
+ "DUPLICATE_TASK_NAME": 16,
+ "SKIPPED": 17,
+ "TOO_MANY_TASKS": 18,
+ "INVALID_PAYLOAD": 19,
+ "INVALID_RETRY_PARAMETERS": 20,
+ "INVALID_QUEUE_MODE": 21,
+ "ACL_LOOKUP_ERROR": 22,
+ "TRANSACTIONAL_REQUEST_TOO_LARGE": 23,
+ "INCORRECT_CREATOR_NAME": 24,
+ "TASK_LEASE_EXPIRED": 25,
+ "QUEUE_PAUSED": 26,
+ "INVALID_TAG": 27,
+ "DATASTORE_ERROR": 10000,
+}
+
+func (x TaskQueueServiceError_ErrorCode) Enum() *TaskQueueServiceError_ErrorCode {
+ p := new(TaskQueueServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x TaskQueueServiceError_ErrorCode) String() string {
+ return proto.EnumName(TaskQueueServiceError_ErrorCode_name, int32(x))
+}
+func (x *TaskQueueServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueServiceError_ErrorCode_value, data, "TaskQueueServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueServiceError_ErrorCode(value)
+ return nil
+}
+
+type TaskQueueMode_Mode int32
+
+const (
+ TaskQueueMode_PUSH TaskQueueMode_Mode = 0
+ TaskQueueMode_PULL TaskQueueMode_Mode = 1
+)
+
+var TaskQueueMode_Mode_name = map[int32]string{
+ 0: "PUSH",
+ 1: "PULL",
+}
+var TaskQueueMode_Mode_value = map[string]int32{
+ "PUSH": 0,
+ "PULL": 1,
+}
+
+func (x TaskQueueMode_Mode) Enum() *TaskQueueMode_Mode {
+ p := new(TaskQueueMode_Mode)
+ *p = x
+ return p
+}
+func (x TaskQueueMode_Mode) String() string {
+ return proto.EnumName(TaskQueueMode_Mode_name, int32(x))
+}
+func (x *TaskQueueMode_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueMode_Mode_value, data, "TaskQueueMode_Mode")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueMode_Mode(value)
+ return nil
+}
+
+type TaskQueueAddRequest_RequestMethod int32
+
+const (
+ TaskQueueAddRequest_GET TaskQueueAddRequest_RequestMethod = 1
+ TaskQueueAddRequest_POST TaskQueueAddRequest_RequestMethod = 2
+ TaskQueueAddRequest_HEAD TaskQueueAddRequest_RequestMethod = 3
+ TaskQueueAddRequest_PUT TaskQueueAddRequest_RequestMethod = 4
+ TaskQueueAddRequest_DELETE TaskQueueAddRequest_RequestMethod = 5
+)
+
+var TaskQueueAddRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueAddRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueAddRequest_RequestMethod) Enum() *TaskQueueAddRequest_RequestMethod {
+ p := new(TaskQueueAddRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueAddRequest_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueAddRequest_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueAddRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueAddRequest_RequestMethod_value, data, "TaskQueueAddRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueAddRequest_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RequestMethod int32
+
+const (
+ TaskQueueQueryTasksResponse_Task_GET TaskQueueQueryTasksResponse_Task_RequestMethod = 1
+ TaskQueueQueryTasksResponse_Task_POST TaskQueueQueryTasksResponse_Task_RequestMethod = 2
+ TaskQueueQueryTasksResponse_Task_HEAD TaskQueueQueryTasksResponse_Task_RequestMethod = 3
+ TaskQueueQueryTasksResponse_Task_PUT TaskQueueQueryTasksResponse_Task_RequestMethod = 4
+ TaskQueueQueryTasksResponse_Task_DELETE TaskQueueQueryTasksResponse_Task_RequestMethod = 5
+)
+
+var TaskQueueQueryTasksResponse_Task_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+}
+var TaskQueueQueryTasksResponse_Task_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+}
+
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) Enum() *TaskQueueQueryTasksResponse_Task_RequestMethod {
+ p := new(TaskQueueQueryTasksResponse_Task_RequestMethod)
+ *p = x
+ return p
+}
+func (x TaskQueueQueryTasksResponse_Task_RequestMethod) String() string {
+ return proto.EnumName(TaskQueueQueryTasksResponse_Task_RequestMethod_name, int32(x))
+}
+func (x *TaskQueueQueryTasksResponse_Task_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(TaskQueueQueryTasksResponse_Task_RequestMethod_value, data, "TaskQueueQueryTasksResponse_Task_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = TaskQueueQueryTasksResponse_Task_RequestMethod(value)
+ return nil
+}
+
+type TaskQueueServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueServiceError) Reset() { *m = TaskQueueServiceError{} }
+func (m *TaskQueueServiceError) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueServiceError) ProtoMessage() {}
+
+type TaskPayload struct {
+ XXX_extensions map[int32]proto.Extension `json:"-"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskPayload) Reset() { *m = TaskPayload{} }
+func (m *TaskPayload) String() string { return proto.CompactTextString(m) }
+func (*TaskPayload) ProtoMessage() {}
+
+func (m *TaskPayload) Marshal() ([]byte, error) {
+ return proto.MarshalMessageSet(m.ExtensionMap())
+}
+func (m *TaskPayload) Unmarshal(buf []byte) error {
+ return proto.UnmarshalMessageSet(buf, m.ExtensionMap())
+}
+func (m *TaskPayload) MarshalJSON() ([]byte, error) {
+ return proto.MarshalMessageSetJSON(m.XXX_extensions)
+}
+func (m *TaskPayload) UnmarshalJSON(buf []byte) error {
+ return proto.UnmarshalMessageSetJSON(buf, m.XXX_extensions)
+}
+
+// ensure TaskPayload satisfies proto.Marshaler and proto.Unmarshaler
+var _ proto.Marshaler = (*TaskPayload)(nil)
+var _ proto.Unmarshaler = (*TaskPayload)(nil)
+
+var extRange_TaskPayload = []proto.ExtensionRange{
+ {10, 2147483646},
+}
+
+func (*TaskPayload) ExtensionRangeArray() []proto.ExtensionRange {
+ return extRange_TaskPayload
+}
+func (m *TaskPayload) ExtensionMap() map[int32]proto.Extension {
+ if m.XXX_extensions == nil {
+ m.XXX_extensions = make(map[int32]proto.Extension)
+ }
+ return m.XXX_extensions
+}
+
+type TaskQueueRetryParameters struct {
+ RetryLimit *int32 `protobuf:"varint,1,opt,name=retry_limit" json:"retry_limit,omitempty"`
+ AgeLimitSec *int64 `protobuf:"varint,2,opt,name=age_limit_sec" json:"age_limit_sec,omitempty"`
+ MinBackoffSec *float64 `protobuf:"fixed64,3,opt,name=min_backoff_sec,def=0.1" json:"min_backoff_sec,omitempty"`
+ MaxBackoffSec *float64 `protobuf:"fixed64,4,opt,name=max_backoff_sec,def=3600" json:"max_backoff_sec,omitempty"`
+ MaxDoublings *int32 `protobuf:"varint,5,opt,name=max_doublings,def=16" json:"max_doublings,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueRetryParameters) Reset() { *m = TaskQueueRetryParameters{} }
+func (m *TaskQueueRetryParameters) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueRetryParameters) ProtoMessage() {}
+
+const Default_TaskQueueRetryParameters_MinBackoffSec float64 = 0.1
+const Default_TaskQueueRetryParameters_MaxBackoffSec float64 = 3600
+const Default_TaskQueueRetryParameters_MaxDoublings int32 = 16
+
+func (m *TaskQueueRetryParameters) GetRetryLimit() int32 {
+ if m != nil && m.RetryLimit != nil {
+ return *m.RetryLimit
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetAgeLimitSec() int64 {
+ if m != nil && m.AgeLimitSec != nil {
+ return *m.AgeLimitSec
+ }
+ return 0
+}
+
+func (m *TaskQueueRetryParameters) GetMinBackoffSec() float64 {
+ if m != nil && m.MinBackoffSec != nil {
+ return *m.MinBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MinBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxBackoffSec() float64 {
+ if m != nil && m.MaxBackoffSec != nil {
+ return *m.MaxBackoffSec
+ }
+ return Default_TaskQueueRetryParameters_MaxBackoffSec
+}
+
+func (m *TaskQueueRetryParameters) GetMaxDoublings() int32 {
+ if m != nil && m.MaxDoublings != nil {
+ return *m.MaxDoublings
+ }
+ return Default_TaskQueueRetryParameters_MaxDoublings
+}
+
+type TaskQueueAcl struct {
+ UserEmail [][]byte `protobuf:"bytes,1,rep,name=user_email" json:"user_email,omitempty"`
+ WriterEmail [][]byte `protobuf:"bytes,2,rep,name=writer_email" json:"writer_email,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAcl) Reset() { *m = TaskQueueAcl{} }
+func (m *TaskQueueAcl) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAcl) ProtoMessage() {}
+
+func (m *TaskQueueAcl) GetUserEmail() [][]byte {
+ if m != nil {
+ return m.UserEmail
+ }
+ return nil
+}
+
+func (m *TaskQueueAcl) GetWriterEmail() [][]byte {
+ if m != nil {
+ return m.WriterEmail
+ }
+ return nil
+}
+
+type TaskQueueHttpHeader struct {
+ Key []byte `protobuf:"bytes,1,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,2,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueHttpHeader) Reset() { *m = TaskQueueHttpHeader{} }
+func (m *TaskQueueHttpHeader) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueHttpHeader) ProtoMessage() {}
+
+func (m *TaskQueueHttpHeader) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueHttpHeader) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueMode struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueMode) Reset() { *m = TaskQueueMode{} }
+func (m *TaskQueueMode) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueMode) ProtoMessage() {}
+
+type TaskQueueAddRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Method *TaskQueueAddRequest_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueAddRequest_RequestMethod,def=2" json:"method,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Header []*TaskQueueAddRequest_Header `protobuf:"group,6,rep" json:"header,omitempty"`
+ Body []byte `protobuf:"bytes,9,opt,name=body" json:"body,omitempty"`
+ Transaction *appengine.Transaction `protobuf:"bytes,10,opt,name=transaction" json:"transaction,omitempty"`
+ AppId []byte `protobuf:"bytes,11,opt,name=app_id" json:"app_id,omitempty"`
+ Crontimetable *TaskQueueAddRequest_CronTimetable `protobuf:"group,12,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Description []byte `protobuf:"bytes,15,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,16,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,17,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,18,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Tag []byte `protobuf:"bytes,19,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest) Reset() { *m = TaskQueueAddRequest{} }
+func (m *TaskQueueAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest) ProtoMessage() {}
+
+const Default_TaskQueueAddRequest_Method TaskQueueAddRequest_RequestMethod = TaskQueueAddRequest_POST
+const Default_TaskQueueAddRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueAddRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueAddRequest) GetMethod() TaskQueueAddRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return Default_TaskQueueAddRequest_Method
+}
+
+func (m *TaskQueueAddRequest) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetHeader() []*TaskQueueAddRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetTransaction() *appengine.Transaction {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetCrontimetable() *TaskQueueAddRequest_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueAddRequest_Mode
+}
+
+func (m *TaskQueueAddRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_Header struct {
+ Key []byte `protobuf:"bytes,7,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,8,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_Header) Reset() { *m = TaskQueueAddRequest_Header{} }
+func (m *TaskQueueAddRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_Header) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueAddRequest_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,13,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,14,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) Reset() { *m = TaskQueueAddRequest_CronTimetable{} }
+func (m *TaskQueueAddRequest_CronTimetable) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddRequest_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueAddRequest_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueAddResponse struct {
+ ChosenTaskName []byte `protobuf:"bytes,1,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueAddResponse) Reset() { *m = TaskQueueAddResponse{} }
+func (m *TaskQueueAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueAddResponse) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueBulkAddRequest struct {
+ AddRequest []*TaskQueueAddRequest `protobuf:"bytes,1,rep,name=add_request" json:"add_request,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddRequest) Reset() { *m = TaskQueueBulkAddRequest{} }
+func (m *TaskQueueBulkAddRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddRequest) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddRequest) GetAddRequest() []*TaskQueueAddRequest {
+ if m != nil {
+ return m.AddRequest
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse struct {
+ Taskresult []*TaskQueueBulkAddResponse_TaskResult `protobuf:"group,1,rep,name=TaskResult" json:"taskresult,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse) Reset() { *m = TaskQueueBulkAddResponse{} }
+func (m *TaskQueueBulkAddResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse) GetTaskresult() []*TaskQueueBulkAddResponse_TaskResult {
+ if m != nil {
+ return m.Taskresult
+ }
+ return nil
+}
+
+type TaskQueueBulkAddResponse_TaskResult struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,2,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ ChosenTaskName []byte `protobuf:"bytes,3,opt,name=chosen_task_name" json:"chosen_task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) Reset() { *m = TaskQueueBulkAddResponse_TaskResult{} }
+func (m *TaskQueueBulkAddResponse_TaskResult) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueBulkAddResponse_TaskResult) ProtoMessage() {}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+func (m *TaskQueueBulkAddResponse_TaskResult) GetChosenTaskName() []byte {
+ if m != nil {
+ return m.ChosenTaskName
+ }
+ return nil
+}
+
+type TaskQueueDeleteRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName [][]byte `protobuf:"bytes,2,rep,name=task_name" json:"task_name,omitempty"`
+ AppId []byte `protobuf:"bytes,3,opt,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteRequest) Reset() { *m = TaskQueueDeleteRequest{} }
+func (m *TaskQueueDeleteRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetTaskName() [][]byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteResponse struct {
+ Result []TaskQueueServiceError_ErrorCode `protobuf:"varint,3,rep,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteResponse) Reset() { *m = TaskQueueDeleteResponse{} }
+func (m *TaskQueueDeleteResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteResponse) ProtoMessage() {}
+
+func (m *TaskQueueDeleteResponse) GetResult() []TaskQueueServiceError_ErrorCode {
+ if m != nil {
+ return m.Result
+ }
+ return nil
+}
+
+type TaskQueueForceRunRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunRequest) Reset() { *m = TaskQueueForceRunRequest{} }
+func (m *TaskQueueForceRunRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunRequest) ProtoMessage() {}
+
+func (m *TaskQueueForceRunRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueForceRunRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueForceRunResponse struct {
+ Result *TaskQueueServiceError_ErrorCode `protobuf:"varint,3,req,name=result,enum=appengine.TaskQueueServiceError_ErrorCode" json:"result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueForceRunResponse) Reset() { *m = TaskQueueForceRunResponse{} }
+func (m *TaskQueueForceRunResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueForceRunResponse) ProtoMessage() {}
+
+func (m *TaskQueueForceRunResponse) GetResult() TaskQueueServiceError_ErrorCode {
+ if m != nil && m.Result != nil {
+ return *m.Result
+ }
+ return TaskQueueServiceError_OK
+}
+
+type TaskQueueUpdateQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *int32 `protobuf:"varint,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,6,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,7,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,8,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,9,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,10,rep,name=header_override" json:"header_override,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueRequest) Reset() { *m = TaskQueueUpdateQueueRequest{} }
+func (m *TaskQueueUpdateQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueRequest) ProtoMessage() {}
+
+const Default_TaskQueueUpdateQueueRequest_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+
+func (m *TaskQueueUpdateQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetBucketCapacity() int32 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueUpdateQueueRequest_Mode
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateQueueRequest) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+type TaskQueueUpdateQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateQueueResponse) Reset() { *m = TaskQueueUpdateQueueResponse{} }
+func (m *TaskQueueUpdateQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateQueueResponse) ProtoMessage() {}
+
+type TaskQueueFetchQueuesRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ MaxRows *int32 `protobuf:"varint,2,req,name=max_rows" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesRequest) Reset() { *m = TaskQueueFetchQueuesRequest{} }
+func (m *TaskQueueFetchQueuesRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return 0
+}
+
+type TaskQueueFetchQueuesResponse struct {
+ Queue []*TaskQueueFetchQueuesResponse_Queue `protobuf:"group,1,rep" json:"queue,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse) Reset() { *m = TaskQueueFetchQueuesResponse{} }
+func (m *TaskQueueFetchQueuesResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueuesResponse) GetQueue() []*TaskQueueFetchQueuesResponse_Queue {
+ if m != nil {
+ return m.Queue
+ }
+ return nil
+}
+
+type TaskQueueFetchQueuesResponse_Queue struct {
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ BucketRefillPerSecond *float64 `protobuf:"fixed64,3,req,name=bucket_refill_per_second" json:"bucket_refill_per_second,omitempty"`
+ BucketCapacity *float64 `protobuf:"fixed64,4,req,name=bucket_capacity" json:"bucket_capacity,omitempty"`
+ UserSpecifiedRate *string `protobuf:"bytes,5,opt,name=user_specified_rate" json:"user_specified_rate,omitempty"`
+ Paused *bool `protobuf:"varint,6,req,name=paused,def=0" json:"paused,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,7,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ MaxConcurrentRequests *int32 `protobuf:"varint,8,opt,name=max_concurrent_requests" json:"max_concurrent_requests,omitempty"`
+ Mode *TaskQueueMode_Mode `protobuf:"varint,9,opt,name=mode,enum=appengine.TaskQueueMode_Mode,def=0" json:"mode,omitempty"`
+ Acl *TaskQueueAcl `protobuf:"bytes,10,opt,name=acl" json:"acl,omitempty"`
+ HeaderOverride []*TaskQueueHttpHeader `protobuf:"bytes,11,rep,name=header_override" json:"header_override,omitempty"`
+ CreatorName *string `protobuf:"bytes,12,opt,name=creator_name,def=apphosting" json:"creator_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) Reset() { *m = TaskQueueFetchQueuesResponse_Queue{} }
+func (m *TaskQueueFetchQueuesResponse_Queue) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueuesResponse_Queue) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueuesResponse_Queue_Paused bool = false
+const Default_TaskQueueFetchQueuesResponse_Queue_Mode TaskQueueMode_Mode = TaskQueueMode_PUSH
+const Default_TaskQueueFetchQueuesResponse_Queue_CreatorName string = "apphosting"
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketRefillPerSecond() float64 {
+ if m != nil && m.BucketRefillPerSecond != nil {
+ return *m.BucketRefillPerSecond
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetBucketCapacity() float64 {
+ if m != nil && m.BucketCapacity != nil {
+ return *m.BucketCapacity
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetUserSpecifiedRate() string {
+ if m != nil && m.UserSpecifiedRate != nil {
+ return *m.UserSpecifiedRate
+ }
+ return ""
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetPaused() bool {
+ if m != nil && m.Paused != nil {
+ return *m.Paused
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Paused
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMaxConcurrentRequests() int32 {
+ if m != nil && m.MaxConcurrentRequests != nil {
+ return *m.MaxConcurrentRequests
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetMode() TaskQueueMode_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_Mode
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetAcl() *TaskQueueAcl {
+ if m != nil {
+ return m.Acl
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetHeaderOverride() []*TaskQueueHttpHeader {
+ if m != nil {
+ return m.HeaderOverride
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueuesResponse_Queue) GetCreatorName() string {
+ if m != nil && m.CreatorName != nil {
+ return *m.CreatorName
+ }
+ return Default_TaskQueueFetchQueuesResponse_Queue_CreatorName
+}
+
+type TaskQueueFetchQueueStatsRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName [][]byte `protobuf:"bytes,2,rep,name=queue_name" json:"queue_name,omitempty"`
+ MaxNumTasks *int32 `protobuf:"varint,3,opt,name=max_num_tasks,def=0" json:"max_num_tasks,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) Reset() { *m = TaskQueueFetchQueueStatsRequest{} }
+func (m *TaskQueueFetchQueueStatsRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsRequest) ProtoMessage() {}
+
+const Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks int32 = 0
+
+func (m *TaskQueueFetchQueueStatsRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetQueueName() [][]byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchQueueStatsRequest) GetMaxNumTasks() int32 {
+ if m != nil && m.MaxNumTasks != nil {
+ return *m.MaxNumTasks
+ }
+ return Default_TaskQueueFetchQueueStatsRequest_MaxNumTasks
+}
+
+type TaskQueueScannerQueueInfo struct {
+ ExecutedLastMinute *int64 `protobuf:"varint,1,req,name=executed_last_minute" json:"executed_last_minute,omitempty"`
+ ExecutedLastHour *int64 `protobuf:"varint,2,req,name=executed_last_hour" json:"executed_last_hour,omitempty"`
+ SamplingDurationSeconds *float64 `protobuf:"fixed64,3,req,name=sampling_duration_seconds" json:"sampling_duration_seconds,omitempty"`
+ RequestsInFlight *int32 `protobuf:"varint,4,opt,name=requests_in_flight" json:"requests_in_flight,omitempty"`
+ EnforcedRate *float64 `protobuf:"fixed64,5,opt,name=enforced_rate" json:"enforced_rate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueScannerQueueInfo) Reset() { *m = TaskQueueScannerQueueInfo{} }
+func (m *TaskQueueScannerQueueInfo) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueScannerQueueInfo) ProtoMessage() {}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastMinute() int64 {
+ if m != nil && m.ExecutedLastMinute != nil {
+ return *m.ExecutedLastMinute
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetExecutedLastHour() int64 {
+ if m != nil && m.ExecutedLastHour != nil {
+ return *m.ExecutedLastHour
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetSamplingDurationSeconds() float64 {
+ if m != nil && m.SamplingDurationSeconds != nil {
+ return *m.SamplingDurationSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetRequestsInFlight() int32 {
+ if m != nil && m.RequestsInFlight != nil {
+ return *m.RequestsInFlight
+ }
+ return 0
+}
+
+func (m *TaskQueueScannerQueueInfo) GetEnforcedRate() float64 {
+ if m != nil && m.EnforcedRate != nil {
+ return *m.EnforcedRate
+ }
+ return 0
+}
+
+type TaskQueueFetchQueueStatsResponse struct {
+ Queuestats []*TaskQueueFetchQueueStatsResponse_QueueStats `protobuf:"group,1,rep,name=QueueStats" json:"queuestats,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse) Reset() { *m = TaskQueueFetchQueueStatsResponse{} }
+func (m *TaskQueueFetchQueueStatsResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchQueueStatsResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse) GetQueuestats() []*TaskQueueFetchQueueStatsResponse_QueueStats {
+ if m != nil {
+ return m.Queuestats
+ }
+ return nil
+}
+
+type TaskQueueFetchQueueStatsResponse_QueueStats struct {
+ NumTasks *int32 `protobuf:"varint,2,req,name=num_tasks" json:"num_tasks,omitempty"`
+ OldestEtaUsec *int64 `protobuf:"varint,3,req,name=oldest_eta_usec" json:"oldest_eta_usec,omitempty"`
+ ScannerInfo *TaskQueueScannerQueueInfo `protobuf:"bytes,4,opt,name=scanner_info" json:"scanner_info,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) Reset() {
+ *m = TaskQueueFetchQueueStatsResponse_QueueStats{}
+}
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueFetchQueueStatsResponse_QueueStats) ProtoMessage() {}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetNumTasks() int32 {
+ if m != nil && m.NumTasks != nil {
+ return *m.NumTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetOldestEtaUsec() int64 {
+ if m != nil && m.OldestEtaUsec != nil {
+ return *m.OldestEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueFetchQueueStatsResponse_QueueStats) GetScannerInfo() *TaskQueueScannerQueueInfo {
+ if m != nil {
+ return m.ScannerInfo
+ }
+ return nil
+}
+
+type TaskQueuePauseQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ Pause *bool `protobuf:"varint,3,req,name=pause" json:"pause,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueRequest) Reset() { *m = TaskQueuePauseQueueRequest{} }
+func (m *TaskQueuePauseQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePauseQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueuePauseQueueRequest) GetPause() bool {
+ if m != nil && m.Pause != nil {
+ return *m.Pause
+ }
+ return false
+}
+
+type TaskQueuePauseQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePauseQueueResponse) Reset() { *m = TaskQueuePauseQueueResponse{} }
+func (m *TaskQueuePauseQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePauseQueueResponse) ProtoMessage() {}
+
+type TaskQueuePurgeQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueRequest) Reset() { *m = TaskQueuePurgeQueueRequest{} }
+func (m *TaskQueuePurgeQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueuePurgeQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueuePurgeQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueuePurgeQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueuePurgeQueueResponse) Reset() { *m = TaskQueuePurgeQueueResponse{} }
+func (m *TaskQueuePurgeQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueuePurgeQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteQueueRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueRequest) Reset() { *m = TaskQueueDeleteQueueRequest{} }
+func (m *TaskQueueDeleteQueueRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteQueueRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueDeleteQueueRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+type TaskQueueDeleteQueueResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteQueueResponse) Reset() { *m = TaskQueueDeleteQueueResponse{} }
+func (m *TaskQueueDeleteQueueResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteQueueResponse) ProtoMessage() {}
+
+type TaskQueueDeleteGroupRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupRequest) Reset() { *m = TaskQueueDeleteGroupRequest{} }
+func (m *TaskQueueDeleteGroupRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupRequest) ProtoMessage() {}
+
+func (m *TaskQueueDeleteGroupRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+type TaskQueueDeleteGroupResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueDeleteGroupResponse) Reset() { *m = TaskQueueDeleteGroupResponse{} }
+func (m *TaskQueueDeleteGroupResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueDeleteGroupResponse) ProtoMessage() {}
+
+type TaskQueueQueryTasksRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ StartTaskName []byte `protobuf:"bytes,3,opt,name=start_task_name" json:"start_task_name,omitempty"`
+ StartEtaUsec *int64 `protobuf:"varint,4,opt,name=start_eta_usec" json:"start_eta_usec,omitempty"`
+ StartTag []byte `protobuf:"bytes,6,opt,name=start_tag" json:"start_tag,omitempty"`
+ MaxRows *int32 `protobuf:"varint,5,opt,name=max_rows,def=1" json:"max_rows,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksRequest) Reset() { *m = TaskQueueQueryTasksRequest{} }
+func (m *TaskQueueQueryTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksRequest_MaxRows int32 = 1
+
+func (m *TaskQueueQueryTasksRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTaskName() []byte {
+ if m != nil {
+ return m.StartTaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartEtaUsec() int64 {
+ if m != nil && m.StartEtaUsec != nil {
+ return *m.StartEtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksRequest) GetStartTag() []byte {
+ if m != nil {
+ return m.StartTag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksRequest) GetMaxRows() int32 {
+ if m != nil && m.MaxRows != nil {
+ return *m.MaxRows
+ }
+ return Default_TaskQueueQueryTasksRequest_MaxRows
+}
+
+type TaskQueueQueryTasksResponse struct {
+ Task []*TaskQueueQueryTasksResponse_Task `protobuf:"group,1,rep" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse) Reset() { *m = TaskQueueQueryTasksResponse{} }
+func (m *TaskQueueQueryTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse) GetTask() []*TaskQueueQueryTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ Url []byte `protobuf:"bytes,4,opt,name=url" json:"url,omitempty"`
+ Method *TaskQueueQueryTasksResponse_Task_RequestMethod `protobuf:"varint,5,opt,name=method,enum=appengine.TaskQueueQueryTasksResponse_Task_RequestMethod" json:"method,omitempty"`
+ RetryCount *int32 `protobuf:"varint,6,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Header []*TaskQueueQueryTasksResponse_Task_Header `protobuf:"group,7,rep" json:"header,omitempty"`
+ BodySize *int32 `protobuf:"varint,10,opt,name=body_size" json:"body_size,omitempty"`
+ Body []byte `protobuf:"bytes,11,opt,name=body" json:"body,omitempty"`
+ CreationTimeUsec *int64 `protobuf:"varint,12,req,name=creation_time_usec" json:"creation_time_usec,omitempty"`
+ Crontimetable *TaskQueueQueryTasksResponse_Task_CronTimetable `protobuf:"group,13,opt,name=CronTimetable" json:"crontimetable,omitempty"`
+ Runlog *TaskQueueQueryTasksResponse_Task_RunLog `protobuf:"group,16,opt,name=RunLog" json:"runlog,omitempty"`
+ Description []byte `protobuf:"bytes,21,opt,name=description" json:"description,omitempty"`
+ Payload *TaskPayload `protobuf:"bytes,22,opt,name=payload" json:"payload,omitempty"`
+ RetryParameters *TaskQueueRetryParameters `protobuf:"bytes,23,opt,name=retry_parameters" json:"retry_parameters,omitempty"`
+ FirstTryUsec *int64 `protobuf:"varint,24,opt,name=first_try_usec" json:"first_try_usec,omitempty"`
+ Tag []byte `protobuf:"bytes,25,opt,name=tag" json:"tag,omitempty"`
+ ExecutionCount *int32 `protobuf:"varint,26,opt,name=execution_count,def=0" json:"execution_count,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) Reset() { *m = TaskQueueQueryTasksResponse_Task{} }
+func (m *TaskQueueQueryTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryTasksResponse_Task_RetryCount int32 = 0
+const Default_TaskQueueQueryTasksResponse_Task_ExecutionCount int32 = 0
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetUrl() []byte {
+ if m != nil {
+ return m.Url
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetMethod() TaskQueueQueryTasksResponse_Task_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return TaskQueueQueryTasksResponse_Task_GET
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetHeader() []*TaskQueueQueryTasksResponse_Task_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBodySize() int32 {
+ if m != nil && m.BodySize != nil {
+ return *m.BodySize
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCreationTimeUsec() int64 {
+ if m != nil && m.CreationTimeUsec != nil {
+ return *m.CreationTimeUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetCrontimetable() *TaskQueueQueryTasksResponse_Task_CronTimetable {
+ if m != nil {
+ return m.Crontimetable
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRunlog() *TaskQueueQueryTasksResponse_Task_RunLog {
+ if m != nil {
+ return m.Runlog
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetDescription() []byte {
+ if m != nil {
+ return m.Description
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetPayload() *TaskPayload {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetRetryParameters() *TaskQueueRetryParameters {
+ if m != nil {
+ return m.RetryParameters
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetFirstTryUsec() int64 {
+ if m != nil && m.FirstTryUsec != nil {
+ return *m.FirstTryUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task) GetExecutionCount() int32 {
+ if m != nil && m.ExecutionCount != nil {
+ return *m.ExecutionCount
+ }
+ return Default_TaskQueueQueryTasksResponse_Task_ExecutionCount
+}
+
+type TaskQueueQueryTasksResponse_Task_Header struct {
+ Key []byte `protobuf:"bytes,8,req,name=key" json:"key,omitempty"`
+ Value []byte `protobuf:"bytes,9,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_Header{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_Header) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_Header) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetKey() []byte {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_Header) GetValue() []byte {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_CronTimetable struct {
+ Schedule []byte `protobuf:"bytes,14,req,name=schedule" json:"schedule,omitempty"`
+ Timezone []byte `protobuf:"bytes,15,req,name=timezone" json:"timezone,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_CronTimetable{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) String() string {
+ return proto.CompactTextString(m)
+}
+func (*TaskQueueQueryTasksResponse_Task_CronTimetable) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetSchedule() []byte {
+ if m != nil {
+ return m.Schedule
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_CronTimetable) GetTimezone() []byte {
+ if m != nil {
+ return m.Timezone
+ }
+ return nil
+}
+
+type TaskQueueQueryTasksResponse_Task_RunLog struct {
+ DispatchedUsec *int64 `protobuf:"varint,17,req,name=dispatched_usec" json:"dispatched_usec,omitempty"`
+ LagUsec *int64 `protobuf:"varint,18,req,name=lag_usec" json:"lag_usec,omitempty"`
+ ElapsedUsec *int64 `protobuf:"varint,19,req,name=elapsed_usec" json:"elapsed_usec,omitempty"`
+ ResponseCode *int64 `protobuf:"varint,20,opt,name=response_code" json:"response_code,omitempty"`
+ RetryReason *string `protobuf:"bytes,27,opt,name=retry_reason" json:"retry_reason,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) Reset() {
+ *m = TaskQueueQueryTasksResponse_Task_RunLog{}
+}
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryTasksResponse_Task_RunLog) ProtoMessage() {}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetDispatchedUsec() int64 {
+ if m != nil && m.DispatchedUsec != nil {
+ return *m.DispatchedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetLagUsec() int64 {
+ if m != nil && m.LagUsec != nil {
+ return *m.LagUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetElapsedUsec() int64 {
+ if m != nil && m.ElapsedUsec != nil {
+ return *m.ElapsedUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetResponseCode() int64 {
+ if m != nil && m.ResponseCode != nil {
+ return *m.ResponseCode
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryTasksResponse_Task_RunLog) GetRetryReason() string {
+ if m != nil && m.RetryReason != nil {
+ return *m.RetryReason
+ }
+ return ""
+}
+
+type TaskQueueFetchTaskRequest struct {
+ AppId []byte `protobuf:"bytes,1,opt,name=app_id" json:"app_id,omitempty"`
+ QueueName []byte `protobuf:"bytes,2,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,3,req,name=task_name" json:"task_name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskRequest) Reset() { *m = TaskQueueFetchTaskRequest{} }
+func (m *TaskQueueFetchTaskRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskRequest) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueFetchTaskRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+type TaskQueueFetchTaskResponse struct {
+ Task *TaskQueueQueryTasksResponse `protobuf:"bytes,1,req,name=task" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueFetchTaskResponse) Reset() { *m = TaskQueueFetchTaskResponse{} }
+func (m *TaskQueueFetchTaskResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueFetchTaskResponse) ProtoMessage() {}
+
+func (m *TaskQueueFetchTaskResponse) GetTask() *TaskQueueQueryTasksResponse {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueUpdateStorageLimitRequest struct {
+ AppId []byte `protobuf:"bytes,1,req,name=app_id" json:"app_id,omitempty"`
+ Limit *int64 `protobuf:"varint,2,req,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) Reset() { *m = TaskQueueUpdateStorageLimitRequest{} }
+func (m *TaskQueueUpdateStorageLimitRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitRequest) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetAppId() []byte {
+ if m != nil {
+ return m.AppId
+ }
+ return nil
+}
+
+func (m *TaskQueueUpdateStorageLimitRequest) GetLimit() int64 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+type TaskQueueUpdateStorageLimitResponse struct {
+ NewLimit *int64 `protobuf:"varint,1,req,name=new_limit" json:"new_limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueUpdateStorageLimitResponse) Reset() { *m = TaskQueueUpdateStorageLimitResponse{} }
+func (m *TaskQueueUpdateStorageLimitResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueUpdateStorageLimitResponse) ProtoMessage() {}
+
+func (m *TaskQueueUpdateStorageLimitResponse) GetNewLimit() int64 {
+ if m != nil && m.NewLimit != nil {
+ return *m.NewLimit
+ }
+ return 0
+}
+
+type TaskQueueQueryAndOwnTasksRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,2,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ MaxTasks *int64 `protobuf:"varint,3,req,name=max_tasks" json:"max_tasks,omitempty"`
+ GroupByTag *bool `protobuf:"varint,4,opt,name=group_by_tag,def=0" json:"group_by_tag,omitempty"`
+ Tag []byte `protobuf:"bytes,5,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) Reset() { *m = TaskQueueQueryAndOwnTasksRequest{} }
+func (m *TaskQueueQueryAndOwnTasksRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksRequest) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag bool = false
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetMaxTasks() int64 {
+ if m != nil && m.MaxTasks != nil {
+ return *m.MaxTasks
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetGroupByTag() bool {
+ if m != nil && m.GroupByTag != nil {
+ return *m.GroupByTag
+ }
+ return Default_TaskQueueQueryAndOwnTasksRequest_GroupByTag
+}
+
+func (m *TaskQueueQueryAndOwnTasksRequest) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse struct {
+ Task []*TaskQueueQueryAndOwnTasksResponse_Task `protobuf:"group,1,rep" json:"task,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) Reset() { *m = TaskQueueQueryAndOwnTasksResponse{} }
+func (m *TaskQueueQueryAndOwnTasksResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse) ProtoMessage() {}
+
+func (m *TaskQueueQueryAndOwnTasksResponse) GetTask() []*TaskQueueQueryAndOwnTasksResponse_Task {
+ if m != nil {
+ return m.Task
+ }
+ return nil
+}
+
+type TaskQueueQueryAndOwnTasksResponse_Task struct {
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ RetryCount *int32 `protobuf:"varint,4,opt,name=retry_count,def=0" json:"retry_count,omitempty"`
+ Body []byte `protobuf:"bytes,5,opt,name=body" json:"body,omitempty"`
+ Tag []byte `protobuf:"bytes,6,opt,name=tag" json:"tag,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) Reset() {
+ *m = TaskQueueQueryAndOwnTasksResponse_Task{}
+}
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueQueryAndOwnTasksResponse_Task) ProtoMessage() {}
+
+const Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount int32 = 0
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetRetryCount() int32 {
+ if m != nil && m.RetryCount != nil {
+ return *m.RetryCount
+ }
+ return Default_TaskQueueQueryAndOwnTasksResponse_Task_RetryCount
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetBody() []byte {
+ if m != nil {
+ return m.Body
+ }
+ return nil
+}
+
+func (m *TaskQueueQueryAndOwnTasksResponse_Task) GetTag() []byte {
+ if m != nil {
+ return m.Tag
+ }
+ return nil
+}
+
+type TaskQueueModifyTaskLeaseRequest struct {
+ QueueName []byte `protobuf:"bytes,1,req,name=queue_name" json:"queue_name,omitempty"`
+ TaskName []byte `protobuf:"bytes,2,req,name=task_name" json:"task_name,omitempty"`
+ EtaUsec *int64 `protobuf:"varint,3,req,name=eta_usec" json:"eta_usec,omitempty"`
+ LeaseSeconds *float64 `protobuf:"fixed64,4,req,name=lease_seconds" json:"lease_seconds,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) Reset() { *m = TaskQueueModifyTaskLeaseRequest{} }
+func (m *TaskQueueModifyTaskLeaseRequest) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseRequest) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetQueueName() []byte {
+ if m != nil {
+ return m.QueueName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetTaskName() []byte {
+ if m != nil {
+ return m.TaskName
+ }
+ return nil
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetEtaUsec() int64 {
+ if m != nil && m.EtaUsec != nil {
+ return *m.EtaUsec
+ }
+ return 0
+}
+
+func (m *TaskQueueModifyTaskLeaseRequest) GetLeaseSeconds() float64 {
+ if m != nil && m.LeaseSeconds != nil {
+ return *m.LeaseSeconds
+ }
+ return 0
+}
+
+type TaskQueueModifyTaskLeaseResponse struct {
+ UpdatedEtaUsec *int64 `protobuf:"varint,1,req,name=updated_eta_usec" json:"updated_eta_usec,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *TaskQueueModifyTaskLeaseResponse) Reset() { *m = TaskQueueModifyTaskLeaseResponse{} }
+func (m *TaskQueueModifyTaskLeaseResponse) String() string { return proto.CompactTextString(m) }
+func (*TaskQueueModifyTaskLeaseResponse) ProtoMessage() {}
+
+func (m *TaskQueueModifyTaskLeaseResponse) GetUpdatedEtaUsec() int64 {
+ if m != nil && m.UpdatedEtaUsec != nil {
+ return *m.UpdatedEtaUsec
+ }
+ return 0
+}
+
+func init() {
+ proto.RegisterEnum("appengine.TaskQueueServiceError_ErrorCode", TaskQueueServiceError_ErrorCode_name, TaskQueueServiceError_ErrorCode_value)
+ proto.RegisterEnum("appengine.TaskQueueMode_Mode", TaskQueueMode_Mode_name, TaskQueueMode_Mode_value)
+ proto.RegisterEnum("appengine.TaskQueueAddRequest_RequestMethod", TaskQueueAddRequest_RequestMethod_name, TaskQueueAddRequest_RequestMethod_value)
+ proto.RegisterEnum("appengine.TaskQueueQueryTasksResponse_Task_RequestMethod", TaskQueueQueryTasksResponse_Task_RequestMethod_name, TaskQueueQueryTasksResponse_Task_RequestMethod_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
new file mode 100644
index 000000000000..419aaf5702ab
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/taskqueue/taskqueue_service.proto
@@ -0,0 +1,342 @@
+syntax = "proto2";
+option go_package = "taskqueue";
+
+import "google.golang.org/appengine/internal/datastore/datastore_v3.proto";
+
+package appengine;
+
+message TaskQueueServiceError {
+ enum ErrorCode {
+ OK = 0;
+ UNKNOWN_QUEUE = 1;
+ TRANSIENT_ERROR = 2;
+ INTERNAL_ERROR = 3;
+ TASK_TOO_LARGE = 4;
+ INVALID_TASK_NAME = 5;
+ INVALID_QUEUE_NAME = 6;
+ INVALID_URL = 7;
+ INVALID_QUEUE_RATE = 8;
+ PERMISSION_DENIED = 9;
+ TASK_ALREADY_EXISTS = 10;
+ TOMBSTONED_TASK = 11;
+ INVALID_ETA = 12;
+ INVALID_REQUEST = 13;
+ UNKNOWN_TASK = 14;
+ TOMBSTONED_QUEUE = 15;
+ DUPLICATE_TASK_NAME = 16;
+ SKIPPED = 17;
+ TOO_MANY_TASKS = 18;
+ INVALID_PAYLOAD = 19;
+ INVALID_RETRY_PARAMETERS = 20;
+ INVALID_QUEUE_MODE = 21;
+ ACL_LOOKUP_ERROR = 22;
+ TRANSACTIONAL_REQUEST_TOO_LARGE = 23;
+ INCORRECT_CREATOR_NAME = 24;
+ TASK_LEASE_EXPIRED = 25;
+ QUEUE_PAUSED = 26;
+ INVALID_TAG = 27;
+
+ // Reserved range for the Datastore error codes.
+ // Original Datastore error code is shifted by DATASTORE_ERROR offset.
+ DATASTORE_ERROR = 10000;
+ }
+}
+
+message TaskPayload {
+ extensions 10 to max;
+ option message_set_wire_format = true;
+}
+
+message TaskQueueRetryParameters {
+ optional int32 retry_limit = 1;
+ optional int64 age_limit_sec = 2;
+
+ optional double min_backoff_sec = 3 [default = 0.1];
+ optional double max_backoff_sec = 4 [default = 3600];
+ optional int32 max_doublings = 5 [default = 16];
+}
+
+message TaskQueueAcl {
+ repeated bytes user_email = 1;
+ repeated bytes writer_email = 2;
+}
+
+message TaskQueueHttpHeader {
+ required bytes key = 1;
+ required bytes value = 2;
+}
+
+message TaskQueueMode {
+ enum Mode {
+ PUSH = 0;
+ PULL = 1;
+ }
+}
+
+message TaskQueueAddRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5 [default=POST];
+
+ optional bytes url = 4;
+
+ repeated group Header = 6 {
+ required bytes key = 7;
+ required bytes value = 8;
+ }
+
+ optional bytes body = 9 [ctype=CORD];
+ optional Transaction transaction = 10;
+ optional bytes app_id = 11;
+
+ optional group CronTimetable = 12 {
+ required bytes schedule = 13;
+ required bytes timezone = 14;
+ }
+
+ optional bytes description = 15;
+ optional TaskPayload payload = 16;
+ optional TaskQueueRetryParameters retry_parameters = 17;
+ optional TaskQueueMode.Mode mode = 18 [default=PUSH];
+ optional bytes tag = 19;
+}
+
+message TaskQueueAddResponse {
+ optional bytes chosen_task_name = 1;
+}
+
+message TaskQueueBulkAddRequest {
+ repeated TaskQueueAddRequest add_request = 1;
+}
+
+message TaskQueueBulkAddResponse {
+ repeated group TaskResult = 1 {
+ required TaskQueueServiceError.ErrorCode result = 2;
+ optional bytes chosen_task_name = 3;
+ }
+}
+
+message TaskQueueDeleteRequest {
+ required bytes queue_name = 1;
+ repeated bytes task_name = 2;
+ optional bytes app_id = 3;
+}
+
+message TaskQueueDeleteResponse {
+ repeated TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueForceRunRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueForceRunResponse {
+ required TaskQueueServiceError.ErrorCode result = 3;
+}
+
+message TaskQueueUpdateQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required int32 bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ optional TaskQueueRetryParameters retry_parameters = 6;
+ optional int32 max_concurrent_requests = 7;
+ optional TaskQueueMode.Mode mode = 8 [default = PUSH];
+ optional TaskQueueAcl acl = 9;
+ repeated TaskQueueHttpHeader header_override = 10;
+}
+
+message TaskQueueUpdateQueueResponse {
+}
+
+message TaskQueueFetchQueuesRequest {
+ optional bytes app_id = 1;
+ required int32 max_rows = 2;
+}
+
+message TaskQueueFetchQueuesResponse {
+ repeated group Queue = 1 {
+ required bytes queue_name = 2;
+ required double bucket_refill_per_second = 3;
+ required double bucket_capacity = 4;
+ optional string user_specified_rate = 5;
+ required bool paused = 6 [default=false];
+ optional TaskQueueRetryParameters retry_parameters = 7;
+ optional int32 max_concurrent_requests = 8;
+ optional TaskQueueMode.Mode mode = 9 [default = PUSH];
+ optional TaskQueueAcl acl = 10;
+ repeated TaskQueueHttpHeader header_override = 11;
+ optional string creator_name = 12 [ctype=CORD, default="apphosting"];
+ }
+}
+
+message TaskQueueFetchQueueStatsRequest {
+ optional bytes app_id = 1;
+ repeated bytes queue_name = 2;
+ optional int32 max_num_tasks = 3 [default = 0];
+}
+
+message TaskQueueScannerQueueInfo {
+ required int64 executed_last_minute = 1;
+ required int64 executed_last_hour = 2;
+ required double sampling_duration_seconds = 3;
+ optional int32 requests_in_flight = 4;
+ optional double enforced_rate = 5;
+}
+
+message TaskQueueFetchQueueStatsResponse {
+ repeated group QueueStats = 1 {
+ required int32 num_tasks = 2;
+ required int64 oldest_eta_usec = 3;
+ optional TaskQueueScannerQueueInfo scanner_info = 4;
+ }
+}
+message TaskQueuePauseQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bool pause = 3;
+}
+
+message TaskQueuePauseQueueResponse {
+}
+
+message TaskQueuePurgeQueueRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueuePurgeQueueResponse {
+}
+
+message TaskQueueDeleteQueueRequest {
+ required bytes app_id = 1;
+ required bytes queue_name = 2;
+}
+
+message TaskQueueDeleteQueueResponse {
+}
+
+message TaskQueueDeleteGroupRequest {
+ required bytes app_id = 1;
+}
+
+message TaskQueueDeleteGroupResponse {
+}
+
+message TaskQueueQueryTasksRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+
+ optional bytes start_task_name = 3;
+ optional int64 start_eta_usec = 4;
+ optional bytes start_tag = 6;
+ optional int32 max_rows = 5 [default = 1];
+}
+
+message TaskQueueQueryTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional bytes url = 4;
+
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ }
+ optional RequestMethod method = 5;
+
+ optional int32 retry_count = 6 [default=0];
+
+ repeated group Header = 7 {
+ required bytes key = 8;
+ required bytes value = 9;
+ }
+
+ optional int32 body_size = 10;
+ optional bytes body = 11 [ctype=CORD];
+ required int64 creation_time_usec = 12;
+
+ optional group CronTimetable = 13 {
+ required bytes schedule = 14;
+ required bytes timezone = 15;
+ }
+
+ optional group RunLog = 16 {
+ required int64 dispatched_usec = 17;
+ required int64 lag_usec = 18;
+ required int64 elapsed_usec = 19;
+ optional int64 response_code = 20;
+ optional string retry_reason = 27;
+ }
+
+ optional bytes description = 21;
+ optional TaskPayload payload = 22;
+ optional TaskQueueRetryParameters retry_parameters = 23;
+ optional int64 first_try_usec = 24;
+ optional bytes tag = 25;
+ optional int32 execution_count = 26 [default=0];
+ }
+}
+
+message TaskQueueFetchTaskRequest {
+ optional bytes app_id = 1;
+ required bytes queue_name = 2;
+ required bytes task_name = 3;
+}
+
+message TaskQueueFetchTaskResponse {
+ required TaskQueueQueryTasksResponse task = 1;
+}
+
+message TaskQueueUpdateStorageLimitRequest {
+ required bytes app_id = 1;
+ required int64 limit = 2;
+}
+
+message TaskQueueUpdateStorageLimitResponse {
+ required int64 new_limit = 1;
+}
+
+message TaskQueueQueryAndOwnTasksRequest {
+ required bytes queue_name = 1;
+ required double lease_seconds = 2;
+ required int64 max_tasks = 3;
+ optional bool group_by_tag = 4 [default=false];
+ optional bytes tag = 5;
+}
+
+message TaskQueueQueryAndOwnTasksResponse {
+ repeated group Task = 1 {
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ optional int32 retry_count = 4 [default=0];
+ optional bytes body = 5 [ctype=CORD];
+ optional bytes tag = 6;
+ }
+}
+
+message TaskQueueModifyTaskLeaseRequest {
+ required bytes queue_name = 1;
+ required bytes task_name = 2;
+ required int64 eta_usec = 3;
+ required double lease_seconds = 4;
+}
+
+message TaskQueueModifyTaskLeaseResponse {
+ required int64 updated_eta_usec = 1;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/transaction.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/transaction.go
new file mode 100644
index 000000000000..543f1c52900f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/transaction.go
@@ -0,0 +1,30 @@
+package internal
+
+// This file implements hooks for applying datastore transactions.
+
+import (
+ "reflect"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "google.golang.org/appengine/internal/datastore"
+)
+
+var transactionSetters = make(map[reflect.Type]reflect.Value)
+
+// RegisterTransactionSetter registers a function that sets transaction information
+// in a protocol buffer message. f should be a function with two arguments,
+// the first being a protocol buffer type, and the second being *datastore.Transaction.
+func RegisterTransactionSetter(f interface{}) {
+ v := reflect.ValueOf(f)
+ transactionSetters[v.Type().In(0)] = v
+}
+
+// ApplyTransaction applies the transaction t to message pb
+// by using the relevant setter passed to RegisterTransactionSetter.
+func ApplyTransaction(pb proto.Message, t *pb.Transaction) {
+ v := reflect.ValueOf(pb)
+ if f, ok := transactionSetters[v.Type()]; ok {
+ f.Call([]reflect.Value{v, reflect.ValueOf(t)})
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
new file mode 100644
index 000000000000..ef7fd26eca8f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.pb.go
@@ -0,0 +1,355 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+// DO NOT EDIT!
+
+/*
+Package urlfetch is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
+
+It has these top-level messages:
+ URLFetchServiceError
+ URLFetchRequest
+ URLFetchResponse
+*/
+package urlfetch
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type URLFetchServiceError_ErrorCode int32
+
+const (
+ URLFetchServiceError_OK URLFetchServiceError_ErrorCode = 0
+ URLFetchServiceError_INVALID_URL URLFetchServiceError_ErrorCode = 1
+ URLFetchServiceError_FETCH_ERROR URLFetchServiceError_ErrorCode = 2
+ URLFetchServiceError_UNSPECIFIED_ERROR URLFetchServiceError_ErrorCode = 3
+ URLFetchServiceError_RESPONSE_TOO_LARGE URLFetchServiceError_ErrorCode = 4
+ URLFetchServiceError_DEADLINE_EXCEEDED URLFetchServiceError_ErrorCode = 5
+ URLFetchServiceError_SSL_CERTIFICATE_ERROR URLFetchServiceError_ErrorCode = 6
+ URLFetchServiceError_DNS_ERROR URLFetchServiceError_ErrorCode = 7
+ URLFetchServiceError_CLOSED URLFetchServiceError_ErrorCode = 8
+ URLFetchServiceError_INTERNAL_TRANSIENT_ERROR URLFetchServiceError_ErrorCode = 9
+ URLFetchServiceError_TOO_MANY_REDIRECTS URLFetchServiceError_ErrorCode = 10
+ URLFetchServiceError_MALFORMED_REPLY URLFetchServiceError_ErrorCode = 11
+ URLFetchServiceError_CONNECTION_ERROR URLFetchServiceError_ErrorCode = 12
+)
+
+var URLFetchServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "INVALID_URL",
+ 2: "FETCH_ERROR",
+ 3: "UNSPECIFIED_ERROR",
+ 4: "RESPONSE_TOO_LARGE",
+ 5: "DEADLINE_EXCEEDED",
+ 6: "SSL_CERTIFICATE_ERROR",
+ 7: "DNS_ERROR",
+ 8: "CLOSED",
+ 9: "INTERNAL_TRANSIENT_ERROR",
+ 10: "TOO_MANY_REDIRECTS",
+ 11: "MALFORMED_REPLY",
+ 12: "CONNECTION_ERROR",
+}
+var URLFetchServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "INVALID_URL": 1,
+ "FETCH_ERROR": 2,
+ "UNSPECIFIED_ERROR": 3,
+ "RESPONSE_TOO_LARGE": 4,
+ "DEADLINE_EXCEEDED": 5,
+ "SSL_CERTIFICATE_ERROR": 6,
+ "DNS_ERROR": 7,
+ "CLOSED": 8,
+ "INTERNAL_TRANSIENT_ERROR": 9,
+ "TOO_MANY_REDIRECTS": 10,
+ "MALFORMED_REPLY": 11,
+ "CONNECTION_ERROR": 12,
+}
+
+func (x URLFetchServiceError_ErrorCode) Enum() *URLFetchServiceError_ErrorCode {
+ p := new(URLFetchServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x URLFetchServiceError_ErrorCode) String() string {
+ return proto.EnumName(URLFetchServiceError_ErrorCode_name, int32(x))
+}
+func (x *URLFetchServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchServiceError_ErrorCode_value, data, "URLFetchServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchServiceError_ErrorCode(value)
+ return nil
+}
+
+type URLFetchRequest_RequestMethod int32
+
+const (
+ URLFetchRequest_GET URLFetchRequest_RequestMethod = 1
+ URLFetchRequest_POST URLFetchRequest_RequestMethod = 2
+ URLFetchRequest_HEAD URLFetchRequest_RequestMethod = 3
+ URLFetchRequest_PUT URLFetchRequest_RequestMethod = 4
+ URLFetchRequest_DELETE URLFetchRequest_RequestMethod = 5
+ URLFetchRequest_PATCH URLFetchRequest_RequestMethod = 6
+)
+
+var URLFetchRequest_RequestMethod_name = map[int32]string{
+ 1: "GET",
+ 2: "POST",
+ 3: "HEAD",
+ 4: "PUT",
+ 5: "DELETE",
+ 6: "PATCH",
+}
+var URLFetchRequest_RequestMethod_value = map[string]int32{
+ "GET": 1,
+ "POST": 2,
+ "HEAD": 3,
+ "PUT": 4,
+ "DELETE": 5,
+ "PATCH": 6,
+}
+
+func (x URLFetchRequest_RequestMethod) Enum() *URLFetchRequest_RequestMethod {
+ p := new(URLFetchRequest_RequestMethod)
+ *p = x
+ return p
+}
+func (x URLFetchRequest_RequestMethod) String() string {
+ return proto.EnumName(URLFetchRequest_RequestMethod_name, int32(x))
+}
+func (x *URLFetchRequest_RequestMethod) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(URLFetchRequest_RequestMethod_value, data, "URLFetchRequest_RequestMethod")
+ if err != nil {
+ return err
+ }
+ *x = URLFetchRequest_RequestMethod(value)
+ return nil
+}
+
+type URLFetchServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchServiceError) Reset() { *m = URLFetchServiceError{} }
+func (m *URLFetchServiceError) String() string { return proto.CompactTextString(m) }
+func (*URLFetchServiceError) ProtoMessage() {}
+
+type URLFetchRequest struct {
+ Method *URLFetchRequest_RequestMethod `protobuf:"varint,1,req,enum=appengine.URLFetchRequest_RequestMethod" json:"Method,omitempty"`
+ Url *string `protobuf:"bytes,2,req" json:"Url,omitempty"`
+ Header []*URLFetchRequest_Header `protobuf:"group,3,rep" json:"header,omitempty"`
+ Payload []byte `protobuf:"bytes,6,opt" json:"Payload,omitempty"`
+ FollowRedirects *bool `protobuf:"varint,7,opt,def=1" json:"FollowRedirects,omitempty"`
+ Deadline *float64 `protobuf:"fixed64,8,opt" json:"Deadline,omitempty"`
+ MustValidateServerCertificate *bool `protobuf:"varint,9,opt,def=1" json:"MustValidateServerCertificate,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest) Reset() { *m = URLFetchRequest{} }
+func (m *URLFetchRequest) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest) ProtoMessage() {}
+
+const Default_URLFetchRequest_FollowRedirects bool = true
+const Default_URLFetchRequest_MustValidateServerCertificate bool = true
+
+func (m *URLFetchRequest) GetMethod() URLFetchRequest_RequestMethod {
+ if m != nil && m.Method != nil {
+ return *m.Method
+ }
+ return URLFetchRequest_GET
+}
+
+func (m *URLFetchRequest) GetUrl() string {
+ if m != nil && m.Url != nil {
+ return *m.Url
+ }
+ return ""
+}
+
+func (m *URLFetchRequest) GetHeader() []*URLFetchRequest_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetPayload() []byte {
+ if m != nil {
+ return m.Payload
+ }
+ return nil
+}
+
+func (m *URLFetchRequest) GetFollowRedirects() bool {
+ if m != nil && m.FollowRedirects != nil {
+ return *m.FollowRedirects
+ }
+ return Default_URLFetchRequest_FollowRedirects
+}
+
+func (m *URLFetchRequest) GetDeadline() float64 {
+ if m != nil && m.Deadline != nil {
+ return *m.Deadline
+ }
+ return 0
+}
+
+func (m *URLFetchRequest) GetMustValidateServerCertificate() bool {
+ if m != nil && m.MustValidateServerCertificate != nil {
+ return *m.MustValidateServerCertificate
+ }
+ return Default_URLFetchRequest_MustValidateServerCertificate
+}
+
+type URLFetchRequest_Header struct {
+ Key *string `protobuf:"bytes,4,req" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchRequest_Header) Reset() { *m = URLFetchRequest_Header{} }
+func (m *URLFetchRequest_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchRequest_Header) ProtoMessage() {}
+
+func (m *URLFetchRequest_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchRequest_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+type URLFetchResponse struct {
+ Content []byte `protobuf:"bytes,1,opt" json:"Content,omitempty"`
+ StatusCode *int32 `protobuf:"varint,2,req" json:"StatusCode,omitempty"`
+ Header []*URLFetchResponse_Header `protobuf:"group,3,rep" json:"header,omitempty"`
+ ContentWasTruncated *bool `protobuf:"varint,6,opt,def=0" json:"ContentWasTruncated,omitempty"`
+ ExternalBytesSent *int64 `protobuf:"varint,7,opt" json:"ExternalBytesSent,omitempty"`
+ ExternalBytesReceived *int64 `protobuf:"varint,8,opt" json:"ExternalBytesReceived,omitempty"`
+ FinalUrl *string `protobuf:"bytes,9,opt" json:"FinalUrl,omitempty"`
+ ApiCpuMilliseconds *int64 `protobuf:"varint,10,opt,def=0" json:"ApiCpuMilliseconds,omitempty"`
+ ApiBytesSent *int64 `protobuf:"varint,11,opt,def=0" json:"ApiBytesSent,omitempty"`
+ ApiBytesReceived *int64 `protobuf:"varint,12,opt,def=0" json:"ApiBytesReceived,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse) Reset() { *m = URLFetchResponse{} }
+func (m *URLFetchResponse) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse) ProtoMessage() {}
+
+const Default_URLFetchResponse_ContentWasTruncated bool = false
+const Default_URLFetchResponse_ApiCpuMilliseconds int64 = 0
+const Default_URLFetchResponse_ApiBytesSent int64 = 0
+const Default_URLFetchResponse_ApiBytesReceived int64 = 0
+
+func (m *URLFetchResponse) GetContent() []byte {
+ if m != nil {
+ return m.Content
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetStatusCode() int32 {
+ if m != nil && m.StatusCode != nil {
+ return *m.StatusCode
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetHeader() []*URLFetchResponse_Header {
+ if m != nil {
+ return m.Header
+ }
+ return nil
+}
+
+func (m *URLFetchResponse) GetContentWasTruncated() bool {
+ if m != nil && m.ContentWasTruncated != nil {
+ return *m.ContentWasTruncated
+ }
+ return Default_URLFetchResponse_ContentWasTruncated
+}
+
+func (m *URLFetchResponse) GetExternalBytesSent() int64 {
+ if m != nil && m.ExternalBytesSent != nil {
+ return *m.ExternalBytesSent
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetExternalBytesReceived() int64 {
+ if m != nil && m.ExternalBytesReceived != nil {
+ return *m.ExternalBytesReceived
+ }
+ return 0
+}
+
+func (m *URLFetchResponse) GetFinalUrl() string {
+ if m != nil && m.FinalUrl != nil {
+ return *m.FinalUrl
+ }
+ return ""
+}
+
+func (m *URLFetchResponse) GetApiCpuMilliseconds() int64 {
+ if m != nil && m.ApiCpuMilliseconds != nil {
+ return *m.ApiCpuMilliseconds
+ }
+ return Default_URLFetchResponse_ApiCpuMilliseconds
+}
+
+func (m *URLFetchResponse) GetApiBytesSent() int64 {
+ if m != nil && m.ApiBytesSent != nil {
+ return *m.ApiBytesSent
+ }
+ return Default_URLFetchResponse_ApiBytesSent
+}
+
+func (m *URLFetchResponse) GetApiBytesReceived() int64 {
+ if m != nil && m.ApiBytesReceived != nil {
+ return *m.ApiBytesReceived
+ }
+ return Default_URLFetchResponse_ApiBytesReceived
+}
+
+type URLFetchResponse_Header struct {
+ Key *string `protobuf:"bytes,4,req" json:"Key,omitempty"`
+ Value *string `protobuf:"bytes,5,req" json:"Value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *URLFetchResponse_Header) Reset() { *m = URLFetchResponse_Header{} }
+func (m *URLFetchResponse_Header) String() string { return proto.CompactTextString(m) }
+func (*URLFetchResponse_Header) ProtoMessage() {}
+
+func (m *URLFetchResponse_Header) GetKey() string {
+ if m != nil && m.Key != nil {
+ return *m.Key
+ }
+ return ""
+}
+
+func (m *URLFetchResponse_Header) GetValue() string {
+ if m != nil && m.Value != nil {
+ return *m.Value
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("appengine.URLFetchServiceError_ErrorCode", URLFetchServiceError_ErrorCode_name, URLFetchServiceError_ErrorCode_value)
+ proto.RegisterEnum("appengine.URLFetchRequest_RequestMethod", URLFetchRequest_RequestMethod_name, URLFetchRequest_RequestMethod_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
new file mode 100644
index 000000000000..f695edf6a907
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/urlfetch/urlfetch_service.proto
@@ -0,0 +1,64 @@
+syntax = "proto2";
+option go_package = "urlfetch";
+
+package appengine;
+
+message URLFetchServiceError {
+ enum ErrorCode {
+ OK = 0;
+ INVALID_URL = 1;
+ FETCH_ERROR = 2;
+ UNSPECIFIED_ERROR = 3;
+ RESPONSE_TOO_LARGE = 4;
+ DEADLINE_EXCEEDED = 5;
+ SSL_CERTIFICATE_ERROR = 6;
+ DNS_ERROR = 7;
+ CLOSED = 8;
+ INTERNAL_TRANSIENT_ERROR = 9;
+ TOO_MANY_REDIRECTS = 10;
+ MALFORMED_REPLY = 11;
+ CONNECTION_ERROR = 12;
+ }
+}
+
+message URLFetchRequest {
+ enum RequestMethod {
+ GET = 1;
+ POST = 2;
+ HEAD = 3;
+ PUT = 4;
+ DELETE = 5;
+ PATCH = 6;
+ }
+ required RequestMethod Method = 1;
+ required string Url = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bytes Payload = 6 [ctype=CORD];
+
+ optional bool FollowRedirects = 7 [default=true];
+
+ optional double Deadline = 8;
+
+ optional bool MustValidateServerCertificate = 9 [default=true];
+}
+
+message URLFetchResponse {
+ optional bytes Content = 1;
+ required int32 StatusCode = 2;
+ repeated group Header = 3 {
+ required string Key = 4;
+ required string Value = 5;
+ }
+ optional bool ContentWasTruncated = 6 [default=false];
+ optional int64 ExternalBytesSent = 7;
+ optional int64 ExternalBytesReceived = 8;
+
+ optional string FinalUrl = 9;
+
+ optional int64 ApiCpuMilliseconds = 10 [default=0];
+ optional int64 ApiBytesSent = 11 [default=0];
+ optional int64 ApiBytesReceived = 12 [default=0];
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.pb.go
new file mode 100644
index 000000000000..5d425accc0d9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.pb.go
@@ -0,0 +1,288 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/user/user_service.proto
+// DO NOT EDIT!
+
+/*
+Package user is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/user/user_service.proto
+
+It has these top-level messages:
+ UserServiceError
+ CreateLoginURLRequest
+ CreateLoginURLResponse
+ CreateLogoutURLRequest
+ CreateLogoutURLResponse
+ GetOAuthUserRequest
+ GetOAuthUserResponse
+ CheckOAuthSignatureRequest
+ CheckOAuthSignatureResponse
+*/
+package user
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type UserServiceError_ErrorCode int32
+
+const (
+ UserServiceError_OK UserServiceError_ErrorCode = 0
+ UserServiceError_REDIRECT_URL_TOO_LONG UserServiceError_ErrorCode = 1
+ UserServiceError_NOT_ALLOWED UserServiceError_ErrorCode = 2
+ UserServiceError_OAUTH_INVALID_TOKEN UserServiceError_ErrorCode = 3
+ UserServiceError_OAUTH_INVALID_REQUEST UserServiceError_ErrorCode = 4
+ UserServiceError_OAUTH_ERROR UserServiceError_ErrorCode = 5
+)
+
+var UserServiceError_ErrorCode_name = map[int32]string{
+ 0: "OK",
+ 1: "REDIRECT_URL_TOO_LONG",
+ 2: "NOT_ALLOWED",
+ 3: "OAUTH_INVALID_TOKEN",
+ 4: "OAUTH_INVALID_REQUEST",
+ 5: "OAUTH_ERROR",
+}
+var UserServiceError_ErrorCode_value = map[string]int32{
+ "OK": 0,
+ "REDIRECT_URL_TOO_LONG": 1,
+ "NOT_ALLOWED": 2,
+ "OAUTH_INVALID_TOKEN": 3,
+ "OAUTH_INVALID_REQUEST": 4,
+ "OAUTH_ERROR": 5,
+}
+
+func (x UserServiceError_ErrorCode) Enum() *UserServiceError_ErrorCode {
+ p := new(UserServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x UserServiceError_ErrorCode) String() string {
+ return proto.EnumName(UserServiceError_ErrorCode_name, int32(x))
+}
+func (x *UserServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(UserServiceError_ErrorCode_value, data, "UserServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = UserServiceError_ErrorCode(value)
+ return nil
+}
+
+type UserServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *UserServiceError) Reset() { *m = UserServiceError{} }
+func (m *UserServiceError) String() string { return proto.CompactTextString(m) }
+func (*UserServiceError) ProtoMessage() {}
+
+type CreateLoginURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ FederatedIdentity *string `protobuf:"bytes,3,opt,name=federated_identity,def=" json:"federated_identity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLRequest) Reset() { *m = CreateLoginURLRequest{} }
+func (m *CreateLoginURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLRequest) ProtoMessage() {}
+
+func (m *CreateLoginURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *CreateLoginURLRequest) GetFederatedIdentity() string {
+ if m != nil && m.FederatedIdentity != nil {
+ return *m.FederatedIdentity
+ }
+ return ""
+}
+
+type CreateLoginURLResponse struct {
+ LoginUrl *string `protobuf:"bytes,1,req,name=login_url" json:"login_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLoginURLResponse) Reset() { *m = CreateLoginURLResponse{} }
+func (m *CreateLoginURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLoginURLResponse) ProtoMessage() {}
+
+func (m *CreateLoginURLResponse) GetLoginUrl() string {
+ if m != nil && m.LoginUrl != nil {
+ return *m.LoginUrl
+ }
+ return ""
+}
+
+type CreateLogoutURLRequest struct {
+ DestinationUrl *string `protobuf:"bytes,1,req,name=destination_url" json:"destination_url,omitempty"`
+ AuthDomain *string `protobuf:"bytes,2,opt,name=auth_domain" json:"auth_domain,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLRequest) Reset() { *m = CreateLogoutURLRequest{} }
+func (m *CreateLogoutURLRequest) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLRequest) ProtoMessage() {}
+
+func (m *CreateLogoutURLRequest) GetDestinationUrl() string {
+ if m != nil && m.DestinationUrl != nil {
+ return *m.DestinationUrl
+ }
+ return ""
+}
+
+func (m *CreateLogoutURLRequest) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+type CreateLogoutURLResponse struct {
+ LogoutUrl *string `protobuf:"bytes,1,req,name=logout_url" json:"logout_url,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CreateLogoutURLResponse) Reset() { *m = CreateLogoutURLResponse{} }
+func (m *CreateLogoutURLResponse) String() string { return proto.CompactTextString(m) }
+func (*CreateLogoutURLResponse) ProtoMessage() {}
+
+func (m *CreateLogoutURLResponse) GetLogoutUrl() string {
+ if m != nil && m.LogoutUrl != nil {
+ return *m.LogoutUrl
+ }
+ return ""
+}
+
+type GetOAuthUserRequest struct {
+ Scope *string `protobuf:"bytes,1,opt,name=scope" json:"scope,omitempty"`
+ Scopes []string `protobuf:"bytes,2,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserRequest) Reset() { *m = GetOAuthUserRequest{} }
+func (m *GetOAuthUserRequest) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserRequest) ProtoMessage() {}
+
+func (m *GetOAuthUserRequest) GetScope() string {
+ if m != nil && m.Scope != nil {
+ return *m.Scope
+ }
+ return ""
+}
+
+func (m *GetOAuthUserRequest) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type GetOAuthUserResponse struct {
+ Email *string `protobuf:"bytes,1,req,name=email" json:"email,omitempty"`
+ UserId *string `protobuf:"bytes,2,req,name=user_id" json:"user_id,omitempty"`
+ AuthDomain *string `protobuf:"bytes,3,req,name=auth_domain" json:"auth_domain,omitempty"`
+ UserOrganization *string `protobuf:"bytes,4,opt,name=user_organization,def=" json:"user_organization,omitempty"`
+ IsAdmin *bool `protobuf:"varint,5,opt,name=is_admin,def=0" json:"is_admin,omitempty"`
+ ClientId *string `protobuf:"bytes,6,opt,name=client_id,def=" json:"client_id,omitempty"`
+ Scopes []string `protobuf:"bytes,7,rep,name=scopes" json:"scopes,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GetOAuthUserResponse) Reset() { *m = GetOAuthUserResponse{} }
+func (m *GetOAuthUserResponse) String() string { return proto.CompactTextString(m) }
+func (*GetOAuthUserResponse) ProtoMessage() {}
+
+const Default_GetOAuthUserResponse_IsAdmin bool = false
+
+func (m *GetOAuthUserResponse) GetEmail() string {
+ if m != nil && m.Email != nil {
+ return *m.Email
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserId() string {
+ if m != nil && m.UserId != nil {
+ return *m.UserId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetAuthDomain() string {
+ if m != nil && m.AuthDomain != nil {
+ return *m.AuthDomain
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetUserOrganization() string {
+ if m != nil && m.UserOrganization != nil {
+ return *m.UserOrganization
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetIsAdmin() bool {
+ if m != nil && m.IsAdmin != nil {
+ return *m.IsAdmin
+ }
+ return Default_GetOAuthUserResponse_IsAdmin
+}
+
+func (m *GetOAuthUserResponse) GetClientId() string {
+ if m != nil && m.ClientId != nil {
+ return *m.ClientId
+ }
+ return ""
+}
+
+func (m *GetOAuthUserResponse) GetScopes() []string {
+ if m != nil {
+ return m.Scopes
+ }
+ return nil
+}
+
+type CheckOAuthSignatureRequest struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureRequest) Reset() { *m = CheckOAuthSignatureRequest{} }
+func (m *CheckOAuthSignatureRequest) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureRequest) ProtoMessage() {}
+
+type CheckOAuthSignatureResponse struct {
+ OauthConsumerKey *string `protobuf:"bytes,1,req,name=oauth_consumer_key" json:"oauth_consumer_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CheckOAuthSignatureResponse) Reset() { *m = CheckOAuthSignatureResponse{} }
+func (m *CheckOAuthSignatureResponse) String() string { return proto.CompactTextString(m) }
+func (*CheckOAuthSignatureResponse) ProtoMessage() {}
+
+func (m *CheckOAuthSignatureResponse) GetOauthConsumerKey() string {
+ if m != nil && m.OauthConsumerKey != nil {
+ return *m.OauthConsumerKey
+ }
+ return ""
+}
+
+func init() {
+ proto.RegisterEnum("appengine.UserServiceError_ErrorCode", UserServiceError_ErrorCode_name, UserServiceError_ErrorCode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.proto
new file mode 100644
index 000000000000..f3e9693462cb
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/user/user_service.proto
@@ -0,0 +1,58 @@
+syntax = "proto2";
+option go_package = "user";
+
+package appengine;
+
+message UserServiceError {
+ enum ErrorCode {
+ OK = 0;
+ REDIRECT_URL_TOO_LONG = 1;
+ NOT_ALLOWED = 2;
+ OAUTH_INVALID_TOKEN = 3;
+ OAUTH_INVALID_REQUEST = 4;
+ OAUTH_ERROR = 5;
+ }
+}
+
+message CreateLoginURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+ optional string federated_identity = 3 [default = ""];
+}
+
+message CreateLoginURLResponse {
+ required string login_url = 1;
+}
+
+message CreateLogoutURLRequest {
+ required string destination_url = 1;
+ optional string auth_domain = 2;
+}
+
+message CreateLogoutURLResponse {
+ required string logout_url = 1;
+}
+
+message GetOAuthUserRequest {
+ optional string scope = 1;
+
+ repeated string scopes = 2;
+}
+
+message GetOAuthUserResponse {
+ required string email = 1;
+ required string user_id = 2;
+ required string auth_domain = 3;
+ optional string user_organization = 4 [default = ""];
+ optional bool is_admin = 5 [default = false];
+ optional string client_id = 6 [default = ""];
+
+ repeated string scopes = 7;
+}
+
+message CheckOAuthSignatureRequest {
+}
+
+message CheckOAuthSignatureResponse {
+ required string oauth_consumer_key = 1;
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go b/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
new file mode 100644
index 000000000000..db7f47aa7888
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.pb.go
@@ -0,0 +1,428 @@
+// Code generated by protoc-gen-go.
+// source: google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+// DO NOT EDIT!
+
+/*
+Package xmpp is a generated protocol buffer package.
+
+It is generated from these files:
+ google.golang.org/appengine/internal/xmpp/xmpp_service.proto
+
+It has these top-level messages:
+ XmppServiceError
+ PresenceRequest
+ PresenceResponse
+ BulkPresenceRequest
+ BulkPresenceResponse
+ XmppMessageRequest
+ XmppMessageResponse
+ XmppSendPresenceRequest
+ XmppSendPresenceResponse
+ XmppInviteRequest
+ XmppInviteResponse
+*/
+package xmpp
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+type XmppServiceError_ErrorCode int32
+
+const (
+ XmppServiceError_UNSPECIFIED_ERROR XmppServiceError_ErrorCode = 1
+ XmppServiceError_INVALID_JID XmppServiceError_ErrorCode = 2
+ XmppServiceError_NO_BODY XmppServiceError_ErrorCode = 3
+ XmppServiceError_INVALID_XML XmppServiceError_ErrorCode = 4
+ XmppServiceError_INVALID_TYPE XmppServiceError_ErrorCode = 5
+ XmppServiceError_INVALID_SHOW XmppServiceError_ErrorCode = 6
+ XmppServiceError_EXCEEDED_MAX_SIZE XmppServiceError_ErrorCode = 7
+ XmppServiceError_APPID_ALIAS_REQUIRED XmppServiceError_ErrorCode = 8
+ XmppServiceError_NONDEFAULT_MODULE XmppServiceError_ErrorCode = 9
+)
+
+var XmppServiceError_ErrorCode_name = map[int32]string{
+ 1: "UNSPECIFIED_ERROR",
+ 2: "INVALID_JID",
+ 3: "NO_BODY",
+ 4: "INVALID_XML",
+ 5: "INVALID_TYPE",
+ 6: "INVALID_SHOW",
+ 7: "EXCEEDED_MAX_SIZE",
+ 8: "APPID_ALIAS_REQUIRED",
+ 9: "NONDEFAULT_MODULE",
+}
+var XmppServiceError_ErrorCode_value = map[string]int32{
+ "UNSPECIFIED_ERROR": 1,
+ "INVALID_JID": 2,
+ "NO_BODY": 3,
+ "INVALID_XML": 4,
+ "INVALID_TYPE": 5,
+ "INVALID_SHOW": 6,
+ "EXCEEDED_MAX_SIZE": 7,
+ "APPID_ALIAS_REQUIRED": 8,
+ "NONDEFAULT_MODULE": 9,
+}
+
+func (x XmppServiceError_ErrorCode) Enum() *XmppServiceError_ErrorCode {
+ p := new(XmppServiceError_ErrorCode)
+ *p = x
+ return p
+}
+func (x XmppServiceError_ErrorCode) String() string {
+ return proto.EnumName(XmppServiceError_ErrorCode_name, int32(x))
+}
+func (x *XmppServiceError_ErrorCode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppServiceError_ErrorCode_value, data, "XmppServiceError_ErrorCode")
+ if err != nil {
+ return err
+ }
+ *x = XmppServiceError_ErrorCode(value)
+ return nil
+}
+
+type PresenceResponse_SHOW int32
+
+const (
+ PresenceResponse_NORMAL PresenceResponse_SHOW = 0
+ PresenceResponse_AWAY PresenceResponse_SHOW = 1
+ PresenceResponse_DO_NOT_DISTURB PresenceResponse_SHOW = 2
+ PresenceResponse_CHAT PresenceResponse_SHOW = 3
+ PresenceResponse_EXTENDED_AWAY PresenceResponse_SHOW = 4
+)
+
+var PresenceResponse_SHOW_name = map[int32]string{
+ 0: "NORMAL",
+ 1: "AWAY",
+ 2: "DO_NOT_DISTURB",
+ 3: "CHAT",
+ 4: "EXTENDED_AWAY",
+}
+var PresenceResponse_SHOW_value = map[string]int32{
+ "NORMAL": 0,
+ "AWAY": 1,
+ "DO_NOT_DISTURB": 2,
+ "CHAT": 3,
+ "EXTENDED_AWAY": 4,
+}
+
+func (x PresenceResponse_SHOW) Enum() *PresenceResponse_SHOW {
+ p := new(PresenceResponse_SHOW)
+ *p = x
+ return p
+}
+func (x PresenceResponse_SHOW) String() string {
+ return proto.EnumName(PresenceResponse_SHOW_name, int32(x))
+}
+func (x *PresenceResponse_SHOW) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PresenceResponse_SHOW_value, data, "PresenceResponse_SHOW")
+ if err != nil {
+ return err
+ }
+ *x = PresenceResponse_SHOW(value)
+ return nil
+}
+
+type XmppMessageResponse_XmppMessageStatus int32
+
+const (
+ XmppMessageResponse_NO_ERROR XmppMessageResponse_XmppMessageStatus = 0
+ XmppMessageResponse_INVALID_JID XmppMessageResponse_XmppMessageStatus = 1
+ XmppMessageResponse_OTHER_ERROR XmppMessageResponse_XmppMessageStatus = 2
+)
+
+var XmppMessageResponse_XmppMessageStatus_name = map[int32]string{
+ 0: "NO_ERROR",
+ 1: "INVALID_JID",
+ 2: "OTHER_ERROR",
+}
+var XmppMessageResponse_XmppMessageStatus_value = map[string]int32{
+ "NO_ERROR": 0,
+ "INVALID_JID": 1,
+ "OTHER_ERROR": 2,
+}
+
+func (x XmppMessageResponse_XmppMessageStatus) Enum() *XmppMessageResponse_XmppMessageStatus {
+ p := new(XmppMessageResponse_XmppMessageStatus)
+ *p = x
+ return p
+}
+func (x XmppMessageResponse_XmppMessageStatus) String() string {
+ return proto.EnumName(XmppMessageResponse_XmppMessageStatus_name, int32(x))
+}
+func (x *XmppMessageResponse_XmppMessageStatus) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(XmppMessageResponse_XmppMessageStatus_value, data, "XmppMessageResponse_XmppMessageStatus")
+ if err != nil {
+ return err
+ }
+ *x = XmppMessageResponse_XmppMessageStatus(value)
+ return nil
+}
+
+type XmppServiceError struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppServiceError) Reset() { *m = XmppServiceError{} }
+func (m *XmppServiceError) String() string { return proto.CompactTextString(m) }
+func (*XmppServiceError) ProtoMessage() {}
+
+type PresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceRequest) Reset() { *m = PresenceRequest{} }
+func (m *PresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*PresenceRequest) ProtoMessage() {}
+
+func (m *PresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *PresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type PresenceResponse struct {
+ IsAvailable *bool `protobuf:"varint,1,req,name=is_available" json:"is_available,omitempty"`
+ Presence *PresenceResponse_SHOW `protobuf:"varint,2,opt,name=presence,enum=appengine.PresenceResponse_SHOW" json:"presence,omitempty"`
+ Valid *bool `protobuf:"varint,3,opt,name=valid" json:"valid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PresenceResponse) Reset() { *m = PresenceResponse{} }
+func (m *PresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*PresenceResponse) ProtoMessage() {}
+
+func (m *PresenceResponse) GetIsAvailable() bool {
+ if m != nil && m.IsAvailable != nil {
+ return *m.IsAvailable
+ }
+ return false
+}
+
+func (m *PresenceResponse) GetPresence() PresenceResponse_SHOW {
+ if m != nil && m.Presence != nil {
+ return *m.Presence
+ }
+ return PresenceResponse_NORMAL
+}
+
+func (m *PresenceResponse) GetValid() bool {
+ if m != nil && m.Valid != nil {
+ return *m.Valid
+ }
+ return false
+}
+
+type BulkPresenceRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceRequest) Reset() { *m = BulkPresenceRequest{} }
+func (m *BulkPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceRequest) ProtoMessage() {}
+
+func (m *BulkPresenceRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *BulkPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type BulkPresenceResponse struct {
+ PresenceResponse []*PresenceResponse `protobuf:"bytes,1,rep,name=presence_response" json:"presence_response,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BulkPresenceResponse) Reset() { *m = BulkPresenceResponse{} }
+func (m *BulkPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*BulkPresenceResponse) ProtoMessage() {}
+
+func (m *BulkPresenceResponse) GetPresenceResponse() []*PresenceResponse {
+ if m != nil {
+ return m.PresenceResponse
+ }
+ return nil
+}
+
+type XmppMessageRequest struct {
+ Jid []string `protobuf:"bytes,1,rep,name=jid" json:"jid,omitempty"`
+ Body *string `protobuf:"bytes,2,req,name=body" json:"body,omitempty"`
+ RawXml *bool `protobuf:"varint,3,opt,name=raw_xml,def=0" json:"raw_xml,omitempty"`
+ Type *string `protobuf:"bytes,4,opt,name=type,def=chat" json:"type,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageRequest) Reset() { *m = XmppMessageRequest{} }
+func (m *XmppMessageRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageRequest) ProtoMessage() {}
+
+const Default_XmppMessageRequest_RawXml bool = false
+const Default_XmppMessageRequest_Type string = "chat"
+
+func (m *XmppMessageRequest) GetJid() []string {
+ if m != nil {
+ return m.Jid
+ }
+ return nil
+}
+
+func (m *XmppMessageRequest) GetBody() string {
+ if m != nil && m.Body != nil {
+ return *m.Body
+ }
+ return ""
+}
+
+func (m *XmppMessageRequest) GetRawXml() bool {
+ if m != nil && m.RawXml != nil {
+ return *m.RawXml
+ }
+ return Default_XmppMessageRequest_RawXml
+}
+
+func (m *XmppMessageRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return Default_XmppMessageRequest_Type
+}
+
+func (m *XmppMessageRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppMessageResponse struct {
+ Status []XmppMessageResponse_XmppMessageStatus `protobuf:"varint,1,rep,name=status,enum=appengine.XmppMessageResponse_XmppMessageStatus" json:"status,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppMessageResponse) Reset() { *m = XmppMessageResponse{} }
+func (m *XmppMessageResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppMessageResponse) ProtoMessage() {}
+
+func (m *XmppMessageResponse) GetStatus() []XmppMessageResponse_XmppMessageStatus {
+ if m != nil {
+ return m.Status
+ }
+ return nil
+}
+
+type XmppSendPresenceRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ Type *string `protobuf:"bytes,2,opt,name=type" json:"type,omitempty"`
+ Show *string `protobuf:"bytes,3,opt,name=show" json:"show,omitempty"`
+ Status *string `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"`
+ FromJid *string `protobuf:"bytes,5,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceRequest) Reset() { *m = XmppSendPresenceRequest{} }
+func (m *XmppSendPresenceRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceRequest) ProtoMessage() {}
+
+func (m *XmppSendPresenceRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetType() string {
+ if m != nil && m.Type != nil {
+ return *m.Type
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetShow() string {
+ if m != nil && m.Show != nil {
+ return *m.Show
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetStatus() string {
+ if m != nil && m.Status != nil {
+ return *m.Status
+ }
+ return ""
+}
+
+func (m *XmppSendPresenceRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppSendPresenceResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppSendPresenceResponse) Reset() { *m = XmppSendPresenceResponse{} }
+func (m *XmppSendPresenceResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppSendPresenceResponse) ProtoMessage() {}
+
+type XmppInviteRequest struct {
+ Jid *string `protobuf:"bytes,1,req,name=jid" json:"jid,omitempty"`
+ FromJid *string `protobuf:"bytes,2,opt,name=from_jid" json:"from_jid,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteRequest) Reset() { *m = XmppInviteRequest{} }
+func (m *XmppInviteRequest) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteRequest) ProtoMessage() {}
+
+func (m *XmppInviteRequest) GetJid() string {
+ if m != nil && m.Jid != nil {
+ return *m.Jid
+ }
+ return ""
+}
+
+func (m *XmppInviteRequest) GetFromJid() string {
+ if m != nil && m.FromJid != nil {
+ return *m.FromJid
+ }
+ return ""
+}
+
+type XmppInviteResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *XmppInviteResponse) Reset() { *m = XmppInviteResponse{} }
+func (m *XmppInviteResponse) String() string { return proto.CompactTextString(m) }
+func (*XmppInviteResponse) ProtoMessage() {}
+
+func init() {
+ proto.RegisterEnum("appengine.XmppServiceError_ErrorCode", XmppServiceError_ErrorCode_name, XmppServiceError_ErrorCode_value)
+ proto.RegisterEnum("appengine.PresenceResponse_SHOW", PresenceResponse_SHOW_name, PresenceResponse_SHOW_value)
+ proto.RegisterEnum("appengine.XmppMessageResponse_XmppMessageStatus", XmppMessageResponse_XmppMessageStatus_name, XmppMessageResponse_XmppMessageStatus_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.proto b/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
new file mode 100644
index 000000000000..472d52ebf4fa
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/internal/xmpp/xmpp_service.proto
@@ -0,0 +1,83 @@
+syntax = "proto2";
+option go_package = "xmpp";
+
+package appengine;
+
+message XmppServiceError {
+ enum ErrorCode {
+ UNSPECIFIED_ERROR = 1;
+ INVALID_JID = 2;
+ NO_BODY = 3;
+ INVALID_XML = 4;
+ INVALID_TYPE = 5;
+ INVALID_SHOW = 6;
+ EXCEEDED_MAX_SIZE = 7;
+ APPID_ALIAS_REQUIRED = 8;
+ NONDEFAULT_MODULE = 9;
+ }
+}
+
+message PresenceRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message PresenceResponse {
+ enum SHOW {
+ NORMAL = 0;
+ AWAY = 1;
+ DO_NOT_DISTURB = 2;
+ CHAT = 3;
+ EXTENDED_AWAY = 4;
+ }
+
+ required bool is_available = 1;
+ optional SHOW presence = 2;
+ optional bool valid = 3;
+}
+
+message BulkPresenceRequest {
+ repeated string jid = 1;
+ optional string from_jid = 2;
+}
+
+message BulkPresenceResponse {
+ repeated PresenceResponse presence_response = 1;
+}
+
+message XmppMessageRequest {
+ repeated string jid = 1;
+ required string body = 2;
+ optional bool raw_xml = 3 [ default = false ];
+ optional string type = 4 [ default = "chat" ];
+ optional string from_jid = 5;
+}
+
+message XmppMessageResponse {
+ enum XmppMessageStatus {
+ NO_ERROR = 0;
+ INVALID_JID = 1;
+ OTHER_ERROR = 2;
+ }
+
+ repeated XmppMessageStatus status = 1;
+}
+
+message XmppSendPresenceRequest {
+ required string jid = 1;
+ optional string type = 2;
+ optional string show = 3;
+ optional string status = 4;
+ optional string from_jid = 5;
+}
+
+message XmppSendPresenceResponse {
+}
+
+message XmppInviteRequest {
+ required string jid = 1;
+ optional string from_jid = 2;
+}
+
+message XmppInviteResponse {
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/log/log.go b/Godeps/_workspace/src/google.golang.org/appengine/log/log.go
new file mode 100644
index 000000000000..b169cde6486f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/log/log.go
@@ -0,0 +1,322 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package log provides the means of querying an application's logs from
+within an App Engine application.
+
+Example:
+ c := appengine.NewContext(r)
+ query := &log.Query{
+ AppLogs: true,
+ Versions: []string{"1"},
+ }
+
+ for results := query.Run(c); ; {
+ record, err := results.Next()
+ if err == log.Done {
+ c.Infof("Done processing results")
+ break
+ }
+ if err != nil {
+ c.Errorf("Failed to retrieve next log: %v", err)
+ break
+ }
+ c.Infof("Saw record %v", record)
+ }
+*/
+package log // import "google.golang.org/appengine/log"
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// Query defines a logs query.
+type Query struct {
+ // Start time specifies the earliest log to return (inclusive).
+ StartTime time.Time
+
+ // End time specifies the latest log to return (exclusive).
+ EndTime time.Time
+
+ // Offset specifies a position within the log stream to resume reading from,
+ // and should come from a previously returned Record's field of the same name.
+ Offset []byte
+
+ // Incomplete controls whether active (incomplete) requests should be included.
+ Incomplete bool
+
+ // AppLogs indicates if application-level logs should be included.
+ AppLogs bool
+
+ // ApplyMinLevel indicates if MinLevel should be used to filter results.
+ ApplyMinLevel bool
+
+ // If ApplyMinLevel is true, only logs for requests with at least one
+ // application log of MinLevel or higher will be returned.
+ MinLevel int
+
+ // Versions is the major version IDs whose logs should be retrieved.
+ // Logs for specific modules can be retrieved by the specifying versions
+ // in the form "module:version"; the default module is used if no module
+ // is specified.
+ Versions []string
+
+ // A list of requests to search for instead of a time-based scan. Cannot be
+ // combined with filtering options such as StartTime, EndTime, Offset,
+ // Incomplete, ApplyMinLevel, or Versions.
+ RequestIDs []string
+}
+
+// AppLog represents a single application-level log.
+type AppLog struct {
+ Time time.Time
+ Level int
+ Message string
+}
+
+// Record contains all the information for a single web request.
+type Record struct {
+ AppID string
+ ModuleID string
+ VersionID string
+ RequestID []byte
+ IP string
+ Nickname string
+ AppEngineRelease string
+
+ // The time when this request started.
+ StartTime time.Time
+
+ // The time when this request finished.
+ EndTime time.Time
+
+ // Opaque cursor into the result stream.
+ Offset []byte
+
+ // The time required to process the request.
+ Latency time.Duration
+ MCycles int64
+ Method string
+ Resource string
+ HTTPVersion string
+ Status int32
+
+ // The size of the request sent back to the client, in bytes.
+ ResponseSize int64
+ Referrer string
+ UserAgent string
+ URLMapEntry string
+ Combined string
+ Host string
+
+ // The estimated cost of this request, in dollars.
+ Cost float64
+ TaskQueueName string
+ TaskName string
+ WasLoadingRequest bool
+ PendingTime time.Duration
+ Finished bool
+ AppLogs []AppLog
+
+ // Mostly-unique identifier for the instance that handled the request if available.
+ InstanceID string
+}
+
+// Result represents the result of a query.
+type Result struct {
+ logs []*Record
+ context appengine.Context
+ request *pb.LogReadRequest
+ resultsSeen bool
+ err error
+}
+
+// Next returns the next log record,
+func (qr *Result) Next() (*Record, error) {
+ if qr.err != nil {
+ return nil, qr.err
+ }
+ if len(qr.logs) > 0 {
+ lr := qr.logs[0]
+ qr.logs = qr.logs[1:]
+ return lr, nil
+ }
+
+ if qr.request.Offset == nil && qr.resultsSeen {
+ return nil, Done
+ }
+
+ if err := qr.run(); err != nil {
+ // Errors here may be retried, so don't store the error.
+ return nil, err
+ }
+
+ return qr.Next()
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("log: query has no more results")
+
+// protoToAppLogs takes as input an array of pointers to LogLines, the internal
+// Protocol Buffer representation of a single application-level log,
+// and converts it to an array of AppLogs, the external representation
+// of an application-level log.
+func protoToAppLogs(logLines []*pb.LogLine) []AppLog {
+ appLogs := make([]AppLog, len(logLines))
+
+ for i, line := range logLines {
+ appLogs[i] = AppLog{
+ // Proto timestamps are in microseconds; *1e3 converts to nanoseconds.
+ Time: time.Unix(0, *line.Time*1e3),
+ Level: int(*line.Level),
+ Message: *line.LogMessage,
+ }
+ }
+
+ return appLogs
+}
+
+// protoToRecord converts a RequestLog, the internal Protocol Buffer
+// representation of a single request-level log, to a Record, its
+// corresponding external representation.
+func protoToRecord(rl *pb.RequestLog) *Record {
+ // The offset is kept as opaque marshaled bytes; if marshaling fails the
+ // record simply carries no offset rather than propagating the error.
+ offset, err := proto.Marshal(rl.Offset)
+ if err != nil {
+ offset = nil
+ }
+ return &Record{
+ AppID: *rl.AppId,
+ ModuleID: rl.GetModuleId(),
+ VersionID: *rl.VersionId,
+ RequestID: rl.RequestId,
+ Offset: offset,
+ IP: *rl.Ip,
+ Nickname: rl.GetNickname(),
+ AppEngineRelease: string(rl.GetAppEngineRelease()),
+ // Proto times are microseconds since the epoch; *1e3 yields nanoseconds.
+ StartTime: time.Unix(0, *rl.StartTime*1e3),
+ EndTime: time.Unix(0, *rl.EndTime*1e3),
+ Latency: time.Duration(*rl.Latency) * time.Microsecond,
+ MCycles: *rl.Mcycles,
+ Method: *rl.Method,
+ Resource: *rl.Resource,
+ HTTPVersion: *rl.HttpVersion,
+ Status: *rl.Status,
+ ResponseSize: *rl.ResponseSize,
+ Referrer: rl.GetReferrer(),
+ UserAgent: rl.GetUserAgent(),
+ URLMapEntry: *rl.UrlMapEntry,
+ Combined: *rl.Combined,
+ Host: rl.GetHost(),
+ Cost: rl.GetCost(),
+ TaskQueueName: rl.GetTaskQueueName(),
+ TaskName: rl.GetTaskName(),
+ WasLoadingRequest: rl.GetWasLoadingRequest(),
+ PendingTime: time.Duration(rl.GetPendingTime()) * time.Microsecond,
+ Finished: rl.GetFinished(),
+ AppLogs: protoToAppLogs(rl.Line),
+ InstanceID: string(rl.GetCloneKey()),
+ }
+}
+
+// Run starts a query for log records, which contain request and application
+// level log information.
+// Any error building the request is stored in the Result and surfaced by
+// the first call to Next, so Run itself never fails.
+func (params *Query) Run(c appengine.Context) *Result {
+ req, err := makeRequest(params, c.FullyQualifiedAppID(), appengine.VersionID(c))
+ return &Result{
+ context: c,
+ request: req,
+ err: err,
+ }
+}
+
+// makeRequest translates a user-facing Query into the wire-level
+// LogReadRequest proto. versionID is the running module's version and is
+// used as the default when the query names no versions.
+func makeRequest(params *Query, appID, versionID string) (*pb.LogReadRequest, error) {
+ req := &pb.LogReadRequest{}
+ req.AppId = &appID
+ // The proto expects times in microseconds since the epoch.
+ if !params.StartTime.IsZero() {
+ req.StartTime = proto.Int64(params.StartTime.UnixNano() / 1e3)
+ }
+ if !params.EndTime.IsZero() {
+ req.EndTime = proto.Int64(params.EndTime.UnixNano() / 1e3)
+ }
+ if len(params.Offset) > 0 {
+ var offset pb.LogOffset
+ if err := proto.Unmarshal(params.Offset, &offset); err != nil {
+ return nil, fmt.Errorf("bad Offset: %v", err)
+ }
+ req.Offset = &offset
+ }
+ if params.Incomplete {
+ req.IncludeIncomplete = &params.Incomplete
+ }
+ if params.AppLogs {
+ req.IncludeAppLogs = &params.AppLogs
+ }
+ if params.ApplyMinLevel {
+ req.MinimumLogLevel = proto.Int32(int32(params.MinLevel))
+ }
+ if params.Versions == nil {
+ // If no versions were specified, default to the default module at
+ // the major version being used by this module.
+ if i := strings.Index(versionID, "."); i >= 0 {
+ versionID = versionID[:i]
+ }
+ req.VersionId = []string{versionID}
+ } else {
+ req.ModuleVersion = make([]*pb.LogModuleVersion, 0, len(params.Versions))
+ for _, v := range params.Versions {
+ var m *string
+ // Entries may be "version" or "module:version"; split on the colon.
+ if i := strings.Index(v, ":"); i >= 0 {
+ m, v = proto.String(v[:i]), v[i+1:]
+ }
+ req.ModuleVersion = append(req.ModuleVersion, &pb.LogModuleVersion{
+ ModuleId: m,
+ VersionId: proto.String(v),
+ })
+ }
+ }
+ if params.RequestIDs != nil {
+ ids := make([][]byte, len(params.RequestIDs))
+ for i, v := range params.RequestIDs {
+ ids[i] = []byte(v)
+ }
+ req.RequestId = ids
+ }
+
+ return req, nil
+}
+
+// run takes the query Result produced by a call to Run and updates it with
+// more Records. The updated Result contains a new set of logs as well as an
+// offset to where more logs can be found. We also convert the items in the
+// response from their internal representations to external versions of the
+// same structs.
+func (r *Result) run() error {
+ res := &pb.LogReadResponse{}
+ if err := r.context.Call("logservice", "Read", r.request, res, nil); err != nil {
+ return err
+ }
+
+ r.logs = make([]*Record, len(res.Log))
+ // Carry the server's cursor into the next request; a nil offset tells
+ // Next that iteration is complete.
+ r.request.Offset = res.Offset
+ r.resultsSeen = true
+
+ for i, log := range res.Log {
+ r.logs[i] = protoToRecord(log)
+ }
+
+ return nil
+}
+
+// init registers the logservice error-code names so RPC errors render readably.
+func init() {
+ internal.RegisterErrorCodeMap("logservice", pb.LogServiceError_ErrorCode_name)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/log/log_test.go b/Godeps/_workspace/src/google.golang.org/appengine/log/log_test.go
new file mode 100644
index 000000000000..5d7e985a1ee1
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/log/log_test.go
@@ -0,0 +1,108 @@
+package log
+
+import (
+ "reflect"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ pb "google.golang.org/appengine/internal/log"
+)
+
+// TestQueryToRequest checks that makeRequest translates user Queries into the
+// expected LogReadRequest protos, including the default-version fallback and
+// "module:version" splitting.
+func TestQueryToRequest(t *testing.T) {
+ testCases := []struct {
+ desc string
+ query *Query
+ want *pb.LogReadRequest
+ }{
+ {
+ desc: "Empty",
+ query: &Query{},
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ VersionId: []string{"v12"},
+ },
+ },
+ {
+ desc: "Versions",
+ query: &Query{
+ Versions: []string{"alpha", "backend:beta"},
+ },
+ want: &pb.LogReadRequest{
+ AppId: proto.String("s~fake"),
+ ModuleVersion: []*pb.LogModuleVersion{
+ {
+ VersionId: proto.String("alpha"),
+ }, {
+ ModuleId: proto.String("backend"),
+ VersionId: proto.String("beta"),
+ },
+ },
+ },
+ },
+ }
+
+ for _, tt := range testCases {
+ req, err := makeRequest(tt.query, "s~fake", "v12")
+
+ if err != nil {
+ t.Errorf("%s: got err %v, want nil", tt.desc, err)
+ continue
+ }
+ if !proto.Equal(req, tt.want) {
+ t.Errorf("%s request:\ngot %v\nwant %v", tt.desc, req, tt.want)
+ }
+ }
+}
+
+// TestProtoToRecord checks the proto-to-external conversion with only the
+// required RequestLog fields set, verifying defaults for the optional ones.
+func TestProtoToRecord(t *testing.T) {
+ // We deliberately leave ModuleId and other optional fields unset.
+ p := &pb.RequestLog{
+ AppId: proto.String("s~fake"),
+ VersionId: proto.String("1"),
+ RequestId: []byte("deadbeef"),
+ Ip: proto.String("127.0.0.1"),
+ StartTime: proto.Int64(431044244000000),
+ EndTime: proto.Int64(431044724000000),
+ Latency: proto.Int64(480000000),
+ Mcycles: proto.Int64(7),
+ Method: proto.String("GET"),
+ Resource: proto.String("/app"),
+ HttpVersion: proto.String("1.1"),
+ Status: proto.Int32(418),
+ ResponseSize: proto.Int64(1337),
+ UrlMapEntry: proto.String("_go_app"),
+ Combined: proto.String("apache log"),
+ }
+ // Sanity check that all required fields are set.
+ if _, err := proto.Marshal(p); err != nil {
+ t.Fatalf("proto.Marshal: %v", err)
+ }
+ want := &Record{
+ AppID: "s~fake",
+ ModuleID: "default",
+ VersionID: "1",
+ RequestID: []byte("deadbeef"),
+ IP: "127.0.0.1",
+ StartTime: time.Date(1983, 8, 29, 22, 30, 44, 0, time.UTC),
+ EndTime: time.Date(1983, 8, 29, 22, 38, 44, 0, time.UTC),
+ Latency: 8 * time.Minute,
+ MCycles: 7,
+ Method: "GET",
+ Resource: "/app",
+ HTTPVersion: "1.1",
+ Status: 418,
+ ResponseSize: 1337,
+ URLMapEntry: "_go_app",
+ Combined: "apache log",
+ Finished: true,
+ AppLogs: []AppLog{},
+ }
+ got := protoToRecord(p)
+ // Coerce locations to UTC since otherwise they will be in local.
+ got.StartTime, got.EndTime = got.StartTime.UTC(), got.EndTime.UTC()
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("protoToRecord:\ngot: %v\nwant: %v", got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/mail/mail.go b/Godeps/_workspace/src/google.golang.org/appengine/mail/mail.go
new file mode 100644
index 000000000000..5a26d595c0bd
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/mail/mail.go
@@ -0,0 +1,123 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package mail provides the means of sending email from an
+App Engine application.
+
+Example:
+ msg := &mail.Message{
+ Sender: "romeo@montague.com",
+ To: []string{"Juliet "},
+ Subject: "See you tonight",
+ Body: "Don't forget our plans. Hark, 'til later.",
+ }
+ if err := mail.Send(c, msg); err != nil {
+ c.Errorf("Alas, my user, the email failed to sendeth: %v", err)
+ }
+*/
+package mail // import "google.golang.org/appengine/mail"
+
+import (
+ "net/mail"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ bpb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// A Message represents an email message.
+// Addresses may be of any form permitted by RFC 822.
+type Message struct {
+ // Sender must be set, and must be either an application admin
+ // or the currently signed-in user.
+ Sender string
+ ReplyTo string // may be empty
+
+ // At least one of these slices must have a non-zero length,
+ // except when calling SendToAdmins.
+ To, Cc, Bcc []string
+
+ Subject string
+
+ // At least one of Body or HTMLBody must be non-empty.
+ Body string
+ HTMLBody string
+
+ Attachments []Attachment
+
+ // Extra mail headers.
+ // See https://cloud.google.com/appengine/docs/go/mail/
+ // for permissible headers.
+ Headers mail.Header
+}
+
+// An Attachment represents an email attachment.
+type Attachment struct {
+ // Name must be set to a valid file name.
+ Name string
+ Data []byte // raw attachment contents
+ ContentID string // optional Content-ID header value; empty means unset
+}
+
+// Send sends an email message.
+func Send(c appengine.Context, msg *Message) error {
+ return send(c, "Send", msg)
+}
+
+// SendToAdmins sends an email message to the application's administrators.
+func SendToAdmins(c appengine.Context, msg *Message) error {
+ return send(c, "SendToAdmins", msg)
+}
+
+// send builds the MailMessage proto from msg and invokes the given RPC
+// method ("Send" or "SendToAdmins") on the mail service.
+func send(c appengine.Context, method string, msg *Message) error {
+ req := &pb.MailMessage{
+ Sender: &msg.Sender,
+ To: msg.To,
+ Cc: msg.Cc,
+ Bcc: msg.Bcc,
+ Subject: &msg.Subject,
+ }
+ // Optional fields are only set when non-empty so the proto omits them.
+ if msg.ReplyTo != "" {
+ req.ReplyTo = &msg.ReplyTo
+ }
+ if msg.Body != "" {
+ req.TextBody = &msg.Body
+ }
+ if msg.HTMLBody != "" {
+ req.HtmlBody = &msg.HTMLBody
+ }
+ if len(msg.Attachments) > 0 {
+ req.Attachment = make([]*pb.MailAttachment, len(msg.Attachments))
+ for i, att := range msg.Attachments {
+ req.Attachment[i] = &pb.MailAttachment{
+ FileName: proto.String(att.Name),
+ Data: att.Data,
+ }
+ if att.ContentID != "" {
+ req.Attachment[i].ContentID = proto.String(att.ContentID)
+ }
+ }
+ }
+ for key, vs := range msg.Headers {
+ for _, v := range vs {
+ req.Header = append(req.Header, &pb.MailHeader{
+ Name: proto.String(key),
+ Value: proto.String(v),
+ })
+ }
+ }
+ res := &bpb.VoidProto{}
+ if err := c.Call("mail", method, req, res, nil); err != nil {
+ return err
+ }
+ return nil
+}
+
+// init registers the mail service error-code names so RPC errors render readably.
+func init() {
+ internal.RegisterErrorCodeMap("mail", pb.MailServiceError_ErrorCode_name)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/mail/mail_test.go b/Godeps/_workspace/src/google.golang.org/appengine/mail/mail_test.go
new file mode 100644
index 000000000000..7502c5973ad5
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/mail/mail_test.go
@@ -0,0 +1,65 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package mail
+
+import (
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ basepb "google.golang.org/appengine/internal/base"
+ pb "google.golang.org/appengine/internal/mail"
+)
+
+// TestMessageConstruction verifies the Message-to-proto translation,
+// including per-attachment fields (see the regression note below).
+func TestMessageConstruction(t *testing.T) {
+ var got *pb.MailMessage
+ c := aetesting.FakeSingleContext(t, "mail", "Send", func(in *pb.MailMessage, out *basepb.VoidProto) error {
+ got = in
+ return nil
+ })
+
+ msg := &Message{
+ Sender: "dsymonds@example.com",
+ To: []string{"nigeltao@example.com"},
+ Body: "Hey, lunch time?",
+ Attachments: []Attachment{
+ // Regression test for a prod bug. The address of a range variable was used when
+ // constructing the outgoing proto, so multiple attachments used the same name.
+ {
+ Name: "att1.txt",
+ Data: []byte("data1"),
+ ContentID: "",
+ },
+ {
+ Name: "att2.txt",
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if err := Send(c, msg); err != nil {
+ t.Fatalf("Send: %v", err)
+ }
+ want := &pb.MailMessage{
+ Sender: proto.String("dsymonds@example.com"),
+ To: []string{"nigeltao@example.com"},
+ Subject: proto.String(""),
+ TextBody: proto.String("Hey, lunch time?"),
+ Attachment: []*pb.MailAttachment{
+ {
+ FileName: proto.String("att1.txt"),
+ Data: []byte("data1"),
+ ContentID: proto.String(""),
+ },
+ {
+ FileName: proto.String("att2.txt"),
+ Data: []byte("data2"),
+ },
+ },
+ }
+ if !proto.Equal(got, want) {
+ t.Errorf("Bad proto for %+v\n got %v\nwant %v", msg, got, want)
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache.go b/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache.go
new file mode 100644
index 000000000000..5ed3dea9a1a5
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache.go
@@ -0,0 +1,525 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package memcache provides a client for App Engine's distributed in-memory
+// key-value store for small chunks of arbitrary data.
+//
+// The fundamental operations get and set items, keyed by a string.
+//
+// item0, err := memcache.Get(c, "key")
+// if err != nil && err != memcache.ErrCacheMiss {
+// return err
+// }
+// if err == nil {
+// fmt.Fprintf(w, "memcache hit: Key=%q Val=[% x]\n", item0.Key, item0.Value)
+// } else {
+// fmt.Fprintf(w, "memcache miss\n")
+// }
+//
+// and
+//
+// item1 := &memcache.Item{
+// Key: "foo",
+// Value: []byte("bar"),
+// }
+// if err := memcache.Set(c, item1); err != nil {
+// return err
+// }
+package memcache // import "google.golang.org/appengine/memcache"
+
+import (
+ "bytes"
+ "encoding/gob"
+ "encoding/json"
+ "errors"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
+var (
+ // ErrCacheMiss means that an operation failed
+ // because the item wasn't present.
+ ErrCacheMiss = errors.New("memcache: cache miss")
+ // ErrCASConflict means that a CompareAndSwap call failed due to the
+ // cached value being modified between the Get and the CompareAndSwap.
+ // If the cached value was simply evicted rather than replaced,
+ // ErrNotStored will be returned instead.
+ ErrCASConflict = errors.New("memcache: compare-and-swap conflict")
+ // ErrNoStats means that no statistics were available.
+ ErrNoStats = errors.New("memcache: no statistics available")
+ // ErrNotStored means that a conditional write operation (i.e. Add or
+ // CompareAndSwap) failed because the condition was not satisfied.
+ ErrNotStored = errors.New("memcache: item not stored")
+ // ErrServerError means that a server error occurred.
+ ErrServerError = errors.New("memcache: server error")
+)
+
+// Item is the unit of memcache gets and sets.
+type Item struct {
+ // Key is the Item's key (250 bytes maximum).
+ Key string
+ // Value is the Item's value.
+ Value []byte
+ // Object is the Item's value for use with a Codec.
+ Object interface{}
+ // Flags are server-opaque flags whose semantics are entirely up to the
+ // App Engine app.
+ Flags uint32
+ // Expiration is the maximum duration that the item will stay
+ // in the cache.
+ // The zero value means the Item has no expiration time.
+ // Subsecond precision is ignored.
+ // This is not set when getting items.
+ Expiration time.Duration
+ // casID is a client-opaque value used for compare-and-swap operations.
+ // Zero means that compare-and-swap is not used.
+ casID uint64
+}
+
+// thirtyYears is the boundary between relative and absolute expiration
+// encodings in the memcache protocol (see the comment inside set).
+const (
+ secondsIn30Years = 60 * 60 * 24 * 365 * 30 // from memcache server code
+ thirtyYears = time.Duration(secondsIn30Years) * time.Second
+)
+
+// protoToItem converts a protocol buffer item to a Go struct.
+// The CAS ID is preserved so the item can later be used with CompareAndSwap.
+func protoToItem(p *pb.MemcacheGetResponse_Item) *Item {
+ return &Item{
+ Key: string(p.Key),
+ Value: p.Value,
+ Flags: p.GetFlags(),
+ casID: p.GetCasId(),
+ }
+}
+
+// If err is an appengine.MultiError, return its first element. Otherwise, return err.
+// Used by the single-item wrappers around the batch operations.
+func singleError(err error) error {
+ if me, ok := err.(appengine.MultiError); ok {
+ return me[0]
+ }
+ return err
+}
+
+// Get gets the item for the given key. ErrCacheMiss is returned for a memcache
+// cache miss. The key must be at most 250 bytes in length.
+func Get(c appengine.Context, key string) (*Item, error) {
+ m, err := GetMulti(c, []string{key})
+ if err != nil {
+ return nil, err
+ }
+ if _, ok := m[key]; !ok {
+ return nil, ErrCacheMiss
+ }
+ return m[key], nil
+}
+
+// GetMulti is a batch version of Get. The returned map from keys to items may
+// have fewer elements than the input slice, due to memcache cache misses.
+// Each key must be at most 250 bytes in length.
+func GetMulti(c appengine.Context, key []string) (map[string]*Item, error) {
+ if len(key) == 0 {
+ return nil, nil
+ }
+ keyAsBytes := make([][]byte, len(key))
+ for i, k := range key {
+ keyAsBytes[i] = []byte(k)
+ }
+ req := &pb.MemcacheGetRequest{
+ Key: keyAsBytes,
+ // Always request CAS IDs so returned items work with CompareAndSwap.
+ ForCas: proto.Bool(true),
+ }
+ res := &pb.MemcacheGetResponse{}
+ if err := c.Call("memcache", "Get", req, res, nil); err != nil {
+ return nil, err
+ }
+ m := make(map[string]*Item, len(res.Item))
+ for _, p := range res.Item {
+ t := protoToItem(p)
+ m[t.Key] = t
+ }
+ return m, nil
+}
+
+// Delete deletes the item for the given key.
+// ErrCacheMiss is returned if the specified item can not be found.
+// The key must be at most 250 bytes in length.
+func Delete(c appengine.Context, key string) error {
+ return singleError(DeleteMulti(c, []string{key}))
+}
+
+// DeleteMulti is a batch version of Delete.
+// If any keys cannot be found, an appengine.MultiError is returned.
+// Each key must be at most 250 bytes in length.
+func DeleteMulti(c appengine.Context, key []string) error {
+ if len(key) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheDeleteRequest{
+ Item: make([]*pb.MemcacheDeleteRequest_Item, len(key)),
+ }
+ for i, k := range key {
+ req.Item[i] = &pb.MemcacheDeleteRequest_Item{Key: []byte(k)}
+ }
+ res := &pb.MemcacheDeleteResponse{}
+ if err := c.Call("memcache", "Delete", req, res, nil); err != nil {
+ return err
+ }
+ // The server must report one status per requested key.
+ if len(res.DeleteStatus) != len(key) {
+ return ErrServerError
+ }
+ // Build a MultiError but only return it if some entry actually failed.
+ me, any := make(appengine.MultiError, len(key)), false
+ for i, s := range res.DeleteStatus {
+ switch s {
+ case pb.MemcacheDeleteResponse_DELETED:
+ // OK
+ case pb.MemcacheDeleteResponse_NOT_FOUND:
+ me[i] = ErrCacheMiss
+ any = true
+ default:
+ me[i] = ErrServerError
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Increment atomically increments the decimal value in the given key
+// by delta and returns the new value. The value must fit in a uint64.
+// Overflow wraps around, and underflow is capped to zero. The
+// provided delta may be negative. If the key doesn't exist in
+// memcacheg, the provided initial value is used to atomically
+// populate it before the delta is applied.
+// The key must be at most 250 bytes in length.
+func Increment(c appengine.Context, key string, delta int64, initialValue uint64) (newValue uint64, err error) {
+ return incr(c, key, delta, &initialValue)
+}
+
+// IncrementExisting works like Increment but assumes that the key
+// already exists in memcache and doesn't take an initial value.
+// IncrementExisting can save work if calculating the initial value is
+// expensive.
+// An error is returned if the specified item can not be found.
+func IncrementExisting(c appengine.Context, key string, delta int64) (newValue uint64, err error) {
+ return incr(c, key, delta, nil)
+}
+
+// incr implements both Increment and IncrementExisting; a nil initialValue
+// means the key must already exist.
+func incr(c appengine.Context, key string, delta int64, initialValue *uint64) (newValue uint64, err error) {
+ req := &pb.MemcacheIncrementRequest{
+ Key: []byte(key),
+ InitialValue: initialValue,
+ }
+ // The proto carries an unsigned delta plus an explicit direction flag.
+ if delta >= 0 {
+ req.Delta = proto.Uint64(uint64(delta))
+ } else {
+ req.Delta = proto.Uint64(uint64(-delta))
+ req.Direction = pb.MemcacheIncrementRequest_DECREMENT.Enum()
+ }
+ res := &pb.MemcacheIncrementResponse{}
+ err = c.Call("memcache", "Increment", req, res, nil)
+ if err != nil {
+ return
+ }
+ // An absent NewValue means the key did not exist (and no initial value applied).
+ if res.NewValue == nil {
+ return 0, ErrCacheMiss
+ }
+ return *res.NewValue, nil
+}
+
+// set sets the given items using the given conflict resolution policy.
+// If value is non-nil it supplies pre-encoded values (one per item, as
+// produced by a Codec) that override each item's Value field.
+// appengine.MultiError may be returned.
+func set(c appengine.Context, item []*Item, value [][]byte, policy pb.MemcacheSetRequest_SetPolicy) error {
+ if len(item) == 0 {
+ return nil
+ }
+ req := &pb.MemcacheSetRequest{
+ Item: make([]*pb.MemcacheSetRequest_Item, len(item)),
+ }
+ for i, t := range item {
+ p := &pb.MemcacheSetRequest_Item{
+ Key: []byte(t.Key),
+ }
+ if value == nil {
+ p.Value = t.Value
+ } else {
+ p.Value = value[i]
+ }
+ if t.Flags != 0 {
+ p.Flags = proto.Uint32(t.Flags)
+ }
+ if t.Expiration != 0 {
+ // In the .proto file, MemcacheSetRequest_Item uses a fixed32 (i.e. unsigned)
+ // for expiration time, while MemcacheGetRequest_Item uses int32 (i.e. signed).
+ // Throughout this .go file, we use int32.
+ // Also, in the proto, the expiration value is either a duration (in seconds)
+ // or an absolute Unix timestamp (in seconds), depending on whether the
+ // value is less than or greater than or equal to 30 years, respectively.
+ if t.Expiration < time.Second {
+ // Because an Expiration of 0 means no expiration, we take
+ // care here to translate an item with an expiration
+ // Duration between 0-1 seconds as immediately expiring
+ // (saying it expired a few seconds ago), rather than
+ // rounding it down to 0 and making it live forever.
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) - 5)
+ } else if t.Expiration >= thirtyYears {
+ p.ExpirationTime = proto.Uint32(uint32(time.Now().Unix()) + uint32(t.Expiration/time.Second))
+ } else {
+ p.ExpirationTime = proto.Uint32(uint32(t.Expiration / time.Second))
+ }
+ }
+ // A non-zero casID (set by GetMulti) turns this write into a CAS operation.
+ if t.casID != 0 {
+ p.CasId = proto.Uint64(t.casID)
+ p.ForCas = proto.Bool(true)
+ }
+ p.SetPolicy = policy.Enum()
+ req.Item[i] = p
+ }
+ res := &pb.MemcacheSetResponse{}
+ if err := c.Call("memcache", "Set", req, res, nil); err != nil {
+ return err
+ }
+ // The server must report one status per item sent.
+ if len(res.SetStatus) != len(item) {
+ return ErrServerError
+ }
+ me, any := make(appengine.MultiError, len(item)), false
+ for i, st := range res.SetStatus {
+ var err error
+ switch st {
+ case pb.MemcacheSetResponse_STORED:
+ // OK
+ case pb.MemcacheSetResponse_NOT_STORED:
+ err = ErrNotStored
+ case pb.MemcacheSetResponse_EXISTS:
+ err = ErrCASConflict
+ default:
+ err = ErrServerError
+ }
+ if err != nil {
+ me[i] = err
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Set writes the given item, unconditionally.
+func Set(c appengine.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func SetMulti(c appengine.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func Add(c appengine.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func AddMulti(c appengine.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified or evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func CompareAndSwap(c appengine.Context, item *Item) error {
+ return singleError(set(c, []*Item{item}, nil, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func CompareAndSwapMulti(c appengine.Context, item []*Item) error {
+ return set(c, item, nil, pb.MemcacheSetRequest_CAS)
+}
+
+// Codec represents a symmetric pair of functions that implement a codec.
+// Items stored into or retrieved from memcache using a Codec have their
+// values marshaled or unmarshaled.
+//
+// All the methods provided for Codec behave analogously to the package level
+// function with same name.
+type Codec struct {
+ Marshal func(interface{}) ([]byte, error)
+ Unmarshal func([]byte, interface{}) error
+}
+
+// Get gets the item for the given key and decodes the obtained value into v.
+// ErrCacheMiss is returned for a memcache cache miss.
+// The key must be at most 250 bytes in length.
+func (cd Codec) Get(c appengine.Context, key string, v interface{}) (*Item, error) {
+ i, err := Get(c, key)
+ if err != nil {
+ return nil, err
+ }
+ if err := cd.Unmarshal(i.Value, v); err != nil {
+ return nil, err
+ }
+ return i, nil
+}
+
+// set marshals each item's Object and delegates to the package-level set.
+// If any Marshal fails, a MultiError indexed by item is returned and no
+// RPC is issued.
+func (cd Codec) set(c appengine.Context, items []*Item, policy pb.MemcacheSetRequest_SetPolicy) error {
+ var vs [][]byte
+ var me appengine.MultiError
+ for i, item := range items {
+ v, err := cd.Marshal(item.Object)
+ if err != nil {
+ if me == nil {
+ me = make(appengine.MultiError, len(items))
+ }
+ me[i] = err
+ continue
+ }
+ // Once an error has occurred the values slice is abandoned, since
+ // the whole batch will be rejected below.
+ if me == nil {
+ vs = append(vs, v)
+ }
+ }
+ if me != nil {
+ return me
+ }
+
+ return set(c, items, vs, policy)
+}
+
+// Set writes the given item, unconditionally.
+func (cd Codec) Set(c appengine.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_SET))
+}
+
+// SetMulti is a batch version of Set.
+// appengine.MultiError may be returned.
+func (cd Codec) SetMulti(c appengine.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_SET)
+}
+
+// Add writes the given item, if no value already exists for its key.
+// ErrNotStored is returned if that condition is not met.
+func (cd Codec) Add(c appengine.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_ADD))
+}
+
+// AddMulti is a batch version of Add.
+// appengine.MultiError may be returned.
+func (cd Codec) AddMulti(c appengine.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_ADD)
+}
+
+// CompareAndSwap writes the given item that was previously returned by Get,
+// if the value was neither modified or evicted between the Get and the
+// CompareAndSwap calls. The item's Key should not change between calls but
+// all other item fields may differ.
+// ErrCASConflict is returned if the value was modified in between the calls.
+// ErrNotStored is returned if the value was evicted in between the calls.
+func (cd Codec) CompareAndSwap(c appengine.Context, item *Item) error {
+ return singleError(cd.set(c, []*Item{item}, pb.MemcacheSetRequest_CAS))
+}
+
+// CompareAndSwapMulti is a batch version of CompareAndSwap.
+// appengine.MultiError may be returned.
+func (cd Codec) CompareAndSwapMulti(c appengine.Context, items []*Item) error {
+ return cd.set(c, items, pb.MemcacheSetRequest_CAS)
+}
+
+var (
+ // Gob is a Codec that uses the gob package.
+ Gob = Codec{gobMarshal, gobUnmarshal}
+ // JSON is a Codec that uses the json package.
+ JSON = Codec{json.Marshal, json.Unmarshal}
+)
+
+// gobMarshal encodes v with encoding/gob, matching json.Marshal's signature
+// so it can serve as a Codec.Marshal function.
+func gobMarshal(v interface{}) ([]byte, error) {
+ var buf bytes.Buffer
+ if err := gob.NewEncoder(&buf).Encode(v); err != nil {
+ return nil, err
+ }
+ return buf.Bytes(), nil
+}
+
+// gobUnmarshal decodes gob-encoded data into v (which must be a pointer).
+func gobUnmarshal(data []byte, v interface{}) error {
+ return gob.NewDecoder(bytes.NewBuffer(data)).Decode(v)
+}
+
+// Statistics represents a set of statistics about the memcache cache.
+// This may include items that have expired but have not yet been removed from the cache.
+type Statistics struct {
+ Hits uint64 // Counter of cache hits
+ Misses uint64 // Counter of cache misses
+ ByteHits uint64 // Counter of bytes transferred for gets
+
+ Items uint64 // Items currently in the cache
+ Bytes uint64 // Size of all items currently in the cache
+
+ Oldest int64 // Age of access of the oldest item, in seconds
+}
+
+// Stats retrieves the current memcache statistics.
+// ErrNoStats is returned when the service reports no statistics.
+func Stats(c appengine.Context) (*Statistics, error) {
+ req := &pb.MemcacheStatsRequest{}
+ res := &pb.MemcacheStatsResponse{}
+ if err := c.Call("memcache", "Stats", req, res, nil); err != nil {
+ return nil, err
+ }
+ if res.Stats == nil {
+ return nil, ErrNoStats
+ }
+ return &Statistics{
+ Hits: *res.Stats.Hits,
+ Misses: *res.Stats.Misses,
+ ByteHits: *res.Stats.ByteHits,
+ Items: *res.Stats.Items,
+ Bytes: *res.Stats.Bytes,
+ Oldest: int64(*res.Stats.OldestItemAge),
+ }, nil
+}
+
+// Flush flushes all items from memcache.
+func Flush(c appengine.Context) error {
+ req := &pb.MemcacheFlushRequest{}
+ res := &pb.MemcacheFlushResponse{}
+ return c.Call("memcache", "FlushAll", req, res, nil)
+}
+
+// namespaceMod injects the current namespace into outgoing memcache request
+// protos that support one, leaving any explicitly-set namespace untouched.
+// Registered with the internal namespace machinery in init below.
+func namespaceMod(m proto.Message, namespace string) {
+ switch m := m.(type) {
+ case *pb.MemcacheDeleteRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheGetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheIncrementRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ case *pb.MemcacheSetRequest:
+ if m.NameSpace == nil {
+ m.NameSpace = &namespace
+ }
+ // MemcacheFlushRequest, MemcacheStatsRequest do not apply namespace.
+ }
+}
+
+// init registers the memcache error-code names and the namespace hook.
+func init() {
+ internal.RegisterErrorCodeMap("memcache", pb.MemcacheServiceError_ErrorCode_name)
+ internal.NamespaceMods["memcache"] = namespaceMod
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache_test.go b/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache_test.go
new file mode 100644
index 000000000000..70e38307793e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/memcache/memcache_test.go
@@ -0,0 +1,255 @@
+package memcache
+
+import (
+ "fmt"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/memcache"
+)
+
// errRPC is a sentinel error returned by the fake service handlers below.
var errRPC = fmt.Errorf("RPC error")
+
// TestGetRequest verifies that Get issues a single-key MemcacheGetRequest.
func TestGetRequest(t *testing.T) {
	serviceCalled := false
	apiKey := "lyric"

	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
		// Test request.
		if n := len(req.Key); n != 1 {
			t.Errorf("got %d want 1", n)
			return nil
		}
		if k := string(req.Key[0]); k != apiKey {
			t.Errorf("got %q want %q", k, apiKey)
		}

		serviceCalled = true
		return nil
	})

	// Test the "forward" path from the API call parameters to the
	// protobuf request object. (The "backward" path from the
	// protobuf response object to the API call response,
	// including the error response, are handled in the next few
	// tests).
	Get(c, apiKey)
	if !serviceCalled {
		t.Error("Service was not called as expected")
	}
}
+
// TestGetResponseHit verifies the "backward" path: a populated service
// response is translated into an Item with the expected key and value.
func TestGetResponseHit(t *testing.T) {
	key := "lyric"
	value := "Where the buffalo roam"

	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
		res.Item = []*pb.MemcacheGetResponse_Item{
			{Key: []byte(key), Value: []byte(value)},
		}
		return nil
	})
	apiItem, err := Get(c, key)
	if apiItem == nil || apiItem.Key != key || string(apiItem.Value) != value {
		t.Errorf("got %q, %q want {%q,%q}, nil", apiItem, err, key, value)
	}
}

// TestGetResponseMiss verifies that an empty service response maps to
// ErrCacheMiss.
func TestGetResponseMiss(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
		// don't fill in any of the response
		return nil
	})
	_, err := Get(c, "something")
	if err != ErrCacheMiss {
		t.Errorf("got %v want ErrCacheMiss", err)
	}
}

// TestGetResponseRPCError verifies that a transport-level error is passed
// through to the caller unchanged.
func TestGetResponseRPCError(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(_ *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
		return errRPC
	})

	if _, err := Get(c, "something"); err != errRPC {
		t.Errorf("got %v want errRPC", err)
	}
}
+
+func TestAddRequest(t *testing.T) {
+ var apiItem = &Item{
+ Key: "lyric",
+ Value: []byte("Oh, give me a home"),
+ }
+
+ serviceCalled := false
+
+ c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
+ // Test request.
+ pbItem := req.Item[0]
+ if k := string(pbItem.Key); k != apiItem.Key {
+ t.Errorf("got %q want %q", k, apiItem.Key)
+ }
+ if v := string(apiItem.Value); v != string(pbItem.Value) {
+ t.Errorf("got %q want %q", v, string(pbItem.Value))
+ }
+ if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_ADD {
+ t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_ADD)
+ }
+
+ serviceCalled = true
+ return nil
+ })
+
+ Add(c, apiItem)
+ if !serviceCalled {
+ t.Error("Service was not called as expected")
+ }
+}
+
// TestAddResponseStored verifies that a STORED status maps to a nil error.
func TestAddResponseStored(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
		return nil
	})

	if err := Add(c, &Item{}); err != nil {
		t.Errorf("got %v want nil", err)
	}
}

// TestAddResponseNotStored verifies that NOT_STORED maps to ErrNotStored.
func TestAddResponseNotStored(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_NOT_STORED}
		return nil
	})

	if err := Add(c, &Item{}); err != ErrNotStored {
		t.Errorf("got %v want ErrNotStored", err)
	}
}

// TestAddResponseError verifies that an ERROR status maps to ErrServerError.
func TestAddResponseError(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
		return nil
	})

	if err := Add(c, &Item{}); err != ErrServerError {
		t.Errorf("got %v want ErrServerError", err)
	}
}

// TestAddResponseRPCError verifies that a transport-level error is passed
// through to the caller unchanged.
func TestAddResponseRPCError(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		return errRPC
	})

	if err := Add(c, &Item{}); err != errRPC {
		t.Errorf("got %v want errRPC", err)
	}
}
+
// TestSetRequest verifies that Set translates its Item into a one-element
// MemcacheSetRequest carrying the SET set policy.
func TestSetRequest(t *testing.T) {
	var apiItem = &Item{
		Key:   "lyric",
		Value: []byte("Where the buffalo roam"),
	}

	serviceCalled := false

	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(req *pb.MemcacheSetRequest, _ *pb.MemcacheSetResponse) error {
		// Test request.
		if n := len(req.Item); n != 1 {
			t.Errorf("got %d want 1", n)
			return nil
		}
		pbItem := req.Item[0]
		if k := string(pbItem.Key); k != apiItem.Key {
			t.Errorf("got %q want %q", k, apiItem.Key)
		}
		if v := string(pbItem.Value); v != string(apiItem.Value) {
			t.Errorf("got %q want %q", v, string(apiItem.Value))
		}
		if p := *pbItem.SetPolicy; p != pb.MemcacheSetRequest_SET {
			t.Errorf("got %v want %v", p, pb.MemcacheSetRequest_SET)
		}

		serviceCalled = true
		return nil
	})

	Set(c, apiItem)
	if !serviceCalled {
		t.Error("Service was not called as expected")
	}
}
+
// TestSetResponse verifies that a STORED status maps to a nil error.
func TestSetResponse(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_STORED}
		return nil
	})

	if err := Set(c, &Item{}); err != nil {
		t.Errorf("got %v want nil", err)
	}
}

// TestSetResponseError verifies that an ERROR status maps to ErrServerError.
func TestSetResponseError(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "memcache", "Set", func(_ *pb.MemcacheSetRequest, res *pb.MemcacheSetResponse) error {
		res.SetStatus = []pb.MemcacheSetResponse_SetStatusCode{pb.MemcacheSetResponse_ERROR}
		return nil
	})

	if err := Set(c, &Item{}); err != ErrServerError {
		t.Errorf("got %v want ErrServerError", err)
	}
}
+
// TestNamespaceResetting verifies that wrapping a context with
// appengine.Namespace sets the NameSpace field on outgoing Get requests,
// and that re-wrapping with the empty namespace restores the original
// behavior. The fake handler records the field and returns errRPC so the
// call itself never succeeds — only the request shape matters here.
func TestNamespaceResetting(t *testing.T) {
	var nsField *string
	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, res *pb.MemcacheGetResponse) error {
		nsField = req.NameSpace
		return errRPC
	})

	// Check that wrapping c in a namespace twice works correctly.
	nc, err := appengine.Namespace(c, "A")
	if err != nil {
		t.Fatalf("appengine.Namespace: %v", err)
	}
	c0, err := appengine.Namespace(nc, "") // should act as the original context
	if err != nil {
		t.Fatalf("appengine.Namespace: %v", err)
	}

	Get(c, "key")
	if nsField != nil {
		t.Fatalf("Get with c yielded %q", *nsField)
	}
	Get(nc, "key")
	if nsField == nil || *nsField != "A" {
		t.Fatalf("Get with nc yielded %v", nsField)
	}
	Get(c0, "key")
	if nsField != nil && *nsField != "" {
		t.Fatalf("Get with c0 yielded %q", *nsField)
	}
}
+
// TestGetMultiEmpty verifies that GetMulti short-circuits on an empty key
// slice without issuing an RPC.
func TestGetMultiEmpty(t *testing.T) {
	serviceCalled := false
	c := aetesting.FakeSingleContext(t, "memcache", "Get", func(req *pb.MemcacheGetRequest, _ *pb.MemcacheGetResponse) error {
		serviceCalled = true
		return nil
	})

	// Test that the Memcache service is not called when
	// GetMulti is passed an empty slice of keys.
	GetMulti(c, []string{})
	if serviceCalled {
		t.Error("Service was called but should not have been")
	}
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/module/module.go b/Godeps/_workspace/src/google.golang.org/appengine/module/module.go
new file mode 100644
index 000000000000..d130b7dee8e6
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/module/module.go
@@ -0,0 +1,112 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package module provides functions for interacting with modules.
+
+The appengine package contains functions that report the identity of the app,
+including the module name.
+*/
+package module // import "google.golang.org/appengine/module"
+
+import (
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
+// List returns the names of modules belonging to this application.
+func List(c appengine.Context) ([]string, error) {
+ req := &pb.GetModulesRequest{}
+ res := &pb.GetModulesResponse{}
+ err := c.Call("modules", "GetModules", req, res, nil)
+ return res.Module, err
+}
+
// NumInstances returns the number of instances of the given module/version.
// If either argument is the empty string it means the default.
func NumInstances(c appengine.Context, module, version string) (int, error) {
	req := &pb.GetNumInstancesRequest{}
	if module != "" {
		req.Module = &module
	}
	if version != "" {
		req.Version = &version
	}
	res := &pb.GetNumInstancesResponse{}

	if err := c.Call("modules", "GetNumInstances", req, res, nil); err != nil {
		return 0, err
	}
	// NOTE(review): assumes the service always populates Instances on a
	// successful call; a nil field here would panic. Confirm against the
	// modules API.
	return int(*res.Instances), nil
}
+
+// SetNumInstances sets the number of instances of the given module.version to the
+// specified value. If either module or version are the empty string it means the
+// default.
+func SetNumInstances(c appengine.Context, module, version string, instances int) error {
+ req := &pb.SetNumInstancesRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ req.Instances = proto.Int64(int64(instances))
+ res := &pb.SetNumInstancesResponse{}
+ return c.Call("modules", "SetNumInstances", req, res, nil)
+}
+
+// Versions returns the names of the versions that belong to the specified module.
+// If module is the empty string, it means the default module.
+func Versions(c appengine.Context, module string) ([]string, error) {
+ req := &pb.GetVersionsRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetVersionsResponse{}
+ err := c.Call("modules", "GetVersions", req, res, nil)
+ return res.GetVersion(), err
+}
+
+// DefaultVersion returns the default version of the specified module.
+// If module is the empty string, it means the default module.
+func DefaultVersion(c appengine.Context, module string) (string, error) {
+ req := &pb.GetDefaultVersionRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ res := &pb.GetDefaultVersionResponse{}
+ err := c.Call("modules", "GetDefaultVersion", req, res, nil)
+ return res.GetVersion(), err
+}
+
+// Start starts the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Start(c appengine.Context, module, version string) error {
+ req := &pb.StartModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StartModuleResponse{}
+ return c.Call("modules", "StartModule", req, res, nil)
+}
+
+// Stop stops the specified version of the specified module.
+// If either module or version are the empty string, it means the default.
+func Stop(c appengine.Context, module, version string) error {
+ req := &pb.StopModuleRequest{}
+ if module != "" {
+ req.Module = &module
+ }
+ if version != "" {
+ req.Version = &version
+ }
+ res := &pb.StopModuleResponse{}
+ return c.Call("modules", "StopModule", req, res, nil)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/module/module_test.go b/Godeps/_workspace/src/google.golang.org/appengine/module/module_test.go
new file mode 100644
index 000000000000..73e8971dc62d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/module/module_test.go
@@ -0,0 +1,124 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package module
+
+import (
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/modules"
+)
+
// Shared fixtures used by the fake-RPC tests below.
const version = "test-version"
const module = "test-module"
const instances = 3
+
// TestList verifies that List returns the module names reported by the
// modules.GetModules RPC.
func TestList(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "modules", "GetModules", func(req *pb.GetModulesRequest, res *pb.GetModulesResponse) error {
		res.Module = []string{"default", "mod1"}
		return nil
	})
	got, err := List(c)
	if err != nil {
		t.Fatalf("List: %v", err)
	}
	want := []string{"default", "mod1"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("List = %v, want %v", got, want)
	}
}
+
// TestSetNumInstances verifies that SetNumInstances forwards the module,
// version and instance count to the SetNumInstances RPC.
func TestSetNumInstances(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "modules", "SetNumInstances", func(req *pb.SetNumInstancesRequest, res *pb.SetNumInstancesResponse) error {
		if *req.Module != module {
			t.Errorf("Module = %v, want %v", req.Module, module)
		}
		if *req.Version != version {
			t.Errorf("Version = %v, want %v", req.Version, version)
		}
		if *req.Instances != instances {
			t.Errorf("Instances = %v, want %d", req.Instances, instances)
		}
		return nil
	})
	err := SetNumInstances(c, module, version, instances)
	if err != nil {
		t.Fatalf("SetNumInstances: %v", err)
	}
}
+
// TestVersions verifies that Versions passes the module name through and
// returns the version list from the response.
func TestVersions(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "modules", "GetVersions", func(req *pb.GetVersionsRequest, res *pb.GetVersionsResponse) error {
		if *req.Module != module {
			t.Errorf("Module = %v, want %v", req.Module, module)
		}
		res.Version = []string{"v1", "v2", "v3"}
		return nil
	})
	got, err := Versions(c, module)
	if err != nil {
		t.Fatalf("Versions: %v", err)
	}
	want := []string{"v1", "v2", "v3"}
	if !reflect.DeepEqual(got, want) {
		t.Errorf("Versions = %v, want %v", got, want)
	}
}
+
// TestDefaultVersion verifies that DefaultVersion passes the module name
// through and returns the version from the response.
func TestDefaultVersion(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "modules", "GetDefaultVersion", func(req *pb.GetDefaultVersionRequest, res *pb.GetDefaultVersionResponse) error {
		if *req.Module != module {
			t.Errorf("Module = %v, want %v", req.Module, module)
		}
		res.Version = proto.String(version)
		return nil
	})
	got, err := DefaultVersion(c, module)
	if err != nil {
		t.Fatalf("DefaultVersion: %v", err)
	}
	if got != version {
		t.Errorf("Version = %v, want %v", got, version)
	}
}
+
// TestStart verifies that Start forwards the module and version to the
// StartModule RPC.
func TestStart(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "modules", "StartModule", func(req *pb.StartModuleRequest, res *pb.StartModuleResponse) error {
		if *req.Module != module {
			t.Errorf("Module = %v, want %v", req.Module, module)
		}
		if *req.Version != version {
			t.Errorf("Version = %v, want %v", req.Version, version)
		}
		return nil
	})

	err := Start(c, module, version)
	if err != nil {
		t.Fatalf("Start: %v", err)
	}
}
+
+func TestStop(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "modules", "StopModule", func(req *pb.StopModuleRequest, res *pb.StopModuleResponse) error {
+ version := "test-version"
+ module := "test-module"
+ if *req.Module != module {
+ t.Errorf("Module = %v, want %v", req.Module, module)
+ }
+ if *req.Version != version {
+ t.Errorf("Version = %v, want %v", req.Version, version)
+ }
+ return nil
+ })
+
+ err := Stop(c, module, version)
+ if err != nil {
+ t.Fatalf("Stop: %v", err)
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/namespace.go b/Godeps/_workspace/src/google.golang.org/appengine/namespace.go
new file mode 100644
index 000000000000..73b01e096f52
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/namespace.go
@@ -0,0 +1,48 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "fmt"
+ "regexp"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ basepb "google.golang.org/appengine/internal/base"
+)
+
+// Namespace returns a replacement context that operates within the given namespace.
+func Namespace(c Context, namespace string) (Context, error) {
+ if !validNamespace.MatchString(namespace) {
+ return nil, fmt.Errorf("appengine: namespace %q does not match /%s/", namespace, validNamespace)
+ }
+ return &namespacedContext{
+ Context: c,
+ namespace: namespace,
+ }, nil
+}
+
// validNamespace matches valid namespace names: up to 100 characters drawn
// from ASCII letters, digits, '.', '_' and '-'. The empty string is allowed.
var validNamespace = regexp.MustCompile(`^[0-9A-Za-z._-]{0,100}$`)
+
// namespacedContext wraps a Context to support namespaces.
// All methods other than Call delegate to the embedded Context.
type namespacedContext struct {
	Context
	namespace string // namespace applied to outgoing requests
}
+
// Call overrides the embedded Context's Call: it first lets the target
// service's registered namespace modifier stamp n.namespace onto the request,
// then forwards the call. The runtime's own "__go__.GetNamespace" query is
// answered locally so callers observe the wrapped namespace.
func (n *namespacedContext) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
	// Apply any namespace mods.
	if mod, ok := internal.NamespaceMods[service]; ok {
		mod(in, n.namespace)
	}
	if service == "__go__" && method == "GetNamespace" {
		out.(*basepb.StringProto).Value = proto.String(n.namespace)
		return nil
	}

	return n.Context.Call(service, method, in, out, opts)
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/namespace_test.go b/Godeps/_workspace/src/google.golang.org/appengine/namespace_test.go
new file mode 100644
index 000000000000..92fa53479f1f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/namespace_test.go
@@ -0,0 +1,33 @@
+package appengine
+
+import (
+ "testing"
+)
+
+func TestNamespaceValidity(t *testing.T) {
+ testCases := []struct {
+ namespace string
+ ok bool
+ }{
+ // data from Python's namespace_manager_test.py
+ {"", true},
+ {"__a.namespace.123__", true},
+ {"-_A....NAMESPACE-_", true},
+ {"-", true},
+ {".", true},
+ {".-", true},
+
+ {"?", false},
+ {"+", false},
+ {"!", false},
+ {" ", false},
+ }
+ for _, tc := range testCases {
+ _, err := Namespace(nil, tc.namespace)
+ if err == nil && !tc.ok {
+ t.Errorf("Namespace %q should be rejected, but wasn't", tc.namespace)
+ } else if err != nil && tc.ok {
+ t.Errorf("Namespace %q should be accepted, but wasn't", tc.namespace)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client.go b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client.go
new file mode 100644
index 000000000000..3b80041ed412
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client.go
@@ -0,0 +1,173 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package remote_api
+
+// This file provides the client for connecting remotely to a user's production
+// application.
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "log"
+ "math/rand"
+ "net/http"
+ "net/url"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+)
+
+// NewRemoteContext returns a context that gives access to the production
+// APIs for the application at the given host. All communication will be
+// performed over SSL unless the host is localhost.
+func NewRemoteContext(host string, client *http.Client) (appengine.Context, error) {
+ // Add an appcfg header to outgoing requests.
+ t := client.Transport
+ if t == nil {
+ t = http.DefaultTransport
+ }
+ client.Transport = &headerAddingRoundTripper{t}
+
+ url := url.URL{
+ Scheme: "https",
+ Host: host,
+ Path: "/_ah/remote_api",
+ }
+ if host == "localhost" || strings.HasPrefix(host, "localhost:") {
+ url.Scheme = "http"
+ }
+ u := url.String()
+ appID, err := getAppID(client, u)
+ if err != nil {
+ return nil, fmt.Errorf("unable to contact server: %v", err)
+ }
+ return &context{
+ client: client,
+ url: u,
+ appID: appID,
+ }, nil
+}
+
// context implements appengine.Context by proxying API calls over HTTP to a
// production app's /_ah/remote_api endpoint.
type context struct {
	client *http.Client // transport is wrapped to add the appcfg header
	url    string       // full URL of the remote_api endpoint
	appID  string       // fully-qualified app ID learned during the handshake
}
+
// Request is not available for remote contexts; it always returns nil.
func (c *context) Request() interface{} { return nil }

// FullyQualifiedAppID returns the app ID discovered during the handshake.
func (c *context) FullyQualifiedAppID() string { return c.appID }

// logf writes to the local process's standard logger, prefixed with the
// severity level; remote contexts do not forward logs to the production app.
func (c *context) logf(level, format string, args ...interface{}) {
	log.Printf(level+": "+format, args...)
}

func (c *context) Debugf(format string, args ...interface{}) { c.logf("DEBUG", format, args...) }
func (c *context) Infof(format string, args ...interface{}) { c.logf("INFO", format, args...) }
func (c *context) Warningf(format string, args ...interface{}) { c.logf("WARNING", format, args...) }
func (c *context) Errorf(format string, args ...interface{}) { c.logf("ERROR", format, args...) }
func (c *context) Criticalf(format string, args ...interface{}) { c.logf("CRITICAL", format, args...) }
+
// Call marshals in, wraps it in a remote_api pb.Request, POSTs it to the
// remote endpoint, and unmarshals the returned payload into out.
// Application-level failures reported by the server are surfaced as
// *internal.APIError.
func (c *context) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
	req, err := proto.Marshal(in)
	if err != nil {
		return fmt.Errorf("error marshalling request: %v", err)
	}

	remReq := &pb.Request{
		ServiceName: proto.String(service),
		Method:      proto.String(method),
		Request:     req,
		// NOTE(djd): RequestId is unused in the server.
	}

	req, err = proto.Marshal(remReq)
	if err != nil {
		return fmt.Errorf("proto.Marshal: %v", err)
	}

	// TODO(djd): Respect opts.Timeout?
	resp, err := c.client.Post(c.url, "application/octet-stream", bytes.NewReader(req))
	if err != nil {
		return fmt.Errorf("error sending request: %v", err)
	}
	defer resp.Body.Close()

	// The body is read before the status check so it can be quoted in the
	// error message; the read error itself is checked afterwards.
	body, err := ioutil.ReadAll(resp.Body)
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
	}
	if err != nil {
		return fmt.Errorf("failed reading response: %v", err)
	}
	remResp := &pb.Response{}
	if err := proto.Unmarshal(body, remResp); err != nil {
		return fmt.Errorf("error unmarshalling response: %v", err)
	}

	if ae := remResp.GetApplicationError(); ae != nil {
		return &internal.APIError{
			Code:    ae.GetCode(),
			Detail:  ae.GetDetail(),
			Service: service,
		}
	}

	if remResp.Response == nil {
		return fmt.Errorf("unexpected response: %s", proto.MarshalTextString(remResp))
	}

	return proto.Unmarshal(remResp.Response, out)
}
+
+// This is a forgiving regexp designed to parse the app ID from YAML.
+var appIDRE = regexp.MustCompile(`app_id["']?\s*:\s*['"]?([-a-z0-9.:~]+)`)
+
+func getAppID(client *http.Client, url string) (string, error) {
+ // Generate a pseudo-random token for handshaking.
+ token := strconv.Itoa(rand.New(rand.NewSource(time.Now().UnixNano())).Int())
+
+ resp, err := client.Get(fmt.Sprintf("%s?rtok=%s", url, token))
+ if err != nil {
+ return "", err
+ }
+ defer resp.Body.Close()
+
+ body, err := ioutil.ReadAll(resp.Body)
+ if resp.StatusCode != http.StatusOK {
+ return "", fmt.Errorf("bad response %d; body: %q", resp.StatusCode, body)
+ }
+ if err != nil {
+ return "", fmt.Errorf("failed reading response: %v", err)
+ }
+
+ // Check the token is present in response.
+ if !bytes.Contains(body, []byte(token)) {
+ return "", fmt.Errorf("token not found: want %q; body %q", token, body)
+ }
+
+ match := appIDRE.FindSubmatch(body)
+ if match == nil {
+ return "", fmt.Errorf("app ID not found: body %q", body)
+ }
+
+ return string(match[1]), nil
+}
+
+type headerAddingRoundTripper struct {
+ Wrapped http.RoundTripper
+}
+
+func (t *headerAddingRoundTripper) RoundTrip(r *http.Request) (*http.Response, error) {
+ r.Header.Set("X-Appcfg-Api-Version", "1")
+ return t.Wrapped.RoundTrip(r)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client_test.go b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client_test.go
new file mode 100644
index 000000000000..4a49cbe81fcc
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/client_test.go
@@ -0,0 +1,20 @@
+package remote_api
+
+import (
+ "testing"
+)
+
+func TestAppIDRE(t *testing.T) {
+ appID := "s~my-appid-539"
+ tests := []string{
+ "{rtok: 8306111115908860449, app_id: s~my-appid-539}\n",
+ "{rtok: 8306111115908860449, app_id: 's~my-appid-539'}\n",
+ `{rtok: 8306111115908860449, app_id: "s~my-appid-539"}`,
+ `{rtok: 8306111115908860449, "app_id":"s~my-appid-539"}`,
+ }
+ for _, v := range tests {
+ if g := appIDRE.FindStringSubmatch(v); g == nil || g[1] != appID {
+ t.Errorf("appIDRE.FindStringSubmatch(%s) got %q, want %q", v, g, appID)
+ }
+ }
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/remote_api/remote_api.go b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/remote_api.go
new file mode 100644
index 000000000000..68c13e8f8f7d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/remote_api/remote_api.go
@@ -0,0 +1,142 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package remote_api implements the /_ah/remote_api endpoint.
+This endpoint is used by offline tools such as the bulk loader.
+*/
+package remote_api // import "google.golang.org/appengine/remote_api"
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "strconv"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/remote_api"
+ "google.golang.org/appengine/user"
+)
+
// init registers the remote_api endpoint on the default HTTP mux, so any app
// importing this package serves it automatically.
func init() {
	http.HandleFunc("/_ah/remote_api", handle)
}
+
// handle serves /_ah/remote_api. It authenticates the caller (cookie or
// OAuth admin) and requires the appcfg header; a GET answers the YAML
// handshake, while a POST carries a serialized pb.Request whose inner RPC is
// proxied via c.Call and whose result (or error) is returned as a
// serialized pb.Response.
func handle(w http.ResponseWriter, req *http.Request) {
	c := appengine.NewContext(req)

	// Accept either a signed-in user or an OAuth identity.
	u := user.Current(c)
	if u == nil {
		u, _ = user.CurrentOAuth(c, "")
	}

	if u == nil || !u.Admin {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.WriteHeader(http.StatusUnauthorized)
		io.WriteString(w, "You must be logged in as an administrator to access this.\n")
		return
	}
	if req.Header.Get("X-Appcfg-Api-Version") == "" {
		w.Header().Set("Content-Type", "text/plain; charset=utf-8")
		w.WriteHeader(http.StatusForbidden)
		io.WriteString(w, "This request did not contain a necessary header.\n")
		return
	}

	if req.Method != "POST" {
		// Response must be YAML.
		rtok := req.FormValue("rtok")
		if rtok == "" {
			rtok = "0"
		}
		w.Header().Set("Content-Type", "text/yaml; charset=utf-8")
		fmt.Fprintf(w, `{app_id: %q, rtok: %q}`, c.FullyQualifiedAppID(), rtok)
		return
	}

	defer req.Body.Close()
	body, err := ioutil.ReadAll(req.Body)
	if err != nil {
		w.WriteHeader(http.StatusBadRequest)
		c.Errorf("Failed reading body: %v", err)
		return
	}
	remReq := &pb.Request{}
	if err := proto.Unmarshal(body, remReq); err != nil {
		w.WriteHeader(http.StatusBadRequest)
		c.Errorf("Bad body: %v", err)
		return
	}

	service, method := *remReq.ServiceName, *remReq.Method
	if !requestSupported(service, method) {
		w.WriteHeader(http.StatusBadRequest)
		c.Errorf("Unsupported RPC /%s.%s", service, method)
		return
	}

	// The inner payload is forwarded opaquely; rawMessage avoids having to
	// know the concrete request/response types.
	rawReq := &rawMessage{remReq.Request}
	rawRes := &rawMessage{}
	err = c.Call(service, method, rawReq, rawRes, nil)

	remRes := &pb.Response{}
	if err == nil {
		remRes.Response = rawRes.buf
	} else if ae, ok := err.(*internal.APIError); ok {
		remRes.ApplicationError = &pb.ApplicationError{
			Code:   &ae.Code,
			Detail: &ae.Detail,
		}
	} else {
		// This shouldn't normally happen.
		c.Errorf("appengine/remote_api: Unexpected error of type %T: %v", err, err)
		remRes.ApplicationError = &pb.ApplicationError{
			Code:   proto.Int32(0),
			Detail: proto.String(err.Error()),
		}
	}
	out, err := proto.Marshal(remRes)
	if err != nil {
		// This should not be possible.
		w.WriteHeader(500)
		c.Errorf("proto.Marshal: %v", err)
		return
	}

	c.Infof("Spooling %d bytes of response to /%s.%s", len(out), service, method)
	w.Header().Set("Content-Type", "application/octet-stream")
	w.Header().Set("Content-Length", strconv.Itoa(len(out)))
	w.Write(out)
}
+
// rawMessage is a protocol buffer type that is already serialised.
// This allows the remote_api code here to handle messages
// without having to know the real type.
type rawMessage struct {
	buf []byte
}

// Marshal returns the pre-serialised bytes unchanged.
func (rm *rawMessage) Marshal() ([]byte, error) {
	return rm.buf, nil
}

// Unmarshal stores a private copy of buf.
func (rm *rawMessage) Unmarshal(buf []byte) error {
	b := make([]byte, len(buf))
	copy(b, buf)
	rm.buf = b
	return nil
}
+
// requestSupported reports whether the remote_api endpoint is willing to
// proxy the given service.method RPC.
func requestSupported(service, method string) bool {
	// Only allow datastore_v3 for now, or AllocateIds for datastore_v4.
	switch service {
	case "datastore_v3":
		return true
	case "datastore_v4":
		return method == "AllocateIds"
	}
	return false
}
+
// Methods to satisfy proto.Message.

// Reset discards the serialised buffer.
func (rm *rawMessage) Reset() { rm.buf = nil }

// String returns the buffer as a quoted Go string, for debugging output.
func (rm *rawMessage) String() string { return strconv.Quote(string(rm.buf)) }

// ProtoMessage tags rawMessage as a protocol buffer message.
func (*rawMessage) ProtoMessage() {}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/search/field.go b/Godeps/_workspace/src/google.golang.org/appengine/search/field.go
new file mode 100644
index 000000000000..b4c31c0c9a60
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/search/field.go
@@ -0,0 +1,144 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "fmt"
+ "reflect"
+)
+
// Field is a name/value pair. A search index's document can be loaded and
// saved as a sequence of Fields.
type Field struct {
	// Name is the field name.
	Name string
	// Value is the field value. The valid types are:
	// - string,
	// - search.Atom,
	// - search.HTML,
	// - time.Time (stored with millisecond precision),
	// - float64,
	// - GeoPoint.
	Value interface{}
	// Language is a two-letter ISO 639-1 code for the field's language,
	// defaulting to "en" if nothing is specified. It may only be specified for
	// fields of type string and search.HTML.
	Language string
	// Derived marks fields that were calculated as a result of a
	// FieldExpression provided to Search. This field is ignored when saving a
	// document.
	Derived bool
}
+
// DocumentMetadata is a struct containing information describing a given document.
type DocumentMetadata struct {
	// Rank is an integer specifying the order the document will be returned in
	// search results. If zero, the rank will be set to the number of seconds since
	// 2011-01-01 00:00:00 UTC when being Put into an index.
	Rank int
}
+
// FieldLoadSaver can be converted from and to a slice of Fields
// with additional document metadata. Load receives the fields and metadata
// of a retrieved document; Save produces the fields and metadata to store.
type FieldLoadSaver interface {
	Load([]Field, *DocumentMetadata) error
	Save() ([]Field, *DocumentMetadata, error)
}
+
// FieldList converts a []Field to implement FieldLoadSaver.
type FieldList []Field

// Load loads all of the provided fields into l.
// It does not first reset *l to an empty slice.
func (l *FieldList) Load(f []Field, _ *DocumentMetadata) error {
	*l = append(*l, f...)
	return nil
}

// Save returns all of l's fields as a slice of Fields, with no metadata.
func (l *FieldList) Save() ([]Field, *DocumentMetadata, error) {
	return *l, nil, nil
}

// Compile-time check that *FieldList satisfies FieldLoadSaver.
var _ FieldLoadSaver = (*FieldList)(nil)
+
// structFLS adapts a struct to be a FieldLoadSaver.
type structFLS struct {
	reflect.Value
}

// Load assigns each incoming field to the struct field of the same name.
// Mismatches (unknown name, unsettable field, wrong type) are recorded as an
// ErrFieldMismatch but do not stop the loop; the last such error is returned,
// so a non-nil result may still mean other fields loaded successfully.
func (s structFLS) Load(fields []Field, _ *DocumentMetadata) (err error) {
	for _, field := range fields {
		f := s.FieldByName(field.Name)
		if !f.IsValid() {
			err = &ErrFieldMismatch{
				FieldName: field.Name,
				Reason:    "no such struct field",
			}
			continue
		}
		if !f.CanSet() {
			err = &ErrFieldMismatch{
				FieldName: field.Name,
				Reason:    "cannot set struct field",
			}
			continue
		}
		v := reflect.ValueOf(field.Value)
		if ft, vt := f.Type(), v.Type(); ft != vt {
			// NOTE(review): the message reads "<struct type> for <value type>
			// data"; confirm the intended argument order before rewording.
			err = &ErrFieldMismatch{
				FieldName: field.Name,
				Reason:    fmt.Sprintf("type mismatch: %v for %v data", ft, vt),
			}
			continue
		}
		f.Set(v)
	}
	return err
}
+
+func (s structFLS) Save() ([]Field, *DocumentMetadata, error) {
+ fields := make([]Field, 0, s.NumField())
+ for i := 0; i < s.NumField(); i++ {
+ f := s.Field(i)
+ if !f.CanSet() {
+ continue
+ }
+ fields = append(fields, Field{
+ Name: s.Type().Field(i).Name,
+ Value: f.Interface(),
+ })
+ }
+ return fields, nil, nil
+}
+
// newStructFLS returns a FieldLoadSaver for the struct pointer p.
// It returns ErrInvalidDocumentType unless p is a non-nil pointer to a struct.
func newStructFLS(p interface{}) (FieldLoadSaver, error) {
	v := reflect.ValueOf(p)
	if v.Kind() != reflect.Ptr || v.IsNil() || v.Elem().Kind() != reflect.Struct {
		return nil, ErrInvalidDocumentType
	}
	return structFLS{v.Elem()}, nil
}
+
+// LoadStruct loads the fields from f to dst. dst must be a struct pointer.
+func LoadStruct(dst interface{}, f []Field) error {
+ x, err := newStructFLS(dst)
+ if err != nil {
+ return err
+ }
+ return x.Load(f, nil)
+}
+
+// SaveStruct returns the fields from src as a slice of Field.
+// src must be a struct pointer.
+func SaveStruct(src interface{}) ([]Field, error) {
+ x, err := newStructFLS(src)
+ if err != nil {
+ return nil, err
+ }
+ fs, _, err := x.Save()
+ return fs, err
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/search/search.go b/Godeps/_workspace/src/google.golang.org/appengine/search/search.go
new file mode 100644
index 000000000000..2eefd41a58e9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/search/search.go
@@ -0,0 +1,853 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package search provides a client for App Engine's search service.
+
+Indexes contain documents, and a document's contents are a mapping from case-
+sensitive field names to values. In Go, documents are represented by struct
+pointers, and the valid types for a struct's fields are:
+ - string,
+ - search.Atom,
+ - search.HTML,
+ - time.Time (stored with millisecond precision),
+ - float64 (value between -2,147,483,647 and 2,147,483,647 inclusive),
+ - appengine.GeoPoint.
+
+Documents can also be represented by any type implementing the FieldLoadSaver
+interface.
+
+Example code:
+
+ type Doc struct {
+ Author string
+ Comment string
+ Creation time.Time
+ }
+
+ index, err := search.Open("comments")
+ if err != nil {
+ return err
+ }
+ newID, err := index.Put(c, "", &Doc{
+ Author: "gopher",
+ Comment: "the truth of the matter",
+ Creation: time.Now(),
+ })
+ if err != nil {
+ return err
+ }
+
+Searching an index for a query will result in an iterator. As with an iterator
+from package datastore, pass a destination struct to Next to decode the next
+result. Next will return Done when the iterator is exhausted.
+
+ for t := index.Search(c, "Comment:truth", nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+Call List to iterate over documents.
+
+ for t := index.List(c, nil); ; {
+ var doc Doc
+ id, err := t.Next(&doc)
+ if err == search.Done {
+ break
+ }
+ if err != nil {
+ return err
+ }
+ fmt.Fprintf(w, "%s -> %#v\n", id, doc)
+ }
+
+A single document can also be retrieved by its ID. Pass a destination struct
+to Get to hold the resulting document.
+
+ var doc Doc
+ err := index.Get(c, id, &doc)
+ if err != nil {
+ return err
+ }
+
+Queries are expressed as strings, plus some optional parameters. The query
+language is described at
+https://cloud.google.com/appengine/docs/go/search/query_strings
+
+Note that in Go, field names come from the struct field definition and begin
+with an upper case letter.
+*/
+package search // import "google.golang.org/appengine/search"
+
+// TODO: let Put specify the document language: "en", "fr", etc. Also: order_id?? storage??
+// TODO: Index.GetAll (or Iterator.GetAll)?
+// TODO: struct <-> protobuf tests.
+// TODO: enforce Python's MIN_NUMBER_VALUE and MIN_DATE (which would disallow a zero
+// time.Time)? _MAXIMUM_STRING_LENGTH?
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "regexp"
+ "strconv"
+ "strings"
+ "time"
+ "unicode/utf8"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+var (
+ // ErrInvalidDocumentType is returned when methods like Put, Get or Next
+ // are passed a dst or src argument of invalid type.
+ ErrInvalidDocumentType = errors.New("search: invalid document type")
+
+ // ErrNoSuchDocument is returned when no document was found for a given ID.
+ ErrNoSuchDocument = errors.New("search: no such document")
+)
+
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct.
+type ErrFieldMismatch struct {
+	FieldName string
+	Reason    string
+}
+
+// Error implements the error interface.
+func (e *ErrFieldMismatch) Error() string {
+	return fmt.Sprintf("search: cannot load field %q: %s", e.FieldName, e.Reason)
+}
+
+// Atom is a document field whose contents are indexed as a single indivisible
+// string.
+type Atom string
+
+// HTML is a document field whose contents are indexed as HTML. Only text nodes
+// are indexed: "foo<b>bar</b>" will be treated as "foobar".
+type HTML string
+
+// validIndexNameOrDocID is the Go equivalent of Python's
+// _ValidateVisiblePrintableAsciiNotReserved. It reports whether s consists
+// only of visible, printable ASCII characters (0x21-0x7e) and does not
+// start with the reserved "!" prefix. The empty string is accepted.
+// Invalid UTF-8 decodes to utf8.RuneError (>= 0x7f), so such strings are
+// rejected as well.
+func validIndexNameOrDocID(s string) bool {
+	if strings.HasPrefix(s, "!") {
+		return false
+	}
+	for _, c := range s {
+		if c < 0x21 || 0x7f <= c {
+			return false
+		}
+	}
+	return true
+}
+
+var (
+ fieldNameRE = regexp.MustCompile(`^[A-Z][A-Za-z0-9_]*$`)
+ languageRE = regexp.MustCompile(`^[a-z]{2}$`)
+)
+
+// validFieldName is the Go equivalent of Python's _CheckFieldName. A valid
+// field name is at most 500 characters and starts with an upper-case ASCII
+// letter followed by ASCII letters, digits, or underscores.
+func validFieldName(s string) bool {
+	return len(s) <= 500 && fieldNameRE.MatchString(s)
+}
+
+// validDocRank checks that the ranks is in the range [0, 2^31).
+func validDocRank(r int) bool {
+ return 0 <= r && r <= (1<<31-1)
+}
+
+// validLanguage checks that a language looks like ISO 639-1: exactly two
+// lower-case ASCII letters.
+func validLanguage(s string) bool {
+	return languageRE.MatchString(s)
+}
+
+// validFloat checks that f is in the range [-2147483647, 2147483647].
+func validFloat(f float64) bool {
+ return -(1<<31-1) <= f && f <= (1<<31-1)
+}
+
+// Index is an index of documents. It wraps the protobuf IndexSpec that
+// identifies the index in RPC requests.
+type Index struct {
+	spec pb.IndexSpec
+}
+
+// orderIDEpoch forms the basis for populating OrderId on documents:
+// the default rank is the number of seconds elapsed since this instant.
+var orderIDEpoch = time.Date(2011, 1, 1, 0, 0, 0, 0, time.UTC)
+
+// Open opens the index with the given name. The index is created if it does
+// not already exist.
+//
+// The name is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// Open performs no RPC; it only validates the name and builds the spec.
+func Open(name string) (*Index, error) {
+	if !validIndexNameOrDocID(name) {
+		return nil, fmt.Errorf("search: invalid index name %q", name)
+	}
+	return &Index{
+		spec: pb.IndexSpec{
+			Name: &name,
+		},
+	}, nil
+}
+
+// Put saves src to the index. If id is empty, a new ID is allocated by the
+// service and returned. If id is not empty, any existing index entry for that
+// ID is replaced.
+//
+// The ID is a human-readable ASCII string. It must contain no whitespace
+// characters and not start with "!".
+//
+// src must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+func (x *Index) Put(c appengine.Context, id string, src interface{}) (string, error) {
+	fields, meta, err := saveDoc(src)
+	if err != nil {
+		return "", err
+	}
+	d := &pb.Document{
+		Field: fields,
+		// Default rank: whole seconds elapsed since orderIDEpoch.
+		OrderId: proto.Int32(int32(time.Since(orderIDEpoch).Seconds())),
+	}
+	if meta != nil {
+		if meta.Rank != 0 {
+			if !validDocRank(meta.Rank) {
+				return "", fmt.Errorf("search: invalid rank %d, must be [0, 2^31)", meta.Rank)
+			}
+			// An explicit, valid rank overrides the time-based default.
+			*d.OrderId = int32(meta.Rank)
+		}
+	}
+	if id != "" {
+		if !validIndexNameOrDocID(id) {
+			return "", fmt.Errorf("search: invalid ID %q", id)
+		}
+		d.Id = proto.String(id)
+	}
+	req := &pb.IndexDocumentRequest{
+		Params: &pb.IndexDocumentParams{
+			Document:  []*pb.Document{d},
+			IndexSpec: &x.spec,
+		},
+	}
+	res := &pb.IndexDocumentResponse{}
+	if err := c.Call("search", "IndexDocument", req, res, nil); err != nil {
+		return "", err
+	}
+	// Surface a service-level error first so its message, not the generic
+	// count mismatch below, reaches the caller.
+	if len(res.Status) > 0 {
+		if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+			return "", fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+		}
+	}
+	if len(res.Status) != 1 || len(res.DocId) != 1 {
+		return "", fmt.Errorf("search: internal error: wrong number of results (%d Statuses, %d DocIDs)",
+			len(res.Status), len(res.DocId))
+	}
+	return res.DocId[0], nil
+}
+
+// Get loads the document with the given ID into dst.
+//
+// The ID is a human-readable ASCII string. It must be non-empty, contain no
+// whitespace characters and not start with "!".
+//
+// dst must be a non-nil struct pointer or implement the FieldLoadSaver
+// interface.
+//
+// ErrFieldMismatch is returned when a field is to be loaded into a different
+// type than the one it was stored from, or when a field is missing or
+// unexported in the destination struct. ErrFieldMismatch is only returned if
+// dst is a struct pointer. It is up to the callee to decide whether this error
+// is fatal, recoverable, or ignorable.
+//
+// Get is implemented as a ListDocuments RPC starting at the requested ID
+// with a limit of one, so the returned document's ID must match exactly.
+func (x *Index) Get(c appengine.Context, id string, dst interface{}) error {
+	if id == "" || !validIndexNameOrDocID(id) {
+		return fmt.Errorf("search: invalid ID %q", id)
+	}
+	req := &pb.ListDocumentsRequest{
+		Params: &pb.ListDocumentsParams{
+			IndexSpec:  &x.spec,
+			StartDocId: proto.String(id),
+			Limit:      proto.Int32(1),
+		},
+	}
+	res := &pb.ListDocumentsResponse{}
+	if err := c.Call("search", "ListDocuments", req, res, nil); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	// ListDocuments may return the next document after id if id itself does
+	// not exist; treat anything but an exact single match as not found.
+	if len(res.Document) != 1 || res.Document[0].GetId() != id {
+		return ErrNoSuchDocument
+	}
+	metadata := &DocumentMetadata{
+		Rank: int(res.Document[0].GetOrderId()),
+	}
+	return loadDoc(dst, res.Document[0].Field, nil, metadata)
+}
+
+// Delete deletes a document from the index by its ID.
+func (x *Index) Delete(c appengine.Context, id string) error {
+	req := &pb.DeleteDocumentRequest{
+		Params: &pb.DeleteDocumentParams{
+			DocId:     []string{id},
+			IndexSpec: &x.spec,
+		},
+	}
+	res := &pb.DeleteDocumentResponse{}
+	if err := c.Call("search", "DeleteDocument", req, res, nil); err != nil {
+		return err
+	}
+	// One document was requested, so exactly one status is expected back.
+	if len(res.Status) != 1 {
+		return fmt.Errorf("search: internal error: wrong number of results (%d)", len(res.Status))
+	}
+	if s := res.Status[0]; s.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", s.GetCode(), s.GetErrorDetail())
+	}
+	return nil
+}
+
+// List lists all of the documents in an index. The documents are returned in
+// increasing ID order. The returned iterator fetches lazily; no RPC is made
+// until the first call to Next.
+func (x *Index) List(c appengine.Context, opts *ListOptions) *Iterator {
+	t := &Iterator{
+		c:             c,
+		index:         x,
+		count:         -1,
+		listInclusive: true, // the first batch includes the start document itself
+		more:          moreList,
+		limit:         -1,
+	}
+	if opts != nil {
+		t.listStartID = opts.StartID
+		if opts.Limit > 0 {
+			t.limit = opts.Limit
+		}
+		t.idsOnly = opts.IDsOnly
+	}
+	return t
+}
+
+// moreList fetches the next batch of documents for a List iterator and
+// updates the iterator's resume state. Pagination works by remembering the
+// last returned ID and asking for the next batch starting (exclusively)
+// from it.
+func moreList(t *Iterator) error {
+	req := &pb.ListDocumentsRequest{
+		Params: &pb.ListDocumentsParams{
+			IndexSpec: &t.index.spec,
+		},
+	}
+	if t.listStartID != "" {
+		req.Params.StartDocId = &t.listStartID
+		req.Params.IncludeStartDoc = &t.listInclusive
+	}
+	if t.limit > 0 {
+		req.Params.Limit = proto.Int32(int32(t.limit))
+	}
+	if t.idsOnly {
+		req.Params.KeysOnly = &t.idsOnly
+	}
+
+	res := &pb.ListDocumentsResponse{}
+	if err := t.c.Call("search", "ListDocuments", req, res, nil); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	t.listRes = res.Document
+	// Assume this was the final batch; re-arm below if it was not.
+	t.listStartID, t.listInclusive, t.more = "", false, nil
+	if len(res.Document) != 0 {
+		if id := res.Document[len(res.Document)-1].GetId(); id != "" {
+			// Resume after the last returned document on the next fetch
+			// (listInclusive is now false, so it is not re-delivered).
+			t.listStartID, t.more = id, moreList
+		}
+	}
+	return nil
+}
+
+// ListOptions are the options for listing documents in an index. Passing a nil
+// *ListOptions is equivalent to using the default values.
+type ListOptions struct {
+ // StartID is the inclusive lower bound for the ID of the returned
+ // documents. The zero value means all documents will be returned.
+ StartID string
+
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the list
+ // operation; no document fields are populated.
+ IDsOnly bool
+}
+
+// Search searches the index for the given query. The returned iterator
+// fetches lazily; no RPC is made until the first call to Next.
+func (x *Index) Search(c appengine.Context, query string, opts *SearchOptions) *Iterator {
+	t := &Iterator{
+		c:           c,
+		index:       x,
+		searchQuery: query,
+		more:        moreSearch,
+		limit:       -1,
+	}
+	if opts != nil {
+		if opts.Limit > 0 {
+			t.limit = opts.Limit
+		}
+		t.fields = opts.Fields
+		t.idsOnly = opts.IDsOnly
+		t.sort = opts.Sort
+		t.exprs = opts.Expressions
+	}
+	return t
+}
+
+// moreSearch fetches the next batch of results for a Search iterator,
+// using the service's single-cursor pagination to resume where the last
+// batch ended.
+func moreSearch(t *Iterator) error {
+	req := &pb.SearchRequest{
+		Params: &pb.SearchParams{
+			IndexSpec:  &t.index.spec,
+			Query:      &t.searchQuery,
+			CursorType: pb.SearchParams_SINGLE.Enum(),
+			FieldSpec: &pb.FieldSpec{
+				Name: t.fields,
+			},
+		},
+	}
+	if t.limit > 0 {
+		req.Params.Limit = proto.Int32(int32(t.limit))
+	}
+	if t.idsOnly {
+		req.Params.KeysOnly = &t.idsOnly
+	}
+	if t.sort != nil {
+		if err := sortToProto(t.sort, req.Params); err != nil {
+			return err
+		}
+	}
+	for _, e := range t.exprs {
+		req.Params.FieldSpec.Expression = append(req.Params.FieldSpec.Expression, &pb.FieldSpec_Expression{
+			Name:       proto.String(e.Name),
+			Expression: proto.String(e.Expr),
+		})
+	}
+
+	if t.searchCursor != nil {
+		req.Params.Cursor = t.searchCursor
+	}
+	res := &pb.SearchResponse{}
+	if err := t.c.Call("search", "Search", req, res, nil); err != nil {
+		return err
+	}
+	if res.Status == nil || res.Status.GetCode() != pb.SearchServiceError_OK {
+		return fmt.Errorf("search: %s: %s", res.Status.GetCode(), res.Status.GetErrorDetail())
+	}
+	t.searchRes = res.Result
+	// NOTE(review): assumes the service always sets MatchedCount on an OK
+	// response; a nil value here would panic — confirm against the API.
+	t.count = int(*res.MatchedCount)
+	if res.Cursor != nil {
+		t.searchCursor, t.more = res.Cursor, moreSearch
+	} else {
+		// No cursor means the results are exhausted.
+		t.searchCursor, t.more = nil, nil
+	}
+	return nil
+}
+
+// SearchOptions are the options for searching an index. Passing a nil
+// *SearchOptions is equivalent to using the default values.
+type SearchOptions struct {
+ // Limit is the maximum number of documents to return. The zero value
+ // indicates no limit.
+ Limit int
+
+ // IDsOnly indicates that only document IDs should be returned for the search
+ // operation; no document fields are populated.
+ IDsOnly bool
+
+ // Sort controls the ordering of search results.
+ Sort *SortOptions
+
+ // Fields specifies which document fields to include in the results. If omitted,
+ // all document fields are returned. No more than 100 fields may be specified.
+ Fields []string
+
+ // Expressions specifies additional computed fields to add to each returned
+ // document.
+ Expressions []FieldExpression
+
+ // TODO: cursor, offset, maybe others.
+}
+
+// FieldExpression defines a custom expression to evaluate for each result.
+type FieldExpression struct {
+ // Name is the name to use for the computed field.
+ Name string
+
+ // Expr is evaluated to provide a custom content snippet for each document.
+ // See https://cloud.google.com/appengine/docs/go/search/options for
+ // the supported expression syntax.
+ Expr string
+}
+
+// SortOptions control the ordering and scoring of search results.
+type SortOptions struct {
+ // Expressions is a slice of expressions representing a multi-dimensional
+ // sort.
+ Expressions []SortExpression
+
+ // Scorer, when specified, will cause the documents to be scored according to
+ // search term frequency.
+ Scorer Scorer
+
+ // Limit is the maximum number of objects to score and/or sort. Limit cannot
+ // be more than 10,000. The zero value indicates a default limit.
+ Limit int
+}
+
+// SortExpression defines a single dimension for sorting a document.
+type SortExpression struct {
+	// Expr is evaluated to provide a sorting value for each document.
+	// See https://cloud.google.com/appengine/docs/go/search/options for
+	// the supported expression syntax.
+	Expr string
+
+	// Reverse causes the documents to be sorted in ascending order.
+	Reverse bool
+
+	// The default value to use when no field is present or the expression
+	// cannot be calculated for a document. For text sorts, Default must
+	// be of type string; for numeric sorts, float64.
+	Default interface{}
+}
+
+// A Scorer defines how a document is scored.
+type Scorer interface {
+	toProto(*pb.ScorerSpec)
+}
+
+// enumScorer is a Scorer backed by one of the service's predefined scorer
+// enum values.
+type enumScorer struct {
+	enum pb.ScorerSpec_Scorer
+}
+
+func (e enumScorer) toProto(spec *pb.ScorerSpec) {
+	spec.Scorer = e.enum.Enum()
+}
+
+var (
+ // MatchScorer assigns a score based on term frequency in a document.
+ MatchScorer Scorer = enumScorer{pb.ScorerSpec_MATCH_SCORER}
+
+ // RescoringMatchScorer assigns a score based on the quality of the query
+ // match. It is similar to a MatchScorer but uses a more complex scoring
+ // algorithm based on match term frequency and other factors like field type.
+ // Please be aware that this algorithm is continually refined and can change
+ // over time without notice. This means that the ordering of search results
+ // that use this scorer can also change without notice.
+ RescoringMatchScorer Scorer = enumScorer{pb.ScorerSpec_RESCORING_MATCH_SCORER}
+)
+
+// sortToProto converts a SortOptions into the SortSpec and ScorerSpec
+// fields of the given SearchParams. It returns an error only for an
+// unsupported SortExpression.Default type.
+func sortToProto(sort *SortOptions, params *pb.SearchParams) error {
+	for _, e := range sort.Expressions {
+		spec := &pb.SortSpec{
+			SortExpression: proto.String(e.Expr),
+		}
+		if e.Reverse {
+			// Reverse requests ascending order (SortDescending off).
+			spec.SortDescending = proto.Bool(false)
+		}
+		if e.Default != nil {
+			switch d := e.Default.(type) {
+			case float64:
+				spec.DefaultValueNumeric = &d
+			case string:
+				spec.DefaultValueText = &d
+			default:
+				return fmt.Errorf("search: invalid Default type %T for expression %q", d, e.Expr)
+			}
+		}
+		params.SortSpec = append(params.SortSpec, spec)
+	}
+
+	// Attach a ScorerSpec only when a scoring limit or a scorer was given.
+	spec := &pb.ScorerSpec{}
+	if sort.Limit > 0 {
+		spec.Limit = proto.Int32(int32(sort.Limit))
+		params.ScorerSpec = spec
+	}
+	if sort.Scorer != nil {
+		sort.Scorer.toProto(spec)
+		params.ScorerSpec = spec
+	}
+
+	return nil
+}
+
+// Iterator is the result of searching an index for a query or listing an
+// index.
+type Iterator struct {
+	c     appengine.Context
+	index *Index
+	err   error // sticky; once set, Next keeps returning it
+
+	// List state: buffered batch plus the resume position.
+	listRes       []*pb.Document
+	listStartID   string
+	listInclusive bool
+
+	// Search state: buffered batch plus the resume cursor.
+	searchRes    []*pb.SearchResult
+	searchQuery  string
+	searchCursor *string
+	sort         *SortOptions
+
+	fields []string
+	exprs  []FieldExpression
+
+	// more fetches the next batch; nil when no further batches exist.
+	more func(*Iterator) error
+
+	count   int
+	limit   int // items left to return; -1 for unlimited.
+	idsOnly bool
+}
+
+// Done is returned when a query iteration has completed.
+var Done = errors.New("search: query has no more results")
+
+// Count returns an approximation of the number of documents matched by the
+// query. It is only valid to call for iterators returned by Search.
+func (t *Iterator) Count() int { return t.count }
+
+// Next returns the ID of the next result. When there are no more results,
+// Done is returned as the error.
+//
+// dst must be a non-nil struct pointer, implement the FieldLoadSaver
+// interface, or be a nil interface value. If a non-nil dst is provided, it
+// will be filled with the indexed fields. dst is ignored if this iterator was
+// created with an IDsOnly option.
+func (t *Iterator) Next(dst interface{}) (string, error) {
+ if t.err == nil && len(t.listRes)+len(t.searchRes) == 0 && t.more != nil {
+ t.err = t.more(t)
+ }
+ if t.err != nil {
+ return "", t.err
+ }
+
+ var doc *pb.Document
+ var exprs []*pb.Field
+ switch {
+ case len(t.listRes) != 0:
+ doc = t.listRes[0]
+ t.listRes = t.listRes[1:]
+ case len(t.searchRes) != 0:
+ doc = t.searchRes[0].Document
+ exprs = t.searchRes[0].Expression
+ t.searchRes = t.searchRes[1:]
+ default:
+ return "", Done
+ }
+ if doc == nil {
+ return "", errors.New("search: internal error: no document returned")
+ }
+ if !t.idsOnly && dst != nil {
+ metadata := &DocumentMetadata{
+ Rank: int(doc.GetOrderId()),
+ }
+ if err := loadDoc(dst, doc.Field, exprs, metadata); err != nil {
+ return "", err
+ }
+ }
+ if t.limit > 0 {
+ t.limit--
+ if t.limit == 0 {
+ t.more = nil // prevent further fetches
+ }
+ }
+ return doc.GetId(), nil
+}
+
+// saveDoc converts from a struct pointer or FieldLoadSaver to protobufs.
+// A FieldLoadSaver supplies its own fields and metadata; any other value is
+// treated as a struct pointer via SaveStruct (which yields no metadata).
+func saveDoc(src interface{}) ([]*pb.Field, *DocumentMetadata, error) {
+	var err error
+	var fields []Field
+	var meta *DocumentMetadata
+	switch x := src.(type) {
+	case FieldLoadSaver:
+		fields, meta, err = x.Save()
+	default:
+		fields, err = SaveStruct(src)
+	}
+	if err != nil {
+		return nil, nil, err
+	}
+	f, err := fieldsToProto(fields)
+	return f, meta, err
+}
+
+// fieldsToProto converts []Field to protobuf fields, validating field
+// names, numeric ranges, languages, and UTF-8 content along the way.
+// Duplicate names are allowed for text-like fields but rejected for time
+// and numeric fields, mirroring the service's restrictions.
+func fieldsToProto(src []Field) ([]*pb.Field, error) {
+	// Maps to catch duplicate time or numeric fields.
+	timeFields, numericFields := make(map[string]bool), make(map[string]bool)
+	dst := make([]*pb.Field, 0, len(src))
+	for _, f := range src {
+		if !validFieldName(f.Name) {
+			return nil, fmt.Errorf("search: invalid field name %q", f.Name)
+		}
+		fieldValue := &pb.FieldValue{}
+		switch x := f.Value.(type) {
+		case string:
+			fieldValue.Type = pb.FieldValue_TEXT.Enum()
+			fieldValue.StringValue = proto.String(x)
+		case Atom:
+			fieldValue.Type = pb.FieldValue_ATOM.Enum()
+			fieldValue.StringValue = proto.String(string(x))
+		case HTML:
+			fieldValue.Type = pb.FieldValue_HTML.Enum()
+			fieldValue.StringValue = proto.String(string(x))
+		case time.Time:
+			if timeFields[f.Name] {
+				return nil, fmt.Errorf("search: duplicate time field %q", f.Name)
+			}
+			timeFields[f.Name] = true
+			fieldValue.Type = pb.FieldValue_DATE.Enum()
+			// Times are encoded as milliseconds since the Unix epoch,
+			// in decimal string form.
+			fieldValue.StringValue = proto.String(strconv.FormatInt(x.UnixNano()/1e6, 10))
+		case float64:
+			if numericFields[f.Name] {
+				return nil, fmt.Errorf("search: duplicate numeric field %q", f.Name)
+			}
+			if !validFloat(x) {
+				return nil, fmt.Errorf("search: numeric field %q with invalid value %f", f.Name, x)
+			}
+			numericFields[f.Name] = true
+			fieldValue.Type = pb.FieldValue_NUMBER.Enum()
+			fieldValue.StringValue = proto.String(strconv.FormatFloat(x, 'e', -1, 64))
+		case appengine.GeoPoint:
+			if !x.Valid() {
+				return nil, fmt.Errorf(
+					"search: GeoPoint field %q with invalid value %v",
+					f.Name, x)
+			}
+			fieldValue.Type = pb.FieldValue_GEO.Enum()
+			fieldValue.Geo = &pb.FieldValue_Geo{
+				Lat: proto.Float64(x.Lat),
+				Lng: proto.Float64(x.Lng),
+			}
+		default:
+			return nil, fmt.Errorf("search: unsupported field type: %v", reflect.TypeOf(f.Value))
+		}
+		if f.Language != "" {
+			switch f.Value.(type) {
+			case string, HTML:
+				// Language hints are only meaningful for text and HTML.
+				if !validLanguage(f.Language) {
+					return nil, fmt.Errorf("search: invalid language for field %q: %q", f.Name, f.Language)
+				}
+				fieldValue.Language = proto.String(f.Language)
+			default:
+				return nil, fmt.Errorf("search: setting language not supported for field %q of type %T", f.Name, f.Value)
+			}
+		}
+		if p := fieldValue.StringValue; p != nil && !utf8.ValidString(*p) {
+			return nil, fmt.Errorf("search: %q field is invalid UTF-8: %q", f.Name, *p)
+		}
+		dst = append(dst, &pb.Field{
+			Name:  proto.String(f.Name),
+			Value: fieldValue,
+		})
+	}
+	return dst, nil
+}
+
+// loadDoc converts from protobufs and document metadata to a struct pointer or
+// FieldLoadSaver/FieldMetadataLoadSaver. Two slices of fields may be provided:
+// src represents the document's stored fields; exprs is the derived expressions
+// requested by the developer. The latter may be empty.
+func loadDoc(dst interface{}, src, exprs []*pb.Field, meta *DocumentMetadata) (err error) {
+	fields, err := protoToFields(src)
+	if err != nil {
+		return err
+	}
+	if len(exprs) > 0 {
+		exprFields, err := protoToFields(exprs)
+		if err != nil {
+			return err
+		}
+		// Mark each field as derived so callers can tell expression
+		// results apart from stored fields.
+		for i := range exprFields {
+			exprFields[i].Derived = true
+		}
+		fields = append(fields, exprFields...)
+	}
+	switch x := dst.(type) {
+	case FieldLoadSaver:
+		return x.Load(fields, meta)
+	default:
+		// Struct pointers get no metadata; only fields are loaded.
+		return LoadStruct(dst, fields)
+	}
+}
+
+// protoToFields converts a slice of protobuf fields into []Field,
+// reversing the encoding performed by fieldsToProto. It returns an error
+// for malformed date/number encodings, invalid geo points, or unknown
+// field types.
+func protoToFields(fields []*pb.Field) ([]Field, error) {
+	dst := make([]Field, 0, len(fields))
+	for _, field := range fields {
+		fieldValue := field.GetValue()
+		f := Field{
+			Name: field.GetName(),
+		}
+		switch fieldValue.GetType() {
+		case pb.FieldValue_TEXT:
+			f.Value = fieldValue.GetStringValue()
+			f.Language = fieldValue.GetLanguage()
+		case pb.FieldValue_ATOM:
+			f.Value = Atom(fieldValue.GetStringValue())
+		case pb.FieldValue_HTML:
+			f.Value = HTML(fieldValue.GetStringValue())
+			f.Language = fieldValue.GetLanguage()
+		case pb.FieldValue_DATE:
+			// Dates are stored as milliseconds since the Unix epoch,
+			// encoded as a decimal string.
+			sv := fieldValue.GetStringValue()
+			millis, err := strconv.ParseInt(sv, 10, 64)
+			if err != nil {
+				return nil, fmt.Errorf("search: internal error: bad time.Time encoding %q: %v", sv, err)
+			}
+			f.Value = time.Unix(0, millis*1e6)
+		case pb.FieldValue_NUMBER:
+			sv := fieldValue.GetStringValue()
+			x, err := strconv.ParseFloat(sv, 64)
+			if err != nil {
+				return nil, err
+			}
+			f.Value = x
+		case pb.FieldValue_GEO:
+			geoValue := fieldValue.GetGeo()
+			// Keyed fields: equivalent to the unkeyed literal, but does
+			// not trip go vet's composite-literal check for an external
+			// struct type.
+			geoPoint := appengine.GeoPoint{Lat: geoValue.GetLat(), Lng: geoValue.GetLng()}
+			if !geoPoint.Valid() {
+				return nil, fmt.Errorf("search: internal error: invalid GeoPoint encoding: %v", geoPoint)
+			}
+			f.Value = geoPoint
+		default:
+			return nil, fmt.Errorf("search: internal error: unknown data type %s", fieldValue.GetType())
+		}
+		dst = append(dst, f)
+	}
+	return dst, nil
+}
+
+// namespaceMod fills in the namespace on a search RPC request if the
+// caller has not already set one. It is registered as the namespace hook
+// for the "search" service in init below.
+func namespaceMod(m proto.Message, namespace string) {
+	set := func(s **string) {
+		// Only apply the ambient namespace when none was set explicitly.
+		if *s == nil {
+			*s = &namespace
+		}
+	}
+	switch m := m.(type) {
+	case *pb.IndexDocumentRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.ListDocumentsRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.DeleteDocumentRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	case *pb.SearchRequest:
+		set(&m.Params.IndexSpec.Namespace)
+	}
+}
+
+func init() {
+	// Register the error-code name map and the namespace hook with the
+	// internal RPC plumbing for the "search" service.
+	internal.RegisterErrorCodeMap("search", pb.SearchServiceError_ErrorCode_name)
+	internal.NamespaceMods["search"] = namespaceMod
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/search/search_test.go b/Godeps/_workspace/src/google.golang.org/appengine/search/search_test.go
new file mode 100644
index 000000000000..6178c517bcf8
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/search/search_test.go
@@ -0,0 +1,650 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package search
+
+import (
+ "errors"
+ "fmt"
+ "reflect"
+ "strings"
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/search"
+)
+
+// TestDoc exercises every supported document field type.
+type TestDoc struct {
+	String   string
+	Atom     Atom
+	HTML     HTML
+	Float    float64
+	Location appengine.GeoPoint
+	Time     time.Time
+}
+
+// FieldListWithMeta is a FieldLoadSaver that records the metadata passed
+// to Load and returns stored metadata from Save, delegating field handling
+// to an embedded FieldList.
+type FieldListWithMeta struct {
+	Fields FieldList
+	Meta   *DocumentMetadata
+}
+
+// Load captures meta and forwards the fields to the underlying FieldList.
+func (f *FieldListWithMeta) Load(fields []Field, meta *DocumentMetadata) error {
+	f.Meta = meta
+	return f.Fields.Load(fields, nil)
+}
+
+// Save returns the underlying FieldList's fields along with the stored metadata.
+func (f *FieldListWithMeta) Save() ([]Field, *DocumentMetadata, error) {
+	fields, _, err := f.Fields.Save()
+	return fields, f.Meta, err
+}
+
+// Assert that FieldListWithMeta satisfies FieldLoadSaver
+var _ FieldLoadSaver = &FieldListWithMeta{}
+
+var (
+ float = 3.14159
+ floatOut = "3.14159e+00"
+ latitude = 37.3894
+ longitude = 122.0819
+ testGeo = appengine.GeoPoint{latitude, longitude}
+ testString = "foobar"
+ testTime = time.Unix(1337324400, 0)
+ testTimeOut = "1337324400000"
+ searchMeta = &DocumentMetadata{
+ Rank: 42,
+ }
+ searchDoc = TestDoc{
+ String: testString,
+ Atom: Atom(testString),
+ HTML: HTML(testString),
+ Float: float,
+ Location: testGeo,
+ Time: testTime,
+ }
+ searchFields = FieldList{
+ Field{Name: "String", Value: testString},
+ Field{Name: "Atom", Value: Atom(testString)},
+ Field{Name: "HTML", Value: HTML(testString)},
+ Field{Name: "Float", Value: float},
+ Field{Name: "Location", Value: testGeo},
+ Field{Name: "Time", Value: testTime},
+ }
+ // searchFieldsWithLang is a copy of the searchFields with the Language field
+ // set on text/HTML Fields.
+ searchFieldsWithLang = FieldList{}
+ protoFields = []*pb.Field{
+ newStringValueField("String", testString, pb.FieldValue_TEXT),
+ newStringValueField("Atom", testString, pb.FieldValue_ATOM),
+ newStringValueField("HTML", testString, pb.FieldValue_HTML),
+ newStringValueField("Float", floatOut, pb.FieldValue_NUMBER),
+ {
+ Name: proto.String("Location"),
+ Value: &pb.FieldValue{
+ Geo: &pb.FieldValue_Geo{
+ Lat: proto.Float64(latitude),
+ Lng: proto.Float64(longitude),
+ },
+ Type: pb.FieldValue_GEO.Enum(),
+ },
+ },
+ newStringValueField("Time", testTimeOut, pb.FieldValue_DATE),
+ }
+)
+
+func init() {
+ for _, f := range searchFields {
+ if f.Name == "String" || f.Name == "HTML" {
+ f.Language = "en"
+ }
+ searchFieldsWithLang = append(searchFieldsWithLang, f)
+ }
+}
+
+// newStringValueField builds a protobuf field with a string payload of the
+// given content type (TEXT, ATOM, HTML, NUMBER or DATE).
+func newStringValueField(name, value string, valueType pb.FieldValue_ContentType) *pb.Field {
+	return &pb.Field{
+		Name: proto.String(name),
+		Value: &pb.FieldValue{
+			StringValue: proto.String(value),
+			Type:        valueType.Enum(),
+		},
+	}
+}
+
+func TestValidIndexNameOrDocID(t *testing.T) {
+ testCases := []struct {
+ s string
+ want bool
+ }{
+ {"", true},
+ {"!", false},
+ {"$", true},
+ {"!bad", false},
+ {"good!", true},
+ {"alsoGood", true},
+ {"has spaces", false},
+ {"is_inva\xffid_UTF-8", false},
+ {"is_non-ASCïI", false},
+ {"underscores_are_ok", true},
+ }
+ for _, tc := range testCases {
+ if got := validIndexNameOrDocID(tc.s); got != tc.want {
+ t.Errorf("%q: got %v, want %v", tc.s, got, tc.want)
+ }
+ }
+}
+
+func TestLoadDoc(t *testing.T) {
+ got, want := TestDoc{}, searchDoc
+ if err := loadDoc(&got, protoFields, nil, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if got != want {
+ t.Errorf("loadDoc: got %v, wanted %v", got, want)
+ }
+}
+
+func TestSaveDoc(t *testing.T) {
+ got, _, err := saveDoc(&searchDoc)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldList(t *testing.T) {
+ var got FieldList
+ want := searchFieldsWithLang
+ if err := loadDoc(&got, protoFields, nil, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLangFields(t *testing.T) {
+ fl := &FieldList{
+ {Name: "Foo", Value: "I am English", Language: "en"},
+ {Name: "Bar", Value: "私は日本人だ", Language: "jp"},
+ }
+ var got FieldList
+ protoFields, _, err := saveDoc(fl)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ if err := loadDoc(&got, protoFields, nil, nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if want := fl; !reflect.DeepEqual(&got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveFieldList(t *testing.T) {
+ got, _, err := saveDoc(&searchFields)
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ want := protoFields
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadFieldAndExprList(t *testing.T) {
+ var got, want FieldList
+ for i, f := range searchFieldsWithLang {
+ f.Derived = (i >= 2) // First 2 elements are "fields", next are "expressions".
+ want = append(want, f)
+ }
+ if err := loadDoc(&got, protoFields[:2], protoFields[2:], nil); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestLoadMeta(t *testing.T) {
+ var got FieldListWithMeta
+ want := FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFieldsWithLang,
+ }
+ if err := loadDoc(&got, protoFields, nil, searchMeta); err != nil {
+ t.Fatalf("loadDoc: %v", err)
+ }
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("got %v\nwant %v", got, want)
+ }
+}
+
+func TestSaveMeta(t *testing.T) {
+ got, gotMeta, err := saveDoc(&FieldListWithMeta{
+ Meta: searchMeta,
+ Fields: searchFields,
+ })
+ if err != nil {
+ t.Fatalf("saveDoc: %v", err)
+ }
+ if want := protoFields; !reflect.DeepEqual(got, want) {
+ t.Errorf("\ngot %v\nwant %v", got, want)
+ }
+ if want := searchMeta; !reflect.DeepEqual(gotMeta, want) {
+ t.Errorf("\ngot %v\nwant %v", gotMeta, want)
+ }
+}
+
+func TestValidFieldNames(t *testing.T) {
+ testCases := []struct {
+ name string
+ valid bool
+ }{
+ {"Normal", true},
+ {"Also_OK_123", true},
+ {"Not so great", false},
+ {"lower_case", false},
+ {"Exclaim!", false},
+ {"Hello세상아 안녕", false},
+ {"", false},
+ {"Hεllo", false},
+ {strings.Repeat("A", 500), true},
+ {strings.Repeat("A", 501), false},
+ }
+
+ for _, tc := range testCases {
+ _, _, err := saveDoc(&FieldList{
+ Field{Name: tc.name, Value: "val"},
+ })
+ if err != nil && !strings.Contains(err.Error(), "invalid field name") {
+ t.Errorf("unexpected err %q for field name %q", err, tc.name)
+ }
+ if (err == nil) != tc.valid {
+ t.Errorf("field %q: expected valid %t, received err %v", tc.name, tc.valid, err)
+ }
+ }
+}
+
+// TestValidLangs exercises saveDoc's Language validation: a language may only
+// be set on string and HTML values, and must be a two-letter language code.
+func TestValidLangs(t *testing.T) {
+	testCases := []struct {
+		field Field
+		valid bool
+	}{
+		{Field{Name: "Foo", Value: "String", Language: ""}, true},
+		{Field{Name: "Foo", Value: "String", Language: "en"}, true},
+		{Field{Name: "Foo", Value: "String", Language: "aussie"}, false},
+		{Field{Name: "Foo", Value: "String", Language: "12"}, false},
+		{Field{Name: "Foo", Value: HTML("String"), Language: "en"}, true},
+		{Field{Name: "Foo", Value: Atom("String"), Language: "en"}, false},
+		{Field{Name: "Foo", Value: 42, Language: "en"}, false},
+	}
+
+	for _, tt := range testCases {
+		_, _, err := saveDoc(&FieldList{tt.field})
+		if err == nil != tt.valid {
+			t.Errorf("Field %v, got error %v, wanted valid %t", tt.field, err, tt.valid)
+		}
+	}
+}
+
+// TestDuplicateFields verifies which repeated field names saveDoc accepts:
+// multiple text/atom values for one name are allowed, but duplicate time or
+// numeric values for the same name are rejected.
+func TestDuplicateFields(t *testing.T) {
+	testCases := []struct {
+		desc   string
+		fields FieldList
+		errMsg string // Non-empty if we expect an error
+	}{
+		{
+			desc:   "multi string",
+			fields: FieldList{{Name: "FieldA", Value: "val1"}, {Name: "FieldA", Value: "val2"}, {Name: "FieldA", Value: "val3"}},
+		},
+		{
+			desc:   "multi atom",
+			fields: FieldList{{Name: "FieldA", Value: Atom("val1")}, {Name: "FieldA", Value: Atom("val2")}, {Name: "FieldA", Value: Atom("val3")}},
+		},
+		{
+			desc:   "mixed",
+			fields: FieldList{{Name: "FieldA", Value: testString}, {Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: float}},
+		},
+		{
+			desc:   "multi time",
+			fields: FieldList{{Name: "FieldA", Value: testTime}, {Name: "FieldA", Value: testTime}},
+			errMsg: `duplicate time field "FieldA"`,
+		},
+		{
+			desc:   "multi num",
+			fields: FieldList{{Name: "FieldA", Value: float}, {Name: "FieldA", Value: float}},
+			errMsg: `duplicate numeric field "FieldA"`,
+		},
+	}
+	for _, tc := range testCases {
+		_, _, err := saveDoc(&tc.fields)
+		// Fail if the presence of an error disagrees with expectations, or if
+		// the error text does not contain the expected message.
+		if (err == nil) != (tc.errMsg == "") || (err != nil && !strings.Contains(err.Error(), tc.errMsg)) {
+			t.Errorf("%s: got err %v, wanted %q", tc.desc, err, tc.errMsg)
+		}
+	}
+}
+
+// TestLoadErrFieldMismatch verifies that loadDoc reports an *ErrFieldMismatch
+// for missing, mistyped, and unexported (unsettable) destination struct fields.
+func TestLoadErrFieldMismatch(t *testing.T) {
+	testCases := []struct {
+		desc string
+		dst  interface{}
+		src  []*pb.Field
+		err  error
+	}{
+		{
+			desc: "missing",
+			dst:  &struct{ One string }{},
+			src:  []*pb.Field{newStringValueField("Two", "woop!", pb.FieldValue_TEXT)},
+			err: &ErrFieldMismatch{
+				FieldName: "Two",
+				Reason:    "no such struct field",
+			},
+		},
+		{
+			desc: "wrong type",
+			dst:  &struct{ Num float64 }{},
+			src:  []*pb.Field{newStringValueField("Num", "woop!", pb.FieldValue_TEXT)},
+			err: &ErrFieldMismatch{
+				FieldName: "Num",
+				Reason:    "type mismatch: float64 for string data",
+			},
+		},
+		{
+			desc: "unsettable",
+			dst:  &struct{ lower string }{},
+			src:  []*pb.Field{newStringValueField("lower", "woop!", pb.FieldValue_TEXT)},
+			err: &ErrFieldMismatch{
+				FieldName: "lower",
+				Reason:    "cannot set struct field",
+			},
+		},
+	}
+	for _, tc := range testCases {
+		err := loadDoc(tc.dst, tc.src, nil, nil)
+		if !reflect.DeepEqual(err, tc.err) {
+			t.Errorf("%s, got err %v, wanted %v", tc.desc, err, tc.err)
+		}
+	}
+}
+
+// TestLimit verifies that an Iterator with limit=42 yields exactly 42 results
+// even though the stubbed more func pages results 20 at a time, and that the
+// iterator never calls more with a zero remaining limit.
+func TestLimit(t *testing.T) {
+	more := func(it *Iterator) error {
+		if it.limit == 0 {
+			return errors.New("Iterator.limit should not be zero in next")
+		}
+		// Page up to 20 items at once.
+		ret := 20
+		if it.limit > 0 && it.limit < ret {
+			ret = it.limit
+		}
+		it.listRes = make([]*pb.Document, ret)
+		for i := range it.listRes {
+			it.listRes[i] = &pb.Document{}
+		}
+		return nil
+	}
+
+	it := &Iterator{
+		more:  more,
+		limit: 42,
+	}
+
+	// Drain the iterator, counting documents until Done.
+	count := 0
+	for {
+		_, err := it.Next(nil)
+		if err == Done {
+			break
+		}
+		if err != nil {
+			t.Fatalf("err after %d: %v", count, err)
+		}
+		count++
+	}
+	if count != 42 {
+		t.Errorf("got %d results, expected 42", count)
+	}
+}
+
+// TestPut verifies that Index.Put sends the expected IndexDocumentRequest
+// (fields plus the OrderId carried in the document metadata) and returns the
+// document ID chosen by the service.
+func TestPut(t *testing.T) {
+	index, err := Open("Doc")
+	if err != nil {
+		t.Fatalf("err from Open: %v", err)
+	}
+
+	c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+		expectedIn := &pb.IndexDocumentRequest{
+			Params: &pb.IndexDocumentParams{
+				Document: []*pb.Document{
+					{Field: protoFields, OrderId: proto.Int32(42)},
+				},
+				IndexSpec: &pb.IndexSpec{
+					Name: proto.String("Doc"),
+				},
+			},
+		}
+		if !proto.Equal(in, expectedIn) {
+			return fmt.Errorf("unsupported argument:\ngot  %v\nwant %v", in, expectedIn)
+		}
+		// Simulate a successful indexing response carrying the chosen doc ID.
+		*out = pb.IndexDocumentResponse{
+			Status: []*pb.RequestStatus{
+				{Code: pb.SearchServiceError_OK.Enum()},
+			},
+			DocId: []string{
+				"doc_id",
+			},
+		}
+		return nil
+	})
+
+	id, err := index.Put(c, "", &FieldListWithMeta{
+		Meta:   searchMeta,
+		Fields: searchFields,
+	})
+	if err != nil {
+		t.Fatal(err)
+	}
+	if want := "doc_id"; id != want {
+		t.Errorf("Got doc ID %q, want %q", id, want)
+	}
+}
+
+// TestPutAutoOrderID verifies that when no OrderId is supplied, Put fills in
+// one derived from the current time relative to orderIDEpoch (within a small
+// tolerance to absorb test execution time).
+func TestPutAutoOrderID(t *testing.T) {
+	index, err := Open("Doc")
+	if err != nil {
+		t.Fatalf("err from Open: %v", err)
+	}
+
+	c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(in *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+		if len(in.Params.GetDocument()) < 1 {
+			return fmt.Errorf("expected at least one Document, got %v", in)
+		}
+		got, want := in.Params.Document[0].GetOrderId(), int32(time.Since(orderIDEpoch).Seconds())
+		// Allow up to 5 seconds of skew between request construction and now.
+		if d := got - want; -5 > d || d > 5 {
+			return fmt.Errorf("got OrderId %d, want near %d", got, want)
+		}
+		*out = pb.IndexDocumentResponse{
+			Status: []*pb.RequestStatus{
+				{Code: pb.SearchServiceError_OK.Enum()},
+			},
+			DocId: []string{
+				"doc_id",
+			},
+		}
+		return nil
+	})
+
+	if _, err := index.Put(c, "", &searchFields); err != nil {
+		t.Fatal(err)
+	}
+}
+
+// TestPutBadStatus verifies that Put surfaces a non-OK RequestStatus from the
+// service as an error combining the status code and the error detail.
+func TestPutBadStatus(t *testing.T) {
+	index, err := Open("Doc")
+	if err != nil {
+		t.Fatalf("err from Open: %v", err)
+	}
+
+	c := aetesting.FakeSingleContext(t, "search", "IndexDocument", func(_ *pb.IndexDocumentRequest, out *pb.IndexDocumentResponse) error {
+		*out = pb.IndexDocumentResponse{
+			Status: []*pb.RequestStatus{
+				{
+					Code:        pb.SearchServiceError_INVALID_REQUEST.Enum(),
+					ErrorDetail: proto.String("insufficient gophers"),
+				},
+			},
+		}
+		return nil
+	})
+
+	wantErr := "search: INVALID_REQUEST: insufficient gophers"
+	if _, err := index.Put(c, "", &searchFields); err == nil || err.Error() != wantErr {
+		t.Fatalf("Put: got %v error, want %q", err, wantErr)
+	}
+}
+
+// TestSortOptions verifies the translation of SortOptions (sort expressions,
+// defaults, limit, scorer) into the SortSpec/ScorerSpec of a SearchRequest,
+// including the error for an unsupported expression Default type.
+func TestSortOptions(t *testing.T) {
+	index, err := Open("Doc")
+	if err != nil {
+		t.Fatalf("err from Open: %v", err)
+	}
+
+	noErr := errors.New("") // sentinel error when there isn't one…
+
+	testCases := []struct {
+		desc       string
+		sort       *SortOptions
+		wantSort   []*pb.SortSpec
+		wantScorer *pb.ScorerSpec
+		wantErr    string
+	}{
+		{
+			desc: "No SortOptions",
+		},
+		{
+			desc: "Basic",
+			sort: &SortOptions{
+				Expressions: []SortExpression{
+					{Expr: "dog"},
+					{Expr: "cat", Reverse: true},
+					{Expr: "gopher", Default: "blue"},
+					{Expr: "fish", Default: 2.0},
+				},
+				Limit:  42,
+				Scorer: MatchScorer,
+			},
+			wantSort: []*pb.SortSpec{
+				{SortExpression: proto.String("dog")},
+				{SortExpression: proto.String("cat"), SortDescending: proto.Bool(false)},
+				{SortExpression: proto.String("gopher"), DefaultValueText: proto.String("blue")},
+				{SortExpression: proto.String("fish"), DefaultValueNumeric: proto.Float64(2)},
+			},
+			wantScorer: &pb.ScorerSpec{
+				Limit:  proto.Int32(42),
+				Scorer: pb.ScorerSpec_MATCH_SCORER.Enum(),
+			},
+		},
+		{
+			desc: "Bad expression default",
+			sort: &SortOptions{
+				Expressions: []SortExpression{
+					{Expr: "dog", Default: true},
+				},
+			},
+			wantErr: `search: invalid Default type bool for expression "dog"`,
+		},
+		{
+			desc:       "RescoringMatchScorer",
+			sort:       &SortOptions{Scorer: RescoringMatchScorer},
+			wantScorer: &pb.ScorerSpec{Scorer: pb.ScorerSpec_RESCORING_MATCH_SCORER.Enum()},
+		},
+	}
+
+	for _, tt := range testCases {
+		c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+			params := req.Params
+			if !reflect.DeepEqual(params.SortSpec, tt.wantSort) {
+				t.Errorf("%s: params.SortSpec=%v; want %v", tt.desc, params.SortSpec, tt.wantSort)
+			}
+			if !reflect.DeepEqual(params.ScorerSpec, tt.wantScorer) {
+				t.Errorf("%s: params.ScorerSpec=%v; want %v", tt.desc, params.ScorerSpec, tt.wantScorer)
+			}
+			return noErr // Always return some error to prevent response parsing.
+		})
+
+		it := index.Search(c, "gopher", &SearchOptions{Sort: tt.sort})
+		// Next always errors here: either with the expected validation error,
+		// or with noErr (empty message) from the fake, matching wantErr == "".
+		_, err := it.Next(nil)
+		if err == nil {
+			t.Fatalf("%s: err==nil; should not happen", tt.desc)
+		}
+		if err.Error() != tt.wantErr {
+			t.Errorf("%s: got error %q, want %q", tt.desc, err, tt.wantErr)
+		}
+	}
+}
+
+// TestFieldSpec verifies the translation of SearchOptions.Fields and
+// SearchOptions.Expressions into the FieldSpec of a SearchRequest.
+func TestFieldSpec(t *testing.T) {
+	index, err := Open("Doc")
+	if err != nil {
+		t.Fatalf("err from Open: %v", err)
+	}
+
+	errFoo := errors.New("foo") // sentinel error when there isn't one.
+
+	testCases := []struct {
+		desc string
+		opts *SearchOptions
+		want *pb.FieldSpec
+	}{
+		{
+			desc: "No options",
+			want: &pb.FieldSpec{},
+		},
+		{
+			desc: "Fields",
+			opts: &SearchOptions{
+				Fields: []string{"one", "two"},
+			},
+			want: &pb.FieldSpec{
+				Name: []string{"one", "two"},
+			},
+		},
+		{
+			desc: "Expressions",
+			opts: &SearchOptions{
+				Expressions: []FieldExpression{
+					{Name: "one", Expr: "price * quantity"},
+					{Name: "two", Expr: "min(daily_use, 10) * rate"},
+				},
+			},
+			want: &pb.FieldSpec{
+				Expression: []*pb.FieldSpec_Expression{
+					{Name: proto.String("one"), Expression: proto.String("price * quantity")},
+					{Name: proto.String("two"), Expression: proto.String("min(daily_use, 10) * rate")},
+				},
+			},
+		},
+	}
+
+	for _, tt := range testCases {
+		c := aetesting.FakeSingleContext(t, "search", "Search", func(req *pb.SearchRequest, _ *pb.SearchResponse) error {
+			params := req.Params
+			if !reflect.DeepEqual(params.FieldSpec, tt.want) {
+				t.Errorf("%s: params.FieldSpec=%v; want %v", tt.desc, params.FieldSpec, tt.want)
+			}
+			return errFoo // Always return some error to prevent response parsing.
+		})
+
+		it := index.Search(c, "gopher", tt.opts)
+		if _, err := it.Next(nil); err != errFoo {
+			t.Fatalf("%s: got error %v; want %v", tt.desc, err, errFoo)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue.go b/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue.go
new file mode 100644
index 000000000000..8068b0d48b04
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue.go
@@ -0,0 +1,493 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package taskqueue provides a client for App Engine's taskqueue service.
+Using this service, applications may perform work outside a user's request.
+
+A Task may be constructed manually; alternatively, since the most common
+taskqueue operation is to add a single POST task, NewPOSTTask makes it easy.
+
+ t := taskqueue.NewPOSTTask("/worker", url.Values{
+ "key": {key},
+ })
+ taskqueue.Add(c, t, "") // add t to the default queue
+*/
+package taskqueue // import "google.golang.org/appengine/taskqueue"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+ "net/url"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ dspb "google.golang.org/appengine/internal/datastore"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+var (
+	// ErrTaskAlreadyAdded is the error returned by Add and AddMulti when a task has already been added with a particular name.
+	// It corresponds to the TASK_ALREADY_EXISTS and TOMBSTONED_TASK service error codes (see alreadyAddedErrors).
+	ErrTaskAlreadyAdded = errors.New("taskqueue: task has already been added")
+)
+
+// RetryOptions let you control whether to retry a task and the backoff intervals between tries.
+// Zero-valued fields are omitted from the request so the service applies its
+// defaults; MaxDoublings is the exception, controlled by ApplyZeroMaxDoublings.
+type RetryOptions struct {
+	// Number of tries/leases after which the task fails permanently and is deleted.
+	// If AgeLimit is also set, both limits must be exceeded for the task to fail permanently.
+	RetryLimit int32
+
+	// Maximum time allowed since the task's first try before the task fails permanently and is deleted (only for push tasks).
+	// If RetryLimit is also set, both limits must be exceeded for the task to fail permanently.
+	AgeLimit time.Duration
+
+	// Minimum time between successive tries (only for push tasks).
+	MinBackoff time.Duration
+
+	// Maximum time between successive tries (only for push tasks).
+	MaxBackoff time.Duration
+
+	// Maximum number of times to double the interval between successive tries before the intervals increase linearly (only for push tasks).
+	MaxDoublings int32
+
+	// If MaxDoublings is zero, set ApplyZeroMaxDoublings to true to override the default non-zero value.
+	// Otherwise a zero MaxDoublings is ignored and the default is used.
+	ApplyZeroMaxDoublings bool
+}
+
+// toRetryParameters converts RetryOptions to pb.TaskQueueRetryParameters.
+// Only positive fields are sent; AgeLimit is truncated to whole seconds.
+func (opt *RetryOptions) toRetryParameters() *pb.TaskQueueRetryParameters {
+	params := &pb.TaskQueueRetryParameters{}
+	if opt.RetryLimit > 0 {
+		params.RetryLimit = proto.Int32(opt.RetryLimit)
+	}
+	if opt.AgeLimit > 0 {
+		params.AgeLimitSec = proto.Int64(int64(opt.AgeLimit.Seconds()))
+	}
+	if opt.MinBackoff > 0 {
+		params.MinBackoffSec = proto.Float64(opt.MinBackoff.Seconds())
+	}
+	if opt.MaxBackoff > 0 {
+		params.MaxBackoffSec = proto.Float64(opt.MaxBackoff.Seconds())
+	}
+	// A zero MaxDoublings is only sent when explicitly requested via ApplyZeroMaxDoublings.
+	if opt.MaxDoublings > 0 || (opt.MaxDoublings == 0 && opt.ApplyZeroMaxDoublings) {
+		params.MaxDoublings = proto.Int32(opt.MaxDoublings)
+	}
+	return params
+}
+
+// A Task represents a task to be executed.
+type Task struct {
+	// Path is the worker URL for the task.
+	// If unset, it will default to /_ah/queue/<queue_name>.
+	Path string
+
+	// Payload is the data for the task.
+	// This will be delivered as the HTTP request body.
+	// It is only used when Method is POST, PUT or PULL.
+	// url.Values' Encode method may be used to generate this for POST requests.
+	Payload []byte
+
+	// Additional HTTP headers to pass at the task's execution time.
+	// To schedule the task to be run with an alternate app version
+	// or backend, set the "Host" header.
+	Header http.Header
+
+	// Method is the HTTP method for the task ("GET", "POST", etc.),
+	// or "PULL" if this task is destined for a pull-based queue.
+	// If empty, this defaults to "POST".
+	Method string
+
+	// A name for the task.
+	// If empty, a name will be chosen.
+	Name string
+
+	// Delay specifies the duration the task queue service must wait
+	// before executing the task.
+	// Either Delay or ETA may be set, but not both.
+	Delay time.Duration
+
+	// ETA specifies the earliest time a task may be executed (push queues)
+	// or leased (pull queues).
+	// Either Delay or ETA may be set, but not both.
+	ETA time.Time
+
+	// The number of times the task has been dispatched or leased.
+	RetryCount int32
+
+	// Tag for the task. Only used when Method is PULL.
+	Tag string
+
+	// Retry options for this task. May be nil.
+	RetryOptions *RetryOptions
+}
+
+// method reports the HTTP method to use for t, defaulting to "POST"
+// when none was specified.
+func (t *Task) method() string {
+	if m := t.Method; m != "" {
+		return m
+	}
+	return "POST"
+}
+
+// NewPOSTTask creates a Task that will POST to a path with the given form data.
+func NewPOSTTask(path string, params url.Values) *Task {
+	hdr := http.Header{}
+	hdr.Set("Content-Type", "application/x-www-form-urlencoded")
+	t := &Task{
+		Method:  "POST",
+		Path:    path,
+		Header:  hdr,
+		Payload: []byte(params.Encode()),
+	}
+	return t
+}
+
+// Canonicalized header names used to propagate the request's namespace to the
+// task; newAddReq checks task.Header for these keys before adding its own.
+var (
+	currentNamespace = http.CanonicalHeaderKey("X-AppEngine-Current-Namespace")
+	defaultNamespace = http.CanonicalHeaderKey("X-AppEngine-Default-Namespace")
+)
+
+// newAddReq translates a Task into a TaskQueueAddRequest for queueName
+// (empty means "default"). Pull tasks carry only payload and tag; HTTP tasks
+// carry method, URL, headers, and (for POST/PUT) the payload. It panics if
+// both Delay and ETA are set, and returns an error for an unknown HTTP method.
+func newAddReq(c appengine.Context, task *Task, queueName string) (*pb.TaskQueueAddRequest, error) {
+	if queueName == "" {
+		queueName = "default"
+	}
+	eta := task.ETA
+	if eta.IsZero() {
+		eta = time.Now().Add(task.Delay)
+	} else if task.Delay != 0 {
+		panic("taskqueue: both Delay and ETA are set")
+	}
+	req := &pb.TaskQueueAddRequest{
+		QueueName: []byte(queueName),
+		TaskName:  []byte(task.Name),
+		EtaUsec:   proto.Int64(eta.UnixNano() / 1e3), // nanoseconds to microseconds
+	}
+	method := task.method()
+	if method == "PULL" {
+		// Pull-based task
+		req.Body = task.Payload
+		req.Mode = pb.TaskQueueMode_PULL.Enum()
+		if task.Tag != "" {
+			req.Tag = []byte(task.Tag)
+		}
+	} else {
+		// HTTP-based task
+		if v, ok := pb.TaskQueueAddRequest_RequestMethod_value[method]; ok {
+			req.Method = pb.TaskQueueAddRequest_RequestMethod(v).Enum()
+		} else {
+			return nil, fmt.Errorf("taskqueue: bad method %q", method)
+		}
+		req.Url = []byte(task.Path)
+		for k, vs := range task.Header {
+			for _, v := range vs {
+				req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+					Key:   []byte(k),
+					Value: []byte(v),
+				})
+			}
+		}
+		if method == "POST" || method == "PUT" {
+			req.Body = task.Payload
+		}
+
+		// Namespace headers: only added when the caller did not set them explicitly.
+		if _, ok := task.Header[currentNamespace]; !ok {
+			// Fetch the current namespace of this request.
+			ns := internal.VirtAPI(c, "GetNamespace")
+			req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+				Key:   []byte(currentNamespace),
+				Value: []byte(ns),
+			})
+		}
+		if _, ok := task.Header[defaultNamespace]; !ok {
+			// Fetch the X-AppEngine-Default-Namespace header of this request.
+			if ns := internal.VirtAPI(c, "GetDefaultNamespace"); ns != "" {
+				req.Header = append(req.Header, &pb.TaskQueueAddRequest_Header{
+					Key:   []byte(defaultNamespace),
+					Value: []byte(ns),
+				})
+			}
+		}
+	}
+
+	if task.RetryOptions != nil {
+		req.RetryParameters = task.RetryOptions.toRetryParameters()
+	}
+
+	return req, nil
+}
+
+// alreadyAddedErrors is the set of service error codes that Add and AddMulti
+// translate into ErrTaskAlreadyAdded.
+var alreadyAddedErrors = map[pb.TaskQueueServiceError_ErrorCode]bool{
+	pb.TaskQueueServiceError_TASK_ALREADY_EXISTS: true,
+	pb.TaskQueueServiceError_TOMBSTONED_TASK:     true,
+}
+
+// Add adds the task to a named queue.
+// An empty queue name means that the default queue will be used.
+// Add returns an equivalent Task with defaults filled in, including setting
+// the task's Name field to the chosen name if the original was empty.
+func Add(c appengine.Context, task *Task, queueName string) (*Task, error) {
+	req, err := newAddReq(c, task, queueName)
+	if err != nil {
+		return nil, err
+	}
+	res := &pb.TaskQueueAddResponse{}
+	if err := c.Call("taskqueue", "Add", req, res, nil); err != nil {
+		// Map "already exists"/"tombstoned" service errors to ErrTaskAlreadyAdded.
+		apiErr, ok := err.(*internal.APIError)
+		if ok && alreadyAddedErrors[pb.TaskQueueServiceError_ErrorCode(apiErr.Code)] {
+			return nil, ErrTaskAlreadyAdded
+		}
+		return nil, err
+	}
+	// Return a copy of the input task with defaults filled in; the input is not mutated.
+	resultTask := *task
+	resultTask.Method = task.method()
+	if task.Name == "" {
+		resultTask.Name = string(res.ChosenTaskName)
+	}
+	return &resultTask, nil
+}
+
+// AddMulti adds multiple tasks to a named queue.
+// An empty queue name means that the default queue will be used.
+// AddMulti returns a slice of equivalent tasks with defaults filled in, including setting
+// each task's Name field to the chosen name if the original was empty.
+// If a given task is badly formed or could not be added, an appengine.MultiError is returned.
+func AddMulti(c appengine.Context, tasks []*Task, queueName string) ([]*Task, error) {
+	req := &pb.TaskQueueBulkAddRequest{
+		AddRequest: make([]*pb.TaskQueueAddRequest, len(tasks)),
+	}
+	me, any := make(appengine.MultiError, len(tasks)), false
+	for i, t := range tasks {
+		req.AddRequest[i], me[i] = newAddReq(c, t, queueName)
+		any = any || me[i] != nil
+	}
+	// If any task failed to convert, report per-task errors without calling the service.
+	if any {
+		return nil, me
+	}
+	res := &pb.TaskQueueBulkAddResponse{}
+	if err := c.Call("taskqueue", "BulkAdd", req, res, nil); err != nil {
+		return nil, err
+	}
+	if len(res.Taskresult) != len(tasks) {
+		return nil, errors.New("taskqueue: server error")
+	}
+	tasksOut := make([]*Task, len(tasks))
+	for i, tr := range res.Taskresult {
+		// Copy each input task and fill in defaults; inputs are not mutated.
+		tasksOut[i] = new(Task)
+		*tasksOut[i] = *tasks[i]
+		tasksOut[i].Method = tasksOut[i].method()
+		if tasksOut[i].Name == "" {
+			tasksOut[i].Name = string(tr.ChosenTaskName)
+		}
+		if *tr.Result != pb.TaskQueueServiceError_OK {
+			if alreadyAddedErrors[*tr.Result] {
+				me[i] = ErrTaskAlreadyAdded
+			} else {
+				me[i] = &internal.APIError{
+					Service: "taskqueue",
+					Code:    int32(*tr.Result),
+				}
+			}
+			any = true
+		}
+	}
+	// Note: even on error, the successfully filled-in tasks are returned.
+	if any {
+		return tasksOut, me
+	}
+	return tasksOut, nil
+}
+
+// Delete deletes a task from a named queue.
+func Delete(c appengine.Context, task *Task, queueName string) error {
+	err := DeleteMulti(c, []*Task{task}, queueName)
+	me, ok := err.(appengine.MultiError)
+	if !ok {
+		return err
+	}
+	// Unwrap the single-element MultiError for the one task we deleted.
+	return me[0]
+}
+
+// DeleteMulti deletes multiple tasks from a named queue.
+// If a given task could not be deleted, an appengine.MultiError is returned.
+// Tasks are identified by their Name field; an empty queue name means "default".
+func DeleteMulti(c appengine.Context, tasks []*Task, queueName string) error {
+	taskNames := make([][]byte, len(tasks))
+	for i, t := range tasks {
+		taskNames[i] = []byte(t.Name)
+	}
+	if queueName == "" {
+		queueName = "default"
+	}
+	req := &pb.TaskQueueDeleteRequest{
+		QueueName: []byte(queueName),
+		TaskName:  taskNames,
+	}
+	res := &pb.TaskQueueDeleteResponse{}
+	if err := c.Call("taskqueue", "Delete", req, res, nil); err != nil {
+		return err
+	}
+	// The service returns one result code per requested task name.
+	if a, b := len(req.TaskName), len(res.Result); a != b {
+		return fmt.Errorf("taskqueue: internal error: requested deletion of %d tasks, got %d results", a, b)
+	}
+	me, any := make(appengine.MultiError, len(res.Result)), false
+	for i, ec := range res.Result {
+		if ec != pb.TaskQueueServiceError_OK {
+			me[i] = &internal.APIError{
+				Service: "taskqueue",
+				Code:    int32(ec),
+			}
+			any = true
+		}
+	}
+	if any {
+		return me
+	}
+	return nil
+}
+
+// lease is the common implementation of Lease and LeaseByTag. It leases up to
+// maxTasks tasks from queueName (empty means "default") for leaseTime seconds,
+// optionally grouping by tag, and returns them as PULL tasks.
+func lease(c appengine.Context, maxTasks int, queueName string, leaseTime int, groupByTag bool, tag []byte) ([]*Task, error) {
+	if queueName == "" {
+		queueName = "default"
+	}
+	req := &pb.TaskQueueQueryAndOwnTasksRequest{
+		QueueName:    []byte(queueName),
+		LeaseSeconds: proto.Float64(float64(leaseTime)),
+		MaxTasks:     proto.Int64(int64(maxTasks)),
+		GroupByTag:   proto.Bool(groupByTag),
+		Tag:          tag,
+	}
+	res := &pb.TaskQueueQueryAndOwnTasksResponse{}
+	// Leasing can take longer than the default RPC deadline; allow 10s.
+	callOpts := &internal.CallOptions{
+		Timeout: 10 * time.Second,
+	}
+	if err := c.Call("taskqueue", "QueryAndOwnTasks", req, res, callOpts); err != nil {
+		return nil, err
+	}
+	tasks := make([]*Task, len(res.Task))
+	for i, t := range res.Task {
+		tasks[i] = &Task{
+			Payload:    t.Body,
+			Name:       string(t.TaskName),
+			Method:     "PULL",
+			ETA:        time.Unix(0, *t.EtaUsec*1e3), // microseconds to nanoseconds
+			RetryCount: *t.RetryCount,
+			Tag:        string(t.Tag),
+		}
+	}
+	return tasks, nil
+}
+
+// Lease leases tasks from a queue.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+// Returned tasks have Method "PULL".
+func Lease(c appengine.Context, maxTasks int, queueName string, leaseTime int) ([]*Task, error) {
+	return lease(c, maxTasks, queueName, leaseTime, false, nil)
+}
+
+// LeaseByTag leases tasks from a queue, grouped by tag.
+// If tag is empty, then the returned tasks are grouped by the tag of the task with earliest ETA.
+// leaseTime is in seconds.
+// The number of tasks fetched will be at most maxTasks.
+// Returned tasks have Method "PULL".
+func LeaseByTag(c appengine.Context, maxTasks int, queueName string, leaseTime int, tag string) ([]*Task, error) {
+	return lease(c, maxTasks, queueName, leaseTime, true, []byte(tag))
+}
+
+// Purge removes all tasks from a queue.
+// An empty queue name means that the default queue will be used.
+func Purge(c appengine.Context, queueName string) error {
+	name := queueName
+	if name == "" {
+		name = "default"
+	}
+	req := &pb.TaskQueuePurgeQueueRequest{QueueName: []byte(name)}
+	return c.Call("taskqueue", "PurgeQueue", req, &pb.TaskQueuePurgeQueueResponse{}, nil)
+}
+
+// ModifyLease modifies the lease of a task.
+// Used to request more processing time, or to abandon processing.
+// leaseTime is in seconds and must not be negative.
+// On success, task.ETA is updated in place to the new lease expiry.
+func ModifyLease(c appengine.Context, task *Task, queueName string, leaseTime int) error {
+	if queueName == "" {
+		queueName = "default"
+	}
+	req := &pb.TaskQueueModifyTaskLeaseRequest{
+		QueueName:    []byte(queueName),
+		TaskName:     []byte(task.Name),
+		EtaUsec:      proto.Int64(task.ETA.UnixNano() / 1e3), // Used to verify ownership.
+		LeaseSeconds: proto.Float64(float64(leaseTime)),
+	}
+	res := &pb.TaskQueueModifyTaskLeaseResponse{}
+	if err := c.Call("taskqueue", "ModifyTaskLease", req, res, nil); err != nil {
+		return err
+	}
+	// Propagate the server's updated ETA (microseconds) back to the task.
+	task.ETA = time.Unix(0, *res.UpdatedEtaUsec*1e3)
+	return nil
+}
+
+// QueueStatistics represents statistics about a single task queue.
+// It is returned by QueueStats.
+type QueueStatistics struct {
+	Tasks     int       // may be an approximation
+	OldestETA time.Time // zero if there are no pending tasks
+
+	Executed1Minute int     // tasks executed in the last minute
+	InFlight        int     // tasks executing now
+	EnforcedRate    float64 // requests per second
+}
+
+// QueueStats retrieves statistics about queues.
+// Empty names in queueNames refer to the default queue. One QueueStatistics
+// is returned per requested queue, in the same order.
+func QueueStats(c appengine.Context, queueNames []string) ([]QueueStatistics, error) {
+	req := &pb.TaskQueueFetchQueueStatsRequest{
+		QueueName: make([][]byte, len(queueNames)),
+	}
+	for i, q := range queueNames {
+		if q == "" {
+			q = "default"
+		}
+		req.QueueName[i] = []byte(q)
+	}
+	res := &pb.TaskQueueFetchQueueStatsResponse{}
+	// Stats collection can take longer than the default RPC deadline; allow 10s.
+	callOpts := &internal.CallOptions{
+		Timeout: 10 * time.Second,
+	}
+	if err := c.Call("taskqueue", "FetchQueueStats", req, res, callOpts); err != nil {
+		return nil, err
+	}
+	qs := make([]QueueStatistics, len(res.Queuestats))
+	for i, qsg := range res.Queuestats {
+		qs[i] = QueueStatistics{
+			Tasks: int(*qsg.NumTasks),
+		}
+		// An OldestEtaUsec <= -1 means no pending tasks; leave OldestETA zero.
+		if eta := *qsg.OldestEtaUsec; eta > -1 {
+			qs[i].OldestETA = time.Unix(0, eta*1e3)
+		}
+		// ScannerInfo is optional; only fill the live-execution stats when present.
+		if si := qsg.ScannerInfo; si != nil {
+			qs[i].Executed1Minute = int(*si.ExecutedLastMinute)
+			qs[i].InFlight = int(si.GetRequestsInFlight())
+			qs[i].EnforcedRate = si.GetEnforcedRate()
+		}
+	}
+	return qs, nil
+}
+
+// setTransaction associates a datastore transaction with a task add request;
+// registered with internal.RegisterTransactionSetter in init.
+func setTransaction(x *pb.TaskQueueAddRequest, t *dspb.Transaction) {
+	x.Transaction = t
+}
+
+// init wires the taskqueue service into the internal error and transaction
+// machinery: error-code names, the shifted datastore timeout code, and
+// transaction setters for single and bulk add requests.
+func init() {
+	internal.RegisterErrorCodeMap("taskqueue", pb.TaskQueueServiceError_ErrorCode_name)
+
+	// Datastore error codes are shifted by DATASTORE_ERROR when presented through taskqueue.
+	dsCode := int32(pb.TaskQueueServiceError_DATASTORE_ERROR) + int32(dspb.Error_TIMEOUT)
+	internal.RegisterTimeoutErrorCode("taskqueue", dsCode)
+
+	// Transaction registration.
+	internal.RegisterTransactionSetter(setTransaction)
+	internal.RegisterTransactionSetter(func(x *pb.TaskQueueBulkAddRequest, t *dspb.Transaction) {
+		// A bulk add propagates the transaction to each contained add request.
+		for _, req := range x.AddRequest {
+			setTransaction(req, t)
+		}
+	})
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue_test.go b/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue_test.go
new file mode 100644
index 000000000000..7a06aa4af428
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/taskqueue/taskqueue_test.go
@@ -0,0 +1,103 @@
+// Copyright 2014 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package taskqueue
+
+import (
+ "errors"
+ "reflect"
+ "testing"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/taskqueue"
+)
+
+// TestAddErrors verifies Add's error translation: TASK_ALREADY_EXISTS and
+// TOMBSTONED_TASK become ErrTaskAlreadyAdded, while other API errors are
+// passed through unchanged.
+func TestAddErrors(t *testing.T) {
+	var tests = []struct {
+		err, want error
+		sameErr   bool // if true, should return err exactly
+	}{
+		{
+			err: &internal.APIError{
+				Service: "taskqueue",
+				Code:    int32(pb.TaskQueueServiceError_TASK_ALREADY_EXISTS),
+			},
+			want: ErrTaskAlreadyAdded,
+		},
+		{
+			err: &internal.APIError{
+				Service: "taskqueue",
+				Code:    int32(pb.TaskQueueServiceError_TOMBSTONED_TASK),
+			},
+			want: ErrTaskAlreadyAdded,
+		},
+		{
+			err: &internal.APIError{
+				Service: "taskqueue",
+				Code:    int32(pb.TaskQueueServiceError_UNKNOWN_QUEUE),
+			},
+			want:    errors.New("not used"),
+			sameErr: true,
+		},
+	}
+	for _, tc := range tests {
+		c := aetesting.FakeSingleContext(t, "taskqueue", "Add", func(req *pb.TaskQueueAddRequest, res *pb.TaskQueueAddResponse) error {
+			// don't fill in any of the response
+			return tc.err
+		})
+		task := &Task{Path: "/worker", Method: "PULL"}
+		_, err := Add(c, task, "a-queue")
+		want := tc.want
+		if tc.sameErr {
+			want = tc.err
+		}
+		if err != want {
+			t.Errorf("Add with tc.err = %v, got %#v, want = %#v", tc.err, err, want)
+		}
+	}
+}
+
+// TestAddMulti verifies that AddMulti returns one task per input plus a
+// MultiError mapping each per-task result code: OK → nil, already-exists and
+// tombstoned → ErrTaskAlreadyAdded, others → *internal.APIError.
+func TestAddMulti(t *testing.T) {
+	c := aetesting.FakeSingleContext(t, "taskqueue", "BulkAdd", func(req *pb.TaskQueueBulkAddRequest, res *pb.TaskQueueBulkAddResponse) error {
+		res.Taskresult = []*pb.TaskQueueBulkAddResponse_TaskResult{
+			{
+				Result: pb.TaskQueueServiceError_OK.Enum(),
+			},
+			{
+				Result: pb.TaskQueueServiceError_TASK_ALREADY_EXISTS.Enum(),
+			},
+			{
+				Result: pb.TaskQueueServiceError_TOMBSTONED_TASK.Enum(),
+			},
+			{
+				Result: pb.TaskQueueServiceError_INTERNAL_ERROR.Enum(),
+			},
+		}
+		return nil
+	})
+	tasks := []*Task{
+		{Path: "/worker", Method: "PULL"},
+		{Path: "/worker", Method: "PULL"},
+		{Path: "/worker", Method: "PULL"},
+		{Path: "/worker", Method: "PULL"},
+	}
+	r, err := AddMulti(c, tasks, "a-queue")
+	if len(r) != len(tasks) {
+		t.Fatalf("AddMulti returned %d tasks, want %d", len(r), len(tasks))
+	}
+	want := appengine.MultiError{
+		nil,
+		ErrTaskAlreadyAdded,
+		ErrTaskAlreadyAdded,
+		&internal.APIError{
+			Service: "taskqueue",
+			Code:    int32(pb.TaskQueueServiceError_INTERNAL_ERROR),
+		},
+	}
+	if !reflect.DeepEqual(err, want) {
+		t.Errorf("AddMulti got %v, wanted %v", err, want)
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/timeout.go b/Godeps/_workspace/src/google.golang.org/appengine/timeout.go
new file mode 100644
index 000000000000..47fda68ec24e
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/timeout.go
@@ -0,0 +1,49 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+)
+
+// IsTimeoutError reports whether err is a timeout error.
+func IsTimeoutError(err error) bool {
+ if t, ok := err.(interface {
+ IsTimeout() bool
+ }); ok {
+ return t.IsTimeout()
+ }
+ return false
+}
+
+// Timeout returns a replacement context that uses d as the default API RPC timeout.
+func Timeout(c Context, d time.Duration) Context {
+	tc := &timeoutContext{d: d}
+	tc.Context = c
+	return tc
+}
+
+// timeoutContext wraps a Context, supplying d as the default RPC timeout
+// for any Call whose options do not already specify one.
+type timeoutContext struct {
+	Context
+	d time.Duration // default timeout applied in Call
+}
+
+// Call forwards to the wrapped Context, injecting the default timeout t.d
+// into any call whose options do not already carry one. The caller's opts
+// value is never mutated; a copy is made when a timeout must be added.
+func (t *timeoutContext) Call(service, method string, in, out proto.Message, opts *internal.CallOptions) error {
+	switch {
+	case opts == nil:
+		opts = &internal.CallOptions{Timeout: t.d}
+	case opts.Timeout == 0:
+		copied := *opts
+		copied.Timeout = t.d
+		opts = &copied
+	}
+	return t.Context.Call(service, method, in, out, opts)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/timeout_test.go b/Godeps/_workspace/src/google.golang.org/appengine/timeout_test.go
new file mode 100644
index 000000000000..52df3fc6516a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/timeout_test.go
@@ -0,0 +1,59 @@
+// Copyright 2013 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package appengine
+
+import (
+ "testing"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+)
+
+// timeoutRecorder is a stub Context that records the timeout it was called
+// with, for asserting what timeoutContext.Call injected.
+type timeoutRecorder struct {
+	Context
+	d time.Duration // timeout observed by the last Call
+}
+
+// Call records opts.Timeout into tr.d (or 5s when opts is nil, standing in
+// for the service default) and succeeds without making any RPC.
+func (tr *timeoutRecorder) Call(_, _ string, _, _ proto.Message, opts *internal.CallOptions) error {
+	tr.d = 5 * time.Second // default
+	if opts != nil {
+		tr.d = opts.Timeout
+	}
+	return nil
+}
+
+// TestTimeout verifies that a Timeout-wrapped context injects its default
+// timeout when opts are nil or zero, and leaves an explicit timeout alone.
+func TestTimeout(t *testing.T) {
+	tests := []struct {
+		desc string
+		opts *internal.CallOptions
+		want time.Duration
+	}{
+		{
+			"no opts",
+			nil,
+			6 * time.Second,
+		},
+		{
+			"empty opts",
+			&internal.CallOptions{},
+			6 * time.Second,
+		},
+		{
+			"set opts",
+			&internal.CallOptions{Timeout: 7 * time.Second},
+			7 * time.Second,
+		},
+	}
+	for _, test := range tests {
+		tr := new(timeoutRecorder)
+		c := Timeout(tr, 6*time.Second)
+		c.Call("service", "method", nil, nil, test.opts)
+		if tr.d != test.want {
+			t.Errorf("%s: timeout was %v, want %v", test.desc, tr.d, test.want)
+		}
+	}
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/urlfetch/urlfetch.go b/Godeps/_workspace/src/google.golang.org/appengine/urlfetch/urlfetch.go
new file mode 100644
index 000000000000..a1cb12276055
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/urlfetch/urlfetch.go
@@ -0,0 +1,211 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package urlfetch provides an http.RoundTripper implementation
+// for fetching URLs via App Engine's urlfetch service.
+package urlfetch // import "google.golang.org/appengine/urlfetch"
+
+import (
+ "errors"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/urlfetch"
+)
+
// Transport is an implementation of http.RoundTripper for
// App Engine. Users should generally create an http.Client using
// this transport and use the Client rather than using this transport
// directly.
type Transport struct {
	// Context is used to issue the underlying "urlfetch" service calls.
	Context  appengine.Context
	Deadline time.Duration // zero means 5-second default

	// Controls whether the application checks the validity of SSL certificates
	// over HTTPS connections. A value of false (the default) instructs the
	// application to send a request to the server only if the certificate is
	// valid and signed by a trusted certificate authority (CA), and also
	// includes a hostname that matches the certificate. A value of true
	// instructs the application to perform no certificate validation.
	AllowInvalidServerCertificate bool
}
+
+// Verify statically that *Transport implements http.RoundTripper.
+var _ http.RoundTripper = (*Transport)(nil)
+
+// Client returns an *http.Client using a default urlfetch Transport. This
+// client will have the default deadline of 5 seconds, and will check the
+// validity of SSL certificates.
+func Client(context appengine.Context) *http.Client {
+ return &http.Client{
+ Transport: &Transport{
+ Context: context,
+ },
+ }
+}
+
+type bodyReader struct {
+ content []byte
+ truncated bool
+ closed bool
+}
+
+// ErrTruncatedBody is the error returned after the final Read() from a
+// response's Body if the body has been truncated by App Engine's proxy.
+var ErrTruncatedBody = errors.New("urlfetch: truncated body")
+
+func statusCodeToText(code int) string {
+ if t := http.StatusText(code); t != "" {
+ return t
+ }
+ return strconv.Itoa(code)
+}
+
+func (br *bodyReader) Read(p []byte) (n int, err error) {
+ if br.closed {
+ if br.truncated {
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+ }
+ n = copy(p, br.content)
+ if n > 0 {
+ br.content = br.content[n:]
+ return
+ }
+ if br.truncated {
+ br.closed = true
+ return 0, ErrTruncatedBody
+ }
+ return 0, io.EOF
+}
+
+func (br *bodyReader) Close() error {
+ br.closed = true
+ br.content = nil
+ return nil
+}
+
// methodAcceptsRequestBody lists the URL Fetch-accepted HTTP methods
// that may carry a request body; RoundTrip only forwards req.Body for
// these methods.
var methodAcceptsRequestBody = map[string]bool{
	"POST":  true,
	"PUT":   true,
	"PATCH": true,
}
+
+// urlString returns a valid string given a URL. This function is necessary because
+// the String method of URL doesn't correctly handle URLs with non-empty Opaque values.
+// See http://code.google.com/p/go/issues/detail?id=4860.
+func urlString(u *url.URL) string {
+ if u.Opaque == "" || strings.HasPrefix(u.Opaque, "//") {
+ return u.String()
+ }
+ aux := *u
+ aux.Opaque = "//" + aux.Host + aux.Opaque
+ return aux.String()
+}
+
// RoundTrip issues a single HTTP request and returns its response. Per the
// http.RoundTripper interface, RoundTrip only returns an error if there
// was an unsupported request or the URL Fetch proxy fails.
// Note that HTTP response codes such as 5xx, 403, 404, etc are not
// errors as far as the transport is concerned and will be returned
// with err set to nil.
func (t *Transport) RoundTrip(req *http.Request) (res *http.Response, err error) {
	// The urlfetch service supports a fixed set of methods; reject anything else.
	methNum, ok := pb.URLFetchRequest_RequestMethod_value[req.Method]
	if !ok {
		return nil, fmt.Errorf("urlfetch: unsupported HTTP method %q", req.Method)
	}

	method := pb.URLFetchRequest_RequestMethod(methNum)

	freq := &pb.URLFetchRequest{
		Method:                        &method,
		Url:                           proto.String(urlString(req.URL)),
		FollowRedirects:               proto.Bool(false), // http.Client's responsibility
		MustValidateServerCertificate: proto.Bool(!t.AllowInvalidServerCertificate),
	}
	opts := &internal.CallOptions{}

	// Propagate a configured deadline both to the service request and to
	// the RPC itself.
	if t.Deadline != 0 {
		freq.Deadline = proto.Float64(t.Deadline.Seconds())
		opts.Timeout = t.Deadline
	}

	for k, vals := range req.Header {
		for _, val := range vals {
			freq.Header = append(freq.Header, &pb.URLFetchRequest_Header{
				Key:   proto.String(k),
				Value: proto.String(val),
			})
		}
	}
	if methodAcceptsRequestBody[req.Method] && req.Body != nil {
		// Avoid a []byte copy if req.Body has a Bytes method.
		switch b := req.Body.(type) {
		case interface {
			Bytes() []byte
		}:
			freq.Payload = b.Bytes()
		default:
			freq.Payload, err = ioutil.ReadAll(req.Body)
			if err != nil {
				return nil, err
			}
		}
	}

	fres := &pb.URLFetchResponse{}
	if err := t.Context.Call("urlfetch", "Fetch", freq, fres, opts); err != nil {
		return nil, err
	}

	// Translate the proto response into an *http.Response.
	res = &http.Response{}
	res.StatusCode = int(*fres.StatusCode)
	res.Status = fmt.Sprintf("%d %s", res.StatusCode, statusCodeToText(res.StatusCode))
	res.Header = make(http.Header)
	res.Request = req

	// Faked:
	res.ProtoMajor = 1
	res.ProtoMinor = 1
	res.Proto = "HTTP/1.1"
	res.Close = true

	for _, h := range fres.Header {
		hkey := http.CanonicalHeaderKey(*h.Key)
		hval := *h.Value
		if hkey == "Content-Length" {
			// Will get filled in below for all but HEAD requests.
			if req.Method == "HEAD" {
				res.ContentLength, _ = strconv.ParseInt(hval, 10, 64)
			}
			continue
		}
		res.Header.Add(hkey, hval)
	}

	// A HEAD response carries no body, so the header value (above) is
	// authoritative; otherwise trust the actual content length.
	if req.Method != "HEAD" {
		res.ContentLength = int64(len(fres.Content))
	}

	truncated := fres.GetContentWasTruncated()
	res.Body = &bodyReader{content: fres.Content, truncated: truncated}
	return
}
+
// init registers the urlfetch service's error-code names and its
// deadline-exceeded code with the internal RPC machinery.
func init() {
	internal.RegisterErrorCodeMap("urlfetch", pb.URLFetchServiceError_ErrorCode_name)
	internal.RegisterTimeoutErrorCode("urlfetch", int32(pb.URLFetchServiceError_DEADLINE_EXCEEDED))
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/user/oauth.go b/Godeps/_workspace/src/google.golang.org/appengine/user/oauth.go
new file mode 100644
index 000000000000..3ae1ad11395a
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/user/oauth.go
@@ -0,0 +1,46 @@
+// Copyright 2012 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "google.golang.org/appengine"
+ pb "google.golang.org/appengine/internal/user"
+)
+
// CurrentOAuth returns the user associated with the OAuth consumer making this
// request. If the OAuth consumer did not make a valid OAuth request, or the
// scope is non-empty and the current user does not have this scope, this method
// will return an error.
func CurrentOAuth(c appengine.Context, scope string) (*User, error) {
	req := &pb.GetOAuthUserRequest{}
	if scope != "" {
		req.Scope = &scope
	}
	res := &pb.GetOAuthUserResponse{}

	err := c.Call("user", "GetOAuthUser", req, res, nil)
	if err != nil {
		return nil, err
	}
	// NOTE(review): assumes Email, AuthDomain and UserId are always set on a
	// successful response — confirm the service guarantees this, otherwise
	// these dereferences can panic.
	return &User{
		Email:      *res.Email,
		AuthDomain: *res.AuthDomain,
		Admin:      res.GetIsAdmin(),
		ID:         *res.UserId,
	}, nil
}
+
+// OAuthConsumerKey returns the OAuth consumer key provided with the current
+// request. This method will return an error if the OAuth request was invalid.
+func OAuthConsumerKey(c appengine.Context) (string, error) {
+ req := &pb.CheckOAuthSignatureRequest{}
+ res := &pb.CheckOAuthSignatureResponse{}
+
+ err := c.Call("user", "CheckOAuthSignature", req, res, nil)
+ if err != nil {
+ return "", err
+ }
+ return *res.OauthConsumerKey, err
+}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/user/user.go b/Godeps/_workspace/src/google.golang.org/appengine/user/user.go
new file mode 100644
index 000000000000..16b0e7ec91be
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/user/user.go
@@ -0,0 +1,103 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+// Package user provides a client for App Engine's user authentication service.
+package user // import "google.golang.org/appengine/user"
+
+import (
+ "strings"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/user"
+)
+
// User represents a user of the application.
type User struct {
	Email      string
	AuthDomain string
	Admin      bool

	// ID is the unique permanent ID of the user.
	// It is populated if the Email is associated
	// with a Google account, or empty otherwise.
	ID string

	FederatedIdentity string
	FederatedProvider string
}

// String returns a displayable name for the user: the local part of
// Email when it belongs to AuthDomain, otherwise the federated
// identity if set, otherwise the full email address.
func (u *User) String() string {
	if d := u.AuthDomain; d != "" {
		if suffix := "@" + d; strings.HasSuffix(u.Email, suffix) {
			return strings.TrimSuffix(u.Email, suffix)
		}
	}
	if u.FederatedIdentity != "" {
		return u.FederatedIdentity
	}
	return u.Email
}
+
// LoginURL returns a URL that, when visited, prompts the user to sign in,
// then redirects the user to the URL specified by dest.
func LoginURL(c appengine.Context, dest string) (string, error) {
	// A plain sign-in URL is a federated login URL with no OpenID identifier.
	return LoginURLFederated(c, dest, "")
}
+
+// LoginURLFederated is like LoginURL but accepts a user's OpenID identifier.
+func LoginURLFederated(c appengine.Context, dest, identity string) (string, error) {
+ req := &pb.CreateLoginURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ if identity != "" {
+ req.FederatedIdentity = proto.String(identity)
+ }
+ res := &pb.CreateLoginURLResponse{}
+ if err := c.Call("user", "CreateLoginURL", req, res, nil); err != nil {
+ return "", err
+ }
+ return *res.LoginUrl, nil
+}
+
+// LogoutURL returns a URL that, when visited, signs the user out,
+// then redirects the user to the URL specified by dest.
+func LogoutURL(c appengine.Context, dest string) (string, error) {
+ req := &pb.CreateLogoutURLRequest{
+ DestinationUrl: proto.String(dest),
+ }
+ res := &pb.CreateLogoutURLResponse{}
+ if err := c.Call("user", "CreateLogoutURL", req, res, nil); err != nil {
+ return "", err
+ }
+ return *res.LogoutUrl, nil
+}
+
+// Current returns the currently logged-in user,
+// or nil if the user is not signed in.
+func Current(c appengine.Context) *User {
+ u := &User{
+ Email: internal.VirtAPI(c, "user:Email"),
+ AuthDomain: internal.VirtAPI(c, "user:AuthDomain"),
+ ID: internal.VirtAPI(c, "user:ID"),
+ Admin: internal.VirtAPI(c, "user:IsAdmin") == "1",
+ FederatedIdentity: internal.VirtAPI(c, "user:FederatedIdentity"),
+ FederatedProvider: internal.VirtAPI(c, "user:FederatedProvider"),
+ }
+ if u.Email == "" && u.FederatedIdentity == "" {
+ return nil
+ }
+ return u
+}
+
// IsAdmin returns true if the current user is signed in and
// is currently registered as an administrator of the application.
func IsAdmin(c appengine.Context) bool {
	// Same "user:IsAdmin" flag that Current consults; "1" means admin.
	return internal.VirtAPI(c, "user:IsAdmin") == "1"
}
+
// init registers the user service's error-code names so failed calls
// map codes to readable messages.
func init() {
	internal.RegisterErrorCodeMap("user", pb.UserServiceError_ErrorCode_name)
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/user/user_test.go b/Godeps/_workspace/src/google.golang.org/appengine/user/user_test.go
new file mode 100644
index 000000000000..77b9404db2e9
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/user/user_test.go
@@ -0,0 +1,97 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package user
+
+import (
+ "fmt"
+ "net/http"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine/internal"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/user"
+)
+
// baseReq returns a minimal request with an empty (non-nil) header map,
// ready for per-test App Engine user headers.
func baseReq() *http.Request {
	r := &http.Request{}
	r.Header = http.Header{}
	return r
}
+
// basicUserTest describes one Current/IsAdmin scenario driven purely by
// request headers.
type basicUserTest struct {
	nickname, email, authDomain, admin string
	// expectations
	isNil, isAdmin bool
	displayName    string
}

// basicUserTests: signed-out, a regular user whose email matches the
// auth domain, and an admin whose email does not match the auth domain.
var basicUserTests = []basicUserTest{
	{"", "", "", "0", true, false, ""},
	{"ken", "ken@example.com", "example.com", "0", false, false, "ken"},
	{"ken", "ken@example.com", "auth_domain.com", "1", false, true, "ken@example.com"},
}
+
// TestBasicUserAPI exercises IsAdmin, Current and User.String against
// contexts built from X-AppEngine-* request headers.
func TestBasicUserAPI(t *testing.T) {
	for i, tc := range basicUserTests {
		// Presumably these headers are how the testing context conveys
		// user info to VirtAPI — confirm against internal.ContextForTesting.
		req := baseReq()
		req.Header.Set("X-AppEngine-User-Nickname", tc.nickname)
		req.Header.Set("X-AppEngine-User-Email", tc.email)
		req.Header.Set("X-AppEngine-Auth-Domain", tc.authDomain)
		req.Header.Set("X-AppEngine-User-Is-Admin", tc.admin)

		c := internal.ContextForTesting(req)

		if ga := IsAdmin(c); ga != tc.isAdmin {
			t.Errorf("test %d: expected IsAdmin(c) = %v, got %v", i, tc.isAdmin, ga)
		}

		u := Current(c)
		if tc.isNil {
			if u != nil {
				t.Errorf("test %d: expected u == nil, got %+v", i, u)
			}
			continue
		}
		if u == nil {
			t.Errorf("test %d: expected u != nil, got nil", i)
			continue
		}
		if u.Email != tc.email {
			t.Errorf("test %d: expected u.Email = %q, got %q", i, tc.email, u.Email)
		}
		if gs := u.String(); gs != tc.displayName {
			t.Errorf("test %d: expected u.String() = %q, got %q", i, tc.displayName, gs)
		}
		if u.Admin != tc.isAdmin {
			t.Errorf("test %d: expected u.Admin = %v, got %v", i, tc.isAdmin, u.Admin)
		}
	}
}
+
// TestLoginURL verifies that LoginURL issues the expected CreateLoginURL
// RPC and returns the URL from the response.
func TestLoginURL(t *testing.T) {
	expectedQuery := &pb.CreateLoginURLRequest{
		DestinationUrl: proto.String("/destination"),
	}
	const expectedDest = "/redir/dest"
	c := aetesting.FakeSingleContext(t, "user", "CreateLoginURL", func(req *pb.CreateLoginURLRequest, res *pb.CreateLoginURLResponse) error {
		if !proto.Equal(req, expectedQuery) {
			return fmt.Errorf("got %v, want %v", req, expectedQuery)
		}
		res.LoginUrl = proto.String(expectedDest)
		return nil
	})

	url, err := LoginURL(c, "/destination")
	if err != nil {
		t.Fatalf("LoginURL failed: %v", err)
	}
	if url != expectedDest {
		t.Errorf("got %v, want %v", url, expectedDest)
	}
}
+
+// TODO(dsymonds): Add test for LogoutURL.
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp.go b/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp.go
new file mode 100644
index 000000000000..f32e77df8c54
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp.go
@@ -0,0 +1,251 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+/*
+Package xmpp provides the means to send and receive instant messages
+to and from users of XMPP-compatible services.
+
+To send a message,
+ m := &xmpp.Message{
+ To: []string{"kaylee@example.com"},
+ Body: `Hi! How's the carrot?`,
+ }
+ err := m.Send(c)
+
+To receive messages,
+ func init() {
+ xmpp.Handle(handleChat)
+ }
+
+ func handleChat(c appengine.Context, m *xmpp.Message) {
+ // ...
+ }
+*/
+package xmpp // import "google.golang.org/appengine/xmpp"
+
+import (
+ "errors"
+ "fmt"
+ "net/http"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
// Message represents a chat message, both incoming (via Handle) and
// outgoing (via Send).
type Message struct {
	// Sender is the JID of the sender.
	// Optional for outgoing messages.
	Sender string

	// To is the intended recipients of the message.
	// Incoming messages will have exactly one element.
	To []string

	// Body is the body of the message.
	Body string

	// Type is the message type, per RFC 3921.
	// It defaults to "chat".
	Type string

	// RawXML is whether the body contains raw XML.
	RawXML bool
}
+
// Presence represents an outgoing presence update.
// Empty optional fields are omitted from the request so the service
// applies its defaults.
type Presence struct {
	// Sender is the JID (optional).
	Sender string

	// The intended recipient of the presence update.
	To string

	// Type, per RFC 3921 (optional). Defaults to "available".
	Type string

	// State of presence (optional).
	// Valid values: "away", "chat", "xa", "dnd" (RFC 3921).
	State string

	// Free text status message (optional).
	Status string
}
+
var (
	// ErrPresenceUnavailable is returned by GetPresence and
	// GetPresenceMulti when a user's presence is unavailable.
	ErrPresenceUnavailable = errors.New("xmpp: presence unavailable")

	// ErrInvalidJID is reported (inside an appengine.MultiError) by
	// GetPresenceMulti for JIDs the service marks invalid.
	ErrInvalidJID = errors.New("xmpp: invalid JID")
)
+
// Handle arranges for f to be called for incoming XMPP messages.
// Only messages of type "chat" or "normal" will be handled.
func Handle(f func(c appengine.Context, m *Message)) {
	http.HandleFunc("/_ah/xmpp/message/chat/", func(_ http.ResponseWriter, r *http.Request) {
		// Only Sender, To and Body are populated from the form values;
		// Type and RawXML are left at their zero values.
		f(appengine.NewContext(r), &Message{
			Sender: r.FormValue("from"),
			To:     []string{r.FormValue("to")},
			Body:   r.FormValue("body"),
		})
	})
}
+
+// Send sends a message.
+// If any failures occur with specific recipients, the error will be an appengine.MultiError.
+func (m *Message) Send(c appengine.Context) error {
+ req := &pb.XmppMessageRequest{
+ Jid: m.To,
+ Body: &m.Body,
+ RawXml: &m.RawXML,
+ }
+ if m.Type != "" && m.Type != "chat" {
+ req.Type = &m.Type
+ }
+ if m.Sender != "" {
+ req.FromJid = &m.Sender
+ }
+ res := &pb.XmppMessageResponse{}
+ if err := c.Call("xmpp", "SendMessage", req, res, nil); err != nil {
+ return err
+ }
+
+ if len(res.Status) != len(req.Jid) {
+ return fmt.Errorf("xmpp: sent message to %d JIDs, but only got %d statuses back", len(req.Jid), len(res.Status))
+ }
+ me, any := make(appengine.MultiError, len(req.Jid)), false
+ for i, st := range res.Status {
+ if st != pb.XmppMessageResponse_NO_ERROR {
+ me[i] = errors.New(st.String())
+ any = true
+ }
+ }
+ if any {
+ return me
+ }
+ return nil
+}
+
+// Invite sends an invitation. If the from address is an empty string
+// the default (yourapp@appspot.com/bot) will be used.
+func Invite(c appengine.Context, to, from string) error {
+ req := &pb.XmppInviteRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.XmppInviteResponse{}
+ return c.Call("xmpp", "SendInvite", req, res, nil)
+}
+
+// Send sends a presence update.
+func (p *Presence) Send(c appengine.Context) error {
+ req := &pb.XmppSendPresenceRequest{
+ Jid: &p.To,
+ }
+ if p.State != "" {
+ req.Show = &p.State
+ }
+ if p.Type != "" {
+ req.Type = &p.Type
+ }
+ if p.Sender != "" {
+ req.FromJid = &p.Sender
+ }
+ if p.Status != "" {
+ req.Status = &p.Status
+ }
+ res := &pb.XmppSendPresenceResponse{}
+ return c.Call("xmpp", "SendPresence", req, res, nil)
+}
+
// presenceMap translates the service's SHOW enum into the string
// values documented on GetPresence / GetPresenceMulti.
var presenceMap = map[pb.PresenceResponse_SHOW]string{
	pb.PresenceResponse_NORMAL:         "",
	pb.PresenceResponse_AWAY:           "away",
	pb.PresenceResponse_DO_NOT_DISTURB: "dnd",
	pb.PresenceResponse_CHAT:           "chat",
	pb.PresenceResponse_EXTENDED_AWAY:  "xa",
}
+
+// GetPresence retrieves a user's presence.
+// If the from address is an empty string the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// ErrPresenceUnavailable is returned if the presence is unavailable.
+func GetPresence(c appengine.Context, to string, from string) (string, error) {
+ req := &pb.PresenceRequest{
+ Jid: &to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.PresenceResponse{}
+ if err := c.Call("xmpp", "GetPresence", req, res, nil); err != nil {
+ return "", err
+ }
+ if !*res.IsAvailable || res.Presence == nil {
+ return "", ErrPresenceUnavailable
+ }
+ presence, ok := presenceMap[*res.Presence]
+ if ok {
+ return presence, nil
+ }
+ return "", fmt.Errorf("xmpp: unknown presence %v", *res.Presence)
+}
+
+// GetPresenceMulti retrieves multiple users' presence.
+// If the from address is an empty string the default
+// (yourapp@appspot.com/bot) will be used.
+// Possible return values are "", "away", "dnd", "chat", "xa".
+// If any presence is unavailable, an appengine.MultiError is returned
+func GetPresenceMulti(c appengine.Context, to []string, from string) ([]string, error) {
+ req := &pb.BulkPresenceRequest{
+ Jid: to,
+ }
+ if from != "" {
+ req.FromJid = &from
+ }
+ res := &pb.BulkPresenceResponse{}
+
+ if err := c.Call("xmpp", "BulkGetPresence", req, res, nil); err != nil {
+ return nil, err
+ }
+
+ presences := make([]string, 0, len(res.PresenceResponse))
+ errs := appengine.MultiError{}
+
+ addResult := func(presence string, err error) {
+ presences = append(presences, presence)
+ errs = append(errs, err)
+ }
+
+ anyErr := false
+ for _, subres := range res.PresenceResponse {
+ if !subres.GetValid() {
+ anyErr = true
+ addResult("", ErrInvalidJID)
+ continue
+ }
+ if !*subres.IsAvailable || subres.Presence == nil {
+ anyErr = true
+ addResult("", ErrPresenceUnavailable)
+ continue
+ }
+ presence, ok := presenceMap[*subres.Presence]
+ if ok {
+ addResult(presence, nil)
+ } else {
+ anyErr = true
+ addResult("", fmt.Errorf("xmpp: unknown presence %q", *subres.Presence))
+ }
+ }
+ if anyErr {
+ return presences, errs
+ }
+ return presences, nil
+}
+
// init registers the xmpp service's error-code names so failed calls
// map codes to readable messages.
func init() {
	internal.RegisterErrorCodeMap("xmpp", pb.XmppServiceError_ErrorCode_name)
}
diff --git a/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp_test.go b/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp_test.go
new file mode 100644
index 000000000000..c3030d36d90f
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/appengine/xmpp/xmpp_test.go
@@ -0,0 +1,173 @@
+// Copyright 2011 Google Inc. All rights reserved.
+// Use of this source code is governed by the Apache 2.0
+// license that can be found in the LICENSE file.
+
+package xmpp
+
+import (
+ "fmt"
+ "reflect"
+ "testing"
+
+ "github.com/golang/protobuf/proto"
+
+ "google.golang.org/appengine"
+ "google.golang.org/appengine/internal/aetesting"
+ pb "google.golang.org/appengine/internal/xmpp"
+)
+
+func newPresenceResponse(isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) *pb.PresenceResponse {
+ return &pb.PresenceResponse{
+ IsAvailable: proto.Bool(isAvailable),
+ Presence: presence.Enum(),
+ Valid: proto.Bool(valid),
+ }
+}
+
+func setPresenceResponse(m *pb.PresenceResponse, isAvailable bool, presence pb.PresenceResponse_SHOW, valid bool) {
+ m.IsAvailable = &isAvailable
+ m.Presence = presence.Enum()
+ m.Valid = &valid
+}
+
+func TestGetPresence(t *testing.T) {
+ c := aetesting.FakeSingleContext(t, "xmpp", "GetPresence", func(in *pb.PresenceRequest, out *pb.PresenceResponse) error {
+ if jid := in.GetJid(); jid != "user@example.com" {
+ return fmt.Errorf("bad jid %q", jid)
+ }
+ setPresenceResponse(out, true, pb.PresenceResponse_CHAT, true)
+ return nil
+ })
+
+ presence, err := GetPresence(c, "user@example.com", "")
+ if err != nil {
+ t.Fatalf("GetPresence: %v", err)
+ }
+
+ if presence != "chat" {
+ t.Errorf("GetPresence: got %#v, want %#v", presence, pb.PresenceResponse_CHAT)
+ }
+}
+
// TestGetPresenceMultiSingleJID covers the single-JID case: a NORMAL
// presence maps to the empty string.
func TestGetPresenceMultiSingleJID(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
		if !reflect.DeepEqual(in.Jid, []string{"user@example.com"}) {
			return fmt.Errorf("bad request jids %#v", in.Jid)
		}
		out.PresenceResponse = []*pb.PresenceResponse{
			newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
		}
		return nil
	})

	presence, err := GetPresenceMulti(c, []string{"user@example.com"}, "")
	if err != nil {
		t.Fatalf("GetPresenceMulti: %v", err)
	}
	if !reflect.DeepEqual(presence, []string{""}) {
		t.Errorf("GetPresenceMulti: got %s, want %s", presence, []string{""})
	}
}
+
// TestGetPresenceMultiJID covers two JIDs with distinct presence values
// (NORMAL -> "", AWAY -> "away"), preserving request order.
func TestGetPresenceMultiJID(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
		if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
			return fmt.Errorf("bad request jids %#v", in.Jid)
		}
		out.PresenceResponse = []*pb.PresenceResponse{
			newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
			newPresenceResponse(true, pb.PresenceResponse_AWAY, true),
		}
		return nil
	})

	jids := []string{"user@example.com", "user2@example.com"}
	presence, err := GetPresenceMulti(c, jids, "")
	if err != nil {
		t.Fatalf("GetPresenceMulti: %v", err)
	}
	want := []string{"", "away"}
	if !reflect.DeepEqual(presence, want) {
		t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
	}
}
+
// TestGetPresenceMultiFromJID verifies that a non-empty from address is
// forwarded as FromJid on the bulk request.
func TestGetPresenceMultiFromJID(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
		if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
			return fmt.Errorf("bad request jids %#v", in.Jid)
		}
		if jid := in.GetFromJid(); jid != "bot@appspot.com" {
			return fmt.Errorf("bad from jid %q", jid)
		}
		out.PresenceResponse = []*pb.PresenceResponse{
			newPresenceResponse(true, pb.PresenceResponse_NORMAL, true),
			newPresenceResponse(true, pb.PresenceResponse_CHAT, true),
		}
		return nil
	})

	jids := []string{"user@example.com", "user2@example.com"}
	presence, err := GetPresenceMulti(c, jids, "bot@appspot.com")
	if err != nil {
		t.Fatalf("GetPresenceMulti: %v", err)
	}
	want := []string{"", "chat"}
	if !reflect.DeepEqual(presence, want) {
		t.Errorf("GetPresenceMulti: got %v, want %v", presence, want)
	}
}
+
// TestGetPresenceMultiInvalid verifies partial failure: an invalid JID
// yields ErrInvalidJID in the MultiError while the valid JID still gets
// its presence ("xa").
func TestGetPresenceMultiInvalid(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
		if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
			return fmt.Errorf("bad request jids %#v", in.Jid)
		}
		out.PresenceResponse = []*pb.PresenceResponse{
			newPresenceResponse(true, pb.PresenceResponse_EXTENDED_AWAY, true),
			newPresenceResponse(true, pb.PresenceResponse_CHAT, false),
		}
		return nil
	})

	jids := []string{"user@example.com", "user2@example.com"}
	presence, err := GetPresenceMulti(c, jids, "")

	wantErr := appengine.MultiError{nil, ErrInvalidJID}
	if !reflect.DeepEqual(err, wantErr) {
		t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
	}

	want := []string{"xa", ""}
	if !reflect.DeepEqual(presence, want) {
		t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
	}
}
+
// TestGetPresenceMultiUnavailable verifies that unavailable users yield
// ErrPresenceUnavailable per slot and empty presence strings.
func TestGetPresenceMultiUnavailable(t *testing.T) {
	c := aetesting.FakeSingleContext(t, "xmpp", "BulkGetPresence", func(in *pb.BulkPresenceRequest, out *pb.BulkPresenceResponse) error {
		if !reflect.DeepEqual(in.Jid, []string{"user@example.com", "user2@example.com"}) {
			return fmt.Errorf("bad request jids %#v", in.Jid)
		}
		out.PresenceResponse = []*pb.PresenceResponse{
			newPresenceResponse(false, pb.PresenceResponse_AWAY, true),
			newPresenceResponse(false, pb.PresenceResponse_DO_NOT_DISTURB, true),
		}
		return nil
	})

	jids := []string{"user@example.com", "user2@example.com"}
	presence, err := GetPresenceMulti(c, jids, "")

	wantErr := appengine.MultiError{
		ErrPresenceUnavailable,
		ErrPresenceUnavailable,
	}
	if !reflect.DeepEqual(err, wantErr) {
		t.Fatalf("GetPresenceMulti: got %#v, want %#v", err, wantErr)
	}
	want := []string{"", ""}
	if !reflect.DeepEqual(presence, want) {
		t.Errorf("GetPresenceMulti: got %#v, want %#v", presence, want)
	}
}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go
new file mode 100644
index 000000000000..c979f43906d4
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/go13.go
@@ -0,0 +1,37 @@
+// Copyright 2015 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// +build go1.3
+
+package metadata
+
+import (
+ "net"
+ "time"
+)
+
// This is a workaround for https://github.com/golang/oauth2/issues/70, where
// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
// Jan 2015 still runs).
//
// TODO(bradfitz,jbd,adg): remove this once App Engine supports Go
// 1.3+.
func init() {
	// This file is build-tagged go1.3, so KeepAlive is available here;
	// metadata.go falls back to a KeepAlive-less dialer otherwise.
	go13Dialer = func() *net.Dialer {
		return &net.Dialer{
			Timeout:   750 * time.Millisecond,
			KeepAlive: 30 * time.Second,
		}
	}
}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
new file mode 100644
index 000000000000..7753a05b6741
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/compute/metadata/metadata.go
@@ -0,0 +1,267 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package metadata provides access to Google Compute Engine (GCE)
+// metadata and API service accounts.
+//
+// This package is a wrapper around the GCE metadata service,
+// as documented at https://developers.google.com/compute/docs/metadata.
+package metadata
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "strings"
+ "sync"
+ "time"
+
+ "google.golang.org/cloud/internal"
+)
+
+type cachedValue struct {
+ k string
+ trim bool
+ mu sync.Mutex
+ v string
+}
+
+var (
+ projID = &cachedValue{k: "project/project-id", trim: true}
+ projNum = &cachedValue{k: "project/numeric-project-id", trim: true}
+ instID = &cachedValue{k: "instance/id", trim: true}
+)
+
+var metaClient = &http.Client{
+ Transport: &internal.Transport{
+ Base: &http.Transport{
+ Dial: dialer().Dial,
+ ResponseHeaderTimeout: 750 * time.Millisecond,
+ },
+ },
+}
+
+// go13Dialer is nil until we're using Go 1.3+.
+// This is a workaround for https://github.com/golang/oauth2/issues/70, where
+// net.Dialer.KeepAlive is unavailable on Go 1.2 (which App Engine as of
+// Jan 2015 still runs).
+//
+// TODO(bradfitz,jbd,adg,dsymonds): remove this once App Engine supports Go
+// 1.3+ and go-app-builder also supports 1.3+, or when Go 1.2 is no longer an
+// option on App Engine.
+var go13Dialer func() *net.Dialer
+
+func dialer() *net.Dialer {
+ if fn := go13Dialer; fn != nil {
+ return fn()
+ }
+ return &net.Dialer{
+ Timeout: 750 * time.Millisecond,
+ }
+}
+
+// NotDefinedError is returned when requested metadata is not defined.
+//
+// The underlying string is the suffix after "/computeMetadata/v1/".
+//
+// This error is not returned if the value is defined to be the empty
+// string.
+type NotDefinedError string
+
+func (suffix NotDefinedError) Error() string {
+ return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
+}
+
+// Get returns a value from the metadata service.
+// The suffix is appended to "http://metadata/computeMetadata/v1/".
+//
+// If the requested metadata is not defined, the returned error will
+// be of type NotDefinedError.
+func Get(suffix string) (string, error) {
+ // Using 169.254.169.254 instead of "metadata" here because Go
+ // binaries built with the "netgo" tag and without cgo won't
+ // know the search suffix for "metadata" is
+ // ".google.internal", and this IP address is documented as
+ // being stable anyway.
+ url := "http://169.254.169.254/computeMetadata/v1/" + suffix
+ req, _ := http.NewRequest("GET", url, nil)
+ req.Header.Set("Metadata-Flavor", "Google")
+ res, err := metaClient.Do(req)
+ if err != nil {
+ return "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return "", NotDefinedError(suffix)
+ }
+ if res.StatusCode != 200 {
+ return "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
+ }
+ all, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return "", err
+ }
+ return string(all), nil
+}
+
+func getTrimmed(suffix string) (s string, err error) {
+ s, err = Get(suffix)
+ s = strings.TrimSpace(s)
+ return
+}
+
+func (c *cachedValue) get() (v string, err error) {
+ defer c.mu.Unlock()
+ c.mu.Lock()
+ if c.v != "" {
+ return c.v, nil
+ }
+ if c.trim {
+ v, err = getTrimmed(c.k)
+ } else {
+ v, err = Get(c.k)
+ }
+ if err == nil {
+ c.v = v
+ }
+ return
+}
+
+var onGCE struct {
+ sync.Mutex
+ set bool
+ v bool
+}
+
+// OnGCE reports whether this process is running on Google Compute Engine.
+func OnGCE() bool {
+ defer onGCE.Unlock()
+ onGCE.Lock()
+ if onGCE.set {
+ return onGCE.v
+ }
+ onGCE.set = true
+
+ // We use the DNS name of the metadata service here instead of the IP address
+ // because we expect that to fail faster in the not-on-GCE case.
+ res, err := metaClient.Get("http://metadata.google.internal")
+ if err != nil {
+ return false
+ }
+ onGCE.v = res.Header.Get("Metadata-Flavor") == "Google"
+ return onGCE.v
+}
+
+// ProjectID returns the current instance's project ID string.
+func ProjectID() (string, error) { return projID.get() }
+
+// NumericProjectID returns the current instance's numeric project ID.
+func NumericProjectID() (string, error) { return projNum.get() }
+
+// InternalIP returns the instance's primary internal IP address.
+func InternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/ip")
+}
+
+// ExternalIP returns the instance's primary external (public) IP address.
+func ExternalIP() (string, error) {
+ return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
+}
+
+// Hostname returns the instance's hostname. This will probably be of
+// the form "INSTANCENAME.c.PROJECT.internal" but that isn't
+// guaranteed.
+//
+// TODO: what is this defined to be? Docs say "The host name of the
+// instance."
+func Hostname() (string, error) {
+ return getTrimmed("network-interfaces/0/ip")
+}
+
+// InstanceTags returns the list of user-defined instance tags,
+// assigned when initially creating a GCE instance.
+func InstanceTags() ([]string, error) {
+ var s []string
+ j, err := Get("instance/tags")
+ if err != nil {
+ return nil, err
+ }
+ if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
+ return nil, err
+ }
+ return s, nil
+}
+
+// InstanceID returns the current VM's numeric instance ID.
+func InstanceID() (string, error) {
+ return instID.get()
+}
+
+// InstanceAttributes returns the list of user-defined attributes,
+// assigned when initially creating a GCE VM instance. The value of an
+// attribute can be obtained with InstanceAttributeValue.
+func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
+
+// ProjectAttributes returns the list of user-defined attributes
+// applying to the project as a whole, not just this VM. The value of
+// an attribute can be obtained with ProjectAttributeValue.
+func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
+
+func lines(suffix string) ([]string, error) {
+ j, err := Get(suffix)
+ if err != nil {
+ return nil, err
+ }
+ s := strings.Split(strings.TrimSpace(j), "\n")
+ for i := range s {
+ s[i] = strings.TrimSpace(s[i])
+ }
+ return s, nil
+}
+
+// InstanceAttributeValue returns the value of the provided VM
+// instance attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// InstanceAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func InstanceAttributeValue(attr string) (string, error) {
+ return Get("instance/attributes/" + attr)
+}
+
+// ProjectAttributeValue returns the value of the provided
+// project attribute.
+//
+// If the requested attribute is not defined, the returned error will
+// be of type NotDefinedError.
+//
+// ProjectAttributeValue may return ("", nil) if the attribute was
+// defined to be the empty string.
+func ProjectAttributeValue(attr string) (string, error) {
+ return Get("project/attributes/" + attr)
+}
+
+// Scopes returns the service account scopes for the given account.
+// The account may be empty or the string "default" to use the instance's
+// main account.
+func Scopes(serviceAccount string) ([]string, error) {
+ if serviceAccount == "" {
+ serviceAccount = "default"
+ }
+ return lines("instance/service-accounts/" + serviceAccount + "/scopes")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
new file mode 100644
index 000000000000..984323c26823
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/cloud.go
@@ -0,0 +1,128 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package internal provides support for the cloud packages.
+//
+// Users should not import this package directly.
+package internal
+
+import (
+ "fmt"
+ "net/http"
+ "sync"
+
+ "golang.org/x/net/context"
+)
+
+type contextKey struct{}
+
+func WithContext(parent context.Context, projID string, c *http.Client) context.Context {
+ if c == nil {
+ panic("nil *http.Client passed to WithContext")
+ }
+ if projID == "" {
+ panic("empty project ID passed to WithContext")
+ }
+ return context.WithValue(parent, contextKey{}, &cloudContext{
+ ProjectID: projID,
+ HTTPClient: c,
+ })
+}
+
+const userAgent = "gcloud-golang/0.1"
+
+type cloudContext struct {
+ ProjectID string
+ HTTPClient *http.Client
+
+ mu sync.Mutex // guards svc
+ svc map[string]interface{} // e.g. "storage" => *rawStorage.Service
+}
+
+// Service returns the result of the fill function if it's never been
+// called before for the given name (which is assumed to be an API
+// service name, like "datastore"). If it has already been cached, the fill
+// func is not run.
+// It's safe for concurrent use by multiple goroutines.
+func Service(ctx context.Context, name string, fill func(*http.Client) interface{}) interface{} {
+ return cc(ctx).service(name, fill)
+}
+
+func (c *cloudContext) service(name string, fill func(*http.Client) interface{}) interface{} {
+ c.mu.Lock()
+ defer c.mu.Unlock()
+
+ if c.svc == nil {
+ c.svc = make(map[string]interface{})
+ } else if v, ok := c.svc[name]; ok {
+ return v
+ }
+ v := fill(c.HTTPClient)
+ c.svc[name] = v
+ return v
+}
+
+// Transport is an http.RoundTripper that appends
+// Google Cloud client's user-agent to the original
+// request's user-agent header.
+type Transport struct {
+ // Base represents the actual http.RoundTripper
+ // the requests will be delegated to.
+ Base http.RoundTripper
+}
+
+// RoundTrip appends a user-agent to the existing user-agent
+// header and delegates the request to the base http.RoundTripper.
+func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) {
+ req = cloneRequest(req)
+ ua := req.Header.Get("User-Agent")
+ if ua == "" {
+ ua = userAgent
+ } else {
+ ua = fmt.Sprintf("%s;%s", ua, userAgent)
+ }
+ req.Header.Set("User-Agent", ua)
+ return t.Base.RoundTrip(req)
+}
+
+// cloneRequest returns a clone of the provided *http.Request.
+// The clone is a shallow copy of the struct and its Header map.
+func cloneRequest(r *http.Request) *http.Request {
+ // shallow copy of the struct
+ r2 := new(http.Request)
+ *r2 = *r
+ // deep copy of the Header
+ r2.Header = make(http.Header)
+ for k, s := range r.Header {
+ r2.Header[k] = s
+ }
+ return r2
+}
+
+func ProjID(ctx context.Context) string {
+ return cc(ctx).ProjectID
+}
+
+func HTTPClient(ctx context.Context) *http.Client {
+ return cc(ctx).HTTPClient
+}
+
+// cc returns the internal *cloudContext (cc) state for a context.Context.
+// It panics if the user did it wrong.
+func cc(ctx context.Context) *cloudContext {
+ if c, ok := ctx.Value(contextKey{}).(*cloudContext); ok {
+ return c
+ }
+ panic("invalid context.Context type; it should be created with cloud.NewContext")
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
new file mode 100644
index 000000000000..be903e5ce08d
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.pb.go
@@ -0,0 +1,1633 @@
+// Code generated by protoc-gen-go.
+// source: datastore_v1.proto
+// DO NOT EDIT!
+
+/*
+Package pb is a generated protocol buffer package.
+
+It is generated from these files:
+ datastore_v1.proto
+
+It has these top-level messages:
+ PartitionId
+ Key
+ Value
+ Property
+ Entity
+ EntityResult
+ Query
+ KindExpression
+ PropertyReference
+ PropertyExpression
+ PropertyOrder
+ Filter
+ CompositeFilter
+ PropertyFilter
+ GqlQuery
+ GqlQueryArg
+ QueryResultBatch
+ Mutation
+ MutationResult
+ ReadOptions
+ LookupRequest
+ LookupResponse
+ RunQueryRequest
+ RunQueryResponse
+ BeginTransactionRequest
+ BeginTransactionResponse
+ RollbackRequest
+ RollbackResponse
+ CommitRequest
+ CommitResponse
+ AllocateIdsRequest
+ AllocateIdsResponse
+*/
+package pb
+
+import proto "github.com/golang/protobuf/proto"
+import math "math"
+
+// Reference imports to suppress errors if they are not otherwise used.
+var _ = proto.Marshal
+var _ = math.Inf
+
+// Specifies what data the 'entity' field contains.
+// A ResultType is either implied (for example, in LookupResponse.found it
+// is always FULL) or specified by context (for example, in message
+// QueryResultBatch, field 'entity_result_type' specifies a ResultType
+// for all the values in field 'entity_result').
+type EntityResult_ResultType int32
+
+const (
+ EntityResult_FULL EntityResult_ResultType = 1
+ EntityResult_PROJECTION EntityResult_ResultType = 2
+ // The entity may have no key.
+ // A property value may have meaning 18.
+ EntityResult_KEY_ONLY EntityResult_ResultType = 3
+)
+
+var EntityResult_ResultType_name = map[int32]string{
+ 1: "FULL",
+ 2: "PROJECTION",
+ 3: "KEY_ONLY",
+}
+var EntityResult_ResultType_value = map[string]int32{
+ "FULL": 1,
+ "PROJECTION": 2,
+ "KEY_ONLY": 3,
+}
+
+func (x EntityResult_ResultType) Enum() *EntityResult_ResultType {
+ p := new(EntityResult_ResultType)
+ *p = x
+ return p
+}
+func (x EntityResult_ResultType) String() string {
+ return proto.EnumName(EntityResult_ResultType_name, int32(x))
+}
+func (x *EntityResult_ResultType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(EntityResult_ResultType_value, data, "EntityResult_ResultType")
+ if err != nil {
+ return err
+ }
+ *x = EntityResult_ResultType(value)
+ return nil
+}
+
+type PropertyExpression_AggregationFunction int32
+
+const (
+ PropertyExpression_FIRST PropertyExpression_AggregationFunction = 1
+)
+
+var PropertyExpression_AggregationFunction_name = map[int32]string{
+ 1: "FIRST",
+}
+var PropertyExpression_AggregationFunction_value = map[string]int32{
+ "FIRST": 1,
+}
+
+func (x PropertyExpression_AggregationFunction) Enum() *PropertyExpression_AggregationFunction {
+ p := new(PropertyExpression_AggregationFunction)
+ *p = x
+ return p
+}
+func (x PropertyExpression_AggregationFunction) String() string {
+ return proto.EnumName(PropertyExpression_AggregationFunction_name, int32(x))
+}
+func (x *PropertyExpression_AggregationFunction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PropertyExpression_AggregationFunction_value, data, "PropertyExpression_AggregationFunction")
+ if err != nil {
+ return err
+ }
+ *x = PropertyExpression_AggregationFunction(value)
+ return nil
+}
+
+type PropertyOrder_Direction int32
+
+const (
+ PropertyOrder_ASCENDING PropertyOrder_Direction = 1
+ PropertyOrder_DESCENDING PropertyOrder_Direction = 2
+)
+
+var PropertyOrder_Direction_name = map[int32]string{
+ 1: "ASCENDING",
+ 2: "DESCENDING",
+}
+var PropertyOrder_Direction_value = map[string]int32{
+ "ASCENDING": 1,
+ "DESCENDING": 2,
+}
+
+func (x PropertyOrder_Direction) Enum() *PropertyOrder_Direction {
+ p := new(PropertyOrder_Direction)
+ *p = x
+ return p
+}
+func (x PropertyOrder_Direction) String() string {
+ return proto.EnumName(PropertyOrder_Direction_name, int32(x))
+}
+func (x *PropertyOrder_Direction) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PropertyOrder_Direction_value, data, "PropertyOrder_Direction")
+ if err != nil {
+ return err
+ }
+ *x = PropertyOrder_Direction(value)
+ return nil
+}
+
+type CompositeFilter_Operator int32
+
+const (
+ CompositeFilter_AND CompositeFilter_Operator = 1
+)
+
+var CompositeFilter_Operator_name = map[int32]string{
+ 1: "AND",
+}
+var CompositeFilter_Operator_value = map[string]int32{
+ "AND": 1,
+}
+
+func (x CompositeFilter_Operator) Enum() *CompositeFilter_Operator {
+ p := new(CompositeFilter_Operator)
+ *p = x
+ return p
+}
+func (x CompositeFilter_Operator) String() string {
+ return proto.EnumName(CompositeFilter_Operator_name, int32(x))
+}
+func (x *CompositeFilter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CompositeFilter_Operator_value, data, "CompositeFilter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = CompositeFilter_Operator(value)
+ return nil
+}
+
+type PropertyFilter_Operator int32
+
+const (
+ PropertyFilter_LESS_THAN PropertyFilter_Operator = 1
+ PropertyFilter_LESS_THAN_OR_EQUAL PropertyFilter_Operator = 2
+ PropertyFilter_GREATER_THAN PropertyFilter_Operator = 3
+ PropertyFilter_GREATER_THAN_OR_EQUAL PropertyFilter_Operator = 4
+ PropertyFilter_EQUAL PropertyFilter_Operator = 5
+ PropertyFilter_HAS_ANCESTOR PropertyFilter_Operator = 11
+)
+
+var PropertyFilter_Operator_name = map[int32]string{
+ 1: "LESS_THAN",
+ 2: "LESS_THAN_OR_EQUAL",
+ 3: "GREATER_THAN",
+ 4: "GREATER_THAN_OR_EQUAL",
+ 5: "EQUAL",
+ 11: "HAS_ANCESTOR",
+}
+var PropertyFilter_Operator_value = map[string]int32{
+ "LESS_THAN": 1,
+ "LESS_THAN_OR_EQUAL": 2,
+ "GREATER_THAN": 3,
+ "GREATER_THAN_OR_EQUAL": 4,
+ "EQUAL": 5,
+ "HAS_ANCESTOR": 11,
+}
+
+func (x PropertyFilter_Operator) Enum() *PropertyFilter_Operator {
+ p := new(PropertyFilter_Operator)
+ *p = x
+ return p
+}
+func (x PropertyFilter_Operator) String() string {
+ return proto.EnumName(PropertyFilter_Operator_name, int32(x))
+}
+func (x *PropertyFilter_Operator) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(PropertyFilter_Operator_value, data, "PropertyFilter_Operator")
+ if err != nil {
+ return err
+ }
+ *x = PropertyFilter_Operator(value)
+ return nil
+}
+
+// The possible values for the 'more_results' field.
+type QueryResultBatch_MoreResultsType int32
+
+const (
+ QueryResultBatch_NOT_FINISHED QueryResultBatch_MoreResultsType = 1
+ QueryResultBatch_MORE_RESULTS_AFTER_LIMIT QueryResultBatch_MoreResultsType = 2
+ // results after the limit.
+ QueryResultBatch_NO_MORE_RESULTS QueryResultBatch_MoreResultsType = 3
+)
+
+var QueryResultBatch_MoreResultsType_name = map[int32]string{
+ 1: "NOT_FINISHED",
+ 2: "MORE_RESULTS_AFTER_LIMIT",
+ 3: "NO_MORE_RESULTS",
+}
+var QueryResultBatch_MoreResultsType_value = map[string]int32{
+ "NOT_FINISHED": 1,
+ "MORE_RESULTS_AFTER_LIMIT": 2,
+ "NO_MORE_RESULTS": 3,
+}
+
+func (x QueryResultBatch_MoreResultsType) Enum() *QueryResultBatch_MoreResultsType {
+ p := new(QueryResultBatch_MoreResultsType)
+ *p = x
+ return p
+}
+func (x QueryResultBatch_MoreResultsType) String() string {
+ return proto.EnumName(QueryResultBatch_MoreResultsType_name, int32(x))
+}
+func (x *QueryResultBatch_MoreResultsType) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(QueryResultBatch_MoreResultsType_value, data, "QueryResultBatch_MoreResultsType")
+ if err != nil {
+ return err
+ }
+ *x = QueryResultBatch_MoreResultsType(value)
+ return nil
+}
+
+type ReadOptions_ReadConsistency int32
+
+const (
+ ReadOptions_DEFAULT ReadOptions_ReadConsistency = 0
+ ReadOptions_STRONG ReadOptions_ReadConsistency = 1
+ ReadOptions_EVENTUAL ReadOptions_ReadConsistency = 2
+)
+
+var ReadOptions_ReadConsistency_name = map[int32]string{
+ 0: "DEFAULT",
+ 1: "STRONG",
+ 2: "EVENTUAL",
+}
+var ReadOptions_ReadConsistency_value = map[string]int32{
+ "DEFAULT": 0,
+ "STRONG": 1,
+ "EVENTUAL": 2,
+}
+
+func (x ReadOptions_ReadConsistency) Enum() *ReadOptions_ReadConsistency {
+ p := new(ReadOptions_ReadConsistency)
+ *p = x
+ return p
+}
+func (x ReadOptions_ReadConsistency) String() string {
+ return proto.EnumName(ReadOptions_ReadConsistency_name, int32(x))
+}
+func (x *ReadOptions_ReadConsistency) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(ReadOptions_ReadConsistency_value, data, "ReadOptions_ReadConsistency")
+ if err != nil {
+ return err
+ }
+ *x = ReadOptions_ReadConsistency(value)
+ return nil
+}
+
+type BeginTransactionRequest_IsolationLevel int32
+
+const (
+ BeginTransactionRequest_SNAPSHOT BeginTransactionRequest_IsolationLevel = 0
+ // conflict if their mutations conflict. For example:
+ // Read(A),Write(B) may not conflict with Read(B),Write(A),
+ // but Read(B),Write(B) does conflict with Read(B),Write(B).
+ BeginTransactionRequest_SERIALIZABLE BeginTransactionRequest_IsolationLevel = 1
+)
+
+var BeginTransactionRequest_IsolationLevel_name = map[int32]string{
+ 0: "SNAPSHOT",
+ 1: "SERIALIZABLE",
+}
+var BeginTransactionRequest_IsolationLevel_value = map[string]int32{
+ "SNAPSHOT": 0,
+ "SERIALIZABLE": 1,
+}
+
+func (x BeginTransactionRequest_IsolationLevel) Enum() *BeginTransactionRequest_IsolationLevel {
+ p := new(BeginTransactionRequest_IsolationLevel)
+ *p = x
+ return p
+}
+func (x BeginTransactionRequest_IsolationLevel) String() string {
+ return proto.EnumName(BeginTransactionRequest_IsolationLevel_name, int32(x))
+}
+func (x *BeginTransactionRequest_IsolationLevel) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(BeginTransactionRequest_IsolationLevel_value, data, "BeginTransactionRequest_IsolationLevel")
+ if err != nil {
+ return err
+ }
+ *x = BeginTransactionRequest_IsolationLevel(value)
+ return nil
+}
+
+type CommitRequest_Mode int32
+
+const (
+ CommitRequest_TRANSACTIONAL CommitRequest_Mode = 1
+ CommitRequest_NON_TRANSACTIONAL CommitRequest_Mode = 2
+)
+
+var CommitRequest_Mode_name = map[int32]string{
+ 1: "TRANSACTIONAL",
+ 2: "NON_TRANSACTIONAL",
+}
+var CommitRequest_Mode_value = map[string]int32{
+ "TRANSACTIONAL": 1,
+ "NON_TRANSACTIONAL": 2,
+}
+
+func (x CommitRequest_Mode) Enum() *CommitRequest_Mode {
+ p := new(CommitRequest_Mode)
+ *p = x
+ return p
+}
+func (x CommitRequest_Mode) String() string {
+ return proto.EnumName(CommitRequest_Mode_name, int32(x))
+}
+func (x *CommitRequest_Mode) UnmarshalJSON(data []byte) error {
+ value, err := proto.UnmarshalJSONEnum(CommitRequest_Mode_value, data, "CommitRequest_Mode")
+ if err != nil {
+ return err
+ }
+ *x = CommitRequest_Mode(value)
+ return nil
+}
+
+// An identifier for a particular subset of entities.
+//
+// Entities are partitioned into various subsets, each used by different
+// datasets and different namespaces within a dataset and so forth.
+//
+// All input partition IDs are normalized before use.
+// A partition ID is normalized as follows:
+// If the partition ID is unset or is set to an empty partition ID, replace it
+// with the context partition ID.
+// Otherwise, if the partition ID has no dataset ID, assign it the context
+// partition ID's dataset ID.
+// Unless otherwise documented, the context partition ID has the dataset ID set
+// to the context dataset ID and no other partition dimension set.
+//
+// A partition ID is empty if all of its fields are unset.
+//
+// Partition dimension:
+// A dimension may be unset.
+// A dimension's value must never be "".
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
+// If the value of any dimension matches regex "__.*__",
+// the partition is reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented contexts.
+//
+// Dataset ID:
+// A dataset id's value must never be "".
+// A dataset id's value must match
+// ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99}
+type PartitionId struct {
+ // The dataset ID.
+ DatasetId *string `protobuf:"bytes,3,opt,name=dataset_id" json:"dataset_id,omitempty"`
+ // The namespace.
+ Namespace *string `protobuf:"bytes,4,opt,name=namespace" json:"namespace,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PartitionId) Reset() { *m = PartitionId{} }
+func (m *PartitionId) String() string { return proto.CompactTextString(m) }
+func (*PartitionId) ProtoMessage() {}
+
+func (m *PartitionId) GetDatasetId() string {
+ if m != nil && m.DatasetId != nil {
+ return *m.DatasetId
+ }
+ return ""
+}
+
+func (m *PartitionId) GetNamespace() string {
+ if m != nil && m.Namespace != nil {
+ return *m.Namespace
+ }
+ return ""
+}
+
+// A unique identifier for an entity.
+// If a key's partition id or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+type Key struct {
+ // Entities are partitioned into subsets, currently identified by a dataset
+ // (usually implicitly specified by the project) and namespace ID.
+ // Queries are scoped to a single partition.
+ PartitionId *PartitionId `protobuf:"bytes,1,opt,name=partition_id" json:"partition_id,omitempty"`
+ // The entity path.
+ // An entity path consists of one or more elements composed of a kind and a
+ // string or numerical identifier, which identify entities. The first
+ // element identifies a root entity, the second element identifies
+ // a child of the root entity, the third element a child of the
+ // second entity, and so forth. The entities identified by all prefixes of
+ // the path are called the element's ancestors.
+ // An entity path is always fully complete: ALL of the entity's ancestors
+ // are required to be in the path along with the entity identifier itself.
+ // The only exception is that in some documented cases, the identifier in the
+ // last path element (for the entity) itself may be omitted. A path can never
+ // be empty.
+ PathElement []*Key_PathElement `protobuf:"bytes,2,rep,name=path_element" json:"path_element,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Key) Reset() { *m = Key{} }
+func (m *Key) String() string { return proto.CompactTextString(m) }
+func (*Key) ProtoMessage() {}
+
+func (m *Key) GetPartitionId() *PartitionId {
+ if m != nil {
+ return m.PartitionId
+ }
+ return nil
+}
+
+func (m *Key) GetPathElement() []*Key_PathElement {
+ if m != nil {
+ return m.PathElement
+ }
+ return nil
+}
+
+// A (kind, ID/name) pair used to construct a key path.
+//
+// At most one of name or ID may be set.
+// If either is set, the element is complete.
+// If neither is set, the element is incomplete.
+type Key_PathElement struct {
+ // The kind of the entity.
+ // A kind matching regex "__.*__" is reserved/read-only.
+ // A kind must not contain more than 500 characters.
+ // Cannot be "".
+ Kind *string `protobuf:"bytes,1,req,name=kind" json:"kind,omitempty"`
+ // The ID of the entity.
+ // Never equal to zero. Values less than zero are discouraged and will not
+ // be supported in the future.
+ Id *int64 `protobuf:"varint,2,opt,name=id" json:"id,omitempty"`
+ // The name of the entity.
+ // A name matching regex "__.*__" is reserved/read-only.
+ // A name must not be more than 500 characters.
+ // Cannot be "".
+ Name *string `protobuf:"bytes,3,opt,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Key_PathElement) Reset() { *m = Key_PathElement{} }
+func (m *Key_PathElement) String() string { return proto.CompactTextString(m) }
+func (*Key_PathElement) ProtoMessage() {}
+
+func (m *Key_PathElement) GetKind() string {
+ if m != nil && m.Kind != nil {
+ return *m.Kind
+ }
+ return ""
+}
+
+func (m *Key_PathElement) GetId() int64 {
+ if m != nil && m.Id != nil {
+ return *m.Id
+ }
+ return 0
+}
+
+func (m *Key_PathElement) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+//
+// At most one of the Value fields may be set.
+// If none are set the value is "null".
+//
+type Value struct {
+ // A boolean value.
+ BooleanValue *bool `protobuf:"varint,1,opt,name=boolean_value" json:"boolean_value,omitempty"`
+ // An integer value.
+ IntegerValue *int64 `protobuf:"varint,2,opt,name=integer_value" json:"integer_value,omitempty"`
+ // A double value.
+ DoubleValue *float64 `protobuf:"fixed64,3,opt,name=double_value" json:"double_value,omitempty"`
+ // A timestamp value.
+ TimestampMicrosecondsValue *int64 `protobuf:"varint,4,opt,name=timestamp_microseconds_value" json:"timestamp_microseconds_value,omitempty"`
+ // A key value.
+ KeyValue *Key `protobuf:"bytes,5,opt,name=key_value" json:"key_value,omitempty"`
+ // A blob key value.
+ BlobKeyValue *string `protobuf:"bytes,16,opt,name=blob_key_value" json:"blob_key_value,omitempty"`
+ // A UTF-8 encoded string value.
+ StringValue *string `protobuf:"bytes,17,opt,name=string_value" json:"string_value,omitempty"`
+ // A blob value.
+ BlobValue []byte `protobuf:"bytes,18,opt,name=blob_value" json:"blob_value,omitempty"`
+ // An entity value.
+ // May have no key.
+ // May have a key with an incomplete key path.
+ // May have a reserved/read-only key.
+ EntityValue *Entity `protobuf:"bytes,6,opt,name=entity_value" json:"entity_value,omitempty"`
+ // A list value.
+ // Cannot contain another list value.
+ // Cannot also have a meaning and indexing set.
+ ListValue []*Value `protobuf:"bytes,7,rep,name=list_value" json:"list_value,omitempty"`
+ // The meaning field is reserved and should not be used.
+ Meaning *int32 `protobuf:"varint,14,opt,name=meaning" json:"meaning,omitempty"`
+ // If the value should be indexed.
+ //
+ // The indexed property may be set for a
+ // null value.
+ // When indexed is true, stringValue
+ // is limited to 500 characters and the blob value is limited to 500 bytes.
+ // Exception: If meaning is set to 2, string_value is limited to 2038
+ // characters regardless of indexed.
+ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
+ // will be ignored on input (and will never be set on output).
+ // Input values by default have indexed set to
+ // true; however, you can explicitly set indexed to
+ // true if you want. (An output value never has
+ // indexed explicitly set to true.) If a value is
+ // itself an entity, it cannot have indexed set to
+ // true.
+ // Exception: An entity value with meaning 9, 20 or 21 may be indexed.
+ Indexed *bool `protobuf:"varint,15,opt,name=indexed,def=1" json:"indexed,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Value) Reset() { *m = Value{} }
+func (m *Value) String() string { return proto.CompactTextString(m) }
+func (*Value) ProtoMessage() {}
+
+const Default_Value_Indexed bool = true
+
+func (m *Value) GetBooleanValue() bool {
+ if m != nil && m.BooleanValue != nil {
+ return *m.BooleanValue
+ }
+ return false
+}
+
+func (m *Value) GetIntegerValue() int64 {
+ if m != nil && m.IntegerValue != nil {
+ return *m.IntegerValue
+ }
+ return 0
+}
+
+func (m *Value) GetDoubleValue() float64 {
+ if m != nil && m.DoubleValue != nil {
+ return *m.DoubleValue
+ }
+ return 0
+}
+
+func (m *Value) GetTimestampMicrosecondsValue() int64 {
+ if m != nil && m.TimestampMicrosecondsValue != nil {
+ return *m.TimestampMicrosecondsValue
+ }
+ return 0
+}
+
+func (m *Value) GetKeyValue() *Key {
+ if m != nil {
+ return m.KeyValue
+ }
+ return nil
+}
+
+func (m *Value) GetBlobKeyValue() string {
+ if m != nil && m.BlobKeyValue != nil {
+ return *m.BlobKeyValue
+ }
+ return ""
+}
+
+func (m *Value) GetStringValue() string {
+ if m != nil && m.StringValue != nil {
+ return *m.StringValue
+ }
+ return ""
+}
+
+func (m *Value) GetBlobValue() []byte {
+ if m != nil {
+ return m.BlobValue
+ }
+ return nil
+}
+
+func (m *Value) GetEntityValue() *Entity {
+ if m != nil {
+ return m.EntityValue
+ }
+ return nil
+}
+
+func (m *Value) GetListValue() []*Value {
+ if m != nil {
+ return m.ListValue
+ }
+ return nil
+}
+
+func (m *Value) GetMeaning() int32 {
+ if m != nil && m.Meaning != nil {
+ return *m.Meaning
+ }
+ return 0
+}
+
+func (m *Value) GetIndexed() bool {
+ if m != nil && m.Indexed != nil {
+ return *m.Indexed
+ }
+ return Default_Value_Indexed
+}
+
+// An entity property.
+type Property struct {
+ // The name of the property.
+ // A property name matching regex "__.*__" is reserved.
+ // A reserved property name is forbidden in certain documented contexts.
+ // The name must not contain more than 500 characters.
+ // Cannot be "".
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ // The value(s) of the property.
+ // Each value can have only one value property populated. For example,
+ // you cannot have a values list of { value: { integerValue: 22,
+ // stringValue: "a" } }, but you can have { value: { listValue:
+ // [ { integerValue: 22 }, { stringValue: "a" } ] }.
+ Value *Value `protobuf:"bytes,4,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Property) Reset() { *m = Property{} }
+func (m *Property) String() string { return proto.CompactTextString(m) }
+func (*Property) ProtoMessage() {}
+
+func (m *Property) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *Property) GetValue() *Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// An entity.
+//
+// An entity is limited to 1 megabyte when stored. That roughly
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+type Entity struct {
+ // The entity's key.
+ //
+ // An entity must have a key, unless otherwise documented (for example,
+ // an entity in Value.entityValue may have no key).
+ // An entity's kind is its key's path's last element's kind,
+ // or null if it has no key.
+ Key *Key `protobuf:"bytes,1,opt,name=key" json:"key,omitempty"`
+ // The entity's properties.
+ // Each property's name must be unique for its entity.
+ Property []*Property `protobuf:"bytes,2,rep,name=property" json:"property,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Entity) Reset() { *m = Entity{} }
+func (m *Entity) String() string { return proto.CompactTextString(m) }
+func (*Entity) ProtoMessage() {}
+
+func (m *Entity) GetKey() *Key {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func (m *Entity) GetProperty() []*Property {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+// The result of fetching an entity from the datastore.
+type EntityResult struct {
+ // The resulting entity.
+ Entity *Entity `protobuf:"bytes,1,req,name=entity" json:"entity,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *EntityResult) Reset() { *m = EntityResult{} }
+func (m *EntityResult) String() string { return proto.CompactTextString(m) }
+func (*EntityResult) ProtoMessage() {}
+
+func (m *EntityResult) GetEntity() *Entity {
+ if m != nil {
+ return m.Entity
+ }
+ return nil
+}
+
+// A query.
+type Query struct {
+ // The projection to return. If not set the entire entity is returned.
+ Projection []*PropertyExpression `protobuf:"bytes,2,rep,name=projection" json:"projection,omitempty"`
+ // The kinds to query (if empty, returns entities from all kinds).
+ Kind []*KindExpression `protobuf:"bytes,3,rep,name=kind" json:"kind,omitempty"`
+ // The filter to apply (optional).
+ Filter *Filter `protobuf:"bytes,4,opt,name=filter" json:"filter,omitempty"`
+ // The order to apply to the query results (if empty, order is unspecified).
+ Order []*PropertyOrder `protobuf:"bytes,5,rep,name=order" json:"order,omitempty"`
+ // The properties to group by (if empty, no grouping is applied to the
+ // result set).
+ GroupBy []*PropertyReference `protobuf:"bytes,6,rep,name=group_by" json:"group_by,omitempty"`
+ // A starting point for the query results. Optional. Query cursors are
+ // returned in query result batches.
+ StartCursor []byte `protobuf:"bytes,7,opt,name=start_cursor" json:"start_cursor,omitempty"`
+ // An ending point for the query results. Optional. Query cursors are
+ // returned in query result batches.
+ EndCursor []byte `protobuf:"bytes,8,opt,name=end_cursor" json:"end_cursor,omitempty"`
+ // The number of results to skip. Applies before limit, but after all other
+ // constraints (optional, defaults to 0).
+ Offset *int32 `protobuf:"varint,10,opt,name=offset,def=0" json:"offset,omitempty"`
+ // The maximum number of results to return. Applies after all other
+ // constraints. Optional.
+ Limit *int32 `protobuf:"varint,11,opt,name=limit" json:"limit,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Query) Reset() { *m = Query{} }
+func (m *Query) String() string { return proto.CompactTextString(m) }
+func (*Query) ProtoMessage() {}
+
+const Default_Query_Offset int32 = 0
+
+func (m *Query) GetProjection() []*PropertyExpression {
+ if m != nil {
+ return m.Projection
+ }
+ return nil
+}
+
+func (m *Query) GetKind() []*KindExpression {
+ if m != nil {
+ return m.Kind
+ }
+ return nil
+}
+
+func (m *Query) GetFilter() *Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+func (m *Query) GetOrder() []*PropertyOrder {
+ if m != nil {
+ return m.Order
+ }
+ return nil
+}
+
+func (m *Query) GetGroupBy() []*PropertyReference {
+ if m != nil {
+ return m.GroupBy
+ }
+ return nil
+}
+
+func (m *Query) GetStartCursor() []byte {
+ if m != nil {
+ return m.StartCursor
+ }
+ return nil
+}
+
+func (m *Query) GetEndCursor() []byte {
+ if m != nil {
+ return m.EndCursor
+ }
+ return nil
+}
+
+func (m *Query) GetOffset() int32 {
+ if m != nil && m.Offset != nil {
+ return *m.Offset
+ }
+ return Default_Query_Offset
+}
+
+func (m *Query) GetLimit() int32 {
+ if m != nil && m.Limit != nil {
+ return *m.Limit
+ }
+ return 0
+}
+
+// A representation of a kind.
+type KindExpression struct {
+ // The name of the kind.
+ Name *string `protobuf:"bytes,1,req,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *KindExpression) Reset() { *m = KindExpression{} }
+func (m *KindExpression) String() string { return proto.CompactTextString(m) }
+func (*KindExpression) ProtoMessage() {}
+
+func (m *KindExpression) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// A reference to a property relative to the kind expressions.
+// exactly.
+type PropertyReference struct {
+ // The name of the property.
+ Name *string `protobuf:"bytes,2,req,name=name" json:"name,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyReference) Reset() { *m = PropertyReference{} }
+func (m *PropertyReference) String() string { return proto.CompactTextString(m) }
+func (*PropertyReference) ProtoMessage() {}
+
+func (m *PropertyReference) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+// A representation of a property in a projection.
+type PropertyExpression struct {
+ // The property to project.
+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+ // The aggregation function to apply to the property. Optional.
+ // Can only be used when grouping by at least one property. Must
+ // then be set on all properties in the projection that are not
+ // being grouped by.
+ AggregationFunction *PropertyExpression_AggregationFunction `protobuf:"varint,2,opt,name=aggregation_function,enum=pb.PropertyExpression_AggregationFunction" json:"aggregation_function,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyExpression) Reset() { *m = PropertyExpression{} }
+func (m *PropertyExpression) String() string { return proto.CompactTextString(m) }
+func (*PropertyExpression) ProtoMessage() {}
+
+func (m *PropertyExpression) GetProperty() *PropertyReference {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *PropertyExpression) GetAggregationFunction() PropertyExpression_AggregationFunction {
+ if m != nil && m.AggregationFunction != nil {
+ return *m.AggregationFunction
+ }
+ return PropertyExpression_FIRST
+}
+
+// The desired order for a specific property.
+type PropertyOrder struct {
+ // The property to order by.
+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+ // The direction to order by.
+ Direction *PropertyOrder_Direction `protobuf:"varint,2,opt,name=direction,enum=pb.PropertyOrder_Direction,def=1" json:"direction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyOrder) Reset() { *m = PropertyOrder{} }
+func (m *PropertyOrder) String() string { return proto.CompactTextString(m) }
+func (*PropertyOrder) ProtoMessage() {}
+
+const Default_PropertyOrder_Direction PropertyOrder_Direction = PropertyOrder_ASCENDING
+
+func (m *PropertyOrder) GetProperty() *PropertyReference {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *PropertyOrder) GetDirection() PropertyOrder_Direction {
+ if m != nil && m.Direction != nil {
+ return *m.Direction
+ }
+ return Default_PropertyOrder_Direction
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+type Filter struct {
+ // A composite filter.
+ CompositeFilter *CompositeFilter `protobuf:"bytes,1,opt,name=composite_filter" json:"composite_filter,omitempty"`
+ // A filter on a property.
+ PropertyFilter *PropertyFilter `protobuf:"bytes,2,opt,name=property_filter" json:"property_filter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Filter) Reset() { *m = Filter{} }
+func (m *Filter) String() string { return proto.CompactTextString(m) }
+func (*Filter) ProtoMessage() {}
+
+func (m *Filter) GetCompositeFilter() *CompositeFilter {
+ if m != nil {
+ return m.CompositeFilter
+ }
+ return nil
+}
+
+func (m *Filter) GetPropertyFilter() *PropertyFilter {
+ if m != nil {
+ return m.PropertyFilter
+ }
+ return nil
+}
+
+// A filter that merges the multiple other filters using the given operation.
+type CompositeFilter struct {
+ // The operator for combining multiple filters.
+ Operator *CompositeFilter_Operator `protobuf:"varint,1,req,name=operator,enum=pb.CompositeFilter_Operator" json:"operator,omitempty"`
+ // The list of filters to combine.
+ // Must contain at least one filter.
+ Filter []*Filter `protobuf:"bytes,2,rep,name=filter" json:"filter,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CompositeFilter) Reset() { *m = CompositeFilter{} }
+func (m *CompositeFilter) String() string { return proto.CompactTextString(m) }
+func (*CompositeFilter) ProtoMessage() {}
+
+func (m *CompositeFilter) GetOperator() CompositeFilter_Operator {
+ if m != nil && m.Operator != nil {
+ return *m.Operator
+ }
+ return CompositeFilter_AND
+}
+
+func (m *CompositeFilter) GetFilter() []*Filter {
+ if m != nil {
+ return m.Filter
+ }
+ return nil
+}
+
+// A filter on a specific property.
+type PropertyFilter struct {
+ // The property to filter by.
+ Property *PropertyReference `protobuf:"bytes,1,req,name=property" json:"property,omitempty"`
+ // The operator to filter by.
+ Operator *PropertyFilter_Operator `protobuf:"varint,2,req,name=operator,enum=pb.PropertyFilter_Operator" json:"operator,omitempty"`
+ // The value to compare the property to.
+ Value *Value `protobuf:"bytes,3,req,name=value" json:"value,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *PropertyFilter) Reset() { *m = PropertyFilter{} }
+func (m *PropertyFilter) String() string { return proto.CompactTextString(m) }
+func (*PropertyFilter) ProtoMessage() {}
+
+func (m *PropertyFilter) GetProperty() *PropertyReference {
+ if m != nil {
+ return m.Property
+ }
+ return nil
+}
+
+func (m *PropertyFilter) GetOperator() PropertyFilter_Operator {
+ if m != nil && m.Operator != nil {
+ return *m.Operator
+ }
+ return PropertyFilter_LESS_THAN
+}
+
+func (m *PropertyFilter) GetValue() *Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+// A GQL query.
+type GqlQuery struct {
+ QueryString *string `protobuf:"bytes,1,req,name=query_string" json:"query_string,omitempty"`
+ // When false, the query string must not contain a literal.
+ AllowLiteral *bool `protobuf:"varint,2,opt,name=allow_literal,def=0" json:"allow_literal,omitempty"`
+ // A named argument must set field GqlQueryArg.name.
+ // No two named arguments may have the same name.
+ // For each non-reserved named binding site in the query string,
+ // there must be a named argument with that name,
+ // but not necessarily the inverse.
+ NameArg []*GqlQueryArg `protobuf:"bytes,3,rep,name=name_arg" json:"name_arg,omitempty"`
+ // Numbered binding site @1 references the first numbered argument,
+ // effectively using 1-based indexing, rather than the usual 0.
+ // A numbered argument must NOT set field GqlQueryArg.name.
+ // For each binding site numbered i in query_string,
+ // there must be an ith numbered argument.
+ // The inverse must also be true.
+ NumberArg []*GqlQueryArg `protobuf:"bytes,4,rep,name=number_arg" json:"number_arg,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GqlQuery) Reset() { *m = GqlQuery{} }
+func (m *GqlQuery) String() string { return proto.CompactTextString(m) }
+func (*GqlQuery) ProtoMessage() {}
+
+const Default_GqlQuery_AllowLiteral bool = false
+
+func (m *GqlQuery) GetQueryString() string {
+ if m != nil && m.QueryString != nil {
+ return *m.QueryString
+ }
+ return ""
+}
+
+func (m *GqlQuery) GetAllowLiteral() bool {
+ if m != nil && m.AllowLiteral != nil {
+ return *m.AllowLiteral
+ }
+ return Default_GqlQuery_AllowLiteral
+}
+
+func (m *GqlQuery) GetNameArg() []*GqlQueryArg {
+ if m != nil {
+ return m.NameArg
+ }
+ return nil
+}
+
+func (m *GqlQuery) GetNumberArg() []*GqlQueryArg {
+ if m != nil {
+ return m.NumberArg
+ }
+ return nil
+}
+
+// A binding argument for a GQL query.
+// Exactly one of fields value and cursor must be set.
+type GqlQueryArg struct {
+ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
+ // Must not match regex "__.*__".
+ // Must not be "".
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
+ Value *Value `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"`
+ Cursor []byte `protobuf:"bytes,3,opt,name=cursor" json:"cursor,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *GqlQueryArg) Reset() { *m = GqlQueryArg{} }
+func (m *GqlQueryArg) String() string { return proto.CompactTextString(m) }
+func (*GqlQueryArg) ProtoMessage() {}
+
+func (m *GqlQueryArg) GetName() string {
+ if m != nil && m.Name != nil {
+ return *m.Name
+ }
+ return ""
+}
+
+func (m *GqlQueryArg) GetValue() *Value {
+ if m != nil {
+ return m.Value
+ }
+ return nil
+}
+
+func (m *GqlQueryArg) GetCursor() []byte {
+ if m != nil {
+ return m.Cursor
+ }
+ return nil
+}
+
+// A batch of results produced by a query.
+type QueryResultBatch struct {
+ // The result type for every entity in entityResults.
+ EntityResultType *EntityResult_ResultType `protobuf:"varint,1,req,name=entity_result_type,enum=pb.EntityResult_ResultType" json:"entity_result_type,omitempty"`
+ // The results for this batch.
+ EntityResult []*EntityResult `protobuf:"bytes,2,rep,name=entity_result" json:"entity_result,omitempty"`
+ // A cursor that points to the position after the last result in the batch.
+ // May be absent.
+ EndCursor []byte `protobuf:"bytes,4,opt,name=end_cursor" json:"end_cursor,omitempty"`
+ // The state of the query after the current batch.
+ MoreResults *QueryResultBatch_MoreResultsType `protobuf:"varint,5,req,name=more_results,enum=pb.QueryResultBatch_MoreResultsType" json:"more_results,omitempty"`
+ // The number of results skipped because of Query.offset.
+ SkippedResults *int32 `protobuf:"varint,6,opt,name=skipped_results" json:"skipped_results,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *QueryResultBatch) Reset() { *m = QueryResultBatch{} }
+func (m *QueryResultBatch) String() string { return proto.CompactTextString(m) }
+func (*QueryResultBatch) ProtoMessage() {}
+
+func (m *QueryResultBatch) GetEntityResultType() EntityResult_ResultType {
+ if m != nil && m.EntityResultType != nil {
+ return *m.EntityResultType
+ }
+ return EntityResult_FULL
+}
+
+func (m *QueryResultBatch) GetEntityResult() []*EntityResult {
+ if m != nil {
+ return m.EntityResult
+ }
+ return nil
+}
+
+func (m *QueryResultBatch) GetEndCursor() []byte {
+ if m != nil {
+ return m.EndCursor
+ }
+ return nil
+}
+
+func (m *QueryResultBatch) GetMoreResults() QueryResultBatch_MoreResultsType {
+ if m != nil && m.MoreResults != nil {
+ return *m.MoreResults
+ }
+ return QueryResultBatch_NOT_FINISHED
+}
+
+func (m *QueryResultBatch) GetSkippedResults() int32 {
+ if m != nil && m.SkippedResults != nil {
+ return *m.SkippedResults
+ }
+ return 0
+}
+
+// A set of changes to apply.
+//
+// No entity in this message may have a reserved property name,
+// not even a property in an entity in a value.
+// No value in this message may have meaning 18,
+// not even a value in an entity in another value.
+//
+// If entities with duplicate keys are present, an arbitrary choice will
+// be made as to which is written.
+type Mutation struct {
+ // Entities to upsert.
+ // Each upserted entity's key must have a complete path and
+ // must not be reserved/read-only.
+ Upsert []*Entity `protobuf:"bytes,1,rep,name=upsert" json:"upsert,omitempty"`
+ // Entities to update.
+ // Each updated entity's key must have a complete path and
+ // must not be reserved/read-only.
+ Update []*Entity `protobuf:"bytes,2,rep,name=update" json:"update,omitempty"`
+ // Entities to insert.
+ // Each inserted entity's key must have a complete path and
+ // must not be reserved/read-only.
+ Insert []*Entity `protobuf:"bytes,3,rep,name=insert" json:"insert,omitempty"`
+ // Insert entities with a newly allocated ID.
+ // Each inserted entity's key must omit the final identifier in its path and
+ // must not be reserved/read-only.
+ InsertAutoId []*Entity `protobuf:"bytes,4,rep,name=insert_auto_id" json:"insert_auto_id,omitempty"`
+ // Keys of entities to delete.
+ // Each key must have a complete key path and must not be reserved/read-only.
+ Delete []*Key `protobuf:"bytes,5,rep,name=delete" json:"delete,omitempty"`
+ // Ignore a user specified read-only period. Optional.
+ Force *bool `protobuf:"varint,6,opt,name=force" json:"force,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *Mutation) Reset() { *m = Mutation{} }
+func (m *Mutation) String() string { return proto.CompactTextString(m) }
+func (*Mutation) ProtoMessage() {}
+
+func (m *Mutation) GetUpsert() []*Entity {
+ if m != nil {
+ return m.Upsert
+ }
+ return nil
+}
+
+func (m *Mutation) GetUpdate() []*Entity {
+ if m != nil {
+ return m.Update
+ }
+ return nil
+}
+
+func (m *Mutation) GetInsert() []*Entity {
+ if m != nil {
+ return m.Insert
+ }
+ return nil
+}
+
+func (m *Mutation) GetInsertAutoId() []*Entity {
+ if m != nil {
+ return m.InsertAutoId
+ }
+ return nil
+}
+
+func (m *Mutation) GetDelete() []*Key {
+ if m != nil {
+ return m.Delete
+ }
+ return nil
+}
+
+func (m *Mutation) GetForce() bool {
+ if m != nil && m.Force != nil {
+ return *m.Force
+ }
+ return false
+}
+
+// The result of applying a mutation.
+type MutationResult struct {
+ // Number of index writes.
+ IndexUpdates *int32 `protobuf:"varint,1,req,name=index_updates" json:"index_updates,omitempty"`
+ // Keys for insertAutoId entities. One per entity from the
+ // request, in the same order.
+ InsertAutoIdKey []*Key `protobuf:"bytes,2,rep,name=insert_auto_id_key" json:"insert_auto_id_key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *MutationResult) Reset() { *m = MutationResult{} }
+func (m *MutationResult) String() string { return proto.CompactTextString(m) }
+func (*MutationResult) ProtoMessage() {}
+
+func (m *MutationResult) GetIndexUpdates() int32 {
+ if m != nil && m.IndexUpdates != nil {
+ return *m.IndexUpdates
+ }
+ return 0
+}
+
+func (m *MutationResult) GetInsertAutoIdKey() []*Key {
+ if m != nil {
+ return m.InsertAutoIdKey
+ }
+ return nil
+}
+
+// Options shared by read requests.
+type ReadOptions struct {
+ // The read consistency to use.
+ // Cannot be set when transaction is set.
+ // Lookup and ancestor queries default to STRONG, global queries default to
+ // EVENTUAL and cannot be set to STRONG.
+ ReadConsistency *ReadOptions_ReadConsistency `protobuf:"varint,1,opt,name=read_consistency,enum=pb.ReadOptions_ReadConsistency,def=0" json:"read_consistency,omitempty"`
+ // The transaction to use. Optional.
+ Transaction []byte `protobuf:"bytes,2,opt,name=transaction" json:"transaction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *ReadOptions) Reset() { *m = ReadOptions{} }
+func (m *ReadOptions) String() string { return proto.CompactTextString(m) }
+func (*ReadOptions) ProtoMessage() {}
+
+const Default_ReadOptions_ReadConsistency ReadOptions_ReadConsistency = ReadOptions_DEFAULT
+
+func (m *ReadOptions) GetReadConsistency() ReadOptions_ReadConsistency {
+ if m != nil && m.ReadConsistency != nil {
+ return *m.ReadConsistency
+ }
+ return Default_ReadOptions_ReadConsistency
+}
+
+func (m *ReadOptions) GetTransaction() []byte {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+// The request for Lookup.
+type LookupRequest struct {
+ // Options for this lookup request. Optional.
+ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"`
+ // Keys of entities to look up from the datastore.
+ Key []*Key `protobuf:"bytes,3,rep,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LookupRequest) Reset() { *m = LookupRequest{} }
+func (m *LookupRequest) String() string { return proto.CompactTextString(m) }
+func (*LookupRequest) ProtoMessage() {}
+
+func (m *LookupRequest) GetReadOptions() *ReadOptions {
+ if m != nil {
+ return m.ReadOptions
+ }
+ return nil
+}
+
+func (m *LookupRequest) GetKey() []*Key {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+// The response for Lookup.
+type LookupResponse struct {
+ // Entities found as ResultType.FULL entities.
+ Found []*EntityResult `protobuf:"bytes,1,rep,name=found" json:"found,omitempty"`
+ // Entities not found as ResultType.KEY_ONLY entities.
+ Missing []*EntityResult `protobuf:"bytes,2,rep,name=missing" json:"missing,omitempty"`
+ // A list of keys that were not looked up due to resource constraints.
+ Deferred []*Key `protobuf:"bytes,3,rep,name=deferred" json:"deferred,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *LookupResponse) Reset() { *m = LookupResponse{} }
+func (m *LookupResponse) String() string { return proto.CompactTextString(m) }
+func (*LookupResponse) ProtoMessage() {}
+
+func (m *LookupResponse) GetFound() []*EntityResult {
+ if m != nil {
+ return m.Found
+ }
+ return nil
+}
+
+func (m *LookupResponse) GetMissing() []*EntityResult {
+ if m != nil {
+ return m.Missing
+ }
+ return nil
+}
+
+func (m *LookupResponse) GetDeferred() []*Key {
+ if m != nil {
+ return m.Deferred
+ }
+ return nil
+}
+
+// The request for RunQuery.
+type RunQueryRequest struct {
+ // The options for this query.
+ ReadOptions *ReadOptions `protobuf:"bytes,1,opt,name=read_options" json:"read_options,omitempty"`
+ // Entities are partitioned into subsets, identified by a dataset (usually
+ // implicitly specified by the project) and namespace ID. Queries are scoped
+ // to a single partition.
+ // This partition ID is normalized with the standard default context
+ // partition ID, but all other partition IDs in RunQueryRequest are
+ // normalized with this partition ID as the context partition ID.
+ PartitionId *PartitionId `protobuf:"bytes,2,opt,name=partition_id" json:"partition_id,omitempty"`
+ // The query to run.
+ // Either this field or field gql_query must be set, but not both.
+ Query *Query `protobuf:"bytes,3,opt,name=query" json:"query,omitempty"`
+ // The GQL query to run.
+ // Either this field or field query must be set, but not both.
+ GqlQuery *GqlQuery `protobuf:"bytes,7,opt,name=gql_query" json:"gql_query,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RunQueryRequest) Reset() { *m = RunQueryRequest{} }
+func (m *RunQueryRequest) String() string { return proto.CompactTextString(m) }
+func (*RunQueryRequest) ProtoMessage() {}
+
+func (m *RunQueryRequest) GetReadOptions() *ReadOptions {
+ if m != nil {
+ return m.ReadOptions
+ }
+ return nil
+}
+
+func (m *RunQueryRequest) GetPartitionId() *PartitionId {
+ if m != nil {
+ return m.PartitionId
+ }
+ return nil
+}
+
+func (m *RunQueryRequest) GetQuery() *Query {
+ if m != nil {
+ return m.Query
+ }
+ return nil
+}
+
+func (m *RunQueryRequest) GetGqlQuery() *GqlQuery {
+ if m != nil {
+ return m.GqlQuery
+ }
+ return nil
+}
+
+// The response for RunQuery.
+type RunQueryResponse struct {
+ // A batch of query results (always present).
+ Batch *QueryResultBatch `protobuf:"bytes,1,opt,name=batch" json:"batch,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RunQueryResponse) Reset() { *m = RunQueryResponse{} }
+func (m *RunQueryResponse) String() string { return proto.CompactTextString(m) }
+func (*RunQueryResponse) ProtoMessage() {}
+
+func (m *RunQueryResponse) GetBatch() *QueryResultBatch {
+ if m != nil {
+ return m.Batch
+ }
+ return nil
+}
+
+// The request for BeginTransaction.
+type BeginTransactionRequest struct {
+ // The transaction isolation level.
+ IsolationLevel *BeginTransactionRequest_IsolationLevel `protobuf:"varint,1,opt,name=isolation_level,enum=pb.BeginTransactionRequest_IsolationLevel,def=0" json:"isolation_level,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionRequest) Reset() { *m = BeginTransactionRequest{} }
+func (m *BeginTransactionRequest) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionRequest) ProtoMessage() {}
+
+const Default_BeginTransactionRequest_IsolationLevel BeginTransactionRequest_IsolationLevel = BeginTransactionRequest_SNAPSHOT
+
+func (m *BeginTransactionRequest) GetIsolationLevel() BeginTransactionRequest_IsolationLevel {
+ if m != nil && m.IsolationLevel != nil {
+ return *m.IsolationLevel
+ }
+ return Default_BeginTransactionRequest_IsolationLevel
+}
+
+// The response for BeginTransaction.
+type BeginTransactionResponse struct {
+ // The transaction identifier (always present).
+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *BeginTransactionResponse) Reset() { *m = BeginTransactionResponse{} }
+func (m *BeginTransactionResponse) String() string { return proto.CompactTextString(m) }
+func (*BeginTransactionResponse) ProtoMessage() {}
+
+func (m *BeginTransactionResponse) GetTransaction() []byte {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+// The request for Rollback.
+type RollbackRequest struct {
+ // The transaction identifier, returned by a call to
+ // beginTransaction.
+ Transaction []byte `protobuf:"bytes,1,req,name=transaction" json:"transaction,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RollbackRequest) Reset() { *m = RollbackRequest{} }
+func (m *RollbackRequest) String() string { return proto.CompactTextString(m) }
+func (*RollbackRequest) ProtoMessage() {}
+
+func (m *RollbackRequest) GetTransaction() []byte {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+// The response for Rollback.
+type RollbackResponse struct {
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *RollbackResponse) Reset() { *m = RollbackResponse{} }
+func (m *RollbackResponse) String() string { return proto.CompactTextString(m) }
+func (*RollbackResponse) ProtoMessage() {}
+
+// The request for Commit.
+type CommitRequest struct {
+ // The transaction identifier, returned by a call to
+ // beginTransaction. Must be set when mode is TRANSACTIONAL.
+ Transaction []byte `protobuf:"bytes,1,opt,name=transaction" json:"transaction,omitempty"`
+ // The mutation to perform. Optional.
+ Mutation *Mutation `protobuf:"bytes,2,opt,name=mutation" json:"mutation,omitempty"`
+ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
+ Mode *CommitRequest_Mode `protobuf:"varint,5,opt,name=mode,enum=pb.CommitRequest_Mode,def=1" json:"mode,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitRequest) Reset() { *m = CommitRequest{} }
+func (m *CommitRequest) String() string { return proto.CompactTextString(m) }
+func (*CommitRequest) ProtoMessage() {}
+
+const Default_CommitRequest_Mode CommitRequest_Mode = CommitRequest_TRANSACTIONAL
+
+func (m *CommitRequest) GetTransaction() []byte {
+ if m != nil {
+ return m.Transaction
+ }
+ return nil
+}
+
+func (m *CommitRequest) GetMutation() *Mutation {
+ if m != nil {
+ return m.Mutation
+ }
+ return nil
+}
+
+func (m *CommitRequest) GetMode() CommitRequest_Mode {
+ if m != nil && m.Mode != nil {
+ return *m.Mode
+ }
+ return Default_CommitRequest_Mode
+}
+
+// The response for Commit.
+type CommitResponse struct {
+ // The result of performing the mutation (if any).
+ MutationResult *MutationResult `protobuf:"bytes,1,opt,name=mutation_result" json:"mutation_result,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *CommitResponse) Reset() { *m = CommitResponse{} }
+func (m *CommitResponse) String() string { return proto.CompactTextString(m) }
+func (*CommitResponse) ProtoMessage() {}
+
+func (m *CommitResponse) GetMutationResult() *MutationResult {
+ if m != nil {
+ return m.MutationResult
+ }
+ return nil
+}
+
+// The request for AllocateIds.
+type AllocateIdsRequest struct {
+ // A list of keys with incomplete key paths to allocate IDs for.
+ // No key may be reserved/read-only.
+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsRequest) Reset() { *m = AllocateIdsRequest{} }
+func (m *AllocateIdsRequest) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsRequest) ProtoMessage() {}
+
+func (m *AllocateIdsRequest) GetKey() []*Key {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+// The response for AllocateIds.
+type AllocateIdsResponse struct {
+ // The keys specified in the request (in the same order), each with
+ // its key path completed with a newly allocated ID.
+ Key []*Key `protobuf:"bytes,1,rep,name=key" json:"key,omitempty"`
+ XXX_unrecognized []byte `json:"-"`
+}
+
+func (m *AllocateIdsResponse) Reset() { *m = AllocateIdsResponse{} }
+func (m *AllocateIdsResponse) String() string { return proto.CompactTextString(m) }
+func (*AllocateIdsResponse) ProtoMessage() {}
+
+func (m *AllocateIdsResponse) GetKey() []*Key {
+ if m != nil {
+ return m.Key
+ }
+ return nil
+}
+
+func init() {
+ proto.RegisterEnum("pb.EntityResult_ResultType", EntityResult_ResultType_name, EntityResult_ResultType_value)
+ proto.RegisterEnum("pb.PropertyExpression_AggregationFunction", PropertyExpression_AggregationFunction_name, PropertyExpression_AggregationFunction_value)
+ proto.RegisterEnum("pb.PropertyOrder_Direction", PropertyOrder_Direction_name, PropertyOrder_Direction_value)
+ proto.RegisterEnum("pb.CompositeFilter_Operator", CompositeFilter_Operator_name, CompositeFilter_Operator_value)
+ proto.RegisterEnum("pb.PropertyFilter_Operator", PropertyFilter_Operator_name, PropertyFilter_Operator_value)
+ proto.RegisterEnum("pb.QueryResultBatch_MoreResultsType", QueryResultBatch_MoreResultsType_name, QueryResultBatch_MoreResultsType_value)
+ proto.RegisterEnum("pb.ReadOptions_ReadConsistency", ReadOptions_ReadConsistency_name, ReadOptions_ReadConsistency_value)
+ proto.RegisterEnum("pb.BeginTransactionRequest_IsolationLevel", BeginTransactionRequest_IsolationLevel_name, BeginTransactionRequest_IsolationLevel_value)
+ proto.RegisterEnum("pb.CommitRequest_Mode", CommitRequest_Mode_name, CommitRequest_Mode_value)
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
new file mode 100644
index 000000000000..bb4c199b116c
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/datastore/datastore_v1.proto
@@ -0,0 +1,594 @@
+// Copyright 2013 Google Inc. All Rights Reserved.
+//
+// The datastore v1 service proto definitions
+
+syntax = "proto2";
+
+package pb;
+option java_package = "com.google.api.services.datastore";
+
+
+// An identifier for a particular subset of entities.
+//
+// Entities are partitioned into various subsets, each used by different
+// datasets and different namespaces within a dataset and so forth.
+//
+// All input partition IDs are normalized before use.
+// A partition ID is normalized as follows:
+// If the partition ID is unset or is set to an empty partition ID, replace it
+// with the context partition ID.
+// Otherwise, if the partition ID has no dataset ID, assign it the context
+// partition ID's dataset ID.
+// Unless otherwise documented, the context partition ID has the dataset ID set
+// to the context dataset ID and no other partition dimension set.
+//
+// A partition ID is empty if all of its fields are unset.
+//
+// Partition dimension:
+// A dimension may be unset.
+// A dimension's value must never be "".
+// A dimension's value must match [A-Za-z\d\.\-_]{1,100}
+// If the value of any dimension matches regex "__.*__",
+// the partition is reserved/read-only.
+// A reserved/read-only partition ID is forbidden in certain documented contexts.
+//
+// Dataset ID:
+// A dataset id's value must never be "".
+// A dataset id's value must match
+//  ([a-z\d\-]{1,100}~)?([a-z\d][a-z\d\-\.]{0,99}:)?([a-z\d][a-z\d\-]{0,99})
+message PartitionId {
+ // The dataset ID.
+ optional string dataset_id = 3;
+ // The namespace.
+ optional string namespace = 4;
+}
+
+// A unique identifier for an entity.
+// If a key's partition id or any of its path kinds or names are
+// reserved/read-only, the key is reserved/read-only.
+// A reserved/read-only key is forbidden in certain documented contexts.
+message Key {
+ // Entities are partitioned into subsets, currently identified by a dataset
+ // (usually implicitly specified by the project) and namespace ID.
+ // Queries are scoped to a single partition.
+ optional PartitionId partition_id = 1;
+
+ // A (kind, ID/name) pair used to construct a key path.
+ //
+ // At most one of name or ID may be set.
+ // If either is set, the element is complete.
+ // If neither is set, the element is incomplete.
+ message PathElement {
+ // The kind of the entity.
+ // A kind matching regex "__.*__" is reserved/read-only.
+ // A kind must not contain more than 500 characters.
+ // Cannot be "".
+ required string kind = 1;
+ // The ID of the entity.
+ // Never equal to zero. Values less than zero are discouraged and will not
+ // be supported in the future.
+ optional int64 id = 2;
+ // The name of the entity.
+ // A name matching regex "__.*__" is reserved/read-only.
+ // A name must not be more than 500 characters.
+ // Cannot be "".
+ optional string name = 3;
+ }
+
+ // The entity path.
+ // An entity path consists of one or more elements composed of a kind and a
+ // string or numerical identifier, which identify entities. The first
+ // element identifies a root entity, the second element identifies
+ // a child of the root entity, the third element a child of the
+ // second entity, and so forth. The entities identified by all prefixes of
+ // the path are called the element's ancestors.
+ // An entity path is always fully complete: ALL of the entity's ancestors
+ // are required to be in the path along with the entity identifier itself.
+ // The only exception is that in some documented cases, the identifier in the
+ // last path element (for the entity) itself may be omitted. A path can never
+ // be empty.
+ repeated PathElement path_element = 2;
+}
+
+// A message that can hold any of the supported value types and associated
+// metadata.
+//
+// At most one of the Value fields may be set.
+// If none are set the value is "null".
+//
+message Value {
+ // A boolean value.
+ optional bool boolean_value = 1;
+ // An integer value.
+ optional int64 integer_value = 2;
+ // A double value.
+ optional double double_value = 3;
+ // A timestamp value.
+ optional int64 timestamp_microseconds_value = 4;
+ // A key value.
+ optional Key key_value = 5;
+ // A blob key value.
+ optional string blob_key_value = 16;
+ // A UTF-8 encoded string value.
+ optional string string_value = 17;
+ // A blob value.
+ optional bytes blob_value = 18;
+ // An entity value.
+ // May have no key.
+ // May have a key with an incomplete key path.
+ // May have a reserved/read-only key.
+ optional Entity entity_value = 6;
+ // A list value.
+ // Cannot contain another list value.
+ // Cannot also have a meaning and indexing set.
+ repeated Value list_value = 7;
+
+ // The meaning field is reserved and should not be used.
+ optional int32 meaning = 14;
+
+ // If the value should be indexed.
+ //
+ // The indexed property may be set for a
+ // null value.
+ // When indexed is true, stringValue
+ // is limited to 500 characters and the blob value is limited to 500 bytes.
+ // Exception: If meaning is set to 2, string_value is limited to 2038
+ // characters regardless of indexed.
+ // When indexed is true, meaning 15 and 22 are not allowed, and meaning 16
+ // will be ignored on input (and will never be set on output).
+ // Input values by default have indexed set to
+ // true; however, you can explicitly set indexed to
+ // true if you want. (An output value never has
+ // indexed explicitly set to true.) If a value is
+ // itself an entity, it cannot have indexed set to
+ // true.
+ // Exception: An entity value with meaning 9, 20 or 21 may be indexed.
+ optional bool indexed = 15 [default = true];
+}
+
+// An entity property.
+message Property {
+ // The name of the property.
+ // A property name matching regex "__.*__" is reserved.
+ // A reserved property name is forbidden in certain documented contexts.
+ // The name must not contain more than 500 characters.
+ // Cannot be "".
+ required string name = 1;
+
+ // The value(s) of the property.
+ // Each value can have only one value property populated. For example,
+ // you cannot have a values list of { value: { integerValue: 22,
+ // stringValue: "a" } }, but you can have { value: { listValue:
+ // [ { integerValue: 22 }, { stringValue: "a" } ] }.
+ required Value value = 4;
+}
+
+// An entity.
+//
+// An entity is limited to 1 megabyte when stored. That roughly
+// corresponds to a limit of 1 megabyte for the serialized form of this
+// message.
+message Entity {
+ // The entity's key.
+ //
+ // An entity must have a key, unless otherwise documented (for example,
+ // an entity in Value.entityValue may have no key).
+ // An entity's kind is its key's path's last element's kind,
+ // or null if it has no key.
+ optional Key key = 1;
+ // The entity's properties.
+ // Each property's name must be unique for its entity.
+ repeated Property property = 2;
+}
+
+// The result of fetching an entity from the datastore.
+message EntityResult {
+ // Specifies what data the 'entity' field contains.
+ // A ResultType is either implied (for example, in LookupResponse.found it
+ // is always FULL) or specified by context (for example, in message
+ // QueryResultBatch, field 'entity_result_type' specifies a ResultType
+ // for all the values in field 'entity_result').
+ enum ResultType {
+ FULL = 1; // The entire entity.
+ PROJECTION = 2; // A projected subset of properties.
+ // The entity may have no key.
+ // A property value may have meaning 18.
+ KEY_ONLY = 3; // Only the key.
+ }
+
+ // The resulting entity.
+ required Entity entity = 1;
+}
+
+// A query.
+message Query {
+ // The projection to return. If not set the entire entity is returned.
+ repeated PropertyExpression projection = 2;
+
+ // The kinds to query (if empty, returns entities from all kinds).
+ repeated KindExpression kind = 3;
+
+ // The filter to apply (optional).
+ optional Filter filter = 4;
+
+ // The order to apply to the query results (if empty, order is unspecified).
+ repeated PropertyOrder order = 5;
+
+ // The properties to group by (if empty, no grouping is applied to the
+ // result set).
+ repeated PropertyReference group_by = 6;
+
+ // A starting point for the query results. Optional. Query cursors are
+ // returned in query result batches.
+ optional bytes /* serialized QueryCursor */ start_cursor = 7;
+
+ // An ending point for the query results. Optional. Query cursors are
+ // returned in query result batches.
+ optional bytes /* serialized QueryCursor */ end_cursor = 8;
+
+ // The number of results to skip. Applies before limit, but after all other
+ // constraints (optional, defaults to 0).
+ optional int32 offset = 10 [default=0];
+
+ // The maximum number of results to return. Applies after all other
+ // constraints. Optional.
+ optional int32 limit = 11;
+}
+
+// A representation of a kind.
+message KindExpression {
+ // The name of the kind.
+ required string name = 1;
+}
+
+// A reference to a property relative to the kind expressions;
+// the property name is matched exactly.
+message PropertyReference {
+ // The name of the property.
+ required string name = 2;
+}
+
+// A representation of a property in a projection.
+message PropertyExpression {
+ enum AggregationFunction {
+ FIRST = 1;
+ }
+ // The property to project.
+ required PropertyReference property = 1;
+ // The aggregation function to apply to the property. Optional.
+ // Can only be used when grouping by at least one property. Must
+ // then be set on all properties in the projection that are not
+ // being grouped by.
+ optional AggregationFunction aggregation_function = 2;
+}
+
+// The desired order for a specific property.
+message PropertyOrder {
+ enum Direction {
+ ASCENDING = 1;
+ DESCENDING = 2;
+ }
+ // The property to order by.
+ required PropertyReference property = 1;
+ // The direction to order by.
+ optional Direction direction = 2 [default=ASCENDING];
+}
+
+// A holder for any type of filter. Exactly one field should be specified.
+message Filter {
+ // A composite filter.
+ optional CompositeFilter composite_filter = 1;
+ // A filter on a property.
+ optional PropertyFilter property_filter = 2;
+}
+
+// A filter that merges the multiple other filters using the given operation.
+message CompositeFilter {
+ enum Operator {
+ AND = 1;
+ }
+
+ // The operator for combining multiple filters.
+ required Operator operator = 1;
+ // The list of filters to combine.
+ // Must contain at least one filter.
+ repeated Filter filter = 2;
+}
+
+// A filter on a specific property.
+message PropertyFilter {
+ enum Operator {
+ LESS_THAN = 1;
+ LESS_THAN_OR_EQUAL = 2;
+ GREATER_THAN = 3;
+ GREATER_THAN_OR_EQUAL = 4;
+ EQUAL = 5;
+
+ HAS_ANCESTOR = 11;
+ }
+
+ // The property to filter by.
+ required PropertyReference property = 1;
+ // The operator to filter by.
+ required Operator operator = 2;
+ // The value to compare the property to.
+ required Value value = 3;
+}
+
+// A GQL query.
+message GqlQuery {
+ required string query_string = 1;
+ // When false, the query string must not contain a literal.
+ optional bool allow_literal = 2 [default = false];
+ // A named argument must set field GqlQueryArg.name.
+ // No two named arguments may have the same name.
+ // For each non-reserved named binding site in the query string,
+ // there must be a named argument with that name,
+ // but not necessarily the inverse.
+ repeated GqlQueryArg name_arg = 3;
+ // Numbered binding site @1 references the first numbered argument,
+ // effectively using 1-based indexing, rather than the usual 0.
+ // A numbered argument must NOT set field GqlQueryArg.name.
+ // For each binding site numbered i in query_string,
+ // there must be an ith numbered argument.
+ // The inverse must also be true.
+ repeated GqlQueryArg number_arg = 4;
+}
+
+// A binding argument for a GQL query.
+// Exactly one of fields value and cursor must be set.
+message GqlQueryArg {
+ // Must match regex "[A-Za-z_$][A-Za-z_$0-9]*".
+ // Must not match regex "__.*__".
+ // Must not be "".
+ optional string name = 1;
+ optional Value value = 2;
+ optional bytes cursor = 3;
+}
+
+// A batch of results produced by a query.
+message QueryResultBatch {
+ // The possible values for the 'more_results' field.
+ enum MoreResultsType {
+ NOT_FINISHED = 1; // There are additional batches to fetch from this query.
+ MORE_RESULTS_AFTER_LIMIT = 2; // The query is finished, but there are more
+ // results after the limit.
+ NO_MORE_RESULTS = 3; // The query has been exhausted.
+ }
+
+ // The result type for every entity in entityResults.
+ required EntityResult.ResultType entity_result_type = 1;
+ // The results for this batch.
+ repeated EntityResult entity_result = 2;
+
+ // A cursor that points to the position after the last result in the batch.
+ // May be absent.
+ optional bytes /* serialized QueryCursor */ end_cursor = 4;
+
+ // The state of the query after the current batch.
+ required MoreResultsType more_results = 5;
+
+ // The number of results skipped because of Query.offset.
+ optional int32 skipped_results = 6;
+}
+
+// A set of changes to apply.
+//
+// No entity in this message may have a reserved property name,
+// not even a property in an entity in a value.
+// No value in this message may have meaning 18,
+// not even a value in an entity in another value.
+//
+// If entities with duplicate keys are present, an arbitrary choice will
+// be made as to which is written.
+message Mutation {
+ // Entities to upsert.
+ // Each upserted entity's key must have a complete path and
+ // must not be reserved/read-only.
+ repeated Entity upsert = 1;
+ // Entities to update.
+ // Each updated entity's key must have a complete path and
+ // must not be reserved/read-only.
+ repeated Entity update = 2;
+ // Entities to insert.
+ // Each inserted entity's key must have a complete path and
+ // must not be reserved/read-only.
+ repeated Entity insert = 3;
+ // Insert entities with a newly allocated ID.
+ // Each inserted entity's key must omit the final identifier in its path and
+ // must not be reserved/read-only.
+ repeated Entity insert_auto_id = 4;
+ // Keys of entities to delete.
+ // Each key must have a complete key path and must not be reserved/read-only.
+ repeated Key delete = 5;
+ // Ignore a user specified read-only period. Optional.
+ optional bool force = 6;
+}
+
+// The result of applying a mutation.
+message MutationResult {
+ // Number of index writes.
+ required int32 index_updates = 1;
+ // Keys for insertAutoId entities. One per entity from the
+ // request, in the same order.
+ repeated Key insert_auto_id_key = 2;
+}
+
+// Options shared by read requests.
+message ReadOptions {
+ enum ReadConsistency {
+ DEFAULT = 0;
+ STRONG = 1;
+ EVENTUAL = 2;
+ }
+
+ // The read consistency to use.
+ // Cannot be set when transaction is set.
+ // Lookup and ancestor queries default to STRONG, global queries default to
+ // EVENTUAL and cannot be set to STRONG.
+ optional ReadConsistency read_consistency = 1 [default=DEFAULT];
+
+ // The transaction to use. Optional.
+ optional bytes /* serialized Transaction */ transaction = 2;
+}
+
+// The request for Lookup.
+message LookupRequest {
+
+ // Options for this lookup request. Optional.
+ optional ReadOptions read_options = 1;
+ // Keys of entities to look up from the datastore.
+ repeated Key key = 3;
+}
+
+// The response for Lookup.
+message LookupResponse {
+
+ // The order of results in these fields is undefined and has no relation to
+ // the order of the keys in the input.
+
+ // Entities found as ResultType.FULL entities.
+ repeated EntityResult found = 1;
+
+ // Entities not found as ResultType.KEY_ONLY entities.
+ repeated EntityResult missing = 2;
+
+ // A list of keys that were not looked up due to resource constraints.
+ repeated Key deferred = 3;
+}
+
+
+// The request for RunQuery.
+message RunQueryRequest {
+
+ // The options for this query.
+ optional ReadOptions read_options = 1;
+
+ // Entities are partitioned into subsets, identified by a dataset (usually
+ // implicitly specified by the project) and namespace ID. Queries are scoped
+ // to a single partition.
+ // This partition ID is normalized with the standard default context
+ // partition ID, but all other partition IDs in RunQueryRequest are
+ // normalized with this partition ID as the context partition ID.
+ optional PartitionId partition_id = 2;
+
+ // The query to run.
+ // Either this field or field gql_query must be set, but not both.
+ optional Query query = 3;
+ // The GQL query to run.
+ // Either this field or field query must be set, but not both.
+ optional GqlQuery gql_query = 7;
+}
+
+// The response for RunQuery.
+message RunQueryResponse {
+
+ // A batch of query results (always present).
+ optional QueryResultBatch batch = 1;
+
+}
+
+// The request for BeginTransaction.
+message BeginTransactionRequest {
+
+ enum IsolationLevel {
+ SNAPSHOT = 0; // Read from a consistent snapshot. Concurrent transactions
+ // conflict if their mutations conflict. For example:
+ // Read(A),Write(B) may not conflict with Read(B),Write(A),
+ // but Read(B),Write(B) does conflict with Read(B),Write(B).
+ SERIALIZABLE = 1; // Read from a consistent snapshot. Concurrent
+ // transactions conflict if they cannot be serialized.
+ // For example Read(A),Write(B) does conflict with
+ // Read(B),Write(A) but Read(A) may not conflict with
+ // Write(A).
+ }
+
+ // The transaction isolation level.
+ optional IsolationLevel isolation_level = 1 [default=SNAPSHOT];
+}
+
+// The response for BeginTransaction.
+message BeginTransactionResponse {
+
+ // The transaction identifier (always present).
+ optional bytes /* serialized Transaction */ transaction = 1;
+}
+
+// The request for Rollback.
+message RollbackRequest {
+
+ // The transaction identifier, returned by a call to
+ // beginTransaction.
+ required bytes /* serialized Transaction */ transaction = 1;
+}
+
+// The response for Rollback.
+message RollbackResponse {
+// Empty
+}
+
+// The request for Commit.
+message CommitRequest {
+
+ enum Mode {
+ TRANSACTIONAL = 1;
+ NON_TRANSACTIONAL = 2;
+ }
+
+ // The transaction identifier, returned by a call to
+ // beginTransaction. Must be set when mode is TRANSACTIONAL.
+ optional bytes /* serialized Transaction */ transaction = 1;
+ // The mutation to perform. Optional.
+ optional Mutation mutation = 2;
+ // The type of commit to perform. Either TRANSACTIONAL or NON_TRANSACTIONAL.
+ optional Mode mode = 5 [default=TRANSACTIONAL];
+}
+
+// The response for Commit.
+message CommitResponse {
+
+ // The result of performing the mutation (if any).
+ optional MutationResult mutation_result = 1;
+}
+
+// The request for AllocateIds.
+message AllocateIdsRequest {
+
+ // A list of keys with incomplete key paths to allocate IDs for.
+ // No key may be reserved/read-only.
+ repeated Key key = 1;
+}
+
+// The response for AllocateIds.
+message AllocateIdsResponse {
+
+ // The keys specified in the request (in the same order), each with
+ // its key path completed with a newly allocated ID.
+ repeated Key key = 1;
+}
+
+// Each rpc normalizes the partition IDs of the keys in its input entities,
+// and always returns entities with keys with normalized partition IDs.
+// (Note that this applies to all entities, including entities in values.)
+service DatastoreService {
+ // Look up some entities by key.
+ rpc Lookup(LookupRequest) returns (LookupResponse) {
+ };
+ // Query for entities.
+ rpc RunQuery(RunQueryRequest) returns (RunQueryResponse) {
+ };
+ // Begin a new transaction.
+ rpc BeginTransaction(BeginTransactionRequest) returns (BeginTransactionResponse) {
+ };
+ // Commit a transaction, optionally creating, deleting or modifying some
+ // entities.
+ rpc Commit(CommitRequest) returns (CommitResponse) {
+ };
+ // Roll back a transaction.
+ rpc Rollback(RollbackRequest) returns (RollbackResponse) {
+ };
+ // Allocate IDs for incomplete keys (useful for referencing an entity before
+ // it is inserted).
+ rpc AllocateIds(AllocateIdsRequest) returns (AllocateIdsResponse) {
+ };
+}
diff --git a/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
new file mode 100644
index 000000000000..aafd68387e63
--- /dev/null
+++ b/Godeps/_workspace/src/google.golang.org/cloud/internal/testutil/context.go
@@ -0,0 +1,57 @@
+// Copyright 2014 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package testutil contains helper functions for writing tests.
+package testutil
+
+import (
+ "io/ioutil"
+ "log"
+ "net/http"
+ "os"
+
+ "golang.org/x/net/context"
+ "golang.org/x/oauth2"
+ "golang.org/x/oauth2/google"
+ "google.golang.org/cloud"
+)
+
+const (
+ envProjID = "GCLOUD_TESTS_GOLANG_PROJECT_ID"
+ envPrivateKey = "GCLOUD_TESTS_GOLANG_KEY"
+)
+
+func Context(scopes ...string) context.Context {
+ key, projID := os.Getenv(envPrivateKey), os.Getenv(envProjID)
+ if key == "" || projID == "" {
+ log.Fatal("GCLOUD_TESTS_GOLANG_KEY and GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
+ }
+ jsonKey, err := ioutil.ReadFile(key)
+ if err != nil {
+ log.Fatalf("Cannot read the JSON key file, err: %v", err)
+ }
+ conf, err := google.JWTConfigFromJSON(jsonKey, scopes...)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return cloud.NewContext(projID, conf.Client(oauth2.NoContext))
+}
+
+func NoAuthContext() context.Context {
+ projID := os.Getenv(envProjID)
+ if projID == "" {
+ log.Fatal("GCLOUD_TESTS_GOLANG_PROJECT_ID must be set. See CONTRIBUTING.md for details.")
+ }
+ return cloud.NewContext(projID, &http.Client{Transport: http.DefaultTransport})
+}
From 126fdc9dcb2d4ad658c72c925b87dff270eaec53 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Fri, 30 Jan 2015 17:59:16 -0500
Subject: [PATCH 2/5] UPSTREAM: Allow namespace short to be set
---
.../kubernetes/pkg/client/clientcmd/overrides.go | 11 ++++++-----
1 file changed, 6 insertions(+), 5 deletions(-)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
index f6309c98c5ec..b22e31664b6d 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd/overrides.go
@@ -51,10 +51,11 @@ type AuthOverrideFlags struct {
// ContextOverrideFlags holds the flag names to be used for binding command line flags for Cluster objects
type ContextOverrideFlags struct {
- ClusterName string
- AuthInfoName string
- Namespace string
- NamespacePath string
+ ClusterName string
+ AuthInfoName string
+ Namespace string
+ NamespaceShort string
+ NamespacePath string
}
// ClusterOverride holds the flag names to be used for binding command line flags for Cluster objects
@@ -150,6 +151,6 @@ func BindOverrideFlags(overrides *ConfigOverrides, flags *pflag.FlagSet, flagNam
func BindContextFlags(contextInfo *clientcmdapi.Context, flags *pflag.FlagSet, flagNames ContextOverrideFlags) {
flags.StringVar(&contextInfo.Cluster, flagNames.ClusterName, "", "The name of the kubeconfig cluster to use")
flags.StringVar(&contextInfo.AuthInfo, flagNames.AuthInfoName, "", "The name of the kubeconfig user to use")
- flags.StringVar(&contextInfo.Namespace, flagNames.Namespace, "", "If present, the namespace scope for this CLI request.")
+ flags.StringVarP(&contextInfo.Namespace, flagNames.Namespace, flagNames.NamespaceShort, "", "If present, the namespace scope for this CLI request.")
flags.StringVar(&contextInfo.NamespacePath, flagNames.NamespacePath, "", "Path to the namespace info file that holds the namespace context to use for CLI requests.")
}
From 9dde706f2c373ca737187dcd8a4e4df29bbfa20f Mon Sep 17 00:00:00 2001
From: fabianofranz
Date: Tue, 13 Jan 2015 17:48:04 -0200
Subject: [PATCH 3/5] UPSTREAM: api registration right on mux makes it
invisible to container
Conflicts:
Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
---
.../GoogleCloudPlatform/kubernetes/pkg/master/master.go | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
index be5ad949b412..cadb2183ef01 100644
--- a/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
+++ b/Godeps/_workspace/src/github.com/GoogleCloudPlatform/kubernetes/pkg/master/master.go
@@ -382,7 +382,7 @@ func (m *Master) init(c *Config) {
// Register root handler.
// We do not register this using restful Webservice since we do not want to surface this in api docs.
- m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper))
+ //m.mux.HandleFunc("/", apiserver.IndexHandler(m.handlerContainer, m.muxHelper))
// TODO: use go-restful
apiserver.InstallValidator(m.muxHelper, func() map[string]apiserver.Server { return m.getServersToValidate(c) })
From 30102671f39cdd6d2d6f65ea8fa7c2d360e4bea8 Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Fri, 30 Jan 2015 19:00:33 -0500
Subject: [PATCH 4/5] Rebase fixes
Includes changes from Jhon Honce, Mark Turansky, and David Eads
---
.gitignore | 3 +-
.../oauthpassword/registry/registry.go | 2 +-
.../identitymapper/identitymapper.go | 2 +-
pkg/build/api/register.go | 2 +
pkg/build/api/types.go | 6 +++
pkg/build/api/v1beta1/register.go | 3 +-
pkg/build/api/v1beta1/types.go | 8 +++-
pkg/build/controller/strategy/util.go | 8 ++--
pkg/build/controller/strategy/util_test.go | 21 +++++++--
pkg/build/registry/buildlog/rest.go | 25 +----------
pkg/cmd/cli/cli.go | 4 +-
pkg/cmd/cli/cmd/buildlogs.go | 3 +-
pkg/cmd/cli/cmd/cancelbuild.go | 3 +-
pkg/cmd/cli/cmd/helpers.go | 20 ---------
pkg/cmd/cli/cmd/process.go | 7 ++-
pkg/cmd/cli/cmd/rollback.go | 3 +-
pkg/cmd/cli/cmd/startbuild.go | 3 +-
pkg/cmd/server/kubernetes/node.go | 7 ++-
pkg/cmd/server/origin/master.go | 6 ++-
pkg/kubelet/app/plugins.go | 43 +++++++++++++++++++
pkg/user/registry/etcd/etcd.go | 5 ++-
test/integration/buildclient_test.go | 4 +-
test/integration/deploy_trigger_test.go | 4 +-
test/integration/imageclient_test.go | 4 +-
test/integration/userclient_test.go | 2 +-
test/templates/templates_test.go | 2 +-
26 files changed, 127 insertions(+), 73 deletions(-)
create mode 100644 pkg/kubelet/app/plugins.go
diff --git a/.gitignore b/.gitignore
index 2f0b1e161fb5..ae438674d3af 100644
--- a/.gitignore
+++ b/.gitignore
@@ -8,4 +8,5 @@
.vimrc
.vagrant-openshift.json*
.DS_Store
-*.pyc
+.idea
+origin.iml
diff --git a/pkg/auth/authenticator/password/oauthpassword/registry/registry.go b/pkg/auth/authenticator/password/oauthpassword/registry/registry.go
index 09e11a05df87..477d6f9505ca 100644
--- a/pkg/auth/authenticator/password/oauthpassword/registry/registry.go
+++ b/pkg/auth/authenticator/password/oauthpassword/registry/registry.go
@@ -45,7 +45,7 @@ func (a *Authenticator) AuthenticatePassword(username, password string) (api.Use
info := &api.DefaultUserInfo{
Name: user.Name,
- UID: user.UID,
+ UID: string(user.UID),
}
return info, true, nil
diff --git a/pkg/auth/userregistry/identitymapper/identitymapper.go b/pkg/auth/userregistry/identitymapper/identitymapper.go
index b3ad14540c1c..c3ed0f2001ed 100644
--- a/pkg/auth/userregistry/identitymapper/identitymapper.go
+++ b/pkg/auth/userregistry/identitymapper/identitymapper.go
@@ -32,7 +32,7 @@ func (p *alwaysCreateUserIdentityToUserMapper) UserFor(identityInfo authapi.User
ret := &authapi.DefaultUserInfo{
Name: authoritativeMapping.User.Name,
- UID: authoritativeMapping.User.UID,
+ UID: string(authoritativeMapping.User.UID),
Extra: authoritativeMapping.Identity.Extra,
}
return ret, err
diff --git a/pkg/build/api/register.go b/pkg/build/api/register.go
index 4d7cf3c143a3..c2cc406317ea 100644
--- a/pkg/build/api/register.go
+++ b/pkg/build/api/register.go
@@ -10,6 +10,7 @@ func init() {
&BuildList{},
&BuildConfig{},
&BuildConfigList{},
+ &BuildLog{},
)
}
@@ -17,3 +18,4 @@ func (*Build) IsAnAPIObject() {}
func (*BuildList) IsAnAPIObject() {}
func (*BuildConfig) IsAnAPIObject() {}
func (*BuildConfigList) IsAnAPIObject() {}
+func (*BuildLog) IsAnAPIObject() {}
diff --git a/pkg/build/api/types.go b/pkg/build/api/types.go
index 4581871962d9..b72ee0675f0d 100644
--- a/pkg/build/api/types.go
+++ b/pkg/build/api/types.go
@@ -332,3 +332,9 @@ type GitInfo struct {
GitBuildSource `json:",inline"`
GitSourceRevision `json:",inline"`
}
+
+// BuildLog is the (unused) resource associated with the build log redirector
+type BuildLog struct {
+ kapi.TypeMeta `json:",inline"`
+ kapi.ListMeta `json:"metadata,omitempty"`
+}
diff --git a/pkg/build/api/v1beta1/register.go b/pkg/build/api/v1beta1/register.go
index 80ac64294bcc..a25e3514c3d3 100644
--- a/pkg/build/api/v1beta1/register.go
+++ b/pkg/build/api/v1beta1/register.go
@@ -10,11 +10,12 @@ func init() {
&BuildList{},
&BuildConfig{},
&BuildConfigList{},
+ &BuildLog{},
)
- api.Scheme.AddKnownTypeWithName("v1beta1", "BuildLog", &Build{})
}
func (*Build) IsAnAPIObject() {}
func (*BuildList) IsAnAPIObject() {}
func (*BuildConfig) IsAnAPIObject() {}
func (*BuildConfigList) IsAnAPIObject() {}
+func (*BuildLog) IsAnAPIObject() {}
diff --git a/pkg/build/api/v1beta1/types.go b/pkg/build/api/v1beta1/types.go
index 37a182eaa768..a5ab55bc0d96 100644
--- a/pkg/build/api/v1beta1/types.go
+++ b/pkg/build/api/v1beta1/types.go
@@ -57,8 +57,8 @@ const (
// BuildStatusRunning indicates that a pod has been created and a build is running.
BuildStatusRunning BuildStatus = "Running"
- BuildStatusComplete BuildStatus = "Complete"
// BuildStatusComplete indicates that a build has been successful.
+ BuildStatusComplete BuildStatus = "Complete"
// BuildStatusFailed indicates that a build has executed and failed.
BuildStatusFailed BuildStatus = "Failed"
@@ -340,3 +340,9 @@ type GitInfo struct {
GitBuildSource `json:",inline"`
GitSourceRevision `json:",inline"`
}
+
+// BuildLog is the (unused) resource associated with the build log redirector
+type BuildLog struct {
+ kapi.TypeMeta `json:",inline"`
+ kapi.ListMeta `json:"metadata,omitempty"`
+}
diff --git a/pkg/build/controller/strategy/util.go b/pkg/build/controller/strategy/util.go
index d98451392a19..1f4550210862 100644
--- a/pkg/build/controller/strategy/util.go
+++ b/pkg/build/controller/strategy/util.go
@@ -17,8 +17,8 @@ const dockerSocketPath = "/var/run/docker.sock"
func setupDockerSocket(podSpec *kapi.Pod) {
dockerSocketVolume := kapi.Volume{
Name: "docker-socket",
- Source: &kapi.VolumeSource{
- HostDir: &kapi.HostDir{
+ Source: kapi.VolumeSource{
+ HostPath: &kapi.HostPath{
Path: dockerSocketPath,
},
},
@@ -44,8 +44,8 @@ func setupDockerConfig(podSpec *kapi.Pod) {
}
dockerConfigVolume := kapi.Volume{
Name: "docker-cfg",
- Source: &kapi.VolumeSource{
- HostDir: &kapi.HostDir{
+ Source: kapi.VolumeSource{
+ HostPath: &kapi.HostPath{
Path: dockerConfig,
},
},
diff --git a/pkg/build/controller/strategy/util_test.go b/pkg/build/controller/strategy/util_test.go
index 203b53c5df16..a44d7c7071b4 100644
--- a/pkg/build/controller/strategy/util_test.go
+++ b/pkg/build/controller/strategy/util_test.go
@@ -24,16 +24,19 @@ func TestSetupDockerSocketHostSocket(t *testing.T) {
if e, a := "docker-socket", volume.Name; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
- if volume.Source == nil {
+ if volume.Name == "" {
+ t.Fatalf("Unexpected empty volume source name")
+ }
+ if isVolumeSourceEmpty(volume.Source) {
t.Fatalf("Unexpected nil volume source")
}
- if volume.Source.HostDir == nil {
+ if volume.Source.HostPath == nil {
t.Fatalf("Unexpected nil host directory")
}
if volume.Source.EmptyDir != nil {
t.Errorf("Unexpected non-nil empty directory: %#v", volume.Source.EmptyDir)
}
- if e, a := "/var/run/docker.sock", volume.Source.HostDir.Path; e != a {
+ if e, a := "/var/run/docker.sock", volume.Source.HostPath.Path; e != a {
t.Errorf("Expected %s, got %s", e, a)
}
@@ -52,6 +55,18 @@ func TestSetupDockerSocketHostSocket(t *testing.T) {
}
}
+func isVolumeSourceEmpty(volumeSource kapi.VolumeSource) bool {
+
+ if volumeSource.EmptyDir == nil &&
+ volumeSource.HostPath == nil &&
+ volumeSource.GCEPersistentDisk == nil &&
+ volumeSource.GitRepo == nil {
+ return true
+ }
+
+ return false
+}
+
func TestSetupBuildEnvFails(t *testing.T) {
build := mockCustomBuild()
containerEnv := []kapi.EnvVar{
diff --git a/pkg/build/registry/buildlog/rest.go b/pkg/build/registry/buildlog/rest.go
index b4e7b37c1535..2f27a9b1857d 100644
--- a/pkg/build/registry/buildlog/rest.go
+++ b/pkg/build/registry/buildlog/rest.go
@@ -7,7 +7,6 @@ import (
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/labels"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api/errors"
@@ -35,7 +34,7 @@ func (r RealPodControl) getPod(namespace, name string) (*kapi.Pod, error) {
}
// NewREST creates a new REST for BuildLog
-// Takes build registry and pod client to get neccessary attibutes to assamble
+// Takes build registry and pod client to get necessary attributes to assemble
// URL to which the request shall be redirected in order to get build logs.
func NewREST(b build.Registry, pn kclient.PodsNamespacer) apiserver.RESTStorage {
return &REST{
@@ -99,25 +98,5 @@ func (r *REST) Get(ctx kapi.Context, id string) (runtime.Object, error) {
}
func (r *REST) New() runtime.Object {
- return nil
-}
-
-func (*REST) NewList() runtime.Object {
- return nil
-}
-
-func (r *REST) List(ctx kapi.Context, selector, fields labels.Selector) (runtime.Object, error) {
- return nil, fmt.Errorf("BuildLog can't be listed")
-}
-
-func (r *REST) Delete(ctx kapi.Context, id string) (<-chan apiserver.RESTResult, error) {
- return nil, fmt.Errorf("BuildLog can't be deleted")
-}
-
-func (r *REST) Create(ctx kapi.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
- return nil, fmt.Errorf("BuildLog can't be created")
-}
-
-func (r *REST) Update(ctx kapi.Context, obj runtime.Object) (<-chan apiserver.RESTResult, error) {
- return nil, fmt.Errorf("BuildLog can't be updated")
+ return &api.BuildLog{}
}
diff --git a/pkg/cmd/cli/cli.go b/pkg/cmd/cli/cli.go
index 391f54f18f04..e1a806ef1035 100644
--- a/pkg/cmd/cli/cli.go
+++ b/pkg/cmd/cli/cli.go
@@ -98,7 +98,9 @@ func defaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
flags.StringVar(&loadingRules.CommandLinePath, "kubeconfig", "", "Path to the kubeconfig file to use for CLI requests.")
overrides := &clientcmd.ConfigOverrides{}
- clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
+ overrideFlags := clientcmd.RecommendedConfigOverrideFlags("")
+ overrideFlags.ContextOverrideFlags.NamespaceShort = "n"
+ clientcmd.BindOverrideFlags(overrides, flags, overrideFlags)
clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
return clientConfig
diff --git a/pkg/cmd/cli/cmd/buildlogs.go b/pkg/cmd/cli/cmd/buildlogs.go
index 57797a993b29..7eafebb19592 100644
--- a/pkg/cmd/cli/cmd/buildlogs.go
+++ b/pkg/cmd/cli/cmd/buildlogs.go
@@ -23,7 +23,8 @@ $ kubectl build-logs 566bed879d2d
usageError(cmd, " is a required argument")
}
- namespace := getOriginNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
mapper, _ := f.Object(cmd)
mapping, err := mapper.RESTMapping("BuildLog", kubecmd.GetFlagString(cmd, "api-version"))
diff --git a/pkg/cmd/cli/cmd/cancelbuild.go b/pkg/cmd/cli/cmd/cancelbuild.go
index f9d157f2472e..b67a695dbd18 100644
--- a/pkg/cmd/cli/cmd/cancelbuild.go
+++ b/pkg/cmd/cli/cmd/cancelbuild.go
@@ -40,7 +40,8 @@ Examples:
}
buildName := args[0]
- namespace := getOriginNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
client, _, err := f.Clients(cmd)
checkErr(err)
diff --git a/pkg/cmd/cli/cmd/helpers.go b/pkg/cmd/cli/cmd/helpers.go
index 6458749850b4..28a1b8a122e4 100644
--- a/pkg/cmd/cli/cmd/helpers.go
+++ b/pkg/cmd/cli/cmd/helpers.go
@@ -3,9 +3,6 @@ package cmd
import (
"os"
- kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
- "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl"
- kubecmd "github.com/GoogleCloudPlatform/kubernetes/pkg/kubectl/cmd"
"github.com/golang/glog"
"github.com/spf13/cobra"
)
@@ -21,20 +18,3 @@ func checkErr(err error) {
glog.FatalDepth(1, err)
}
}
-
-func getOriginNamespace(cmd *cobra.Command) string {
- result := kapi.NamespaceDefault
- if ns := kubecmd.GetFlagString(cmd, "namespace"); len(ns) > 0 {
- result = ns
- glog.V(2).Infof("Using namespace from -ns flag")
- } else {
- nsPath := kubecmd.GetFlagString(cmd, "ns-path")
- nsInfo, err := kubectl.LoadNamespaceInfo(nsPath)
- if err != nil {
- glog.Fatalf("Error loading current namespace: %v", err)
- }
- result = nsInfo.Namespace
- }
- glog.V(2).Infof("Using namespace %s", result)
- return result
-}
diff --git a/pkg/cmd/cli/cmd/process.go b/pkg/cmd/cli/cmd/process.go
index bdb5fb5ce7ad..dccf33ab7ec0 100644
--- a/pkg/cmd/cli/cmd/process.go
+++ b/pkg/cmd/cli/cmd/process.go
@@ -57,10 +57,13 @@ Examples:
schema, err := f.Validator(cmd)
checkErr(err)
- namespace := getOriginNamespace(cmd)
+ cfg, err := f.ClientConfig(cmd)
+ checkErr(err)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
mapper, typer := f.Object(cmd)
- mapping, _, _, data := kubecmd.ResourceFromFile(cmd, filename, typer, mapper, schema)
+ mapping, _, _, data := kubecmd.ResourceFromFile(filename, typer, mapper, schema, cfg.Version)
obj, err := mapping.Codec.Decode(data)
checkErr(err)
diff --git a/pkg/cmd/cli/cmd/rollback.go b/pkg/cmd/cli/cmd/rollback.go
index 101da569ad7c..50b062fea059 100644
--- a/pkg/cmd/cli/cmd/rollback.go
+++ b/pkg/cmd/cli/cmd/rollback.go
@@ -67,7 +67,8 @@ func NewCmdRollback(parentName string, name string, f *Factory, out io.Writer) *
osClient, _, err := f.Clients(cmd)
checkErr(err)
- namespace := getOriginNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
// Generate the rollback config
newConfig, err := osClient.DeploymentConfigs(namespace).Rollback(rollback)
diff --git a/pkg/cmd/cli/cmd/startbuild.go b/pkg/cmd/cli/cmd/startbuild.go
index 107183582eea..5a1cdb4d041e 100644
--- a/pkg/cmd/cli/cmd/startbuild.go
+++ b/pkg/cmd/cli/cmd/startbuild.go
@@ -36,7 +36,8 @@ Examples:
client, _, err := f.Clients(cmd)
checkErr(err)
- namespace := getOriginNamespace(cmd)
+ namespace, err := f.DefaultNamespace(cmd)
+ checkErr(err)
var newBuild *build.Build
if len(buildName) == 0 {
diff --git a/pkg/cmd/server/kubernetes/node.go b/pkg/cmd/server/kubernetes/node.go
index d01651b697c5..6b87b5bdf01d 100644
--- a/pkg/cmd/server/kubernetes/node.go
+++ b/pkg/cmd/server/kubernetes/node.go
@@ -7,6 +7,7 @@ import (
"reflect"
"time"
+ kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
kconfig "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config"
"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy"
@@ -14,6 +15,7 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/iptables"
+
"github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
@@ -21,6 +23,7 @@ import (
dockerutil "github.com/openshift/origin/pkg/cmd/util/docker"
+ "github.com/openshift/origin/pkg/kubelet/app"
"github.com/openshift/origin/pkg/service"
)
@@ -103,7 +106,9 @@ func (c *NodeConfig) RunKubelet() {
5,
cfg.IsSourceSeen,
"",
- net.IP(util.IP{}))
+ net.IP(util.IP{}),
+ kapi.NamespaceDefault,
+ app.ProbeVolumePlugins())
if err != nil {
glog.Fatalf("Couldn't run kubelet: %s", err)
}
diff --git a/pkg/cmd/server/origin/master.go b/pkg/cmd/server/origin/master.go
index 2ed2669d1b0e..0b016c948bae 100644
--- a/pkg/cmd/server/origin/master.go
+++ b/pkg/cmd/server/origin/master.go
@@ -12,7 +12,7 @@ import (
etcdclient "github.com/coreos/go-etcd/etcd"
"github.com/elazarl/go-bindata-assetfs"
- "github.com/emicklei/go-restful"
+ restful "github.com/emicklei/go-restful"
"github.com/golang/glog"
"github.com/GoogleCloudPlatform/kubernetes/pkg/api"
@@ -285,7 +285,9 @@ func (c *MasterConfig) InstallAPI(container *restful.Container) []string {
admissionControl := admit.NewAlwaysAdmit()
- apiserver.NewAPIGroupVersion(storage, v1beta1.Codec, OpenShiftAPIPrefixV1Beta1, latest.SelfLinker, admissionControl).InstallREST(container, OpenShiftAPIPrefix, "v1beta1")
+ if err := apiserver.NewAPIGroupVersion(storage, v1beta1.Codec, OpenShiftAPIPrefixV1Beta1, latest.SelfLinker, admissionControl).InstallREST(container, container.ServeMux, OpenShiftAPIPrefix, "v1beta1"); err != nil {
+ glog.Fatalf("Unable to initialize API: %v", err)
+ }
var root *restful.WebService
for _, svc := range container.RegisteredWebServices() {
diff --git a/pkg/kubelet/app/plugins.go b/pkg/kubelet/app/plugins.go
new file mode 100644
index 000000000000..a8fc7ed0e6e8
--- /dev/null
+++ b/pkg/kubelet/app/plugins.go
@@ -0,0 +1,43 @@
+/*
+Copyright 2014 Google Inc. All rights reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package app
+
+// This file exists to force the desired plugin implementations to be linked.
+import (
+ // Credential providers
+ _ "github.com/GoogleCloudPlatform/kubernetes/pkg/credentialprovider/gcp"
+ // Volume plugins
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/empty_dir"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/gce_pd"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/git_repo"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/volume/host_path"
+)
+
+func ProbeVolumePlugins() []volume.Plugin {
+ allPlugins := []volume.Plugin{}
+
+ // The list of plugins to probe is decided by the kubelet binary, not
+ // by dynamic linking or other "magic". Plugins will be analyzed and
+ // initialized later.
+ allPlugins = append(allPlugins, empty_dir.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, git_repo.ProbeVolumePlugins()...)
+ allPlugins = append(allPlugins, host_path.ProbeVolumePlugins()...)
+
+ return allPlugins
+}
diff --git a/pkg/user/registry/etcd/etcd.go b/pkg/user/registry/etcd/etcd.go
index 9aa9a9b0e961..8032c49ce1a2 100644
--- a/pkg/user/registry/etcd/etcd.go
+++ b/pkg/user/registry/etcd/etcd.go
@@ -7,6 +7,7 @@ import (
"code.google.com/p/go-uuid/uuid"
"github.com/GoogleCloudPlatform/kubernetes/pkg/runtime"
"github.com/GoogleCloudPlatform/kubernetes/pkg/tools"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/types"
"github.com/openshift/origin/pkg/user"
"github.com/openshift/origin/pkg/user/api"
@@ -62,14 +63,14 @@ func (r *Etcd) CreateOrUpdateUserIdentityMapping(mapping *api.UserIdentityMappin
// did not previously exist
if existing.Identity.Name == "" {
uid := uuid.New()
- existing.User.UID = uid
+ existing.User.UID = types.UID(uid)
existing.User.Name = name
if err := r.initializer.InitializeUser(&mapping.Identity, &existing.User); err != nil {
return in, err
}
// set these again to prevent bad initialization from messing up data
- existing.User.UID = uid
+ existing.User.UID = types.UID(uid)
existing.User.Name = name
existing.Identity = mapping.Identity
diff --git a/test/integration/buildclient_test.go b/test/integration/buildclient_test.go
index 5707ad77ed81..4aae63d3ab3e 100644
--- a/test/integration/buildclient_test.go
+++ b/test/integration/buildclient_test.go
@@ -35,6 +35,7 @@ func init() {
}
func TestListBuilds(t *testing.T) {
+
deleteAllEtcdKeys()
openshift := NewTestBuildOpenshift(t)
defer openshift.Close()
@@ -49,6 +50,7 @@ func TestListBuilds(t *testing.T) {
}
func TestCreateBuild(t *testing.T) {
+
deleteAllEtcdKeys()
openshift := NewTestBuildOpenshift(t)
defer openshift.Close()
@@ -192,7 +194,7 @@ func NewTestBuildOpenshift(t *testing.T) *testBuildOpenshift {
}
osPrefix := "/osapi/v1beta1"
- apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, "/osapi", "v1beta1")
+ apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, osMux, "/osapi", "v1beta1")
openshift.whPrefix = osPrefix + "/buildConfigHooks/"
osMux.Handle(openshift.whPrefix, http.StripPrefix(openshift.whPrefix,
diff --git a/test/integration/deploy_trigger_test.go b/test/integration/deploy_trigger_test.go
index 86d662d17471..461e2003a39c 100644
--- a/test/integration/deploy_trigger_test.go
+++ b/test/integration/deploy_trigger_test.go
@@ -324,6 +324,7 @@ func NewTestOpenshift(t *testing.T) *testOpenshift {
etcdHelper, _ := master.NewEtcdHelper(etcdClient, klatest.Version)
osMux := http.NewServeMux()
+ muxHelper := &apiserver.MuxHelper{osMux, []string{}}
openshift.Server = httptest.NewServer(osMux)
kubeClient := client.NewOrDie(&client.Config{Host: openshift.Server.URL, Version: klatest.Version})
@@ -376,7 +377,7 @@ func NewTestOpenshift(t *testing.T) *testOpenshift {
}
osPrefix := "/osapi/v1beta1"
- apiserver.NewAPIGroupVersion(storage, v1beta1.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, "/osapi", "v1beta1")
+ apiserver.NewAPIGroupVersion(storage, v1beta1.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, muxHelper, "/osapi", "v1beta1")
dccFactory := deploycontrollerfactory.DeploymentConfigControllerFactory{
Client: osClient,
@@ -409,6 +410,7 @@ func NewTestOpenshift(t *testing.T) *testOpenshift {
}
biccFactory.Create().Run()
+
return openshift
}
diff --git a/test/integration/imageclient_test.go b/test/integration/imageclient_test.go
index e06fd4bb8183..3563d8839e66 100644
--- a/test/integration/imageclient_test.go
+++ b/test/integration/imageclient_test.go
@@ -232,7 +232,7 @@ func NewTestImageOpenShift(t *testing.T) *testImageOpenshift {
EtcdHelper: etcdHelper,
HealthCheckMinions: false,
KubeletClient: kubeletClient,
- APIPrefix: "/api/v1beta1",
+ APIPrefix: "/api",
RestfulContainer: handlerContainer,
})
@@ -248,7 +248,7 @@ func NewTestImageOpenShift(t *testing.T) *testImageOpenshift {
}
osPrefix := "/osapi/v1beta1"
- apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, "/osapi", "v1beta1")
+ apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, osMux, "/osapi", "v1beta1")
return openshift
}
diff --git a/test/integration/userclient_test.go b/test/integration/userclient_test.go
index cfed45470edd..80af178ac4bb 100644
--- a/test/integration/userclient_test.go
+++ b/test/integration/userclient_test.go
@@ -199,7 +199,7 @@ func TestUserLookup(t *testing.T) {
if !ok {
t.Fatalf("should have been authenticated")
}
- if user.Name != info.GetName() || user.UID != info.GetUID() {
+ if user.Name != info.GetName() || string(user.UID) != info.GetUID() {
t.Errorf("unexpected user info", info)
}
}
diff --git a/test/templates/templates_test.go b/test/templates/templates_test.go
index 37067cf1ec5c..1486cb43db0f 100644
--- a/test/templates/templates_test.go
+++ b/test/templates/templates_test.go
@@ -59,7 +59,7 @@ func TestTemplateTransformationFromConfig(t *testing.T) {
interfaces, _ := latest.InterfacesFor(latest.Version)
osPrefix := "/osapi/v1beta1"
handlerContainer := master.NewHandlerContainer(osMux)
- apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, "/osapi", "v1beta1")
+ apiserver.NewAPIGroupVersion(storage, latest.Codec, osPrefix, interfaces.MetadataAccessor, admit.NewAlwaysAdmit()).InstallREST(handlerContainer, osMux, "/osapi", "v1beta1")
walkJSONFiles("fixtures", func(name, path string, _ []byte) {
config := &config.Config{}
From 4aec0c55ccf3e8a6578294d604a61dac7c94890a Mon Sep 17 00:00:00 2001
From: Clayton Coleman
Date: Thu, 29 Jan 2015 19:21:07 -0500
Subject: [PATCH 5/5] Connect the node to the master via a built-in client
Prepare, but do not enable, the Kubelet to serve over TLS as well.
---
pkg/cmd/server/kubernetes/master.go | 6 +++
pkg/cmd/server/kubernetes/node.go | 72 +++++++++++++++++++++--------
pkg/cmd/server/origin/master.go | 4 +-
pkg/cmd/server/start.go | 71 +++++++++++++++++++++++-----
4 files changed, 120 insertions(+), 33 deletions(-)
diff --git a/pkg/cmd/server/kubernetes/master.go b/pkg/cmd/server/kubernetes/master.go
index dc7a7d535416..d2b30a53bf50 100644
--- a/pkg/cmd/server/kubernetes/master.go
+++ b/pkg/cmd/server/kubernetes/master.go
@@ -29,6 +29,7 @@ const (
KubeAPIPrefix = "/api"
KubeAPIPrefixV1Beta1 = "/api/v1beta1"
KubeAPIPrefixV1Beta2 = "/api/v1beta2"
+ KubeAPIPrefixV1Beta3 = "/api/v1beta3"
)
// MasterConfig defines the required values to start a Kubernetes master
@@ -81,6 +82,10 @@ func (c *MasterConfig) InstallAPI(container *restful.Container) []string {
EtcdHelper: c.EtcdHelper,
HealthCheckMinions: true,
+ EventTTL: 2 * time.Hour,
+
+ EnableV1Beta3: true,
+
PortalNet: c.PortalNet,
RestfulContainer: container,
@@ -95,6 +100,7 @@ func (c *MasterConfig) InstallAPI(container *restful.Container) []string {
return []string{
fmt.Sprintf("Started Kubernetes API at %%s%s", KubeAPIPrefixV1Beta1),
fmt.Sprintf("Started Kubernetes API at %%s%s", KubeAPIPrefixV1Beta2),
+ fmt.Sprintf("Started Kubernetes API at %%s%s (experimental)", KubeAPIPrefixV1Beta3),
}
}
diff --git a/pkg/cmd/server/kubernetes/node.go b/pkg/cmd/server/kubernetes/node.go
index 6b87b5bdf01d..742147c69f8d 100644
--- a/pkg/cmd/server/kubernetes/node.go
+++ b/pkg/cmd/server/kubernetes/node.go
@@ -1,13 +1,17 @@
package kubernetes
import (
+ "crypto/tls"
"net"
+ "net/http"
"os"
"path/filepath"
"reflect"
+ "strconv"
"time"
kapi "github.com/GoogleCloudPlatform/kubernetes/pkg/api"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
kconfig "github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet/config"
"github.com/GoogleCloudPlatform/kubernetes/pkg/proxy"
@@ -16,7 +20,6 @@ import (
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/exec"
"github.com/GoogleCloudPlatform/kubernetes/pkg/util/iptables"
- "github.com/coreos/go-etcd/etcd"
"github.com/fsouza/go-dockerclient"
"github.com/golang/glog"
cadvisor "github.com/google/cadvisor/client"
@@ -49,8 +52,14 @@ type NodeConfig struct {
// The image used as the Kubelet network namespace and volume container.
NetworkContainerImage string
- // A client to connect to etcd
- EtcdClient *etcd.Client
+ // Whether to enable TLS serving
+ TLS bool
+
+ KubeletCertFile string
+ KubeletKeyFile string
+
+ // A client to connect to the master.
+ Client *client.Client
// A client to connect to Docker
DockerClient *docker.Client
}
@@ -91,12 +100,12 @@ func (c *NodeConfig) RunKubelet() {
// TODO: make this configurable and not the default https://github.com/openshift/origin/issues/662
kubelet.SetupCapabilities(true)
cfg := kconfig.NewPodConfig(kconfig.PodConfigNotificationSnapshotAndUpdates)
- kconfig.NewSourceEtcd(kconfig.EtcdKeyForHost(c.NodeHost), c.EtcdClient, cfg.Channel("etcd"))
+ kconfig.NewSourceApiserver(c.Client, c.NodeHost, cfg.Channel("api"))
k, err := kubelet.NewMainKubelet(
c.NodeHost,
c.DockerClient,
- c.EtcdClient,
nil,
+ c.Client,
c.VolumeDir,
c.NetworkContainerImage,
30*time.Second,
@@ -114,15 +123,33 @@ func (c *NodeConfig) RunKubelet() {
}
go util.Forever(func() { k.Run(cfg.Updates()) }, 0)
- // this parameter must be true, otherwise buildLogs won't work
- enableDebuggingHandlers := true
+ handler := kubelet.NewServer(k, true)
+
+ server := &http.Server{
+ Addr: net.JoinHostPort(c.BindHost, strconv.Itoa(NodePort)),
+ Handler: &handler,
+ ReadTimeout: 5 * time.Minute,
+ WriteTimeout: 5 * time.Minute,
+ MaxHeaderBytes: 1 << 20,
+ }
+
go util.Forever(func() {
glog.Infof("Started Kubelet for node %s, server at %s:%d", c.NodeHost, c.BindHost, NodePort)
- kubelet.ListenAndServeKubeletServer(k, net.ParseIP(c.BindHost), uint(NodePort), enableDebuggingHandlers)
+
+ if c.TLS {
+ server.TLSConfig = &tls.Config{
+ // Change default from SSLv3 to TLSv1.0 (because of POODLE vulnerability)
+ MinVersion: tls.VersionTLS10,
+ // Populate PeerCertificates in requests, but don't reject connections without certificates
+ // This allows certificates to be validated by authenticators, while still allowing other auth types
+ ClientAuth: tls.RequestClientCert,
+ }
+ glog.Fatal(server.ListenAndServeTLS(c.KubeletCertFile, c.KubeletKeyFile))
+ } else {
+ glog.Fatal(server.ListenAndServe())
+ }
}, 0)
- // this mirrors 1fc92bef53fdd1bc70f623c0693736c763cff45f
- // I don't fully understand what a cadvisor is, but it seems that we're supposed to run it separately from the rest of the kubelet
go func() {
defer util.HandleCrash()
// TODO: Monitor this connection, reconnect if needed?
@@ -134,7 +161,7 @@ func (c *NodeConfig) RunKubelet() {
return
}
glog.V(1).Infof("Successfully created cadvisor client.")
- // this binds the cadvisor to the kubelet for later references
+ // this binds the cadvisor to the kubelet for later reference
k.SetCadvisorClient(cadvisorClient)
}()
@@ -145,20 +172,27 @@ func (c *NodeConfig) RunProxy() {
// initialize kube proxy
serviceConfig := pconfig.NewServiceConfig()
endpointsConfig := pconfig.NewEndpointsConfig()
- pconfig.NewConfigSourceEtcd(c.EtcdClient,
- serviceConfig.Channel("etcd"),
- endpointsConfig.Channel("etcd"))
+ pconfig.NewSourceAPI(
+ c.Client.Services(kapi.NamespaceAll),
+ c.Client.Endpoints(kapi.NamespaceAll),
+ 30*time.Second,
+ serviceConfig.Channel("api"),
+ endpointsConfig.Channel("api"))
loadBalancer := proxy.NewLoadBalancerRR()
endpointsConfig.RegisterHandler(loadBalancer)
- // TODO clearly this needs fixing
+ ip := net.ParseIP(c.BindHost)
+ if ip == nil {
+ glog.Fatalf("The provided value to bind to must be an IP: %q", c.BindHost)
+ }
+
protocol := iptables.ProtocolIpv4
- // if net.IP(c.BindHost).To4() == nil {
- // protocol = iptables.ProtocolIpv6
- // }
+ if ip.To4() == nil {
+ protocol = iptables.ProtocolIpv6
+ }
var proxier pconfig.ServiceConfigHandler
- proxier = proxy.NewProxier(loadBalancer, net.ParseIP(c.BindHost), iptables.New(exec.New(), protocol))
+ proxier = proxy.NewProxier(loadBalancer, ip, iptables.New(exec.New(), protocol))
if proxier == nil || reflect.ValueOf(proxier).IsNil() { // explicitly declared interfaces aren't plain nil, you must reflect inside to see if it's really nil or not
glog.Errorf("WARNING: Could not modify iptables. iptables must be mutable by this process to use services. Do you have root permissions?")
proxier = &service.FailingServiceConfigProxy{}
diff --git a/pkg/cmd/server/origin/master.go b/pkg/cmd/server/origin/master.go
index 0b016c948bae..147dacde27eb 100644
--- a/pkg/cmd/server/origin/master.go
+++ b/pkg/cmd/server/origin/master.go
@@ -93,8 +93,6 @@ type MasterConfig struct {
KubernetesPublicAddr string
AssetPublicAddr string
- TLS bool
-
CORSAllowedOrigins []*regexp.Regexp
Authenticator authenticator.Request
@@ -103,6 +101,8 @@ type MasterConfig struct {
Authorizer authorizer.Authorizer
AdmissionControl admission.Interface
+ TLS bool
+
MasterCertFile string
MasterKeyFile string
AssetCertFile string
diff --git a/pkg/cmd/server/start.go b/pkg/cmd/server/start.go
index 4f65e7f1b058..e662f770d635 100644
--- a/pkg/cmd/server/start.go
+++ b/pkg/cmd/server/start.go
@@ -17,6 +17,7 @@ import (
klatest "github.com/GoogleCloudPlatform/kubernetes/pkg/api/latest"
"github.com/GoogleCloudPlatform/kubernetes/pkg/apiserver"
kclient "github.com/GoogleCloudPlatform/kubernetes/pkg/client"
+ "github.com/GoogleCloudPlatform/kubernetes/pkg/client/clientcmd"
"github.com/GoogleCloudPlatform/kubernetes/pkg/client/record"
"github.com/GoogleCloudPlatform/kubernetes/pkg/kubelet"
kmaster "github.com/GoogleCloudPlatform/kubernetes/pkg/master"
@@ -25,6 +26,7 @@ import (
etcdclient "github.com/coreos/go-etcd/etcd"
"github.com/golang/glog"
"github.com/spf13/cobra"
+ "github.com/spf13/pflag"
"github.com/openshift/origin/pkg/api/latest"
"github.com/openshift/origin/pkg/auth/api"
@@ -106,6 +108,11 @@ type config struct {
NodeList flagtypes.StringList
+ // ClientConfig is used when connecting to Kubernetes from the master, or
+ // when connecting to the master from a detached node. If the server is an
+ // all-in-one, this value is not used.
+ ClientConfig clientcmd.ClientConfig
+
CORSAllowedOrigins flagtypes.StringList
}
@@ -161,11 +168,27 @@ func NewCommandStartServer(name string) *cobra.Command {
flag.Var(&cfg.NodeList, "nodes", "The hostnames of each node. This currently must be specified up front. Comma delimited list")
flag.Var(&cfg.CORSAllowedOrigins, "cors-allowed-origins", "List of allowed origins for CORS, comma separated. An allowed origin can be a regular expression to support subdomain matching. CORS is enabled for localhost, 127.0.0.1, and the asset server by default.")
+ cfg.ClientConfig = defaultClientConfig(flag)
+
cfg.Docker.InstallFlags(flag)
return cmd
}
+// Copy of kubectl/cmd/DefaultClientConfig, using NewNonInteractiveDeferredLoadingClientConfig
+// TODO: there should be two client configs, one for OpenShift, and one for Kubernetes
+func defaultClientConfig(flags *pflag.FlagSet) clientcmd.ClientConfig {
+ clientcmd.DefaultCluster.Server = "https://localhost:8443"
+ loadingRules := clientcmd.NewClientConfigLoadingRules()
+ loadingRules.EnvVarPath = os.Getenv(clientcmd.RecommendedConfigPathEnvVar)
+ flags.StringVar(&loadingRules.CommandLinePath, "kubeconfig", "", "Path to the kubeconfig file to use for connecting to the master.")
+
+ overrides := &clientcmd.ConfigOverrides{}
+ //clientcmd.BindOverrideFlags(overrides, flags, clientcmd.RecommendedConfigOverrideFlags(""))
+ clientConfig := clientcmd.NewNonInteractiveDeferredLoadingClientConfig(loadingRules, overrides)
+ return clientConfig
+}
+
// run launches the appropriate startup modes or returns an error.
func start(cfg *config, args []string) error {
if len(args) > 1 {
@@ -192,7 +215,10 @@ func start(cfg *config, args []string) error {
if err := defaultMasterAddress(cfg); err != nil {
return err
}
- glog.Infof("Starting an OpenShift node, connecting to %s (etcd: %s)", cfg.MasterAddr.String(), cfg.EtcdAddr.String())
+ if !cfg.KubernetesAddr.Provided {
+ cfg.KubernetesAddr = cfg.MasterAddr
+ }
+ glog.Infof("Starting an OpenShift node, connecting to %s", cfg.MasterAddr.String())
default:
return errors.New("You may start an OpenShift all-in-one server with no arguments, or pass 'master' or 'node' to run in that role.")
@@ -225,6 +251,9 @@ func start(cfg *config, args []string) error {
}()
}
+ // the node can reuse an existing client
+ var existingKubeClient *kclient.Client
+
if startMaster {
if len(cfg.NodeList) == 1 && cfg.NodeList[0] == "127.0.0.1" {
cfg.NodeList[0] = cfg.Hostname
@@ -310,11 +339,7 @@ func start(cfg *config, args []string) error {
}
} else {
// We're running against another kubernetes server
- // TODO: configure external kubernetes credentials
- osmaster.KubeClientConfig = kclient.Config{
- Host: cfg.KubernetesAddr.URL.String(),
- Version: klatest.Version,
- }
+ osmaster.KubeClientConfig = *clientConfigFromKubeConfig(cfg)
}
// Build token auth for user's OAuth tokens
@@ -495,18 +520,23 @@ func start(cfg *config, args []string) error {
osmaster.RunDeploymentConfigController()
osmaster.RunDeploymentConfigChangeController()
osmaster.RunDeploymentImageChangeTriggerController()
+
+ existingKubeClient = osmaster.KubeClient()
}
if startNode {
- etcdClient, err := getEtcdClient(cfg)
- if err != nil {
- return err
+ if existingKubeClient == nil {
+ config := clientConfigFromKubeConfig(cfg)
+ cli, err := kclient.New(config)
+ if err != nil {
+ glog.Fatalf("Unable to create a client: %v", err)
+ }
+ existingKubeClient = cli
}
if !startMaster {
// TODO: recording should occur in individual components
- // TODO: need an API client in the Kubelet
- // record.StartRecording(osmaster.KubeClient().Events(""), kapi.EventSource{Component: "node"})
+ record.StartRecording(existingKubeClient.Events(""), kapi.EventSource{Component: "node"})
}
nodeConfig := &kubernetes.NodeConfig{
@@ -518,7 +548,7 @@ func start(cfg *config, args []string) error {
NetworkContainerImage: env("KUBERNETES_NETWORK_CONTAINER_IMAGE", kubelet.NetworkContainerImage),
- EtcdClient: etcdClient,
+ Client: existingKubeClient,
}
nodeConfig.EnsureVolumeDir()
@@ -611,6 +641,23 @@ func defaultMasterAddress(cfg *config) error {
return nil
}
+// clientConfigFromKubeConfig reads the client configuration settings for connecting to
+// a Kubernetes master.
+func clientConfigFromKubeConfig(cfg *config) *kclient.Config {
+ config, err := cfg.ClientConfig.ClientConfig()
+ if err != nil {
+ glog.Fatalf("Unable to read client configuration: %v", err)
+ }
+ if len(config.Version) == 0 {
+ config.Version = klatest.Version
+ }
+ kclient.SetKubernetesDefaults(config)
+ if cfg.KubernetesAddr.Provided {
+ config.Host = cfg.KubernetesAddr.URL.String()
+ }
+ return config
+}
+
// env returns an environment variable or a default value if not specified.
func env(key string, defaultValue string) string {
val := os.Getenv(key)