vendor: update all dependencies

This commit is contained in:
parent 940df88eb2
commit d64789528d

107 Gopkg.lock generated
@@ -14,14 +14,17 @@
[[projects]]
  name = "cloud.google.com/go"
  packages = ["compute/metadata"]
  revision = "050b16d2314d5fc3d4c9a51e4cd5c7468e77f162"
  version = "v0.17.0"
  revision = "20d4028b8a750c2aca76bf9fefa8ed2d0109b573"
  version = "v0.19.0"

[[projects]]
  name = "github.com/Azure/azure-sdk-for-go"
  packages = ["storage"]
  revision = "eae258195456be76b2ec9ad2ee2ab63cdda365d9"
  version = "v12.2.0-beta"
  packages = [
    "storage",
    "version"
  ]
  revision = "e67cd39e942c417ae5e9ae1165f778d9fe8996e0"
  version = "v14.5.0"

[[projects]]
  name = "github.com/Azure/go-autorest"
@@ -31,8 +34,8 @@
    "autorest/azure",
    "autorest/date"
  ]
  revision = "6311d7a76f54cf2b6dea03d737d9bd9a6022ac5f"
  version = "v9.7.1"
  revision = "0ae36a9e544696de46fdadb7b0d5fb38af48c063"
  version = "v10.2.0"

[[projects]]
  branch = "master"
@@ -78,6 +81,8 @@
    "aws/request",
    "aws/session",
    "aws/signer/v4",
    "internal/sdkio",
    "internal/sdkrand",
    "internal/shareddefaults",
    "private/protocol",
    "private/protocol/query",
@@ -90,7 +95,7 @@
    "service/s3/s3manager",
    "service/sts"
  ]
  revision = "2fe57096de348e6cff4031af99254613f8ef73ea"
  revision = "12fe7d35f8ad5f7f2715d414624b0723737de1f7"

[[projects]]
  name = "github.com/billziss-gh/cgofuse"
@@ -102,13 +107,13 @@
  branch = "master"
  name = "github.com/coreos/bbolt"
  packages = ["."]
  revision = "ee30b748bcfbd74ec1d8439ae8fd4f9123a5c94e"
  revision = "af9db2027c98c61ecd8e17caa5bd265792b9b9a2"

[[projects]]
  name = "github.com/cpuguy83/go-md2man"
  packages = ["md2man"]
  revision = "1d903dcb749992f3741d744c0f8376b4bd7eb3e1"
  version = "v1.0.7"
  revision = "20f5889cbdc3c73dbd2862796665e7c465ade7d1"
  version = "v1.0.8"

[[projects]]
  name = "github.com/davecgh/go-spew"
@@ -119,8 +124,8 @@
[[projects]]
  name = "github.com/dgrijalva/jwt-go"
  packages = ["."]
  revision = "dbeaa9332f19a944acb5736b4456cfcc02140e29"
  version = "v3.1.0"
  revision = "06ea1031745cb8b3dab3f6a236daf2b0aa468b7e"
  version = "v3.2.0"

[[projects]]
  name = "github.com/djherbis/times"
@@ -137,19 +142,19 @@
    "dropbox/file_properties",
    "dropbox/files"
  ]
  revision = "9c27e83ceccc8f8bbc9afdc17c50798529d608b1"
  revision = "f0b3f3ded6d415a94e83e9a514fb8025e4e6be31"

[[projects]]
  name = "github.com/go-ini/ini"
  packages = ["."]
  revision = "32e4c1e6bc4e7d0d8451aa6b75200d19e37a536a"
  version = "v1.32.0"
  revision = "6333e38ac20b8949a8dd68baa3650f4dee8f39f0"
  version = "v1.33.0"

[[projects]]
  branch = "master"
  name = "github.com/golang/protobuf"
  packages = ["proto"]
  revision = "1e59b77b52bf8e4b449a57e6f79f21226d571845"
  revision = "925541529c1fa6821df4e44ce2723319eb2be768"
  version = "v1.0.0"

[[projects]]
  branch = "master"
@@ -189,7 +194,8 @@
[[projects]]
  name = "github.com/marstr/guid"
  packages = ["."]
  revision = "8bdf7d1a087ccc975cf37dd6507da50698fd19ca"
  revision = "8bd9a64bf37eb297b492a4101fb28e80ac0b290f"
  version = "v1.1.0"

[[projects]]
  name = "github.com/mattn/go-runewidth"
@@ -213,7 +219,7 @@
  branch = "master"
  name = "github.com/nsf/termbox-go"
  packages = ["."]
  revision = "8c5e0793e04afcda7fe23d0751791e7321df4265"
  revision = "e2050e41c8847748ec5288741c0b19a8cb26d084"

[[projects]]
  branch = "master"
@@ -243,13 +249,13 @@
  branch = "master"
  name = "github.com/pkg/errors"
  packages = ["."]
  revision = "e881fd58d78e04cf6d0de1217f8707c8cc2249bc"
  revision = "816c9085562cd7ee03e7f8188a1cfd942858cded"

[[projects]]
  branch = "master"
  name = "github.com/pkg/sftp"
  packages = ["."]
  revision = "72ec6e85598d2480c30f633c154b07b6c112eade"
  revision = "49488377fa2f14143ba3067cf7555f60f6c7b550"
  version = "1.5.0"

[[projects]]
  name = "github.com/pmezard/go-difflib"
@@ -266,8 +272,8 @@
[[projects]]
  name = "github.com/russross/blackfriday"
  packages = ["."]
  revision = "4048872b16cc0fc2c5fd9eacf0ed2c2fedaa0c8c"
  version = "v1.5"
  revision = "55d61fa8aa702f59229e6cff85793c22e580eaf5"
  version = "v1.5.1"

[[projects]]
  name = "github.com/satori/go.uuid"
@@ -276,10 +282,10 @@
  version = "v1.2.0"

[[projects]]
  branch = "master"
  name = "github.com/sevlyar/go-daemon"
  packages = ["."]
  revision = "e49ef56654f54139c4dc0285f973f74e9649e729"
  version = "v0.1.2"
  revision = "32749a731f76154d29bc6a547e6585f320eb235e"

[[projects]]
  branch = "master"
@@ -294,13 +300,13 @@
    ".",
    "doc"
  ]
  revision = "0c34d16c3123764e413b9ed982ada58b1c3d53ea"
  revision = "c439c4fa093711d42e1b01acb1235b52004753c1"

[[projects]]
  branch = "master"
  name = "github.com/spf13/pflag"
  packages = ["."]
  revision = "4c012f6dcd9546820e378d0bdda4d8fc772cdfea"
  revision = "ee5fd03fd6acfd43e44aea0b4135958546ed8e73"

[[projects]]
  branch = "master"
@@ -309,7 +315,7 @@
    "assert",
    "require"
  ]
  revision = "87b1dfb5b2fa649f52695dd9eae19abe404a4308"
  revision = "380174f817a09abe5982a82f94ad50938a8df65d"

[[projects]]
  branch = "master"
@@ -318,6 +324,7 @@
  revision = "ba9c9e33906f58169366275e3450db66139a31a9"

[[projects]]
  branch = "master"
  name = "github.com/yunify/qingstor-sdk-go"
  packages = [
    ".",
@@ -332,8 +339,7 @@
    "service",
    "utils"
  ]
  revision = "51fa3b6bb3c24f4d646eefff251cd2e6ba716600"
  version = "v2.2.9"
  revision = "a3cbaaf92247eaf55751a7ff37c126c511757492"

[[projects]]
  branch = "master"
@@ -344,6 +350,7 @@
    "curve25519",
    "ed25519",
    "ed25519/internal/edwards25519",
    "internal/chacha20",
    "nacl/secretbox",
    "pbkdf2",
    "poly1305",
@@ -353,7 +360,7 @@
    "ssh/agent",
    "ssh/terminal"
  ]
  revision = "13931e22f9e72ea58bb73048bc752b48c6d4d4ac"
  revision = "c3a3ad6d03f7a915c0f7e194b7152974bb73d287"

[[projects]]
  branch = "master"
@@ -363,10 +370,14 @@
    "context/ctxhttp",
    "html",
    "html/atom",
    "http2",
    "http2/hpack",
    "idna",
    "lex/httplex",
    "webdav",
    "webdav/internal/xml"
  ]
  revision = "5ccada7d0a7ba9aeb5d3aca8d3501b4c2a509fec"
  revision = "92b859f39abd2d91a854c9f9c4621b2f5054a92d"

[[projects]]
  branch = "master"
@@ -378,7 +389,7 @@
    "jws",
    "jwt"
  ]
  revision = "30785a2c434e431ef7c507b54617d6a951d5f2b4"
  revision = "fdc9e635145ae97e6c2cb777c48305600cf515cb"

[[projects]]
  branch = "master"
@@ -387,26 +398,36 @@
    "unix",
    "windows"
  ]
  revision = "fff93fa7cd278d84afc205751523809c464168ab"
  revision = "d8e400bc7db4870d786864138af681469693d18c"

[[projects]]
  branch = "master"
  name = "golang.org/x/text"
  packages = [
    "collate",
    "collate/build",
    "internal/colltab",
    "internal/gen",
    "internal/language",
    "internal/language/compact",
    "internal/tag",
    "internal/triegen",
    "internal/ucd",
    "language",
    "secure/bidirule",
    "transform",
    "unicode/bidi",
    "unicode/cldr",
    "unicode/norm"
    "unicode/norm",
    "unicode/rangetable"
  ]
  revision = "e19ae1496984b1c655b8044a65c0300a3c878dd3"
  revision = "8c34f848e18c4bd34d02db7f19a0ed1a0a8f5852"

[[projects]]
  branch = "master"
  name = "golang.org/x/time"
  packages = ["rate"]
  revision = "6dc17368e09b0e8634d71cac8168d853e869a0c7"
  revision = "26559e0f760e39c24d730d3224364aef164ee23f"

[[projects]]
  branch = "master"
@@ -418,7 +439,7 @@
    "googleapi/internal/uritemplates",
    "storage/v1"
  ]
  revision = "de3aa2cfa7f1c18dcb7f91738099bad280117b8e"
  revision = "55e9fb4044f4757138d4273ace23060d022d18f9"

[[projects]]
  name = "google.golang.org/appengine"
@@ -439,14 +460,14 @@
  version = "v1.0.0"

[[projects]]
  branch = "v2"
  name = "gopkg.in/yaml.v2"
  packages = ["."]
  revision = "d670f9405373e636a5a2765eea47fac0c9bc91a4"
  revision = "7f97868eec74b32b0982dd158a51a446d1da7eb5"
  version = "v2.1.1"

[solve-meta]
  analyzer-name = "dep"
  analyzer-version = 1
  inputs-digest = "f9e9adb0675a970e6b6a9f28fa75e5bbee74d001359c688bd37c78f035be565a"
  inputs-digest = "a91eff17e50d8733124875c68dbe771833cb9d8d07d545ba75fc4dd561f9bfe2"
  solver-name = "gps-cdcl"
  solver-version = 1
Gopkg.toml

@@ -97,10 +97,6 @@
  branch = "master"
  name = "github.com/spf13/pflag"

[[constraint]]
  branch = "master"
  name = "github.com/stacktic/dropbox"

[[constraint]]
  branch = "master"
  name = "github.com/stretchr/testify"
@@ -150,5 +146,5 @@
  name = "github.com/okzk/sdnotify"

[[constraint]]
  branch = "master"
  name = "github.com/sevlyar/go-daemon"
  version = "0.1.2"
1 vendor/cloud.google.com/go/CONTRIBUTORS generated vendored

@@ -27,6 +27,7 @@ Jonathan Amsterdam <jba@google.com>
Kunpei Sakai <namusyaka@gmail.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Mario Castro <mariocaster@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
2 vendor/cloud.google.com/go/LICENSE generated vendored

@@ -187,7 +187,7 @@
      same "printed page" as the copyright notice for easier
      identification within third-party archives.

   Copyright 2014 Google Inc.
   Copyright [yyyy] [name of copyright owner]

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
143 vendor/cloud.google.com/go/README.md generated vendored

@@ -33,6 +33,76 @@ make backwards-incompatible changes.

## News

_February 26, 2018_

*v0.19.0*

- bigquery:
  - Support customer-managed encryption keys.

- bigtable:
  - Improved emulator support.
  - Support GetCluster.

- datastore:
  - Add general mutations.
  - Support pointer struct fields.
  - Support transaction options.

- firestore:
  - Add Transaction.GetAll.
  - Support document cursors.

- logging:
  - Support concurrent RPCs to the service.
  - Support per-entry resources.

- profiler:
  - Add config options to disable heap and thread profiling.
  - Read the project ID from $GOOGLE_CLOUD_PROJECT when it's set.

- pubsub:
  - BEHAVIOR CHANGE: Release flow control after ack/nack (instead of after the
    callback returns).
  - Add SubscriptionInProject.
  - Add OpenCensus instrumentation for streaming pull.

- storage:
  - Support CORS.


_January 18, 2018_

*v0.18.0*

- bigquery:
  - Marked stable.
  - Schema inference of nullable fields supported.
  - Added TimePartitioning to QueryConfig.

- firestore: Data provided to DocumentRef.Set with a Merge option can contain
  Delete sentinels.

- logging: Clients can accept parent resources other than projects.

- pubsub:
  - pubsub/pstest: A lighweight fake for pubsub. Experimental; feedback welcome.
  - Support updating more subscription metadata: AckDeadline,
    RetainAckedMessages and RetentionDuration.

- oslogin/apiv1beta: New client for the Cloud OS Login API.

- rpcreplay: A package for recording and replaying gRPC traffic.

- spanner:
  - Add a ReadWithOptions that supports a row limit, as well as an index.
  - Support query plan and execution statistics.
  - Added [OpenCensus](http://opencensus.io) support.

- storage: Clarify checksum validation for gzipped files (it is not validated
  when the file is served uncompressed).


_December 11, 2017_

*v0.17.0*
@@ -65,67 +135,31 @@ _December 11, 2017_
  - TimePartitioning supports "Field".


_October 30, 2017_

*v0.16.0*

- Other bigquery changes:
  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
    Legacy SQL.
  - Uploader.Put will generate a random insert ID if you do not provide one.
  - Support time partitioning for load jobs.
  - Support dry-run queries.
  - A `Job` remembers its last retrieved status.
  - Support retrieving job configuration.
  - Support labels for jobs and tables.
  - Support dataset access lists.
  - Improve support for external data sources, including data from Bigtable and
    Google Sheets, and tables with external data.
  - Support updating a table's view configuration.
  - Fix uploading civil times with nanoseconds.

- storage:
  - Support PubSub notifications.
  - Support Requester Pays buckets.

- profiler: Support goroutine and mutex profile types.


_October 3, 2017_

*v0.15.0*

- firestore: beta release. See the
  [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).

- errorreporting: The existing package has been redesigned.

- errors: This package has been removed. Use errorreporting.


[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)

## Supported APIs

Google API | Status | Package
---------------------------------|--------------|-----------------------------------------------------------
[BigQuery][cloud-bigquery] | stable | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Bigtable][cloud-bigtable] | stable | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[Container][cloud-container] | alpha | [`cloud.google.com/go/container/apiv1`][cloud-container-ref]
[Data Loss Prevention][cloud-dlp]| alpha | [`cloud.google.com/go/dlp/apiv2beta1`][cloud-dlp-ref]
[Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
[Debugger][cloud-debugger] | alpha | [`cloud.google.com/go/debugger/apiv2`][cloud-debugger-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[OS Login][cloud-oslogin] | alpha | [`cloud.google.com/compute/docs/oslogin/rest`][cloud-oslogin-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]


> **Alpha status**: the API is still being actively developed. As a
@@ -480,6 +514,9 @@ for more information.
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1

[cloud-oslogin]: https://cloud.google.com/compute/docs/oslogin/rest
[cloud-oslogin-ref]: https://cloud.google.com/compute/docs/oslogin/rest

[cloud-speech]: https://cloud.google.com/speech
[cloud-speech-ref]: https://godoc.org/cloud.google.com/go/speech/apiv1

@@ -490,13 +527,19 @@ for more information.
[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation

[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace

[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1

[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting

[cloud-container]: https://cloud.google.com/containers/
[cloud-container-ref]: https://godoc.org/cloud.google.com/go/container/apiv1

[cloud-debugger]: https://cloud.google.com/debugger/
[cloud-debugger-ref]: https://godoc.org/cloud.google.com/go/debugger/apiv2

[cloud-dlp]: https://cloud.google.com/dlp/
[cloud-dlp-ref]: https://godoc.org/cloud.google.com/go/dlp/apiv2beta1

[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials
2 vendor/cloud.google.com/go/bigquery/bigquery.go generated vendored

@@ -152,5 +152,5 @@ func retryableError(err error) bool {
    if len(e.Errors) > 0 {
        reason = e.Errors[0].Reason
    }
    return reason == "backendError" || reason == "rateLimitExceeded"
    return e.Code == http.StatusBadGateway || reason == "backendError" || reason == "rateLimitExceeded"
}
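The change above makes HTTP 502 (Bad Gateway) responses retryable alongside the "backendError" and "rateLimitExceeded" error reasons. A minimal sketch of the resulting predicate, assuming the caller deals in the *googleapi.Error type this file already handles:

    import (
        "net/http"

        "google.golang.org/api/googleapi"
    )

    // retryable mirrors the updated vendored logic: retry on 502s and on
    // transient backend/rate-limit error reasons.
    func retryable(err error) bool {
        e, ok := err.(*googleapi.Error)
        if !ok {
            return false
        }
        var reason string
        if len(e.Errors) > 0 {
            reason = e.Errors[0].Reason
        }
        return e.Code == http.StatusBadGateway ||
            reason == "backendError" ||
            reason == "rateLimitExceeded"
    }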
13 vendor/cloud.google.com/go/bigquery/copy.go generated vendored

@@ -37,6 +37,9 @@ type CopyConfig struct {

    // The labels associated with this job.
    Labels map[string]string

    // Custom encryption configuration (e.g., Cloud KMS keys).
    DestinationEncryptionConfig *EncryptionConfig
}

func (c *CopyConfig) toBQ() *bq.JobConfiguration {
@@ -47,10 +50,11 @@ func (c *CopyConfig) toBQ() *bq.JobConfiguration {
    return &bq.JobConfiguration{
        Labels: c.Labels,
        Copy: &bq.JobConfigurationTableCopy{
            CreateDisposition: string(c.CreateDisposition),
            WriteDisposition:  string(c.WriteDisposition),
            DestinationTable:  c.Dst.toBQ(),
            SourceTables:      ts,
            CreateDisposition:                  string(c.CreateDisposition),
            WriteDisposition:                   string(c.WriteDisposition),
            DestinationTable:                   c.Dst.toBQ(),
            DestinationEncryptionConfiguration: c.DestinationEncryptionConfig.toBQ(),
            SourceTables:                       ts,
        },
    }
}
@@ -61,6 +65,7 @@ func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
        CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
        WriteDisposition:  TableWriteDisposition(q.Copy.WriteDisposition),
        Dst:               bqToTable(q.Copy.DestinationTable, c),
        DestinationEncryptionConfig: bqToEncryptionConfig(q.Copy.DestinationEncryptionConfiguration),
    }
    for _, t := range q.Copy.SourceTables {
        cc.Srcs = append(cc.Srcs, bqToTable(t, c))
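The copy.go change above threads a customer-managed encryption key through copy jobs. A short sketch of how a caller might use the new field — the project ID, dataset, table names, and KMS key name here are placeholders:

    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "project-id") // placeholder project ID
    if err != nil {
        // TODO: Handle error.
    }
    ds := client.Dataset("my_dataset")
    copier := ds.Table("dst").CopierFrom(ds.Table("src"))
    // Encrypt the destination table with a Cloud KMS key (placeholder name).
    copier.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
        KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
    }
    if _, err := copier.Run(ctx); err != nil {
        // TODO: Handle error.
    }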
8 vendor/cloud.google.com/go/bigquery/copy_test.go generated vendored

@@ -84,15 +84,17 @@ func TestCopy(t *testing.T) {
            },
        },
        config: CopyConfig{
            CreateDisposition: CreateNever,
            WriteDisposition:  WriteTruncate,
            Labels:            map[string]string{"a": "b"},
            CreateDisposition:           CreateNever,
            WriteDisposition:            WriteTruncate,
            DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
            Labels:                      map[string]string{"a": "b"},
        },
        want: func() *bq.Job {
            j := defaultCopyJob()
            j.Configuration.Labels = map[string]string{"a": "b"}
            j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
            j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
            j.Configuration.Copy.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
            return j
        }(),
    },
67 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/ListDataSources_smoke_test.go generated vendored Normal file

@@ -0,0 +1,67 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package datatransfer

import (
    datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

import (
    "fmt"
    "strconv"
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestDataTransferServiceSmoke(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping smoke test in short mode")
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
    if ts == nil {
        t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
    }

    projectId := testutil.ProjID()
    _ = projectId

    c, err := NewClient(ctx, option.WithTokenSource(ts))
    if err != nil {
        t.Fatal(err)
    }

    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", projectId, "us-central1")
    var request = &datatransferpb.ListDataSourcesRequest{
        Parent: formattedParent,
    }

    iter := c.ListDataSources(ctx, request)
    if _, err := iter.Next(); err != nil && err != iterator.Done {
        t.Error(err)
    }
}
92 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go generated vendored

@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -144,94 +144,6 @@ func (c *Client) setGoogleClientInfo(keyval ...string) {
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ProjectPath returns the path for the project resource.
func ProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}

// LocationPath returns the path for the location resource.
func LocationPath(project, location string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        ""
}

// LocationDataSourcePath returns the path for the location data source resource.
func LocationDataSourcePath(project, location, dataSource string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/dataSources/" +
        dataSource +
        ""
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
func LocationTransferConfigPath(project, location, transferConfig string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/transferConfigs/" +
        transferConfig +
        ""
}

// LocationRunPath returns the path for the location run resource.
func LocationRunPath(project, location, transferConfig, run string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/transferConfigs/" +
        transferConfig +
        "/runs/" +
        run +
        ""
}

// DataSourcePath returns the path for the data source resource.
func DataSourcePath(project, dataSource string) string {
    return "" +
        "projects/" +
        project +
        "/dataSources/" +
        dataSource +
        ""
}

// TransferConfigPath returns the path for the transfer config resource.
func TransferConfigPath(project, transferConfig string) string {
    return "" +
        "projects/" +
        project +
        "/transferConfigs/" +
        transferConfig +
        ""
}

// RunPath returns the path for the run resource.
func RunPath(project, transferConfig, run string) string {
    return "" +
        "projects/" +
        project +
        "/transferConfigs/" +
        transferConfig +
        "/runs/" +
        run +
        ""
}

// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -41,7 +41,7 @@ func ExampleClient_GetDataSource() {
    }

    req := &datatransferpb.GetDataSourceRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetDataSource(ctx, req)
    if err != nil {
@@ -59,7 +59,7 @@ func ExampleClient_ListDataSources() {
    }

    req := &datatransferpb.ListDataSourcesRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListDataSources(ctx, req)
    for {
@@ -83,7 +83,7 @@ func ExampleClient_CreateTransferConfig() {
    }

    req := &datatransferpb.CreateTransferConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.CreateTransferConfig(ctx, req)
    if err != nil {
@@ -101,7 +101,7 @@ func ExampleClient_UpdateTransferConfig() {
    }

    req := &datatransferpb.UpdateTransferConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateTransferConfig(ctx, req)
    if err != nil {
@@ -119,7 +119,7 @@ func ExampleClient_DeleteTransferConfig() {
    }

    req := &datatransferpb.DeleteTransferConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    err = c.DeleteTransferConfig(ctx, req)
    if err != nil {
@@ -135,7 +135,7 @@ func ExampleClient_GetTransferConfig() {
    }

    req := &datatransferpb.GetTransferConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetTransferConfig(ctx, req)
    if err != nil {
@@ -153,7 +153,7 @@ func ExampleClient_ListTransferConfigs() {
    }

    req := &datatransferpb.ListTransferConfigsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListTransferConfigs(ctx, req)
    for {
@@ -177,7 +177,7 @@ func ExampleClient_ScheduleTransferRuns() {
    }

    req := &datatransferpb.ScheduleTransferRunsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ScheduleTransferRuns(ctx, req)
    if err != nil {
@@ -195,7 +195,7 @@ func ExampleClient_GetTransferRun() {
    }

    req := &datatransferpb.GetTransferRunRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetTransferRun(ctx, req)
    if err != nil {
@@ -213,7 +213,7 @@ func ExampleClient_DeleteTransferRun() {
    }

    req := &datatransferpb.DeleteTransferRunRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    err = c.DeleteTransferRun(ctx, req)
    if err != nil {
@@ -229,7 +229,7 @@ func ExampleClient_ListTransferRuns() {
    }

    req := &datatransferpb.ListTransferRunsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListTransferRuns(ctx, req)
    for {
@@ -253,7 +253,7 @@ func ExampleClient_ListTransferLogs() {
    }

    req := &datatransferpb.ListTransferLogsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListTransferLogs(ctx, req)
    for {
@@ -277,7 +277,7 @@ func ExampleClient_CheckValidCreds() {
    }

    req := &datatransferpb.CheckValidCredsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.CheckValidCreds(ctx, req)
    if err != nil {
4 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/doc.go generated vendored

@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
62 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go generated vendored

@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -259,7 +259,7 @@ func TestDataTransferServiceGetDataSource(t *testing.T) {
    var defaultSchedule string = "defaultSchedule-800168235"
    var supportsCustomSchedule bool = true
    var helpUrl string = "helpUrl-789431439"
    var defaultDataRefreshWindowDays int32 = -1804935157
    var defaultDataRefreshWindowDays int32 = 1804935157
    var manualRunsDisabled bool = true
    var expectedResponse = &datatransferpb.DataSource{
        Name: name2,
@@ -281,7 +281,7 @@ func TestDataTransferServiceGetDataSource(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var request = &datatransferpb.GetDataSourceRequest{
        Name: formattedName,
    }
@@ -310,7 +310,7 @@ func TestDataTransferServiceGetDataSourceError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var request = &datatransferpb.GetDataSourceRequest{
        Name: formattedName,
    }
@@ -343,7 +343,7 @@ func TestDataTransferServiceListDataSources(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var request = &datatransferpb.ListDataSourcesRequest{
        Parent: formattedParent,
    }
@@ -382,7 +382,7 @@ func TestDataTransferServiceListDataSourcesError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var request = &datatransferpb.ListDataSourcesRequest{
        Parent: formattedParent,
    }
@@ -409,7 +409,7 @@ func TestDataTransferServiceCreateTransferConfig(t *testing.T) {
    var schedule string = "schedule-697920873"
    var dataRefreshWindowDays int32 = 327632845
    var disabled bool = true
    var userId int64 = -147132913
    var userId int64 = 147132913
    var datasetRegion string = "datasetRegion959248539"
    var expectedResponse = &datatransferpb.TransferConfig{
        Name: name,
@@ -428,7 +428,7 @@ func TestDataTransferServiceCreateTransferConfig(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
    var request = &datatransferpb.CreateTransferConfigRequest{
        Parent: formattedParent,
@@ -459,7 +459,7 @@ func TestDataTransferServiceCreateTransferConfigError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var transferConfig *datatransferpb.TransferConfig = &datatransferpb.TransferConfig{}
    var request = &datatransferpb.CreateTransferConfigRequest{
        Parent: formattedParent,
@@ -488,7 +488,7 @@ func TestDataTransferServiceUpdateTransferConfig(t *testing.T) {
    var schedule string = "schedule-697920873"
    var dataRefreshWindowDays int32 = 327632845
    var disabled bool = true
    var userId int64 = -147132913
    var userId int64 = 147132913
    var datasetRegion string = "datasetRegion959248539"
    var expectedResponse = &datatransferpb.TransferConfig{
        Name: name,
@@ -567,7 +567,7 @@ func TestDataTransferServiceDeleteTransferConfig(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.DeleteTransferConfigRequest{
        Name: formattedName,
    }
@@ -593,7 +593,7 @@ func TestDataTransferServiceDeleteTransferConfigError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.DeleteTransferConfigRequest{
        Name: formattedName,
    }
@@ -619,7 +619,7 @@ func TestDataTransferServiceGetTransferConfig(t *testing.T) {
    var schedule string = "schedule-697920873"
    var dataRefreshWindowDays int32 = 327632845
    var disabled bool = true
    var userId int64 = -147132913
    var userId int64 = 147132913
    var datasetRegion string = "datasetRegion959248539"
    var expectedResponse = &datatransferpb.TransferConfig{
        Name: name2,
@@ -638,7 +638,7 @@ func TestDataTransferServiceGetTransferConfig(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.GetTransferConfigRequest{
        Name: formattedName,
    }
@@ -667,7 +667,7 @@ func TestDataTransferServiceGetTransferConfigError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.GetTransferConfigRequest{
        Name: formattedName,
    }
@@ -700,7 +700,7 @@ func TestDataTransferServiceListTransferConfigs(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var request = &datatransferpb.ListTransferConfigsRequest{
        Parent: formattedParent,
    }
@@ -739,7 +739,7 @@ func TestDataTransferServiceListTransferConfigsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationPath("[PROJECT]", "[LOCATION]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s", "[PROJECT]", "[LOCATION]")
    var request = &datatransferpb.ListTransferConfigsRequest{
        Parent: formattedParent,
    }
@@ -766,7 +766,7 @@ func TestDataTransferServiceScheduleTransferRuns(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
    var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
    var request = &datatransferpb.ScheduleTransferRunsRequest{
@@ -799,7 +799,7 @@ func TestDataTransferServiceScheduleTransferRunsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var startTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
    var endTime *timestamppb.Timestamp = &timestamppb.Timestamp{}
    var request = &datatransferpb.ScheduleTransferRunsRequest{
@@ -826,7 +826,7 @@ func TestDataTransferServiceGetTransferRun(t *testing.T) {
    var name2 string = "name2-1052831874"
    var destinationDatasetId string = "destinationDatasetId1541564179"
    var dataSourceId string = "dataSourceId-1015796374"
    var userId int64 = -147132913
    var userId int64 = 147132913
    var schedule string = "schedule-697920873"
    var expectedResponse = &datatransferpb.TransferRun{
        Name: name2,
@@ -841,7 +841,7 @@ func TestDataTransferServiceGetTransferRun(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.GetTransferRunRequest{
        Name: formattedName,
    }
@@ -870,7 +870,7 @@ func TestDataTransferServiceGetTransferRunError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.GetTransferRunRequest{
        Name: formattedName,
    }
@@ -897,7 +897,7 @@ func TestDataTransferServiceDeleteTransferRun(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.DeleteTransferRunRequest{
        Name: formattedName,
    }
@@ -923,7 +923,7 @@ func TestDataTransferServiceDeleteTransferRunError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.DeleteTransferRunRequest{
        Name: formattedName,
    }
@@ -955,7 +955,7 @@ func TestDataTransferServiceListTransferRuns(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.ListTransferRunsRequest{
        Parent: formattedParent,
    }
@@ -994,7 +994,7 @@ func TestDataTransferServiceListTransferRunsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationTransferConfigPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]")
    var request = &datatransferpb.ListTransferRunsRequest{
        Parent: formattedParent,
    }
@@ -1027,7 +1027,7 @@ func TestDataTransferServiceListTransferLogs(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedParent string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.ListTransferLogsRequest{
        Parent: formattedParent,
    }
@@ -1066,7 +1066,7 @@ func TestDataTransferServiceListTransferLogsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedParent string = LocationRunPath("[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var formattedParent string = fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", "[PROJECT]", "[LOCATION]", "[TRANSFER_CONFIG]", "[RUN]")
    var request = &datatransferpb.ListTransferLogsRequest{
        Parent: formattedParent,
    }
@@ -1096,7 +1096,7 @@ func TestDataTransferServiceCheckValidCreds(t *testing.T) {

    mockDataTransfer.resps = append(mockDataTransfer.resps[:0], expectedResponse)

    var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var request = &datatransferpb.CheckValidCredsRequest{
        Name: formattedName,
    }
@@ -1125,7 +1125,7 @@ func TestDataTransferServiceCheckValidCredsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDataTransfer.err = gstatus.Error(errCode, "test error")

    var formattedName string = LocationDataSourcePath("[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var formattedName string = fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", "[PROJECT]", "[LOCATION]", "[DATA_SOURCE]")
    var request = &datatransferpb.CheckValidCredsRequest{
        Name: formattedName,
    }
135 vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/path_funcs.go generated vendored Normal file

@@ -0,0 +1,135 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datatransfer

// ProjectPath returns the path for the project resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s", project)
// instead.
func ProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}

// LocationPath returns the path for the location resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/locations/%s", project, location)
// instead.
func LocationPath(project, location string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        ""
}

// LocationDataSourcePath returns the path for the location data source resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/locations/%s/dataSources/%s", project, location, dataSource)
// instead.
func LocationDataSourcePath(project, location, dataSource string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/dataSources/" +
        dataSource +
        ""
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s", project, location, transferConfig)
// instead.
func LocationTransferConfigPath(project, location, transferConfig string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/transferConfigs/" +
        transferConfig +
        ""
}

// LocationRunPath returns the path for the location run resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/locations/%s/transferConfigs/%s/runs/%s", project, location, transferConfig, run)
// instead.
func LocationRunPath(project, location, transferConfig, run string) string {
    return "" +
        "projects/" +
        project +
        "/locations/" +
        location +
        "/transferConfigs/" +
        transferConfig +
        "/runs/" +
        run +
        ""
}

// DataSourcePath returns the path for the data source resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/dataSources/%s", project, dataSource)
// instead.
func DataSourcePath(project, dataSource string) string {
    return "" +
        "projects/" +
        project +
        "/dataSources/" +
        dataSource +
        ""
}

// TransferConfigPath returns the path for the transfer config resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/transferConfigs/%s", project, transferConfig)
// instead.
func TransferConfigPath(project, transferConfig string) string {
    return "" +
        "projects/" +
        project +
        "/transferConfigs/" +
        transferConfig +
        ""
}

// RunPath returns the path for the run resource.
//
// Deprecated: Use
//     fmt.Sprintf("projects/%s/transferConfigs/%s/runs/%s", project, transferConfig, run)
// instead.
func RunPath(project, transferConfig, run string) string {
    return "" +
        "projects/" +
        project +
        "/transferConfigs/" +
        transferConfig +
        "/runs/" +
        run +
        ""
}
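Each deprecated helper in path_funcs.go maps to a one-line fmt.Sprintf call, as the Deprecated comments spell out. A minimal sketch of the migration, using a local stand-in for the deprecated helper and hypothetical resource names:

    package main

    import "fmt"

    // locationPath mirrors the deprecated datatransfer.LocationPath helper.
    func locationPath(project, location string) string {
        return "projects/" + project + "/locations/" + location
    }

    func main() {
        p1 := locationPath("my-project", "us-central1")
        p2 := fmt.Sprintf("projects/%s/locations/%s", "my-project", "us-central1")
        fmt.Println(p1 == p2) // prints: true
    }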
18
vendor/cloud.google.com/go/bigquery/doc.go
generated
vendored
18
vendor/cloud.google.com/go/bigquery/doc.go
generated
vendored
@@ -150,8 +150,9 @@ There are two ways to construct schemas with this package.
 You can build a schema by hand, like so:
 
     schema1 := bigquery.Schema{
-        &bigquery.FieldSchema{Name: "Name", Required: true, Type: bigquery.StringFieldType},
-        &bigquery.FieldSchema{Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
+        {Name: "Name", Required: true, Type: bigquery.StringFieldType},
+        {Name: "Grades", Repeated: true, Type: bigquery.IntegerFieldType},
+        {Name: "Optional", Required: false, Type: bigquery.IntegerFieldType},
     }
 
 Or you can infer the schema from a struct:
@@ -159,6 +160,7 @@ Or you can infer the schema from a struct:
     type student struct {
         Name   string
         Grades []int
+        Optional bigquery.NullInt64
     }
     schema2, err := bigquery.InferSchema(student{})
     if err != nil {
@@ -166,20 +168,24 @@ Or you can infer the schema from a struct:
     }
     // schema1 and schema2 are identical.
 
-Struct inference supports tags like those of the encoding/json package,
-so you can change names, ignore fields, or mark a field as nullable (non-required):
+Struct inference supports tags like those of the encoding/json package, so you can
+change names, ignore fields, or mark a field as nullable (non-required). Fields
+declared as one of the Null types (NullInt64, NullFloat64, NullString, NullBool,
+NullTimestamp, NullDate, NullTime and NullDateTime) are automatically inferred as
+nullable, so the "nullable" tag is only needed for []byte and pointer-to-struct
+fields.
 
     type student2 struct {
         Name     string `bigquery:"full_name"`
         Grades   []int
         Secret   string `bigquery:"-"`
-        Optional int    `bigquery:",nullable"`
+        Optional []byte `bigquery:",nullable"`
     }
     schema3, err := bigquery.InferSchema(student2{})
     if err != nil {
         // TODO: Handle error.
     }
-    // schema3 has required fields "full_name", "Grades" and nullable field "Optional".
+    // schema3 has required fields "full_name" and "Grades", and nullable BYTES field "Optional".
 
 Having constructed a schema, you can create a table with it like so:
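Reviewer note: to make the inference rules above concrete, here is a minimal, self-contained sketch. The studentRow type and its field names are illustrative, not part of this diff; bigquery.InferSchema and the FieldSchema fields are the package's public API.

    package main

    import (
        "fmt"

        "cloud.google.com/go/bigquery"
    )

    type studentRow struct {
        Name     string             // inferred as a required STRING field
        Grades   []int              // inferred as a repeated INTEGER field
        Optional bigquery.NullInt64 // Null types are inferred as nullable automatically
    }

    func main() {
        schema, err := bigquery.InferSchema(studentRow{})
        if err != nil {
            panic(err)
        }
        for _, f := range schema {
            fmt.Printf("%-8s %-9s required=%v\n", f.Name, f.Type, f.Required)
        }
    }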
72  vendor/cloud.google.com/go/bigquery/examples_test.go  generated  vendored
@@ -131,6 +131,23 @@ func ExampleClient_Query_parameters() {
 	// TODO: Call Query.Run or Query.Read.
 }
 
+// This example demonstrates how to run a query job on a table
+// with a customer-managed encryption key. The same
+// applies to load and copy jobs as well.
+func ExampleClient_Query_encryptionKey() {
+	ctx := context.Background()
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	q := client.Query("select name, num from t1")
+	// TODO: Replace this key with a key you have created in Cloud KMS.
+	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
+	q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{KMSKeyName: keyName}
+	// TODO: set other options on the Query.
+	// TODO: Call Query.Run or Query.Read.
+}
+
 func ExampleQuery_Read() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
@@ -399,7 +416,8 @@ func ExampleInferSchema_tags() {
 		Size     float64
 		Count    int    `bigquery:"number"`
 		Secret   []byte `bigquery:"-"`
-		Optional bool   `bigquery:",nullable"`
+		Optional bigquery.NullBool
+		OptBytes []byte `bigquery:",nullable"`
 	}
 	schema, err := bigquery.InferSchema(Item{})
 	if err != nil {
@@ -414,6 +432,7 @@ func ExampleInferSchema_tags() {
 	// Size FLOAT true
 	// number INTEGER true
 	// Optional BOOLEAN false
+	// OptBytes BYTES false
 }
 
 func ExampleTable_Create() {
@@ -451,6 +470,33 @@ func ExampleTable_Create_initialize() {
 	}
 }
 
+// This example demonstrates how to create a table with
+// a customer-managed encryption key.
+func ExampleTable_Create_encryptionKey() {
+	ctx := context.Background()
+	// Infer table schema from a Go type.
+	schema, err := bigquery.InferSchema(Item{})
+	if err != nil {
+		// TODO: Handle error.
+	}
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+	t := client.Dataset("my_dataset").Table("new-table")
+
+	// TODO: Replace this key with a key you have created in Cloud KMS.
+	keyName := "projects/P/locations/L/keyRings/R/cryptoKeys/K"
+	if err := t.Create(ctx,
+		&bigquery.TableMetadata{
+			Name:             "My New Table",
+			Schema:           schema,
+			EncryptionConfig: &bigquery.EncryptionConfig{KMSKeyName: keyName},
+		}); err != nil {
+		// TODO: Handle error.
+	}
+}
+
 func ExampleTable_Delete() {
 	ctx := context.Background()
 	client, err := bigquery.NewClient(ctx, "project-id")
@@ -757,3 +803,27 @@ func ExampleUploader_Put_struct() {
 		// TODO: Handle error.
 	}
 }
+
+func ExampleUploader_Put_valuesSaver() {
+	ctx := context.Background()
+	client, err := bigquery.NewClient(ctx, "project-id")
+	if err != nil {
+		// TODO: Handle error.
+	}
+
+	u := client.Dataset("my_dataset").Table("my_table").Uploader()
+
+	var vss []*bigquery.ValuesSaver
+	for i, name := range []string{"n1", "n2", "n3"} {
+		// Assume schema holds the table's schema.
+		vss = append(vss, &bigquery.ValuesSaver{
+			Schema:   schema,
+			InsertID: name,
+			Row:      []bigquery.Value{name, int64(i)},
+		})
+	}
+
+	if err := u.Put(ctx, vss); err != nil {
+		// TODO: Handle error.
+	}
+}
250  vendor/cloud.google.com/go/bigquery/integration_test.go  generated  vendored
@@ -600,7 +600,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 	}
 
 	// Test reading directly into a []Value.
-	valueLists, err := readAll(table.Read(ctx))
+	valueLists, schema, _, err := readAll(table.Read(ctx))
 	if err != nil {
 		t.Fatal(err)
 	}
@@ -610,6 +610,9 @@ func TestIntegration_UploadAndRead(t *testing.T) {
 		if err := it.Next(&got); err != nil {
 			t.Fatal(err)
 		}
+		if !testutil.Equal(it.Schema, schema) {
+			t.Fatalf("got schema %v, want %v", it.Schema, schema)
+		}
 		want := []Value(vl)
 		if !testutil.Equal(got, want) {
 			t.Errorf("%d: got %v, want %v", i, got, want)
@@ -671,6 +674,10 @@ type TestStruct struct {
 	RecordArray []SubTestStruct
 }
 
+// Round times to the microsecond for comparison purposes.
+var roundToMicros = cmp.Transformer("RoundToMicros",
+	func(t time.Time) time.Time { return t.Round(time.Microsecond) })
+
 func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
@@ -684,14 +691,14 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	table := newTable(t, schema)
 	defer table.Delete(ctx)
 
-	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{15, 4, 5, 6000}
+	d := civil.Date{Year: 2016, Month: 3, Day: 20}
+	tm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
 	ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
-	dtm := civil.DateTime{d, tm}
-	d2 := civil.Date{1994, 5, 15}
-	tm2 := civil.Time{1, 2, 4, 0}
+	dtm := civil.DateTime{Date: d, Time: tm}
+	d2 := civil.Date{Year: 1994, Month: 5, Day: 15}
+	tm2 := civil.Time{Hour: 1, Minute: 2, Second: 4, Nanosecond: 0}
 	ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)
-	dtm2 := civil.DateTime{d2, tm2}
+	dtm2 := civil.DateTime{Date: d2, Time: tm2}
 
 	// Populate the table.
 	upl := table.Uploader()
@@ -770,9 +777,6 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
 	}
 	sort.Sort(byName(got))
 
-	// Round times to the microsecond.
-	roundToMicros := cmp.Transformer("RoundToMicros",
-		func(t time.Time) time.Time { return t.Round(time.Microsecond) })
 	// BigQuery does not elide nils. It reports an error for nil fields.
 	for i, g := range got {
 		if i >= len(want) {
@@ -789,6 +793,69 @@ func (b byName) Len() int { return len(b) }
 func (b byName) Swap(i, j int)      { b[i], b[j] = b[j], b[i] }
 func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
 
+func TestIntegration_UploadAndReadNullable(t *testing.T) {
+	if client == nil {
+		t.Skip("Integration tests skipped")
+	}
+	ctm := civil.Time{Hour: 15, Minute: 4, Second: 5, Nanosecond: 6000}
+	cdt := civil.DateTime{Date: testDate, Time: ctm}
+	testUploadAndReadNullable(t, testStructNullable{}, make([]Value, len(testStructNullableSchema)))
+	testUploadAndReadNullable(t, testStructNullable{
+		String:    NullString{"x", true},
+		Bytes:     []byte{1, 2, 3},
+		Integer:   NullInt64{1, true},
+		Float:     NullFloat64{2.3, true},
+		Boolean:   NullBool{true, true},
+		Timestamp: NullTimestamp{testTimestamp, true},
+		Date:      NullDate{testDate, true},
+		Time:      NullTime{ctm, true},
+		DateTime:  NullDateTime{cdt, true},
+		Record:    &subNullable{X: NullInt64{4, true}},
+	},
+		[]Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, ctm, cdt, []Value{int64(4)}})
+}
+
+func testUploadAndReadNullable(t *testing.T, ts testStructNullable, wantRow []Value) {
+	ctx := context.Background()
+	table := newTable(t, testStructNullableSchema)
+	defer table.Delete(ctx)
+
+	// Populate the table.
+	upl := table.Uploader()
+	if err := upl.Put(ctx, []*StructSaver{{Schema: testStructNullableSchema, Struct: ts}}); err != nil {
+		t.Fatal(putError(err))
+	}
+	// Wait until the data has been uploaded. This can take a few seconds, according
+	// to https://cloud.google.com/bigquery/streaming-data-into-bigquery.
+	if err := waitForRow(ctx, table); err != nil {
+		t.Fatal(err)
+	}
+
+	// Read into a []Value.
+	iter := table.Read(ctx)
+	gotRows, _, _, err := readAll(iter)
+	if err != nil {
+		t.Fatal(err)
+	}
+	if len(gotRows) != 1 {
+		t.Fatalf("got %d rows, want 1", len(gotRows))
+	}
+	if diff := testutil.Diff(gotRows[0], wantRow, roundToMicros); diff != "" {
+		t.Error(diff)
+	}
+
+	// Read into a struct.
+	want := ts
+	var sn testStructNullable
+	it := table.Read(ctx)
+	if err := it.Next(&sn); err != nil {
+		t.Fatal(err)
+	}
+	if diff := testutil.Diff(sn, want, roundToMicros); diff != "" {
+		t.Error(diff)
+	}
+}
+
 func TestIntegration_TableUpdate(t *testing.T) {
 	if client == nil {
 		t.Skip("Integration tests skipped")
@@ -954,7 +1021,7 @@ func TestIntegration_Load(t *testing.T) {
 	if err := wait(ctx, job); err != nil {
 		t.Fatal(err)
 	}
-	checkRead(t, "reader load", table.Read(ctx), wantRows)
+	checkReadAndTotalRows(t, "reader load", table.Read(ctx), wantRows)
 
 }
 
@@ -1018,9 +1085,9 @@ func TestIntegration_TimeTypes(t *testing.T) {
 	table := newTable(t, dtSchema)
 	defer table.Delete(ctx)
 
-	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{12, 30, 0, 6000}
-	dtm := civil.DateTime{d, tm}
+	d := civil.Date{Year: 2016, Month: 3, Day: 20}
+	tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
+	dtm := civil.DateTime{Date: d, Time: tm}
 	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
 	wantRows := [][]Value{
 		[]Value{d, tm, dtm, ts},
@@ -1054,8 +1121,8 @@ func TestIntegration_StandardQuery(t *testing.T) {
 	}
 	ctx := context.Background()
 
-	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{15, 04, 05, 0}
+	d := civil.Date{Year: 2016, Month: 3, Day: 20}
+	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 0}
 	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
 	dtm := ts.Format("2006-01-02 15:04:05")
 
@@ -1080,7 +1147,7 @@ func TestIntegration_StandardQuery(t *testing.T) {
 		{fmt.Sprintf("SELECT TIMESTAMP '%s'", dtm), []Value{ts}},
 		{fmt.Sprintf("SELECT [TIMESTAMP '%s', TIMESTAMP '%s']", dtm, dtm), []Value{[]Value{ts, ts}}},
 		{fmt.Sprintf("SELECT ('hello', TIMESTAMP '%s')", dtm), []Value{[]Value{"hello", ts}}},
-		{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{d, tm}}},
+		{fmt.Sprintf("SELECT DATETIME(TIMESTAMP '%s')", dtm), []Value{civil.DateTime{Date: d, Time: tm}}},
 		{fmt.Sprintf("SELECT DATE(TIMESTAMP '%s')", dtm), []Value{d}},
 		{fmt.Sprintf("SELECT TIME(TIMESTAMP '%s')", dtm), []Value{tm}},
 		{"SELECT (1, 2)", []Value{ints(1, 2)}},
@@ -1139,9 +1206,11 @@ func TestIntegration_QueryParameters(t *testing.T) {
 	}
 	ctx := context.Background()
 
-	d := civil.Date{2016, 3, 20}
-	tm := civil.Time{15, 04, 05, 0}
-	dtm := civil.DateTime{d, tm}
+	d := civil.Date{Year: 2016, Month: 3, Day: 20}
+	tm := civil.Time{Hour: 15, Minute: 04, Second: 05, Nanosecond: 3008}
+	rtm := tm
+	rtm.Nanosecond = 3000 // round to microseconds
+	dtm := civil.DateTime{Date: d, Time: tm}
 	ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
 
 	type ss struct {
@@ -1159,20 +1228,93 @@ func TestIntegration_QueryParameters(t *testing.T) {
 		query      string
 		parameters []QueryParameter
 		wantRow    []Value
+		wantConfig interface{}
 	}{
-		{"SELECT @val", []QueryParameter{{"val", 1}}, []Value{int64(1)}},
-		{"SELECT @val", []QueryParameter{{"val", 1.3}}, []Value{1.3}},
-		{"SELECT @val", []QueryParameter{{"val", true}}, []Value{true}},
-		{"SELECT @val", []QueryParameter{{"val", "ABC"}}, []Value{"ABC"}},
-		{"SELECT @val", []QueryParameter{{"val", []byte("foo")}}, []Value{[]byte("foo")}},
-		{"SELECT @val", []QueryParameter{{"val", ts}}, []Value{ts}},
-		{"SELECT @val", []QueryParameter{{"val", []time.Time{ts, ts}}}, []Value{[]Value{ts, ts}}},
-		{"SELECT @val", []QueryParameter{{"val", dtm}}, []Value{dtm}},
-		{"SELECT @val", []QueryParameter{{"val", d}}, []Value{d}},
-		{"SELECT @val", []QueryParameter{{"val", tm}}, []Value{tm}},
-		{"SELECT @val", []QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
-			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}}},
-		{"SELECT @val.Timestamp, @val.SubStruct.String", []QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}}, []Value{ts, "a"}},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", 1}},
+			[]Value{int64(1)},
+			int64(1),
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", 1.3}},
+			[]Value{1.3},
+			1.3,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", true}},
+			[]Value{true},
+			true,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", "ABC"}},
+			[]Value{"ABC"},
+			"ABC",
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", []byte("foo")}},
+			[]Value{[]byte("foo")},
+			[]byte("foo"),
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", ts}},
+			[]Value{ts},
+			ts,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", []time.Time{ts, ts}}},
+			[]Value{[]Value{ts, ts}},
+			[]interface{}{ts, ts},
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", dtm}},
+			[]Value{civil.DateTime{Date: d, Time: rtm}},
+			civil.DateTime{Date: d, Time: rtm},
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", d}},
+			[]Value{d},
+			d,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", tm}},
+			[]Value{rtm},
+			rtm,
+		},
+		{
+			"SELECT @val",
+			[]QueryParameter{{"val", s{ts, []string{"a", "b"}, ss{"c"}, []ss{{"d"}, {"e"}}}}},
+			[]Value{[]Value{ts, []Value{"a", "b"}, []Value{"c"}, []Value{[]Value{"d"}, []Value{"e"}}}},
+			map[string]interface{}{
+				"Timestamp":   ts,
+				"StringArray": []interface{}{"a", "b"},
+				"SubStruct":   map[string]interface{}{"String": "c"},
+				"SubStructArray": []interface{}{
+					map[string]interface{}{"String": "d"},
+					map[string]interface{}{"String": "e"},
+				},
+			},
+		},
+		{
+			"SELECT @val.Timestamp, @val.SubStruct.String",
+			[]QueryParameter{{"val", s{Timestamp: ts, SubStruct: ss{"a"}}}},
+			[]Value{ts, "a"},
+			map[string]interface{}{
+				"Timestamp":      ts,
+				"SubStruct":      map[string]interface{}{"String": "a"},
+				"StringArray":    nil,
+				"SubStructArray": nil,
+			},
+		},
 	}
 	for _, c := range testCases {
 		q := client.Query(c.query)
@@ -1189,6 +1331,15 @@ func TestIntegration_QueryParameters(t *testing.T) {
 			t.Fatal(err)
 		}
 		checkRead(t, "QueryParameters", it, [][]Value{c.wantRow})
+		config, err := job.Config()
+		if err != nil {
+			t.Fatal(err)
+		}
+		got := config.(*QueryConfig).Parameters[0].Value
+		if !testutil.Equal(got, c.wantConfig) {
+			t.Errorf("param %[1]v (%[1]T): config:\ngot %[2]v (%[2]T)\nwant %[3]v (%[3]T)",
+				c.parameters[0].Value, got, c.wantConfig)
+		}
 	}
 }
 
@@ -1284,7 +1435,7 @@ func TestIntegration_ExtractExternal(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	checkRead(t, "external query", iter, wantRows)
+	checkReadAndTotalRows(t, "external query", iter, wantRows)
 
 	// Make a table pointing to the file, and query it.
 	// BigQuery does not allow a Table.Read on an external table.
@@ -1302,7 +1453,7 @@ func TestIntegration_ExtractExternal(t *testing.T) {
 	if err != nil {
 		t.Fatal(err)
 	}
-	checkRead(t, "external table", iter, wantRows)
+	checkReadAndTotalRows(t, "external table", iter, wantRows)
 
 	// While we're here, check that the table metadata is correct.
 	md, err := table.Metadata(ctx)
@@ -1466,19 +1617,28 @@ func newTable(t *testing.T, s Schema) *Table {
 }
 
 func checkRead(t *testing.T, msg string, it *RowIterator, want [][]Value) {
-	if msg2, ok := compareRead(it, want); !ok {
+	if msg2, ok := compareRead(it, want, false); !ok {
 		t.Errorf("%s: %s", msg, msg2)
 	}
 }
 
-func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
-	got, err := readAll(it)
+func checkReadAndTotalRows(t *testing.T, msg string, it *RowIterator, want [][]Value) {
+	if msg2, ok := compareRead(it, want, true); !ok {
+		t.Errorf("%s: %s", msg, msg2)
+	}
+}
+
+func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) {
+	got, _, totalRows, err := readAll(it)
 	if err != nil {
 		return err.Error(), false
 	}
 	if len(got) != len(want) {
 		return fmt.Sprintf("got %d rows, want %d", len(got), len(want)), false
 	}
+	if compareTotalRows && len(got) != int(totalRows) {
+		return fmt.Sprintf("got %d rows, but totalRows = %d", len(got), totalRows), false
	}
 	sort.Sort(byCol0(got))
 	for i, r := range got {
 		gotRow := []Value(r)
@@ -1490,18 +1650,24 @@ func compareRead(it *RowIterator, want [][]Value, compareTotalRows bool) (msg string, ok bool) {
 	return "", true
 }
 
-func readAll(it *RowIterator) ([][]Value, error) {
-	var rows [][]Value
+func readAll(it *RowIterator) ([][]Value, Schema, uint64, error) {
+	var (
+		rows      [][]Value
+		schema    Schema
+		totalRows uint64
+	)
 	for {
 		var vals []Value
 		err := it.Next(&vals)
 		if err == iterator.Done {
-			return rows, nil
+			return rows, schema, totalRows, nil
 		}
 		if err != nil {
-			return nil, err
+			return nil, nil, 0, err
 		}
 		rows = append(rows, vals)
+		schema = it.Schema
+		totalRows = it.TotalRows
 	}
 }
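Reviewer note: the new nullable integration test above reflects what users can now do — read rows containing NULLs directly into struct fields of the Null types. A minimal sketch follows; the project, dataset, table and field names are placeholders, not part of this diff.

    package main

    import (
        "context"
        "fmt"

        "cloud.google.com/go/bigquery"
    )

    type scoreRow struct {
        Name  bigquery.NullString
        Score bigquery.NullInt64 // NULL scores load with Valid == false
    }

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id")
        if err != nil {
            // TODO: Handle error.
        }
        it := client.Dataset("my_dataset").Table("scores").Read(ctx)
        var r scoreRow
        if err := it.Next(&r); err != nil {
            // TODO: Handle error (including iterator.Done for an empty table).
        }
        if r.Score.Valid {
            fmt.Println(r.Name, r.Score.Int64)
        }
    }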
25  vendor/cloud.google.com/go/bigquery/iterator.go  generated  vendored
@@ -48,9 +48,14 @@ type RowIterator struct {
 	// is also set, StartIndex is ignored.
 	StartIndex uint64
 
-	rows [][]Value
+	// The schema of the table. Available after the first call to Next.
+	Schema Schema
 
-	schema Schema // populated on first call to fetch
+	// The total number of rows in the result. Available after the first call to Next.
+	// May be zero just after rows were inserted.
+	TotalRows uint64
 
+	rows [][]Value
 	structLoader structLoader // used to populate a pointer to a struct
 }
@@ -88,8 +93,11 @@ type RowIterator struct {
 // type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
 // All calls to Next on the same iterator must use the same struct type.
 //
-// It is an error to attempt to read a BigQuery NULL value into a struct field.
-// If your table contains NULLs, use a *[]Value or *map[string]Value.
+// It is an error to attempt to read a BigQuery NULL value into a struct field,
+// unless the field is of type []byte or is one of the special Null types: NullInt64,
+// NullFloat64, NullBool, NullString, NullTimestamp, NullDate, NullTime or
+// NullDateTime. You can also use a *[]Value or *map[string]Value to read from a
+// table with NULLs.
 func (it *RowIterator) Next(dst interface{}) error {
 	var vl ValueLoader
 	switch dst := dst.(type) {
@@ -113,12 +121,12 @@ func (it *RowIterator) Next(dst interface{}) error {
 	if vl == nil {
 		// This can only happen if dst is a pointer to a struct. We couldn't
 		// set vl above because we need the schema.
-		if err := it.structLoader.set(dst, it.schema); err != nil {
+		if err := it.structLoader.set(dst, it.Schema); err != nil {
 			return err
 		}
 		vl = &it.structLoader
 	}
-	return vl.Load(row, it.schema)
+	return vl.Load(row, it.Schema)
 }
 
 func isStructPtr(x interface{}) bool {
@@ -130,12 +138,13 @@ func isStructPtr(x interface{}) bool {
 func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
 
 func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
-	res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
+	res, err := it.pf(it.ctx, it.table, it.Schema, it.StartIndex, int64(pageSize), pageToken)
 	if err != nil {
 		return "", err
 	}
 	it.rows = append(it.rows, res.rows...)
-	it.schema = res.schema
+	it.Schema = res.schema
+	it.TotalRows = res.totalRows
 	return res.pageToken, nil
 }
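Reviewer note: with Schema and TotalRows now exported on RowIterator, callers can inspect both without a custom ValueLoader. A minimal sketch, with placeholder project, dataset and table names:

    package main

    import (
        "context"
        "fmt"

        "cloud.google.com/go/bigquery"
        "google.golang.org/api/iterator"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id")
        if err != nil {
            // TODO: Handle error.
        }
        it := client.Dataset("my_dataset").Table("my_table").Read(ctx)
        for {
            var row []bigquery.Value
            err := it.Next(&row)
            if err == iterator.Done {
                break
            }
            if err != nil {
                // TODO: Handle error.
            }
            fmt.Println(row)
        }
        // Both fields are populated after the first call to Next.
        fmt.Printf("%d rows total; %d fields per row\n", it.TotalRows, len(it.Schema))
    }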
52  vendor/cloud.google.com/go/bigquery/iterator_test.go  generated  vendored
@@ -64,6 +64,7 @@ func TestIterator(t *testing.T) {
 		want          [][]Value
 		wantErr       error
 		wantSchema    Schema
+		wantTotalRows uint64
 	}{
 		{
 			desc: "Iteration over single empty page",
@@ -87,11 +88,13 @@ func TestIterator(t *testing.T) {
 					pageToken: "",
 					rows:      [][]Value{{1, 2}, {11, 12}},
 					schema:    iiSchema,
+					totalRows: 4,
 				},
 			},
 		},
-		want:       [][]Value{{1, 2}, {11, 12}},
-		wantSchema: iiSchema,
+		want:          [][]Value{{1, 2}, {11, 12}},
+		wantSchema:    iiSchema,
+		wantTotalRows: 4,
 	},
 	{
 		desc: "Iteration over single page with different schema",
@@ -115,6 +118,7 @@ func TestIterator(t *testing.T) {
 					pageToken: "a",
 					rows:      [][]Value{{1, 2}, {11, 12}},
 					schema:    iiSchema,
+					totalRows: 4,
 				},
 			},
 			"a": {
@@ -122,11 +126,13 @@ func TestIterator(t *testing.T) {
 					pageToken: "",
 					rows:      [][]Value{{101, 102}, {111, 112}},
 					schema:    iiSchema,
+					totalRows: 4,
 				},
 			},
 		},
-		want:       [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
-		wantSchema: iiSchema,
+		want:          [][]Value{{1, 2}, {11, 12}, {101, 102}, {111, 112}},
+		wantSchema:    iiSchema,
+		wantTotalRows: 4,
 	},
 	{
 		desc: "Server response includes empty page",
@@ -240,7 +246,7 @@ func TestIterator(t *testing.T) {
 		}
 		it := newRowIterator(context.Background(), nil, pf.fetchPage)
 		it.PageInfo().Token = tc.pageToken
-		values, schema, err := consumeRowIterator(it)
+		values, schema, totalRows, err := consumeRowIterator(it)
 		if err != tc.wantErr {
 			t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
 		}
@@ -250,35 +256,31 @@ func TestIterator(t *testing.T) {
 		if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
 			t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
 		}
+		if totalRows != tc.wantTotalRows {
+			t.Errorf("%s: totalRows: got %d, want %d", tc.desc, totalRows, tc.wantTotalRows)
+		}
 	}
 }
 
-type valueListWithSchema struct {
-	vals   valueList
-	schema Schema
-}
-
-func (v *valueListWithSchema) Load(vs []Value, s Schema) error {
-	v.vals.Load(vs, s)
-	v.schema = s
-	return nil
-}
-
 // consumeRowIterator reads the schema and all values from a RowIterator and returns them.
-func consumeRowIterator(it *RowIterator) ([][]Value, Schema, error) {
-	var got [][]Value
-	var schema Schema
+func consumeRowIterator(it *RowIterator) ([][]Value, Schema, uint64, error) {
+	var (
+		got       [][]Value
+		schema    Schema
+		totalRows uint64
+	)
 	for {
-		var vls valueListWithSchema
+		var vls []Value
 		err := it.Next(&vls)
 		if err == iterator.Done {
-			return got, schema, nil
+			return got, schema, totalRows, nil
 		}
 		if err != nil {
-			return got, schema, err
+			return got, schema, totalRows, err
 		}
-		got = append(got, vls.vals)
-		schema = vls.schema
+		got = append(got, vls)
+		schema = it.Schema
+		totalRows = it.TotalRows
 	}
 }
 
@@ -333,7 +335,7 @@ func TestNextAfterFinished(t *testing.T) {
 	}
 	it := newRowIterator(context.Background(), nil, pf.fetchPage)
 
-	values, _, err := consumeRowIterator(it)
+	values, _, _, err := consumeRowIterator(it)
 	if err != nil {
 		t.Fatal(err)
 	}
11  vendor/cloud.google.com/go/bigquery/job.go  generated  vendored
@@ -261,17 +261,20 @@ func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, strin
 	}
 	destTable := j.config.Query.DestinationTable
 	// The destination table should only be nil if there was a query error.
-	if destTable == nil {
-		return nil, errors.New("bigquery: query job missing destination table")
+	projectID := j.projectID
+	if destTable != nil && projectID != destTable.ProjectId {
+		return nil, fmt.Errorf("bigquery: job project ID is %q, but destination table's is %q", projectID, destTable.ProjectId)
 	}
-	projectID := destTable.ProjectId
 	schema, err := waitForQuery(ctx, projectID)
 	if err != nil {
 		return nil, err
 	}
+	if destTable == nil {
+		return nil, errors.New("bigquery: query job missing destination table")
+	}
 	dt := bqToTable(destTable, j.c)
 	it := newRowIterator(ctx, dt, pf)
-	it.schema = schema
+	it.Schema = schema
 	return it, nil
 }
23  vendor/cloud.google.com/go/bigquery/load.go  generated  vendored
@@ -42,16 +42,20 @@ type LoadConfig struct {
 	// If non-nil, the destination table is partitioned by time.
 	TimePartitioning *TimePartitioning
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
 }
 
 func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
 	config := &bq.JobConfiguration{
 		Labels: l.Labels,
 		Load: &bq.JobConfigurationLoad{
-			CreateDisposition: string(l.CreateDisposition),
-			WriteDisposition:  string(l.WriteDisposition),
-			DestinationTable:  l.Dst.toBQ(),
-			TimePartitioning:  l.TimePartitioning.toBQ(),
+			CreateDisposition:                  string(l.CreateDisposition),
+			WriteDisposition:                   string(l.WriteDisposition),
+			DestinationTable:                   l.Dst.toBQ(),
+			TimePartitioning:                   l.TimePartitioning.toBQ(),
+			DestinationEncryptionConfiguration: l.DestinationEncryptionConfig.toBQ(),
 		},
 	}
 	media := l.Src.populateLoadConfig(config.Load)
@@ -60,11 +64,12 @@ func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
 
 func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
 	lc := &LoadConfig{
-		Labels:            q.Labels,
-		CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
-		WriteDisposition:  TableWriteDisposition(q.Load.WriteDisposition),
-		Dst:               bqToTable(q.Load.DestinationTable, c),
-		TimePartitioning:  bqToTimePartitioning(q.Load.TimePartitioning),
+		Labels:                      q.Labels,
+		CreateDisposition:           TableCreateDisposition(q.Load.CreateDisposition),
+		WriteDisposition:            TableWriteDisposition(q.Load.WriteDisposition),
+		Dst:                         bqToTable(q.Load.DestinationTable, c),
+		TimePartitioning:            bqToTimePartitioning(q.Load.TimePartitioning),
+		DestinationEncryptionConfig: bqToEncryptionConfig(q.Load.DestinationEncryptionConfiguration),
 	}
 	var fc *FileConfig
 	if len(q.Load.SourceUris) == 0 {
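Reviewer note: a usage sketch of the new LoadConfig field. The bucket, dataset, table and key names are placeholders; LoaderFrom, Run and Wait are the package's public API.

    func loadWithCMEK(ctx context.Context, client *bigquery.Client) error {
        gcsRef := bigquery.NewGCSReference("gs://my-bucket/data.csv")
        loader := client.Dataset("my_dataset").Table("my_table").LoaderFrom(gcsRef)
        // Encrypt the destination table with a customer-managed Cloud KMS key.
        loader.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
            KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
        }
        job, err := loader.Run(ctx)
        if err != nil {
            return err
        }
        status, err := job.Wait(ctx)
        if err != nil {
            return err
        }
        return status.Err()
    }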
10  vendor/cloud.google.com/go/bigquery/load_test.go  generated  vendored
@@ -89,10 +89,11 @@ func TestLoad(t *testing.T) {
 		dst:   c.Dataset("dataset-id").Table("table-id"),
 		jobID: "ajob",
 		config: LoadConfig{
-			CreateDisposition: CreateNever,
-			WriteDisposition:  WriteTruncate,
-			Labels:            map[string]string{"a": "b"},
-			TimePartitioning:  &TimePartitioning{Expiration: 1234 * time.Millisecond},
+			CreateDisposition:           CreateNever,
+			WriteDisposition:            WriteTruncate,
+			Labels:                      map[string]string{"a": "b"},
+			TimePartitioning:            &TimePartitioning{Expiration: 1234 * time.Millisecond},
+			DestinationEncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
 		},
 		src: NewGCSReference("uri"),
 		want: func() *bq.Job {
@@ -104,6 +105,7 @@ func TestLoad(t *testing.T) {
 				Type:         "DAY",
 				ExpirationMs: 1234,
 			}
+			j.Configuration.Load.DestinationEncryptionConfiguration = &bq.EncryptionConfiguration{KmsKeyName: "keyName"}
 			j.JobReference = &bq.JobReference{
 				JobId:     "ajob",
 				ProjectId: "client-project-id",
299  vendor/cloud.google.com/go/bigquery/nulls.go  generated  vendored  (new file)
@@ -0,0 +1,299 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"bytes"
	"encoding/json"
	"fmt"
	"reflect"
	"strconv"
	"time"

	"cloud.google.com/go/civil"
)

// NullInt64 represents a BigQuery INT64 that may be NULL.
type NullInt64 struct {
	Int64 int64
	Valid bool // Valid is true if Int64 is not NULL.
}

func (n NullInt64) String() string { return nullstr(n.Valid, n.Int64) }

// NullString represents a BigQuery STRING that may be NULL.
type NullString struct {
	StringVal string
	Valid     bool // Valid is true if StringVal is not NULL.
}

func (n NullString) String() string { return nullstr(n.Valid, n.StringVal) }

// NullFloat64 represents a BigQuery FLOAT64 that may be NULL.
type NullFloat64 struct {
	Float64 float64
	Valid   bool // Valid is true if Float64 is not NULL.
}

func (n NullFloat64) String() string { return nullstr(n.Valid, n.Float64) }

// NullBool represents a BigQuery BOOL that may be NULL.
type NullBool struct {
	Bool  bool
	Valid bool // Valid is true if Bool is not NULL.
}

func (n NullBool) String() string { return nullstr(n.Valid, n.Bool) }

// NullTimestamp represents a BigQuery TIMESTAMP that may be null.
type NullTimestamp struct {
	Timestamp time.Time
	Valid     bool // Valid is true if Time is not NULL.
}

func (n NullTimestamp) String() string { return nullstr(n.Valid, n.Timestamp) }

// NullDate represents a BigQuery DATE that may be null.
type NullDate struct {
	Date  civil.Date
	Valid bool // Valid is true if Date is not NULL.
}

func (n NullDate) String() string { return nullstr(n.Valid, n.Date) }

// NullTime represents a BigQuery TIME that may be null.
type NullTime struct {
	Time  civil.Time
	Valid bool // Valid is true if Time is not NULL.
}

func (n NullTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilTimeString(n.Time)
}

// NullDateTime represents a BigQuery DATETIME that may be null.
type NullDateTime struct {
	DateTime civil.DateTime
	Valid    bool // Valid is true if DateTime is not NULL.
}

func (n NullDateTime) String() string {
	if !n.Valid {
		return "<null>"
	}
	return CivilDateTimeString(n.DateTime)
}

func (n NullInt64) MarshalJSON() ([]byte, error)     { return nulljson(n.Valid, n.Int64) }
func (n NullFloat64) MarshalJSON() ([]byte, error)   { return nulljson(n.Valid, n.Float64) }
func (n NullBool) MarshalJSON() ([]byte, error)      { return nulljson(n.Valid, n.Bool) }
func (n NullString) MarshalJSON() ([]byte, error)    { return nulljson(n.Valid, n.StringVal) }
func (n NullTimestamp) MarshalJSON() ([]byte, error) { return nulljson(n.Valid, n.Timestamp) }
func (n NullDate) MarshalJSON() ([]byte, error)      { return nulljson(n.Valid, n.Date) }

func (n NullTime) MarshalJSON() ([]byte, error) {
	if !n.Valid {
		return jsonNull, nil
	}
	return []byte(`"` + CivilTimeString(n.Time) + `"`), nil
}

func (n NullDateTime) MarshalJSON() ([]byte, error) {
	if !n.Valid {
		return jsonNull, nil
	}
	return []byte(`"` + CivilDateTimeString(n.DateTime) + `"`), nil
}

func nullstr(valid bool, v interface{}) string {
	if !valid {
		return "NULL"
	}
	return fmt.Sprint(v)
}

var jsonNull = []byte("null")

func nulljson(valid bool, v interface{}) ([]byte, error) {
	if !valid {
		return jsonNull, nil
	}
	return json.Marshal(v)
}

func (n *NullInt64) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Int64 = 0
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Int64); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullFloat64) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Float64 = 0
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Float64); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullBool) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Bool = false
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Bool); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullString) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.StringVal = ""
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.StringVal); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullTimestamp) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Timestamp = time.Time{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Timestamp); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullDate) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Date = civil.Date{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	if err := json.Unmarshal(b, &n.Date); err != nil {
		return err
	}
	n.Valid = true
	return nil
}

func (n *NullTime) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.Time = civil.Time{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	s, err := strconv.Unquote(string(b))
	if err != nil {
		return err
	}

	t, err := civil.ParseTime(s)
	if err != nil {
		return err
	}
	n.Time = t

	n.Valid = true
	return nil
}

func (n *NullDateTime) UnmarshalJSON(b []byte) error {
	n.Valid = false
	n.DateTime = civil.DateTime{}
	if bytes.Equal(b, jsonNull) {
		return nil
	}

	s, err := strconv.Unquote(string(b))
	if err != nil {
		return err
	}

	dt, err := parseCivilDateTime(s)
	if err != nil {
		return err
	}
	n.DateTime = dt

	n.Valid = true
	return nil
}

var (
	typeOfNullInt64     = reflect.TypeOf(NullInt64{})
	typeOfNullFloat64   = reflect.TypeOf(NullFloat64{})
	typeOfNullBool      = reflect.TypeOf(NullBool{})
	typeOfNullString    = reflect.TypeOf(NullString{})
	typeOfNullTimestamp = reflect.TypeOf(NullTimestamp{})
	typeOfNullDate      = reflect.TypeOf(NullDate{})
	typeOfNullTime      = reflect.TypeOf(NullTime{})
	typeOfNullDateTime  = reflect.TypeOf(NullDateTime{})
)

func nullableFieldType(t reflect.Type) FieldType {
	switch t {
	case typeOfNullInt64:
		return IntegerFieldType
	case typeOfNullFloat64:
		return FloatFieldType
	case typeOfNullBool:
		return BooleanFieldType
	case typeOfNullString:
		return StringFieldType
	case typeOfNullTimestamp:
		return TimestampFieldType
	case typeOfNullDate:
		return DateFieldType
	case typeOfNullTime:
		return TimeFieldType
	case typeOfNullDateTime:
		return DateTimeFieldType
	default:
		return ""
	}
}
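Reviewer note: the Null wrappers above follow the database/sql convention of a value plus a Valid flag, and the MarshalJSON/UnmarshalJSON methods round-trip NULL as JSON null. A minimal sketch of that behavior:

    package main

    import (
        "encoding/json"
        "fmt"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        present := bigquery.NullInt64{Int64: 42, Valid: true}
        absent := bigquery.NullInt64{} // Valid == false marshals as JSON null

        b1, _ := json.Marshal(present)
        b2, _ := json.Marshal(absent)
        fmt.Println(string(b1), string(b2)) // 42 null

        var n bigquery.NullInt64
        _ = json.Unmarshal([]byte("null"), &n)
        fmt.Println(n.Valid) // false
    }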
73  vendor/cloud.google.com/go/bigquery/nulls_test.go  generated  vendored  (new file)
@@ -0,0 +1,73 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"encoding/json"
	"reflect"
	"testing"

	"cloud.google.com/go/civil"
	"cloud.google.com/go/internal/testutil"
)

var (
	nullsTestTime     = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 1000}
	nullsTestDateTime = civil.DateTime{Date: civil.Date{Year: 2016, Month: 11, Day: 5}, Time: nullsTestTime}
)

func TestNullsJSON(t *testing.T) {
	for _, test := range []struct {
		in   interface{}
		want string
	}{
		{&NullInt64{Valid: true, Int64: 3}, `3`},
		{&NullFloat64{Valid: true, Float64: 3.14}, `3.14`},
		{&NullBool{Valid: true, Bool: true}, `true`},
		{&NullString{Valid: true, StringVal: "foo"}, `"foo"`},
		{&NullTimestamp{Valid: true, Timestamp: testTimestamp}, `"2016-11-05T07:50:22.000000008Z"`},
		{&NullDate{Valid: true, Date: testDate}, `"2016-11-05"`},
		{&NullTime{Valid: true, Time: nullsTestTime}, `"07:50:22.000001"`},
		{&NullDateTime{Valid: true, DateTime: nullsTestDateTime}, `"2016-11-05 07:50:22.000001"`},

		{&NullInt64{}, `null`},
		{&NullFloat64{}, `null`},
		{&NullBool{}, `null`},
		{&NullString{}, `null`},
		{&NullTimestamp{}, `null`},
		{&NullDate{}, `null`},
		{&NullTime{}, `null`},
		{&NullDateTime{}, `null`},
	} {
		bytes, err := json.Marshal(test.in)
		if err != nil {
			t.Fatal(err)
		}
		if got, want := string(bytes), test.want; got != want {
			t.Errorf("%#v: got %s, want %s", test.in, got, want)
		}

		typ := reflect.Indirect(reflect.ValueOf(test.in)).Type()
		value := reflect.New(typ).Interface()
		err = json.Unmarshal(bytes, value)
		if err != nil {
			t.Fatal(err)
		}

		if !testutil.Equal(value, test.in) {
			t.Errorf("%#v: got %#v, want %#v", test.in, value, test.in)
		}
	}
}
9  vendor/cloud.google.com/go/bigquery/params.go  generated  vendored
@@ -20,7 +20,6 @@ import (
 	"fmt"
 	"reflect"
 	"regexp"
-	"strings"
 	"time"
 
 	"cloud.google.com/go/civil"
@@ -205,6 +204,8 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
 
 	case typeOfTime:
 		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
+		// (If we send nanoseconds, then when we try to read the result we get "query job
+		// missing destination table").
 		res.Value = CivilTimeString(v.Interface().(civil.Time))
 		return res, nil
 
@@ -306,11 +307,7 @@ func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType
 	case "TIMESTAMP":
 		return time.Parse(timestampFormat, qval.Value)
 	case "DATETIME":
-		parts := strings.Fields(qval.Value)
-		if len(parts) != 2 {
-			return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
-		}
-		return civil.ParseDateTime(parts[0] + "T" + parts[1])
+		return parseCivilDateTime(qval.Value)
 	default:
 		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
 	}
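Reviewer note: a sketch of passing civil date/time values as query parameters, which the code above serializes. The project, dataset, table and column names are placeholders; QueryParameter's Name and Value fields are the package's public API.

    package main

    import (
        "context"

        "cloud.google.com/go/bigquery"
        "cloud.google.com/go/civil"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id")
        if err != nil {
            // TODO: Handle error.
        }
        q := client.Query("SELECT name FROM `my_dataset.t` WHERE day = @day AND tod = @tod")
        q.Parameters = []bigquery.QueryParameter{
            {Name: "day", Value: civil.Date{Year: 2016, Month: 3, Day: 20}},
            // TIME parameters are sent at microsecond resolution, as noted
            // in paramValue above; nanoseconds are rounded away.
            {Name: "tod", Value: civil.Time{Hour: 15, Minute: 4, Second: 5}},
        }
        // TODO: Call Query.Run or Query.Read.
    }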
6  vendor/cloud.google.com/go/bigquery/params_test.go  generated  vendored
@@ -45,9 +45,9 @@ var scalarTests = []struct {
 	{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
 		"2016-03-20 04:22:09.000005-01:02",
 		timestampParamType},
-	{civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
-	{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
-	{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
+	{civil.Date{Year: 2016, Month: 3, Day: 20}, "2016-03-20", dateParamType},
+	{civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}, "04:05:06.789000", timeParamType},
+	{civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 789000000}},
 		"2016-03-20 04:05:06.789000",
 		dateTimeParamType},
 }
22  vendor/cloud.google.com/go/bigquery/query.go  generated  vendored
@@ -100,6 +100,10 @@ type QueryConfig struct {
 	// It is illegal to mix positional and named syntax.
 	Parameters []QueryParameter
 
+	// TimePartitioning specifies time-based partitioning
+	// for the destination table.
+	TimePartitioning *TimePartitioning
+
 	// The labels associated with this job.
 	Labels map[string]string
 
@@ -111,16 +115,21 @@ type QueryConfig struct {
 	// call LastStatus on the returned job to get statistics. Calling Status on a
 	// dry-run job will fail.
 	DryRun bool
+
+	// Custom encryption configuration (e.g., Cloud KMS keys).
+	DestinationEncryptionConfig *EncryptionConfig
 }
 
 func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
 	qconf := &bq.JobConfigurationQuery{
-		Query:              qc.Q,
-		CreateDisposition:  string(qc.CreateDisposition),
-		WriteDisposition:   string(qc.WriteDisposition),
-		AllowLargeResults:  qc.AllowLargeResults,
-		Priority:           string(qc.Priority),
-		MaximumBytesBilled: qc.MaxBytesBilled,
+		Query:                              qc.Q,
+		CreateDisposition:                  string(qc.CreateDisposition),
+		WriteDisposition:                   string(qc.WriteDisposition),
+		AllowLargeResults:                  qc.AllowLargeResults,
+		Priority:                           string(qc.Priority),
+		MaximumBytesBilled:                 qc.MaxBytesBilled,
+		TimePartitioning:                   qc.TimePartitioning.toBQ(),
+		DestinationEncryptionConfiguration: qc.DestinationEncryptionConfig.toBQ(),
 	}
 	if len(qc.TableDefinitions) > 0 {
 		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
@@ -188,6 +197,7 @@ func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
 		MaxBytesBilled:   qq.MaximumBytesBilled,
 		UseLegacySQL:     qq.UseLegacySql,
 		UseStandardSQL:   !qq.UseLegacySql,
+		TimePartitioning: bqToTimePartitioning(qq.TimePartitioning),
 	}
 	if len(qq.TableDefinitions) > 0 {
 		qc.TableDefinitions = make(map[string]ExternalData)
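Reviewer note: a sketch combining the two new QueryConfig fields. Query embeds QueryConfig, so the fields are set directly on the query; the project, dataset, table and key names are placeholders.

    package main

    import (
        "context"
        "time"

        "cloud.google.com/go/bigquery"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "project-id")
        if err != nil {
            // TODO: Handle error.
        }
        q := client.Query("SELECT ts, v FROM `my_dataset.events`")
        q.Dst = client.Dataset("my_dataset").Table("events_by_day")
        // Partition the destination table by day, expiring partitions after 90 days.
        q.TimePartitioning = &bigquery.TimePartitioning{Expiration: 90 * 24 * time.Hour}
        // Optionally encrypt the destination with a Cloud KMS key.
        q.DestinationEncryptionConfig = &bigquery.EncryptionConfig{
            KMSKeyName: "projects/P/locations/L/keyRings/R/cryptoKeys/K",
        }
        // TODO: Call q.Run or q.Read.
    }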
9  vendor/cloud.google.com/go/bigquery/query_test.go  generated  vendored
@@ -16,6 +16,7 @@ package bigquery
 
 import (
 	"testing"
+	"time"
 
 	"github.com/google/go-cmp/cmp"
 
@@ -350,6 +351,8 @@ func TestConfiguringQuery(t *testing.T) {
 	query.JobID = "ajob"
 	query.DefaultProjectID = "def-project-id"
 	query.DefaultDatasetID = "def-dataset-id"
+	query.TimePartitioning = &TimePartitioning{Expiration: 1234 * time.Second, Field: "f"}
+	query.DestinationEncryptionConfig = &EncryptionConfig{KMSKeyName: "keyName"}
 	// Note: Other configuration fields are tested in other tests above.
 	// A lot of that can be consolidated once Client.Copy is gone.
 
@@ -361,8 +364,10 @@ func TestConfiguringQuery(t *testing.T) {
 				ProjectId: "def-project-id",
 				DatasetId: "def-dataset-id",
 			},
-			UseLegacySql:    false,
-			ForceSendFields: []string{"UseLegacySql"},
+			UseLegacySql:                       false,
+			TimePartitioning:                   &bq.TimePartitioning{ExpirationMs: 1234000, Field: "f", Type: "DAY"},
+			DestinationEncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
+			ForceSendFields:                    []string{"UseLegacySql"},
 		},
 	},
 	JobReference: &bq.JobReference{
108  vendor/cloud.google.com/go/bigquery/schema.go  generated  vendored
@@ -122,21 +122,66 @@ var (
 	errNoStruct             = errors.New("bigquery: can only infer schema from struct or pointer to struct")
 	errUnsupportedFieldType = errors.New("bigquery: unsupported type of field in struct")
 	errInvalidFieldName     = errors.New("bigquery: invalid name of field in struct")
+	errBadNullable          = errors.New(`bigquery: use "nullable" only for []byte and struct pointers; for all other types, use a NullXXX type`)
 )
 
 var typeOfByteSlice = reflect.TypeOf([]byte{})
 
 // InferSchema tries to derive a BigQuery schema from the supplied struct value.
-// NOTE: All fields in the returned Schema are configured to be required,
-// unless the corresponding field in the supplied struct is a slice or array.
+// Each exported struct field is mapped to a field in the schema.
 //
-// It is considered an error if the struct (including nested structs) contains
-// any exported fields that are pointers or one of the following types:
-// uint, uint64, uintptr, map, interface, complex64, complex128, func, chan.
-// In these cases, an error will be returned.
-// Future versions may handle these cases without error.
+// The following BigQuery types are inferred from the corresponding Go types.
+// (This is the same mapping as that used for RowIterator.Next.) Fields inferred
+// from these types are marked required (non-nullable).
+//
+//   STRING      string
+//   BOOL        bool
+//   INTEGER     int, int8, int16, int32, int64, uint8, uint16, uint32
+//   FLOAT       float32, float64
+//   BYTES       []byte
+//   TIMESTAMP   time.Time
+//   DATE        civil.Date
+//   TIME        civil.Time
+//   DATETIME    civil.DateTime
+//
+// A Go slice or array type is inferred to be a BigQuery repeated field of the
+// element type. The element type must be one of the above listed types.
+//
+// Nullable fields are inferred from the NullXXX types, declared in this package:
+//
+//   STRING      NullString
+//   BOOL        NullBool
+//   INTEGER     NullInt64
+//   FLOAT       NullFloat64
+//   TIMESTAMP   NullTimestamp
+//   DATE        NullDate
+//   TIME        NullTime
+//   DATETIME    NullDateTime
+//
+// For a nullable BYTES field, use the type []byte and tag the field "nullable" (see below).
+//
+// A struct field that is of struct type is inferred to be a required field of type
+// RECORD with a schema inferred recursively. For backwards compatibility, a field of
+// type pointer to struct is also inferred to be required. To get a nullable RECORD
+// field, use the "nullable" tag (see below).
+//
+// InferSchema returns an error if any of the examined fields is of type uint,
+// uint64, uintptr, map, interface, complex64, complex128, func, or chan. Future
+// versions may handle these cases without error.
+//
+// Recursively defined structs are also disallowed.
 //
 // Struct fields may be tagged in a way similar to the encoding/json package.
 // A tag of the form
 //     bigquery:"name"
 // uses "name" instead of the struct field name as the BigQuery field name.
 // A tag of the form
 //     bigquery:"-"
 // omits the field from the inferred schema.
+// The "nullable" option marks the field as nullable (not required). It is only
+// needed for []byte and pointer-to-struct fields, and cannot appear on other
+// fields. In this example, the Go name of the field is retained:
+//     bigquery:",nullable"
 func InferSchema(st interface{}) (Schema, error) {
 	return inferSchemaReflectCached(reflect.TypeOf(st))
 }
@@ -186,20 +231,27 @@ func inferStruct(t reflect.Type) (Schema, error) {
 
 // inferFieldSchema infers the FieldSchema for a Go type
 func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
+	// Only []byte and struct pointers can be tagged nullable.
+	if nullable && !(rt == typeOfByteSlice || rt.Kind() == reflect.Ptr && rt.Elem().Kind() == reflect.Struct) {
+		return nil, errBadNullable
+	}
 	switch rt {
 	case typeOfByteSlice:
 		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
 	case typeOfGoTime:
-		return &FieldSchema{Required: !nullable, Type: TimestampFieldType}, nil
+		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
 	case typeOfDate:
-		return &FieldSchema{Required: !nullable, Type: DateFieldType}, nil
+		return &FieldSchema{Required: true, Type: DateFieldType}, nil
 	case typeOfTime:
-		return &FieldSchema{Required: !nullable, Type: TimeFieldType}, nil
+		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
 	case typeOfDateTime:
-		return &FieldSchema{Required: !nullable, Type: DateTimeFieldType}, nil
+		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
 	}
-	if isSupportedIntType(rt) {
-		return &FieldSchema{Required: !nullable, Type: IntegerFieldType}, nil
+	if ft := nullableFieldType(rt); ft != "" {
+		return &FieldSchema{Required: false, Type: ft}, nil
+	}
+	if isSupportedIntType(rt) || isSupportedUintType(rt) {
+		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
 	}
 	switch rt.Kind() {
 	case reflect.Slice, reflect.Array:
@@ -208,7 +260,10 @@ func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
 			// Multi dimensional slices/arrays are not supported by BigQuery
 			return nil, errUnsupportedFieldType
 		}
-
+		if nullableFieldType(et) != "" {
+			// Repeated nullable types are not supported by BigQuery.
+			return nil, errUnsupportedFieldType
+		}
 		f, err := inferFieldSchema(et, false)
 		if err != nil {
 			return nil, err
@@ -216,7 +271,12 @@ func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
 		f.Repeated = true
 		f.Required = false
 		return f, nil
-	case reflect.Struct, reflect.Ptr:
+	case reflect.Ptr:
+		if rt.Elem().Kind() != reflect.Struct {
+			return nil, errUnsupportedFieldType
+		}
+		fallthrough
+	case reflect.Struct:
 		nested, err := inferStruct(rt)
 		if err != nil {
 			return nil, err
@@ -258,12 +318,22 @@ func inferFields(rt reflect.Type) (Schema, error) {
 	return s, nil
 }
 
-// isSupportedIntType reports whether t can be properly represented by the
-// BigQuery INTEGER/INT64 type.
+// isSupportedIntType reports whether t is an int type that can be properly
+// represented by the BigQuery INTEGER/INT64 type.
 func isSupportedIntType(t reflect.Type) bool {
 	switch t.Kind() {
-	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int,
-		reflect.Uint8, reflect.Uint16, reflect.Uint32:
+	case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
 		return true
 	default:
 		return false
 	}
 }
 
+// isSupportedUintType reports whether t is a uint type that can be properly
+// represented by the BigQuery INTEGER/INT64 type.
+func isSupportedUintType(t reflect.Type) bool {
+	switch t.Kind() {
+	case reflect.Uint8, reflect.Uint16, reflect.Uint32:
+		return true
+	default:
+		return false
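Reviewer note: the new errBadNullable check rejects the "nullable" tag everywhere except []byte and struct-pointer fields. A sketch of both sides of the rule; the type and field names are illustrative only.

    package main

    import (
        "fmt"

        "cloud.google.com/go/bigquery"
    )

    type ok struct {
        Blob []byte           `bigquery:",nullable"` // allowed: nullable BYTES
        Rec  *struct{ N int } `bigquery:",nullable"` // allowed: nullable RECORD
    }

    type bad struct {
        Count int `bigquery:",nullable"` // rejected: use bigquery.NullInt64 instead
    }

    func main() {
        if _, err := bigquery.InferSchema(ok{}); err != nil {
            fmt.Println("unexpected:", err)
        }
        if _, err := bigquery.InferSchema(bad{}); err != nil {
            fmt.Println(err) // bigquery: use "nullable" only for []byte and struct pointers; ...
        }
    }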
111  vendor/cloud.google.com/go/bigquery/schema_test.go  generated  vendored
@@ -248,6 +248,14 @@ func reqField(name, typ string) *FieldSchema {
    }
}

func optField(name, typ string) *FieldSchema {
    return &FieldSchema{
        Name:     name,
        Type:     FieldType(typ),
        Required: false,
    }
}

func TestSimpleInference(t *testing.T) {
    testCases := []struct {
        in interface{}
@@ -491,6 +499,37 @@ func TestRepeatedInference(t *testing.T) {
    }
}

type allNulls struct {
    A NullInt64
    B NullFloat64
    C NullBool
    D NullString
    E NullTimestamp
    F NullTime
    G NullDate
    H NullDateTime
}

func TestNullInference(t *testing.T) {
    got, err := InferSchema(allNulls{})
    if err != nil {
        t.Fatal(err)
    }
    want := Schema{
        optField("A", "INTEGER"),
        optField("B", "FLOAT"),
        optField("C", "BOOLEAN"),
        optField("D", "STRING"),
        optField("E", "TIMESTAMP"),
        optField("F", "TIME"),
        optField("G", "DATE"),
        optField("H", "DATETIME"),
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Error(diff)
    }
}

type Embedded struct {
    Embedded int
}
@@ -532,11 +571,11 @@ func TestRecursiveInference(t *testing.T) {

type withTags struct {
    NoTag         int
    ExcludeTag    int `bigquery:"-"`
    SimpleTag     int `bigquery:"simple_tag"`
    UnderscoreTag int `bigquery:"_id"`
    MixedCase     int `bigquery:"MIXEDcase"`
    Nullable      int `bigquery:",nullable"`
    ExcludeTag    int    `bigquery:"-"`
    SimpleTag     int    `bigquery:"simple_tag"`
    UnderscoreTag int    `bigquery:"_id"`
    MixedCase     int    `bigquery:"MIXEDcase"`
    Nullable      []byte `bigquery:",nullable"`
}

type withTagsNested struct {
@@ -545,6 +584,8 @@ type withTagsNested struct {
        ExcludeTag int `bigquery:"-"`
        Inside     int `bigquery:"inside"`
    } `bigquery:"anon"`
    PNested         *struct{ X int } // not nullable, for backwards compatibility
    PNestedNullable *struct{ X int } `bigquery:",nullable"`
}

type withTagsRepeated struct {
@@ -564,7 +605,7 @@ var withTagsSchema = Schema{
    reqField("simple_tag", "INTEGER"),
    reqField("_id", "INTEGER"),
    reqField("MIXEDcase", "INTEGER"),
    {Name: "Nullable", Type: FieldType("INTEGER"), Required: false},
    optField("Nullable", "BYTES"),
}

func TestTagInference(t *testing.T) {
@@ -591,6 +632,18 @@ func TestTagInference(t *testing.T) {
                Type:   "RECORD",
                Schema: Schema{reqField("inside", "INTEGER")},
            },
            &FieldSchema{
                Name:     "PNested",
                Required: true,
                Type:     "RECORD",
                Schema:   Schema{reqField("X", "INTEGER")},
            },
            &FieldSchema{
                Name:     "PNestedNullable",
                Required: false,
                Type:     "RECORD",
                Schema:   Schema{reqField("X", "INTEGER")},
            },
        },
    },
    {
@@ -724,7 +777,7 @@ func TestSchemaErrors(t *testing.T) {
    },
    {
        in:  struct{ Ptr *int }{},
        err: errNoStruct,
        err: errUnsupportedFieldType,
    },
    {
        in: struct{ Interface interface{} }{},
@@ -738,6 +791,14 @@ func TestSchemaErrors(t *testing.T) {
        in:  struct{ MultiDimensional [][][]byte }{},
        err: errUnsupportedFieldType,
    },
    {
        in:  struct{ SliceOfPointer []*int }{},
        err: errUnsupportedFieldType,
    },
    {
        in:  struct{ SliceOfNull []NullInt64 }{},
        err: errUnsupportedFieldType,
    },
    {
        in:  struct{ ChanSlice []chan bool }{},
        err: errUnsupportedFieldType,
@@ -746,6 +807,42 @@ func TestSchemaErrors(t *testing.T) {
        in:  struct{ NestedChan struct{ Chan []chan bool } }{},
        err: errUnsupportedFieldType,
    },
    {
        in: struct {
            X int `bigquery:",nullable"`
        }{},
        err: errBadNullable,
    },
    {
        in: struct {
            X bool `bigquery:",nullable"`
        }{},
        err: errBadNullable,
    },
    {
        in: struct {
            X struct{ N int } `bigquery:",nullable"`
        }{},
        err: errBadNullable,
    },
    {
        in: struct {
            X []int `bigquery:",nullable"`
        }{},
        err: errBadNullable,
    },
    {
        in:  struct{ X *[]byte }{},
        err: errUnsupportedFieldType,
    },
    {
        in:  struct{ X *[]int }{},
        err: errUnsupportedFieldType,
    },
    {
        in:  struct{ X *int }{},
        err: errUnsupportedFieldType,
    },
}
for _, tc := range testCases {
    want := tc.err
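To make the rule these tests encode concrete, a small hedged example with invented type names: the `,nullable` tag is accepted on []byte and pointer-to-struct fields, the Null* wrapper types infer as nullable on their own, and the tag on a plain scalar is rejected with errBadNullable.

package main

import "cloud.google.com/go/bigquery"

// ok mirrors the patterns TestTagInference accepts.
type ok struct {
    Score bigquery.NullInt64 // Null* wrappers infer with Required: false
    Blob  []byte             `bigquery:",nullable"` // nullable BYTES
    Rec   *struct{ X int }   `bigquery:",nullable"` // nullable RECORD
}

// bad mirrors the errBadNullable cases in TestSchemaErrors.
type bad struct {
    N int `bigquery:",nullable"` // plain scalars cannot be nullable
}

func main() {
    if _, err := bigquery.InferSchema(ok{}); err != nil {
        panic(err)
    }
    if _, err := bigquery.InferSchema(bad{}); err == nil {
        panic("expected an error for the bad nullable tag")
    }
}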
31 vendor/cloud.google.com/go/bigquery/table.go generated vendored
@@ -76,6 +76,9 @@ type TableMetadata struct {
    // Information about a table stored outside of BigQuery.
    ExternalDataConfig *ExternalDataConfig

    // Custom encryption configuration (e.g., Cloud KMS keys).
    EncryptionConfig *EncryptionConfig

    // All the fields below are read-only.

    FullID string // An opaque ID uniquely identifying the table.
@@ -175,6 +178,32 @@ func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
    }
}

// EncryptionConfig configures customer-managed encryption on tables.
type EncryptionConfig struct {
    // Describes the Cloud KMS encryption key that will be used to protect the
    // destination BigQuery table. The BigQuery Service Account associated with your
    // project requires access to this encryption key.
    KMSKeyName string
}

func (e *EncryptionConfig) toBQ() *bq.EncryptionConfiguration {
    if e == nil {
        return nil
    }
    return &bq.EncryptionConfiguration{
        KmsKeyName: e.KMSKeyName,
    }
}

func bqToEncryptionConfig(q *bq.EncryptionConfiguration) *EncryptionConfig {
    if q == nil {
        return nil
    }
    return &EncryptionConfig{
        KMSKeyName: q.KmsKeyName,
    }
}

// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
    // A lower-bound estimate of the number of bytes currently in the streaming
@@ -265,6 +294,7 @@ func (tm *TableMetadata) toBQ() (*bq.Table, error) {
        edc := tm.ExternalDataConfig.toBQ()
        t.ExternalDataConfiguration = &edc
    }
    t.EncryptionConfiguration = tm.EncryptionConfig.toBQ()
    if tm.FullID != "" {
        return nil, errors.New("cannot set FullID on create")
    }
@@ -320,6 +350,7 @@ func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
        CreationTime:     unixMillisToTime(t.CreationTime),
        LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
        ETag:             t.Etag,
        EncryptionConfig: bqToEncryptionConfig(t.EncryptionConfiguration),
    }
    if t.Schema != nil {
        md.Schema = bqToSchema(t.Schema)
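A hedged usage sketch of the new EncryptionConfig (not from the diff; it assumes the Create(ctx, *TableMetadata) signature used by this vendored revision, and every project, dataset and key name is a placeholder):

package main

import (
    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project")
    if err != nil {
        panic(err)
    }
    // Create a table protected by a customer-managed Cloud KMS key.
    table := client.Dataset("my_dataset").Table("protected")
    err = table.Create(ctx, &bigquery.TableMetadata{
        EncryptionConfig: &bigquery.EncryptionConfig{
            KMSKeyName: "projects/my-project/locations/us/keyRings/ring/cryptoKeys/key",
        },
    })
    if err != nil {
        panic(err)
    }
}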
6 vendor/cloud.google.com/go/bigquery/table_test.go generated vendored
@@ -53,6 +53,7 @@ func TestBQToTableMetadata(t *testing.T) {
                Type:  "DAY",
                Field: "pfield",
            },
            EncryptionConfiguration: &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
            Type:   "EXTERNAL",
            View:   &bq.ViewDefinition{Query: "view-query"},
            Labels: map[string]string{"a": "b"},
@@ -82,7 +83,8 @@ func TestBQToTableMetadata(t *testing.T) {
                EstimatedRows:   3,
                OldestEntryTime: aTime,
            },
            ETag: "etag",
            EncryptionConfig: &EncryptionConfig{KMSKeyName: "keyName"},
            ETag:             "etag",
        },
    },
} {
@@ -115,6 +117,7 @@ func TestTableMetadataToBQ(t *testing.T) {
            ExpirationTime:     aTime,
            Labels:             map[string]string{"a": "b"},
            ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
            EncryptionConfig:   &EncryptionConfig{KMSKeyName: "keyName"},
        },
        &bq.Table{
            FriendlyName: "n",
@@ -127,6 +130,7 @@ func TestTableMetadataToBQ(t *testing.T) {
            ExpirationTime:            aTimeMillis,
            Labels:                    map[string]string{"a": "b"},
            ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
            EncryptionConfiguration:   &bq.EncryptionConfiguration{KmsKeyName: "keyName"},
        },
    },
    {
3 vendor/cloud.google.com/go/bigquery/uploader.go generated vendored
@@ -54,6 +54,9 @@ type Uploader struct {

// Uploader returns an Uploader that can be used to append rows to t.
// The returned Uploader may optionally be further configured before its Put method is called.
//
// To stream rows into a date-partitioned table at a particular date, add the
// $yyyymmdd suffix to the table name when constructing the Table.
func (t *Table) Uploader() *Uploader {
    return &Uploader{t: t}
}
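A short sketch of the documented $yyyymmdd suffix (placeholder names; it assumes the date-partitioned table "visits" already exists):

package main

import (
    "golang.org/x/net/context"

    "cloud.google.com/go/bigquery"
)

type visit struct {
    Page string
    Hits int
}

func main() {
    ctx := context.Background()
    client, err := bigquery.NewClient(ctx, "my-project")
    if err != nil {
        panic(err)
    }
    // Stream rows directly into the 2018-03-01 partition.
    u := client.Dataset("logs").Table("visits$20180301").Uploader()
    if err := u.Put(ctx, []visit{{Page: "/", Hits: 1}}); err != nil {
        panic(err)
    }
}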
130 vendor/cloud.google.com/go/bigquery/value.go generated vendored
@@ -18,8 +18,10 @@ import (
    "encoding/base64"
    "errors"
    "fmt"
    "math"
    "reflect"
    "strconv"
    "strings"
    "time"

    "cloud.google.com/go/civil"
@@ -63,6 +65,8 @@ func loadMap(m map[string]Value, vals []Value, s Schema) {
        val := vals[i]
        var v interface{}
        switch {
        case val == nil:
            v = val
        case f.Schema == nil:
            v = val
        case !f.Repeated:
@@ -79,6 +83,7 @@ func loadMap(m map[string]Value, vals []Value, s Schema) {
            }
            v = vs
        }

        m[f.Name] = v
    }
}
@@ -125,6 +130,18 @@ func setInt(v reflect.Value, x interface{}) error {
    return nil
}

func setUint(v reflect.Value, x interface{}) error {
    if x == nil {
        return errNoNulls
    }
    xx := x.(int64)
    if xx < 0 || v.OverflowUint(uint64(xx)) {
        return fmt.Errorf("bigquery: value %v overflows struct field of type %v", xx, v.Type())
    }
    v.SetUint(uint64(xx))
    return nil
}

func setFloat(v reflect.Value, x interface{}) error {
    if x == nil {
        return errNoNulls
@@ -155,9 +172,20 @@ func setString(v reflect.Value, x interface{}) error {

func setBytes(v reflect.Value, x interface{}) error {
    if x == nil {
        return errNoNulls
        v.SetBytes(nil)
    } else {
        v.SetBytes(x.([]byte))
    }
    return nil
}

func setNull(v reflect.Value, x interface{}, build func() interface{}) error {
    if x == nil {
        v.Set(reflect.Zero(v.Type()))
    } else {
        n := build()
        v.Set(reflect.ValueOf(n))
    }
    v.SetBytes(x.([]byte))
    return nil
}

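The setNull helper added above zeroes the destination on NULL and otherwise stores a wrapper built by a callback. A self-contained sketch of the same pattern using a local stand-in type (not the vendored one):

package main

import (
    "fmt"
    "reflect"
)

type NullString struct {
    StringVal string
    Valid     bool
}

func setNull(v reflect.Value, x interface{}, build func() interface{}) {
    if x == nil {
        v.Set(reflect.Zero(v.Type())) // NULL -> zero value, Valid == false
    } else {
        v.Set(reflect.ValueOf(build())) // non-NULL -> Valid wrapper
    }
}

func main() {
    var s NullString
    setNull(reflect.ValueOf(&s).Elem(), "hi", func() interface{} {
        return NullString{StringVal: "hi", Valid: true}
    })
    fmt.Println(s) // {hi true}
    setNull(reflect.ValueOf(&s).Elem(), nil, nil)
    fmt.Println(s) // { false}
}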
@@ -228,7 +256,7 @@ func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, error) {
            return nil, err
        }
        op.setFunc = func(v reflect.Value, val interface{}) error {
            return setNested(nested, v, val.([]Value))
            return setNested(nested, v, val)
        }
    } else {
        op.setFunc = determineSetFunc(t, schemaField.Type)
@@ -253,6 +281,13 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
        if ftype.Kind() == reflect.String {
            return setString
        }
        if ftype == typeOfNullString {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullString{StringVal: x.(string), Valid: true}
                })
            }
        }

    case BytesFieldType:
        if ftype == typeOfByteSlice {
@@ -260,40 +295,91 @@ func determineSetFunc(ftype reflect.Type, stype FieldType) setFunc {
        }

    case IntegerFieldType:
        if isSupportedIntType(ftype) {
        if isSupportedUintType(ftype) {
            return setUint
        } else if isSupportedIntType(ftype) {
            return setInt
        }
        if ftype == typeOfNullInt64 {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullInt64{Int64: x.(int64), Valid: true}
                })
            }
        }

    case FloatFieldType:
        switch ftype.Kind() {
        case reflect.Float32, reflect.Float64:
            return setFloat
        }
        if ftype == typeOfNullFloat64 {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullFloat64{Float64: x.(float64), Valid: true}
                })
            }
        }

    case BooleanFieldType:
        if ftype.Kind() == reflect.Bool {
            return setBool
        }
        if ftype == typeOfNullBool {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullBool{Bool: x.(bool), Valid: true}
                })
            }
        }

    case TimestampFieldType:
        if ftype == typeOfGoTime {
            return setAny
        }
        if ftype == typeOfNullTimestamp {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullTimestamp{Timestamp: x.(time.Time), Valid: true}
                })
            }
        }

    case DateFieldType:
        if ftype == typeOfDate {
            return setAny
        }
        if ftype == typeOfNullDate {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullDate{Date: x.(civil.Date), Valid: true}
                })
            }
        }

    case TimeFieldType:
        if ftype == typeOfTime {
            return setAny
        }
        if ftype == typeOfNullTime {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullTime{Time: x.(civil.Time), Valid: true}
                })
            }
        }

    case DateTimeFieldType:
        if ftype == typeOfDateTime {
            return setAny
        }
        if ftype == typeOfNullDateTime {
            return func(v reflect.Value, x interface{}) error {
                return setNull(v, x, func() interface{} {
                    return NullDateTime{DateTime: x.(civil.DateTime), Valid: true}
                })
            }
        }
    }
    return nil
}
@@ -323,16 +409,21 @@ func runOps(ops []structLoaderOp, vstruct reflect.Value, values []Value) error {
    return nil
}

func setNested(ops []structLoaderOp, v reflect.Value, vals []Value) error {
func setNested(ops []structLoaderOp, v reflect.Value, val interface{}) error {
    // v is either a struct or a pointer to a struct.
    if v.Kind() == reflect.Ptr {
        // If the value is nil, set the pointer to nil.
        if val == nil {
            v.Set(reflect.Zero(v.Type()))
            return nil
        }
        // If the pointer is nil, set it to a zero struct value.
        if v.IsNil() {
            v.Set(reflect.New(v.Type().Elem()))
        }
        v = v.Elem()
    }
    return runOps(ops, v, vals)
    return runOps(ops, v, val.([]Value))
}

func setRepeated(field reflect.Value, vslice []Value, setElem setFunc) error {
@@ -404,6 +495,10 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {

    m := make(map[string]Value)
    for i, fieldSchema := range schema {
        if vs[i] == nil {
            m[fieldSchema.Name] = nil
            continue
        }
        if fieldSchema.Type != RecordFieldType {
            m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
            continue
@@ -550,10 +645,16 @@ func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
    switch fs.Type {
    case TimeFieldType:
        if v.Type() == typeOfNullTime {
            return v.Interface()
        }
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilTimeString(v.Interface().(civil.Time))
        })
    case DateTimeFieldType:
        if v.Type() == typeOfNullDateTime {
            return v.Interface()
        }
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilDateTimeString(v.Interface().(civil.DateTime))
        })
@@ -607,6 +708,16 @@ func CivilDateTimeString(dt civil.DateTime) string {
    return dt.Date.String() + " " + CivilTimeString(dt.Time)
}

// parseCivilDateTime parses a date-time represented in a BigQuery SQL
// compatible format and returns a civil.DateTime.
func parseCivilDateTime(s string) (civil.DateTime, error) {
    parts := strings.Fields(s)
    if len(parts) != 2 {
        return civil.DateTime{}, fmt.Errorf("bigquery: bad DATETIME value %q", s)
    }
    return civil.ParseDateTime(parts[0] + "T" + parts[1])
}

// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.
@@ -705,7 +816,12 @@ func convertBasicType(val string, typ FieldType) (Value, error) {
        return strconv.ParseBool(val)
    case TimestampFieldType:
        f, err := strconv.ParseFloat(val, 64)
        return Value(time.Unix(0, int64(f*1e9)).UTC()), err
        if err != nil {
            return nil, err
        }
        secs := math.Trunc(f)
        nanos := (f - secs) * 1e9
        return Value(time.Unix(int64(secs), int64(nanos)).UTC()), nil
    case DateFieldType:
        return civil.ParseDate(val)
    case TimeFieldType:
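The TIMESTAMP change above is an overflow fix: converting f*1e9 straight to int64 overflows for timestamps before roughly the year 1678 (|seconds| greater than about 9.2e9), which is what TestConvertSmallTimes in the next file exercises. A worked sketch of the failure and the fix:

package main

import (
    "fmt"
    "math"
    "time"
)

func main() {
    // Year 1 is about -62,135,596,800 Unix seconds; times 1e9 that is
    // roughly -6.2e19, outside the int64 range of about +/-9.22e18.
    f := float64(time.Date(1, time.January, 1, 0, 0, 0, 0, time.UTC).Unix())

    old := time.Unix(0, int64(f*1e9)).UTC() // overflows: result is garbage

    secs := math.Trunc(f)
    nanos := (f - secs) * 1e9
    fixed := time.Unix(int64(secs), int64(nanos)).UTC()

    fmt.Println(old)   // wrapped-around nonsense
    fmt.Println(fixed) // 0001-01-01 00:00:00 +0000 UTC
}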
240 vendor/cloud.google.com/go/bigquery/value_test.go generated vendored
@@ -24,7 +24,6 @@ import (
    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
@@ -89,6 +88,20 @@ func TestConvertTime(t *testing.T) {
    }
}

func TestConvertSmallTimes(t *testing.T) {
    for _, year := range []int{1600, 1066, 1} {
        want := time.Date(year, time.January, 1, 0, 0, 0, 0, time.UTC)
        s := fmt.Sprintf("%.10f", float64(want.Unix()))
        got, err := convertBasicType(s, TimestampFieldType)
        if err != nil {
            t.Fatal(err)
        }
        if !got.(time.Time).Equal(want) {
            t.Errorf("got %v, want %v", got, want)
        }
    }
}

func TestConvertNullValues(t *testing.T) {
    schema := []*FieldSchema{
        {Type: StringFieldType},
@@ -393,7 +406,10 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
            },
            InsertID: "iid",
            Row: []Value{1, "a",
                civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}},
                civil.DateTime{
                    Date: civil.Date{Year: 1, Month: 2, Day: 3},
                    Time: civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 7000}},
            },
        },
        wantInsertID: "iid",
        wantRow: map[string]Value{"intField": 1, "strField": "a",
@@ -512,6 +528,7 @@ func TestStructSaver(t *testing.T) {
        {Name: "rnested", Type: RecordFieldType, Repeated: true, Schema: Schema{
            {Name: "b", Type: BooleanFieldType},
        }},
        {Name: "p", Type: IntegerFieldType, Required: false},
    }

    type (
@@ -523,6 +540,7 @@ func TestStructSaver(t *testing.T) {
            TR      []civil.Time
            Nested  *N
            Rnested []*N
            P       NullInt64
        }
    )

@@ -539,12 +557,13 @@ func TestStructSaver(t *testing.T) {
        if wantIID := "iid"; gotIID != wantIID {
            t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
        }
        if !testutil.Equal(got, want) {
            t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
        if diff := testutil.Diff(got, want); diff != "" {
            t.Errorf("%s: %s", msg, diff)
        }
    }
    ct1 := civil.Time{1, 2, 3, 4000}
    ct2 := civil.Time{5, 6, 7, 8000}

    ct1 := civil.Time{Hour: 1, Minute: 2, Second: 3, Nanosecond: 4000}
    ct2 := civil.Time{Hour: 5, Minute: 6, Second: 7, Nanosecond: 8000}
    in := T{
        S: "x",
        R: []int{1, 2},
@@ -552,6 +571,7 @@ func TestStructSaver(t *testing.T) {
        TR:      []civil.Time{ct1, ct2},
        Nested:  &N{B: true},
        Rnested: []*N{{true}, {false}},
        P:       NullInt64{Valid: true, Int64: 17},
    }
    want := map[string]Value{
        "s": "x",
@@ -560,10 +580,11 @@ func TestStructSaver(t *testing.T) {
        "tr":      []string{"01:02:03.000004", "05:06:07.000008"},
        "nested":  map[string]Value{"b": true},
        "rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
        "p":       NullInt64{Valid: true, Int64: 17},
    }
    check("all values", in, want)
    check("all values, ptr", &in, want)
    check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"})
    check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00", "p": NullInt64{}})

    // Missing and extra fields ignored.
    type T2 struct {
@@ -577,6 +598,7 @@ func TestStructSaver(t *testing.T) {
        map[string]Value{
            "s": "",
            "t": "00:00:00",
            "p": NullInt64{},
            "rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
        })
}
@@ -713,6 +735,22 @@ func TestValueMap(t *testing.T) {
        t.Errorf("got\n%+v\nwant\n%+v", vm, want)
    }

    in = make([]Value, len(schema))
    want = map[string]Value{
        "s":  nil,
        "i":  nil,
        "f":  nil,
        "b":  nil,
        "n":  nil,
        "rn": nil,
    }
    var vm2 valueMap
    if err := vm2.Load(in, schema); err != nil {
        t.Fatal(err)
    }
    if !testutil.Equal(vm2, valueMap(want)) {
        t.Errorf("got\n%+v\nwant\n%+v", vm2, want)
    }
}

var (
@@ -722,6 +760,7 @@ var (
    {Name: "s2", Type: StringFieldType},
    {Name: "by", Type: BytesFieldType},
    {Name: "I", Type: IntegerFieldType},
    {Name: "U", Type: IntegerFieldType},
    {Name: "F", Type: FloatFieldType},
    {Name: "B", Type: BooleanFieldType},
    {Name: "TS", Type: TimestampFieldType},
@@ -736,11 +775,11 @@ var (
    }

    testTimestamp = time.Date(2016, 11, 5, 7, 50, 22, 8, time.UTC)
    testDate      = civil.Date{2016, 11, 5}
    testTime      = civil.Time{7, 50, 22, 8}
    testDateTime  = civil.DateTime{testDate, testTime}
    testDate      = civil.Date{Year: 2016, Month: 11, Day: 5}
    testTime      = civil.Time{Hour: 7, Minute: 50, Second: 22, Nanosecond: 8}
    testDateTime  = civil.DateTime{Date: testDate, Time: testTime}

    testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), 3.14, true,
    testValues = []Value{"x", "y", []byte{1, 2, 3}, int64(7), int64(8), 3.14, true,
        testTimestamp, testDate, testTime, testDateTime,
        []Value{"nested", int64(17)}, "z"}
)
@@ -748,6 +787,7 @@ var (
type testStruct1 struct {
    B bool
    I int
    U uint16
    times
    S  string
    S2 String
@@ -774,14 +814,13 @@ type times struct {

func TestStructLoader(t *testing.T) {
    var ts1 testStruct1
    if err := load(&ts1, schema2, testValues); err != nil {
        t.Fatal(err)
    }
    mustLoad(t, &ts1, schema2, testValues)
    // Note: the schema field named "s" gets matched to the exported struct
    // field "S", not the unexported "s".
    want := &testStruct1{
        B: true,
        I: 7,
        U: 8,
        F: 3.14,
        times: times{TS: testTimestamp, T: testTime, D: testDate, DT: testDateTime},
        S: "x",
@@ -790,33 +829,25 @@ func TestStructLoader(t *testing.T) {
        Nested: nested{NestS: "nested", NestI: 17},
        Tagged: "z",
    }
    if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) {
        t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
        d, _, err := pretty.Diff(*want, ts1)
        if err == nil {
            t.Logf("diff:\n%s", d)
        }
    if diff := testutil.Diff(&ts1, want, cmp.AllowUnexported(testStruct1{})); diff != "" {
        t.Error(diff)
    }

    // Test pointers to nested structs.
    type nestedPtr struct{ Nested *nested }
    var np nestedPtr
    if err := load(&np, schema2, testValues); err != nil {
        t.Fatal(err)
    }
    mustLoad(t, &np, schema2, testValues)
    want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
    if !testutil.Equal(&np, want2) {
        t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
    if diff := testutil.Diff(&np, want2); diff != "" {
        t.Error(diff)
    }

    // Existing values should be reused.
    nst := &nested{NestS: "x", NestI: -10}
    np = nestedPtr{Nested: nst}
    if err := load(&np, schema2, testValues); err != nil {
        t.Fatal(err)
    }
    if !testutil.Equal(&np, want2) {
        t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
    mustLoad(t, &np, schema2, testValues)
    if diff := testutil.Diff(&np, want2); diff != "" {
        t.Error(diff)
    }
    if np.Nested != nst {
        t.Error("nested struct pointers not equal")
@@ -851,28 +882,23 @@ var (

func TestStructLoaderRepeated(t *testing.T) {
    var r1 repStruct
    if err := load(&r1, repSchema, repValues); err != nil {
        t.Fatal(err)
    }
    mustLoad(t, &r1, repSchema, repValues)
    want := repStruct{
        Nums:      []int{1, 2, 3},
        ShortNums: [...]int{1, 2}, // extra values discarded
        LongNums:  [...]int{1, 2, 3, 0, 0},
        Nested:    []*nested{{"x", 1}, {"y", 2}},
    }
    if !testutil.Equal(r1, want) {
        t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
    if diff := testutil.Diff(r1, want); diff != "" {
        t.Error(diff)
    }

    r2 := repStruct{
        Nums:     []int{-1, -2, -3, -4, -5}, // truncated to zero and appended to
        LongNums: [...]int{-1, -2, -3, -4, -5}, // unset elements are zeroed
    }
    if err := load(&r2, repSchema, repValues); err != nil {
        t.Fatal(err)
    }
    if !testutil.Equal(r2, want) {
        t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
    mustLoad(t, &r2, repSchema, repValues)
    if diff := testutil.Diff(r2, want); diff != "" {
        t.Error(diff)
    }
    if got, want := cap(r2.Nums), 5; got != want {
        t.Errorf("cap(r2.Nums) = %d, want %d", got, want)
@@ -880,33 +906,109 @@ func TestStructLoaderRepeated(t *testing.T) {

    // Short slice case.
    r3 := repStruct{Nums: []int{-1}}
    if err := load(&r3, repSchema, repValues); err != nil {
        t.Fatal(err)
    }
    if !testutil.Equal(r3, want) {
        t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
    mustLoad(t, &r3, repSchema, repValues)
    if diff := testutil.Diff(r3, want); diff != "" {
        t.Error(diff)
    }
    if got, want := cap(r3.Nums), 3; got != want {
        t.Errorf("cap(r3.Nums) = %d, want %d", got, want)
    }
}

type testStructNullable struct {
    String    NullString
    Bytes     []byte
    Integer   NullInt64
    Float     NullFloat64
    Boolean   NullBool
    Timestamp NullTimestamp
    Date      NullDate
    Time      NullTime
    DateTime  NullDateTime
    Record    *subNullable
}

type subNullable struct {
    X NullInt64
}

var testStructNullableSchema = Schema{
    {Name: "String", Type: StringFieldType, Required: false},
    {Name: "Bytes", Type: BytesFieldType, Required: false},
    {Name: "Integer", Type: IntegerFieldType, Required: false},
    {Name: "Float", Type: FloatFieldType, Required: false},
    {Name: "Boolean", Type: BooleanFieldType, Required: false},
    {Name: "Timestamp", Type: TimestampFieldType, Required: false},
    {Name: "Date", Type: DateFieldType, Required: false},
    {Name: "Time", Type: TimeFieldType, Required: false},
    {Name: "DateTime", Type: DateTimeFieldType, Required: false},
    {Name: "Record", Type: RecordFieldType, Required: false, Schema: Schema{
        {Name: "X", Type: IntegerFieldType, Required: false},
    }},
}

func TestStructLoaderNullable(t *testing.T) {
    var ts testStructNullable
    nilVals := []Value{nil, nil, nil, nil, nil, nil, nil, nil, nil, nil}
    mustLoad(t, &ts, testStructNullableSchema, nilVals)
    want := testStructNullable{}
    if diff := testutil.Diff(ts, want); diff != "" {
        t.Error(diff)
    }

    nonnilVals := []Value{"x", []byte{1, 2, 3}, int64(1), 2.3, true, testTimestamp, testDate, testTime, testDateTime, []Value{int64(4)}}

    // All ts fields are nil. Loading non-nil values will cause them all to
    // be allocated.
    mustLoad(t, &ts, testStructNullableSchema, nonnilVals)
    want = testStructNullable{
        String:    NullString{StringVal: "x", Valid: true},
        Bytes:     []byte{1, 2, 3},
        Integer:   NullInt64{Int64: 1, Valid: true},
        Float:     NullFloat64{Float64: 2.3, Valid: true},
        Boolean:   NullBool{Bool: true, Valid: true},
        Timestamp: NullTimestamp{Timestamp: testTimestamp, Valid: true},
        Date:      NullDate{Date: testDate, Valid: true},
        Time:      NullTime{Time: testTime, Valid: true},
        DateTime:  NullDateTime{DateTime: testDateTime, Valid: true},
        Record:    &subNullable{X: NullInt64{Int64: 4, Valid: true}},
    }
    if diff := testutil.Diff(ts, want); diff != "" {
        t.Error(diff)
    }

    // Struct pointers are reused, byte slices are not.
    want = ts
    want.Bytes = []byte{17}
    vals2 := []Value{nil, []byte{17}, nil, nil, nil, nil, nil, nil, nil, []Value{int64(7)}}
    mustLoad(t, &ts, testStructNullableSchema, vals2)
    if ts.Record != want.Record {
        t.Error("record pointers not identical")
    }
}

func TestStructLoaderOverflow(t *testing.T) {
    type S struct {
        I int16
        U uint16
        F float32
    }
    schema := Schema{
        {Name: "I", Type: IntegerFieldType},
        {Name: "U", Type: IntegerFieldType},
        {Name: "F", Type: FloatFieldType},
    }
    var s S
    if err := load(&s, schema, []Value{int64(math.MaxInt16 + 1), 0}); err == nil {
        t.Error("int: got nil, want error")
    }
    if err := load(&s, schema, []Value{int64(0), math.MaxFloat32 * 2}); err == nil {
        t.Error("float: got nil, want error")
    z64 := int64(0)
    for _, vals := range [][]Value{
        {int64(math.MaxInt16 + 1), z64, 0},
        {z64, int64(math.MaxInt32), 0},
        {z64, int64(-1), 0},
        {z64, z64, math.MaxFloat32 * 2},
    } {
        if err := load(&s, schema, vals); err == nil {
            t.Errorf("%+v: got nil, want error", vals)
        }
    }
}

@@ -922,20 +1024,18 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
        t.Fatal(err)
    }
    want1 := S1{I: 7}
    if !testutil.Equal(s1, want1) {
        t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
    if diff := testutil.Diff(s1, want1); diff != "" {
        t.Error(diff)
    }

    // It's even valid to have no overlapping fields at all.
    type S2 struct{ Z int }

    var s2 S2
    if err := load(&s2, schema2, testValues); err != nil {
        t.Fatal(err)
    }
    mustLoad(t, &s2, schema2, testValues)
    want2 := S2{}
    if !testutil.Equal(s2, want2) {
        t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
    if diff := testutil.Diff(s2, want2); diff != "" {
        t.Error(diff)
    }
}

@@ -989,21 +1089,17 @@ func TestStructLoaderErrors(t *testing.T) {
        {Name: "f", Type: FloatFieldType},
        {Name: "b", Type: BooleanFieldType},
        {Name: "s", Type: StringFieldType},
        {Name: "by", Type: BytesFieldType},
        {Name: "d", Type: DateFieldType},
    }
    type s struct {
        I  int
        F  float64
        B  bool
        S  string
        By []byte
        D  civil.Date
    }
    vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate}
    if err := load(&s{}, schema, vals); err != nil {
        t.Fatal(err)
        I int
        F float64
        B bool
        S string
        D civil.Date
    }
    vals := []Value{int64(0), 0.0, false, "", testDate}
    mustLoad(t, &s{}, schema, vals)
    for i, e := range vals {
        vals[i] = nil
        got := load(&s{}, schema, vals)
@@ -1033,6 +1129,12 @@ func TestStructLoaderErrors(t *testing.T) {
    }
}

func mustLoad(t *testing.T, pval interface{}, schema Schema, vals []Value) {
    if err := load(pval, schema, vals); err != nil {
        t.Fatalf("loading: %v", err)
    }
}

func load(pval interface{}, schema Schema, vals []Value) error {
    var sl structLoader
    if err := sl.set(pval, schema); err != nil {
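The widened overflow table above feeds the new setUint path; in isolation the guard amounts to this hedged sketch (local helper name invented):

package main

import (
    "fmt"
    "reflect"
)

// fits reports whether the int64 value x can be stored in the given
// unsigned field without going negative or overflowing, mirroring the
// check in setUint.
func fits(field interface{}, x int64) bool {
    v := reflect.ValueOf(field)
    return x >= 0 && !v.OverflowUint(uint64(x))
}

func main() {
    var u uint16
    fmt.Println(fits(u, 8))     // true
    fmt.Println(fits(u, -1))    // false: negative
    fmt.Println(fits(u, 1<<20)) // false: overflows uint16
}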
497 vendor/cloud.google.com/go/bigtable/admin.go generated vendored
@@ -18,28 +18,36 @@ package bigtable

import (
    "fmt"
    "math"
    "regexp"
    "strings"
    "time"

    "cloud.google.com/go/bigtable/internal/gax"
    btopt "cloud.google.com/go/bigtable/internal/option"
    "cloud.google.com/go/longrunning"
    lroauto "cloud.google.com/go/longrunning/autogen"
    "github.com/golang/protobuf/ptypes"
    durpb "github.com/golang/protobuf/ptypes/duration"
    "golang.org/x/net/context"
    "google.golang.org/api/cloudresourcemanager/v1"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
    gtransport "google.golang.org/api/transport/grpc"
    btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
    "google.golang.org/grpc/status"
    "google.golang.org/grpc/codes"
)

const adminAddr = "bigtableadmin.googleapis.com:443"

// AdminClient is a client type for performing admin operations within a specific instance.
type AdminClient struct {
    conn    *grpc.ClientConn
    tClient btapb.BigtableTableAdminClient
    conn      *grpc.ClientConn
    tClient   btapb.BigtableTableAdminClient
    lroClient *lroauto.OperationsClient

    project, instance string

@@ -53,17 +61,32 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*AdminClient, error) {
    if err != nil {
        return nil, err
    }
    // Need to add scopes for long running operations (for create table & snapshots)
    o = append(o, option.WithScopes(cloudresourcemanager.CloudPlatformScope))
    o = append(o, opts...)
    conn, err := gtransport.Dial(ctx, o...)
    if err != nil {
        return nil, fmt.Errorf("dialing: %v", err)
    }

    lroClient, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
    if err != nil {
        // This error "should not happen", since we are just reusing old connection
        // and never actually need to dial.
        // If this does happen, we could leak conn. However, we cannot close conn:
        // If the user invoked the function with option.WithGRPCConn,
        // we would close a connection that's still in use.
        // TODO(pongad): investigate error conditions.
        return nil, err
    }

    return &AdminClient{
        conn:     conn,
        tClient:  btapb.NewBigtableTableAdminClient(conn),
        project:  project,
        instance: instance,
        md:       metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
        conn:      conn,
        tClient:   btapb.NewBigtableTableAdminClient(conn),
        lroClient: lroClient,
        project:   project,
        instance:  instance,
        md:        metadata.Pairs(resourcePrefixHeader, fmt.Sprintf("projects/%s/instances/%s", project, instance)),
    }, nil
}

@@ -122,13 +145,13 @@ func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    var req_splits []*btapb.CreateTableRequest_Split
    for _, split := range conf.SplitKeys {
        req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
        req_splits = append(req_splits, &btapb.CreateTableRequest_Split{Key: []byte(split)})
    }
    var tbl btapb.Table
    if conf.Families != nil {
        tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
        for fam, policy := range conf.Families {
            tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()}
            tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{GcRule: policy.proto()}
        }
    }
    prefix := ac.instancePrefix()
@@ -151,7 +174,7 @@ func (ac *AdminClient) CreateColumnFamily(ctx context.Context, table, family string) error {
        Name: prefix + "/tables/" + table,
        Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
            Id:  family,
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
        }},
    }
    _, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -177,7 +200,7 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family string) error {
        Name: prefix + "/tables/" + table,
        Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
            Id:  family,
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{true},
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Drop{Drop: true},
        }},
    }
    _, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -226,7 +249,7 @@ func (ac *AdminClient) SetGCPolicy(ctx context.Context, table, family string, policy GCPolicy) error {
        Name: prefix + "/tables/" + table,
        Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
            Id:  family,
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{GcRule: policy.proto()}},
            Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{GcRule: policy.proto()}},
        }},
    }
    _, err := ac.tClient.ModifyColumnFamilies(ctx, req)
@@ -239,12 +262,289 @@ func (ac *AdminClient) DropRowRange(ctx context.Context, table, rowKeyPrefix string) error {
    prefix := ac.instancePrefix()
    req := &btapb.DropRowRangeRequest{
        Name:   prefix + "/tables/" + table,
        Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte(rowKeyPrefix)},
        Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte(rowKeyPrefix)},
    }
    _, err := ac.tClient.DropRowRange(ctx, req)
    return err
}

// CreateTableFromSnapshot creates a table from a snapshot.
// The table will be created in the same cluster as the snapshot.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) CreateTableFromSnapshot(ctx context.Context, table, cluster, snapshot string) error {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    prefix := ac.instancePrefix()
    snapshotPath := prefix + "/clusters/" + cluster + "/snapshots/" + snapshot

    req := &btapb.CreateTableFromSnapshotRequest{
        Parent:         prefix,
        TableId:        table,
        SourceSnapshot: snapshotPath,
    }
    op, err := ac.tClient.CreateTableFromSnapshot(ctx, req)
    if err != nil {
        return err
    }
    resp := btapb.Table{}
    return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
}

const DefaultSnapshotDuration time.Duration = 0

// SnapshotTable creates a new snapshot in the specified cluster from the specified source table.
// Setting the ttl to `DefaultSnapshotDuration` will use the server-side default for the duration.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) SnapshotTable(ctx context.Context, table, cluster, snapshot string, ttl time.Duration) error {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    prefix := ac.instancePrefix()

    var ttlProto *durpb.Duration

    if ttl > 0 {
        ttlProto = ptypes.DurationProto(ttl)
    }

    req := &btapb.SnapshotTableRequest{
        Name:       prefix + "/tables/" + table,
        Cluster:    prefix + "/clusters/" + cluster,
        SnapshotId: snapshot,
        Ttl:        ttlProto,
    }

    op, err := ac.tClient.SnapshotTable(ctx, req)
    if err != nil {
        return err
    }
    resp := btapb.Snapshot{}
    return longrunning.InternalNewOperation(ac.lroClient, op).Wait(ctx, &resp)
}

// ListSnapshots returns a SnapshotIterator for iterating over the snapshots in a cluster.
// To list snapshots across all of the clusters in the instance, specify "-" as the cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) ListSnapshots(ctx context.Context, cluster string) *SnapshotIterator {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    prefix := ac.instancePrefix()
    clusterPath := prefix + "/clusters/" + cluster

    it := &SnapshotIterator{}
    req := &btapb.ListSnapshotsRequest{
        Parent: clusterPath,
    }

    fetch := func(pageSize int, pageToken string) (string, error) {
        req.PageToken = pageToken
        if pageSize > math.MaxInt32 {
            req.PageSize = math.MaxInt32
        } else {
            req.PageSize = int32(pageSize)
        }

        resp, err := ac.tClient.ListSnapshots(ctx, req)
        if err != nil {
            return "", err
        }
        for _, s := range resp.Snapshots {
            snapshotInfo, err := newSnapshotInfo(s)
            if err != nil {
                return "", fmt.Errorf("failed to parse snapshot proto %v", err)
            }
            it.items = append(it.items, snapshotInfo)
        }
        return resp.NextPageToken, nil
    }
    bufLen := func() int { return len(it.items) }
    takeBuf := func() interface{} { b := it.items; it.items = nil; return b }

    it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, bufLen, takeBuf)

    return it
}

func newSnapshotInfo(snapshot *btapb.Snapshot) (*SnapshotInfo, error) {
    nameParts := strings.Split(snapshot.Name, "/")
    name := nameParts[len(nameParts)-1]
    tablePathParts := strings.Split(snapshot.SourceTable.Name, "/")
    tableId := tablePathParts[len(tablePathParts)-1]

    createTime, err := ptypes.Timestamp(snapshot.CreateTime)
    if err != nil {
        return nil, fmt.Errorf("invalid createTime: %v", err)
    }

    deleteTime, err := ptypes.Timestamp(snapshot.DeleteTime)
    if err != nil {
        return nil, fmt.Errorf("invalid deleteTime: %v", err)
    }

    return &SnapshotInfo{
        Name:        name,
        SourceTable: tableId,
        DataSize:    snapshot.DataSizeBytes,
        CreateTime:  createTime,
        DeleteTime:  deleteTime,
    }, nil
}

// A SnapshotIterator iterates over the snapshots in a cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
type SnapshotIterator struct {
    items    []*SnapshotInfo
    pageInfo *iterator.PageInfo
    nextFunc func() error
}

// PageInfo supports pagination. See the https://godoc.org/google.golang.org/api/iterator package for details.
func (it *SnapshotIterator) PageInfo() *iterator.PageInfo {
    return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done
// (https://godoc.org/google.golang.org/api/iterator) if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *SnapshotIterator) Next() (*SnapshotInfo, error) {
    if err := it.nextFunc(); err != nil {
        return nil, err
    }
    item := it.items[0]
    it.items = it.items[1:]
    return item, nil
}

type SnapshotInfo struct {
    Name        string
    SourceTable string
    DataSize    int64
    CreateTime  time.Time
    DeleteTime  time.Time
}

// SnapshotInfo gets snapshot metadata.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) SnapshotInfo(ctx context.Context, cluster, snapshot string) (*SnapshotInfo, error) {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    prefix := ac.instancePrefix()
    clusterPath := prefix + "/clusters/" + cluster
    snapshotPath := clusterPath + "/snapshots/" + snapshot

    req := &btapb.GetSnapshotRequest{
        Name: snapshotPath,
    }

    resp, err := ac.tClient.GetSnapshot(ctx, req)
    if err != nil {
        return nil, err
    }

    return newSnapshotInfo(resp)
}

// DeleteSnapshot deletes a snapshot in a cluster.
//
// This is a private alpha release of Cloud Bigtable snapshots. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) DeleteSnapshot(ctx context.Context, cluster, snapshot string) error {
    ctx = mergeOutgoingMetadata(ctx, ac.md)
    prefix := ac.instancePrefix()
    clusterPath := prefix + "/clusters/" + cluster
    snapshotPath := clusterPath + "/snapshots/" + snapshot

    req := &btapb.DeleteSnapshotRequest{
        Name: snapshotPath,
    }
    _, err := ac.tClient.DeleteSnapshot(ctx, req)
    return err
}
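A hedged end-to-end sketch of the alpha snapshot API added above (project, instance, cluster and table names are placeholders, and the alpha feature may be unavailable to most accounts):

package main

import (
    "time"

    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    ac, err := bigtable.NewAdminClient(ctx, "my-project", "my-instance")
    if err != nil {
        panic(err)
    }
    defer ac.Close()

    // Snapshot "mytable" into cluster "c1" with a 24h TTL.
    if err := ac.SnapshotTable(ctx, "mytable", "c1", "snap1", 24*time.Hour); err != nil {
        panic(err)
    }
    // Restore the snapshot into a new table in the same cluster.
    if err := ac.CreateTableFromSnapshot(ctx, "restored", "c1", "snap1"); err != nil {
        panic(err)
    }
    // Clean up.
    if err := ac.DeleteSnapshot(ctx, "c1", "snap1"); err != nil {
        panic(err)
    }
}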

// getConsistencyToken gets the consistency token for a table.
func (ac *AdminClient) getConsistencyToken(ctx context.Context, tableName string) (string, error) {
    req := &btapb.GenerateConsistencyTokenRequest{
        Name: tableName,
    }
    resp, err := ac.tClient.GenerateConsistencyToken(ctx, req)
    if err != nil {
        return "", err
    }
    return resp.GetConsistencyToken(), nil
}

// isConsistent checks if a token is consistent for a table.
func (ac *AdminClient) isConsistent(ctx context.Context, tableName, token string) (bool, error) {
    req := &btapb.CheckConsistencyRequest{
        Name:             tableName,
        ConsistencyToken: token,
    }
    var resp *btapb.CheckConsistencyResponse

    // Retry calls on retryable errors to avoid losing the token gathered before.
    err := gax.Invoke(ctx, func(ctx context.Context) error {
        var err error
        resp, err = ac.tClient.CheckConsistency(ctx, req)
        return err
    }, retryOptions...)
    if err != nil {
        return false, err
    }
    return resp.GetConsistent(), nil
}

// WaitForReplication waits until all the writes committed before the call started have been propagated to all the clusters in the instance via replication.
//
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (ac *AdminClient) WaitForReplication(ctx context.Context, table string) error {
    // Get the token.
    prefix := ac.instancePrefix()
    tableName := prefix + "/tables/" + table
    token, err := ac.getConsistencyToken(ctx, tableName)
    if err != nil {
        return err
    }

    // Periodically check if the token is consistent.
    timer := time.NewTicker(time.Second * 10)
    defer timer.Stop()
    for {
        consistent, err := ac.isConsistent(ctx, tableName, token)
        if err != nil {
            return err
        }
        if consistent {
            return nil
        }
        // Sleep for a bit or until the ctx is cancelled.
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-timer.C:
        }
    }
}

const instanceAdminAddr = "bigtableadmin.googleapis.com:443"

// InstanceAdminClient is a client type for performing admin operations on instances.
@@ -336,24 +636,55 @@ type InstanceConf struct {
    InstanceType InstanceType
}

// InstanceWithClustersConfig contains the information necessary to create an Instance.
type InstanceWithClustersConfig struct {
    InstanceID, DisplayName string
    Clusters                []ClusterConfig
    InstanceType            InstanceType
}

var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)

// CreateInstance creates a new instance in the project.
// This method will return when the instance has been created or when an error occurs.
func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
    ctx = mergeOutgoingMetadata(ctx, iac.md)
    req := &btapb.CreateInstanceRequest{
        Parent:     "projects/" + iac.project,
        InstanceId: conf.InstanceId,
        Instance:   &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
        Clusters: map[string]*btapb.Cluster{
            conf.ClusterId: {
                ServeNodes:         conf.NumNodes,
                DefaultStorageType: conf.StorageType.proto(),
                Location:           "projects/" + iac.project + "/locations/" + conf.Zone,
    newConfig := InstanceWithClustersConfig{
        InstanceID:   conf.InstanceId,
        DisplayName:  conf.DisplayName,
        InstanceType: conf.InstanceType,
        Clusters: []ClusterConfig{
            {
                InstanceID:  conf.InstanceId,
                ClusterID:   conf.ClusterId,
                Zone:        conf.Zone,
                NumNodes:    conf.NumNodes,
                StorageType: conf.StorageType,
            },
        },
    }
    return iac.CreateInstanceWithClusters(ctx, &newConfig)
}

// CreateInstanceWithClusters creates a new instance with configured clusters in the project.
// This method will return when the instance has been created or when an error occurs.
//
// Instances with multiple clusters are part of a private alpha release of Cloud Bigtable replication.
// This feature is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (iac *InstanceAdminClient) CreateInstanceWithClusters(ctx context.Context, conf *InstanceWithClustersConfig) error {
    ctx = mergeOutgoingMetadata(ctx, iac.md)
    clusters := make(map[string]*btapb.Cluster)
    for _, cluster := range conf.Clusters {
        clusters[cluster.ClusterID] = cluster.proto(iac.project)
    }

    req := &btapb.CreateInstanceRequest{
        Parent:     "projects/" + iac.project,
        InstanceId: conf.InstanceID,
        Instance:   &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
        Clusters:   clusters,
    }

    lro, err := iac.iClient.CreateInstance(ctx, req)
    if err != nil {
@@ -366,7 +697,7 @@ func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *InstanceConf) error {
// DeleteInstance deletes an instance from the project.
func (iac *InstanceAdminClient) DeleteInstance(ctx context.Context, instanceId string) error {
    ctx = mergeOutgoingMetadata(ctx, iac.md)
    req := &btapb.DeleteInstanceRequest{"projects/" + iac.project + "/instances/" + instanceId}
    req := &btapb.DeleteInstanceRequest{Name: "projects/" + iac.project + "/instances/" + instanceId}
    _, err := iac.iClient.DeleteInstance(ctx, req)
    return err
}
@@ -421,3 +752,119 @@ func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
        DisplayName: res.DisplayName,
    }, nil
}

// ClusterConfig contains the information necessary to create a cluster.
type ClusterConfig struct {
    InstanceID, ClusterID, Zone string
    NumNodes                    int32
    StorageType                 StorageType
}

func (cc *ClusterConfig) proto(project string) *btapb.Cluster {
    return &btapb.Cluster{
        ServeNodes:         cc.NumNodes,
        DefaultStorageType: cc.StorageType.proto(),
        Location:           "projects/" + project + "/locations/" + cc.Zone,
    }
}

// ClusterInfo represents information about a cluster.
type ClusterInfo struct {
    Name       string // name of the cluster
    Zone       string // GCP zone of the cluster (e.g. "us-central1-a")
    ServeNodes int    // number of allocated serve nodes
    State      string // state of the cluster
}

// CreateCluster creates a new cluster in an instance.
// This method will return when the cluster has been created or when an error occurs.
//
// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (iac *InstanceAdminClient) CreateCluster(ctx context.Context, conf *ClusterConfig) error {
    ctx = mergeOutgoingMetadata(ctx, iac.md)

    req := &btapb.CreateClusterRequest{
        Parent:    "projects/" + iac.project + "/instances/" + conf.InstanceID,
        ClusterId: conf.ClusterID,
        Cluster:   conf.proto(iac.project),
    }

    lro, err := iac.iClient.CreateCluster(ctx, req)
    if err != nil {
        return err
    }
    resp := btapb.Cluster{}
    return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, &resp)
}
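A sketch of creating a replicated instance with the new InstanceWithClustersConfig (placeholder project, instance and cluster IDs and zones; multi-cluster instances are part of the alpha replication release):

package main

import (
    "cloud.google.com/go/bigtable"
    "golang.org/x/net/context"
)

func main() {
    ctx := context.Background()
    iac, err := bigtable.NewInstanceAdminClient(ctx, "my-project")
    if err != nil {
        panic(err)
    }
    defer iac.Close()

    conf := &bigtable.InstanceWithClustersConfig{
        InstanceID:   "my-instance",
        DisplayName:  "My Instance",
        InstanceType: bigtable.PRODUCTION,
        Clusters: []bigtable.ClusterConfig{
            // Two clusters in different zones for replication.
            {InstanceID: "my-instance", ClusterID: "c1", Zone: "us-central1-a", NumNodes: 3, StorageType: bigtable.SSD},
            {InstanceID: "my-instance", ClusterID: "c2", Zone: "us-east1-b", NumNodes: 3, StorageType: bigtable.SSD},
        },
    }
    if err := iac.CreateInstanceWithClusters(ctx, conf); err != nil {
        panic(err)
    }
}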
|
||||
// DeleteCluster deletes a cluster from an instance.
|
||||
//
|
||||
// This is a private alpha release of Cloud Bigtable replication. This feature
|
||||
// is not currently available to most Cloud Bigtable customers. This feature
|
||||
// might be changed in backward-incompatible ways and is not recommended for
|
||||
// production use. It is not subject to any SLA or deprecation policy.
|
||||
func (iac *InstanceAdminClient) DeleteCluster(ctx context.Context, instanceId, clusterId string) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.DeleteClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId}
|
||||
_, err := iac.iClient.DeleteCluster(ctx, req)
|
||||
return err
|
||||
}
|
||||
|
||||
// UpdateCluster updates attributes of a cluster
|
||||
func (iac *InstanceAdminClient) UpdateCluster(ctx context.Context, instanceId, clusterId string, serveNodes int32) error {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
cluster := &btapb.Cluster{
|
||||
Name: "projects/" + iac.project + "/instances/" + instanceId + "/clusters/" + clusterId,
|
||||
ServeNodes: serveNodes}
|
||||
lro, err := iac.iClient.UpdateCluster(ctx, cluster)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return longrunning.InternalNewOperation(iac.lroClient, lro).Wait(ctx, nil)
|
||||
}
|
||||
|
||||
// Clusters lists the clusters in an instance.
|
||||
func (iac *InstanceAdminClient) Clusters(ctx context.Context, instanceId string) ([]*ClusterInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.ListClustersRequest{Parent: "projects/" + iac.project + "/instances/" + instanceId}
|
||||
res, err := iac.iClient.ListClusters(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// TODO(garyelliott): Deal with failed_locations.
|
||||
var cis []*ClusterInfo
|
||||
for _, c := range res.Clusters {
|
||||
nameParts := strings.Split(c.Name, "/")
|
||||
locParts := strings.Split(c.Location, "/")
|
||||
cis = append(cis, &ClusterInfo{
|
||||
Name: nameParts[len(nameParts)-1],
|
||||
Zone: locParts[len(locParts)-1],
|
||||
ServeNodes: int(c.ServeNodes),
|
||||
State: c.State.String(),
|
||||
})
|
||||
}
|
||||
return cis, nil
|
||||
}
|
||||
|
||||
// GetCluster fetches a cluster in an instance
|
||||
func (iac *InstanceAdminClient) GetCluster(ctx context.Context, instanceID, clusterID string) (*ClusterInfo, error) {
|
||||
ctx = mergeOutgoingMetadata(ctx, iac.md)
|
||||
req := &btapb.GetClusterRequest{Name: "projects/" + iac.project + "/instances/" + instanceID + "/clusters" + clusterID}
|
||||
c, err := iac.iClient.GetCluster(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
nameParts := strings.Split(c.Name, "/")
|
||||
locParts := strings.Split(c.Location, "/")
|
||||
cis := &ClusterInfo{
|
||||
Name: nameParts[len(nameParts)-1],
|
||||
Zone: locParts[len(locParts)-1],
|
||||
ServeNodes: int(c.ServeNodes),
|
||||
State: c.State.String(),
|
||||
}
|
||||
return cis, nil
|
||||
}
|
||||
|
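To make the shape of this cluster-admin API concrete, here is a minimal, hedged sketch of the lifecycle these methods support; the project, instance, zone, and cluster names are hypothetical placeholders, and production code should heed the alpha caveats above before depending on CreateCluster:

	package main

	import (
		"fmt"
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		// "myproj", "myinst", and the zone/cluster IDs below are placeholders.
		iac, err := bigtable.NewInstanceAdminClient(ctx, "myproj")
		if err != nil {
			log.Fatal(err)
		}
		defer iac.Close()

		// Create a second, 3-node HDD cluster for the instance.
		err = iac.CreateCluster(ctx, &bigtable.ClusterConfig{
			InstanceID:  "myinst",
			ClusterID:   "myinst-c2",
			Zone:        "us-east1-b",
			NumNodes:    3,
			StorageType: bigtable.HDD,
		})
		if err != nil {
			log.Fatal(err)
		}

		// List the instance's clusters and print what was allocated.
		cis, err := iac.Clusters(ctx, "myinst")
		if err != nil {
			log.Fatal(err)
		}
		for _, ci := range cis {
			fmt.Printf("%s in %s: %d nodes, state %s\n", ci.Name, ci.Zone, ci.ServeNodes, ci.State)
		}
	}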
257
vendor/cloud.google.com/go/bigtable/admin_test.go
generated
vendored
@ -15,6 +15,7 @@
package bigtable

import (
	"math"
	"sort"
	"testing"
	"time"
@ -22,9 +23,10 @@ import (
	"cloud.google.com/go/internal/testutil"

	"fmt"
	"strings"

	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
	"strings"
)

func TestAdminIntegration(t *testing.T) {
@ -99,6 +101,9 @@ func TestAdminIntegration(t *testing.T) {
	if got, want := list(), []string{"myothertable", "mytable"}; !containsAll(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}

	adminClient.WaitForReplication(ctx, "mytable")

	if err := adminClient.DeleteTable(ctx, "myothertable"); err != nil {
		t.Fatalf("Deleting table: %v", err)
	}
@ -178,3 +183,251 @@ func TestAdminIntegration(t *testing.T) {
		t.Errorf("Invalid row count after dropping range: got %v, want %v", gotRowCount, 5)
	}
}

func TestInstanceUpdate(t *testing.T) {
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}
	defer testEnv.Close()

	timeout := 2 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}

	defer adminClient.Close()

	iAdminClient, err := testEnv.NewInstanceAdminClient()
	if err != nil {
		t.Fatalf("NewInstanceAdminClient: %v", err)
	}

	if iAdminClient == nil {
		return
	}

	defer iAdminClient.Close()

	iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
	if err != nil {
		t.Errorf("InstanceInfo: %v", err)
	}

	if iInfo.Name != adminClient.instance {
		t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
	}

	if iInfo.DisplayName != adminClient.instance {
		t.Errorf("InstanceInfo returned display name %#v, want %#v", iInfo.DisplayName, adminClient.instance)
	}

	const numNodes = 4
	// Update the cluster's node count.
	if err := iAdminClient.UpdateCluster(ctx, adminClient.instance, testEnv.Config().Cluster, int32(numNodes)); err != nil {
		t.Errorf("UpdateCluster: %v", err)
	}

	// Get the cluster after updating.
	cis, err := iAdminClient.GetCluster(ctx, adminClient.instance, testEnv.Config().Cluster)
	if err != nil {
		t.Errorf("GetCluster: %v", err)
	}
	if cis.ServeNodes != int(numNodes) {
		t.Errorf("ServeNodes returned %d, want %d", cis.ServeNodes, int(numNodes))
	}
}

func TestAdminSnapshotIntegration(t *testing.T) {
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}
	defer testEnv.Close()

	if !testEnv.Config().UseProd {
		t.Skip("emulator doesn't support snapshots")
	}

	timeout := 2 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()

	table := testEnv.Config().Table
	cluster := testEnv.Config().Cluster

	list := func(cluster string) ([]*SnapshotInfo, error) {
		infos := []*SnapshotInfo(nil)

		it := adminClient.ListSnapshots(ctx, cluster)
		for {
			s, err := it.Next()
			if err == iterator.Done {
				break
			}
			if err != nil {
				return nil, err
			}
			infos = append(infos, s)
		}
		return infos, nil
	}

	// Delete the table at the end of the test. Schedule ahead of time
	// in case the client fails.
	defer adminClient.DeleteTable(ctx, table)

	if err := adminClient.CreateTable(ctx, table); err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	// Precondition: no snapshots
	snapshots, err := list(cluster)
	if err != nil {
		t.Fatalf("Initial snapshot list: %v", err)
	}
	if got, want := len(snapshots), 0; got != want {
		t.Fatalf("Initial snapshot list len: %d, want: %d", got, want)
	}

	// Create snapshot
	defer adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot")

	if err = adminClient.SnapshotTable(ctx, table, cluster, "mysnapshot", 5*time.Hour); err != nil {
		t.Fatalf("Creating snapshot: %v", err)
	}

	// List snapshot
	snapshots, err = list(cluster)
	if err != nil {
		t.Fatalf("Listing snapshots: %v", err)
	}
	if got, want := len(snapshots), 1; got != want {
		t.Fatalf("Listing snapshot count: %d, want: %d", got, want)
	}
	if got, want := snapshots[0].Name, "mysnapshot"; got != want {
		t.Fatalf("Snapshot name: %s, want: %s", got, want)
	}
	if got, want := snapshots[0].SourceTable, table; got != want {
		t.Fatalf("Snapshot SourceTable: %s, want: %s", got, want)
	}
	if got, want := snapshots[0].DeleteTime, snapshots[0].CreateTime.Add(5*time.Hour); math.Abs(got.Sub(want).Minutes()) > 1 {
		t.Fatalf("Snapshot DeleteTime: %s, want: %s", got, want)
	}

	// Get snapshot
	snapshot, err := adminClient.SnapshotInfo(ctx, cluster, "mysnapshot")
	if err != nil {
		t.Fatalf("SnapshotInfo: %v", err)
	}
	if got, want := *snapshot, *snapshots[0]; got != want {
		t.Fatalf("SnapshotInfo: %v, want: %v", got, want)
	}

	// Restore
	restoredTable := table + "-restored"
	defer adminClient.DeleteTable(ctx, restoredTable)
	if err = adminClient.CreateTableFromSnapshot(ctx, restoredTable, cluster, "mysnapshot"); err != nil {
		t.Fatalf("CreateTableFromSnapshot: %v", err)
	}
	if _, err := adminClient.TableInfo(ctx, restoredTable); err != nil {
		t.Fatalf("Restored TableInfo: %v", err)
	}

	// Delete snapshot
	if err = adminClient.DeleteSnapshot(ctx, cluster, "mysnapshot"); err != nil {
		t.Fatalf("DeleteSnapshot: %v", err)
	}
	snapshots, err = list(cluster)
	if err != nil {
		t.Fatalf("List after Delete: %v", err)
	}
	if got, want := len(snapshots), 0; got != want {
		t.Fatalf("List after delete len: %d, want: %d", got, want)
	}
}

func TestGranularity(t *testing.T) {
	testEnv, err := NewIntegrationEnv()
	if err != nil {
		t.Fatalf("IntegrationEnv: %v", err)
	}
	defer testEnv.Close()

	timeout := 2 * time.Second
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
	}
	ctx, cancel := context.WithTimeout(context.Background(), timeout)
	defer cancel()

	adminClient, err := testEnv.NewAdminClient()
	if err != nil {
		t.Fatalf("NewAdminClient: %v", err)
	}
	defer adminClient.Close()

	list := func() []string {
		tbls, err := adminClient.Tables(ctx)
		if err != nil {
			t.Fatalf("Fetching list of tables: %v", err)
		}
		sort.Strings(tbls)
		return tbls
	}
	containsAll := func(got, want []string) bool {
		gotSet := make(map[string]bool)

		for _, s := range got {
			gotSet[s] = true
		}
		for _, s := range want {
			if !gotSet[s] {
				return false
			}
		}
		return true
	}

	defer adminClient.DeleteTable(ctx, "mytable")

	if err := adminClient.CreateTable(ctx, "mytable"); err != nil {
		t.Fatalf("Creating table: %v", err)
	}

	tables := list()
	if got, want := tables, []string{"mytable"}; !containsAll(got, want) {
		t.Errorf("adminClient.Tables returned %#v, want %#v", got, want)
	}

	// Call ModifyColumnFamilies to check the granularity of the table.
	prefix := adminClient.instancePrefix()
	req := &btapb.ModifyColumnFamiliesRequest{
		Name: prefix + "/tables/" + "mytable",
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
		}},
	}
	table, err := adminClient.tClient.ModifyColumnFamilies(ctx, req)
	if err != nil {
		t.Fatalf("Creating column family: %v", err)
	}
	if table.Granularity != btapb.Table_TimestampGranularity(btapb.Table_MILLIS) {
		t.Errorf("ModifyColumnFamilies returned granularity %#v, want %#v", table.Granularity, btapb.Table_TimestampGranularity(btapb.Table_MILLIS))
	}
}
84
vendor/cloud.google.com/go/bigtable/bigtable.go
generated
vendored
@ -44,10 +44,28 @@ type Client struct {
	conn   *grpc.ClientConn
	client btpb.BigtableClient
	project, instance string
	// App Profiles are part of the private alpha release of Cloud Bigtable replication.
	// This feature is not currently available to most Cloud Bigtable customers. This
	// feature might be changed in backward-incompatible ways and is not recommended
	// for production use. It is not subject to any SLA or deprecation policy.
	appProfile string
}

// ClientConfig has configurations for the client.
type ClientConfig struct {
	// The id of the app profile to associate with all data operations sent from this client.
	// If unspecified, the default app profile for the instance will be used.
	AppProfile string
}

// NewClient creates a new Client for a given project and instance.
// The default ClientConfig will be used.
func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
	return NewClientWithConfig(ctx, project, instance, ClientConfig{}, opts...)
}

// NewClientWithConfig creates a new Client for a given project and instance, using the given ClientConfig.
func NewClientWithConfig(ctx context.Context, project, instance string, config ClientConfig, opts ...option.ClientOption) (*Client, error) {
	o, err := btopt.DefaultClientOptions(prodAddr, Scope, clientUserAgent)
	if err != nil {
		return nil, err
@ -66,10 +84,11 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.ClientOption) (*Client, error) {
		return nil, fmt.Errorf("dialing: %v", err)
	}
	return &Client{
		conn:     conn,
		client:   btpb.NewBigtableClient(conn),
		project:  project,
		instance: instance,
		conn:       conn,
		client:     btpb.NewBigtableClient(conn),
		project:    project,
		instance:   instance,
		appProfile: config.AppProfile,
	}, nil
}
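A short sketch of how the new configuration surface is meant to be used; the profile, project, instance, and table names here are hypothetical. An empty AppProfile leaves requests on the instance's default profile, which is exactly what NewClient does:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"golang.org/x/net/context"
	)

	func main() {
		ctx := context.Background()
		// Every data RPC from this client will carry AppProfileId "batch".
		client, err := bigtable.NewClientWithConfig(ctx, "myproj", "myinst",
			bigtable.ClientConfig{AppProfile: "batch"})
		if err != nil {
			log.Fatal(err)
		}
		defer client.Close()

		tbl := client.Open("mytable")
		mut := bigtable.NewMutation()
		mut.Set("cf", "col", bigtable.Now(), []byte("v"))
		if err := tbl.Apply(ctx, "row-1", mut); err != nil {
			log.Fatal(err)
		}
	}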
@ -130,9 +149,16 @@ func (t *Table) ReadRows(ctx context.Context, arg RowSet, f func(Row) bool, opts ...ReadOption) error {

	var prevRowKey string
	err := gax.Invoke(ctx, func(ctx context.Context) error {
		if !arg.valid() {
			// Empty row set, no need to make an API call.
			// NOTE: we must return early if arg == RowList{} because reading
			// an empty RowList from bigtable returns all rows from that table.
			return nil
		}
		req := &btpb.ReadRowsRequest{
			TableName: t.c.fullTableName(t.table),
			Rows:      arg.proto(),
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			Rows:         arg.proto(),
		}
		for _, opt := range opts {
			opt.set(req)
@ -292,10 +318,10 @@ func (r RowRange) String() string {

func (r RowRange) proto() *btpb.RowSet {
	rr := &btpb.RowRange{
		StartKey: &btpb.RowRange_StartKeyClosed{[]byte(r.start)},
		StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte(r.start)},
	}
	if !r.Unbounded() {
		rr.EndKey = &btpb.RowRange_EndKeyOpen{[]byte(r.limit)}
		rr.EndKey = &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte(r.limit)}
	}
	return &btpb.RowSet{RowRanges: []*btpb.RowRange{rr}}
}
@ -313,7 +339,7 @@ func (r RowRange) retainRowsAfter(lastRowKey string) RowSet {
}

func (r RowRange) valid() bool {
	return r.start < r.limit
	return r.Unbounded() || r.start < r.limit
}
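The valid() change is subtle: an unbounded RowRange has start == limit == "", so the old start < limit test wrongly classified it as empty and ReadRows silently skipped the RPC. A sketch of the semantics after the fix, using the package's exported constructors:

	r1 := bigtable.InfiniteRange("")  // unbounded: start == limit == "", now valid, reads all rows
	r2 := bigtable.NewRange("a", "b") // valid: "a" < "b"
	r3 := bigtable.NewRange("b", "a") // still invalid: empty range, no ReadRows RPC is sent
	_, _, _ = r1, r2, r3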
// RowRangeList is a sequence of RowRanges representing the union of the ranges.
@ -440,9 +466,10 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {
	var callOptions []gax.CallOption
	if m.cond == nil {
		req := &btpb.MutateRowRequest{
			TableName: t.c.fullTableName(t.table),
			RowKey:    []byte(row),
			Mutations: m.ops,
			TableName:    t.c.fullTableName(t.table),
			AppProfileId: t.c.appProfile,
			RowKey:       []byte(row),
			Mutations:    m.ops,
		}
		if mutationsAreRetryable(m.ops) {
			callOptions = retryOptions
@ -461,13 +488,20 @@ func (t *Table) Apply(ctx context.Context, row string, m *Mutation, opts ...ApplyOption) error {

	req := &btpb.CheckAndMutateRowRequest{
		TableName:       t.c.fullTableName(t.table),
		AppProfileId:    t.c.appProfile,
		RowKey:          []byte(row),
		PredicateFilter: m.cond.proto(),
	}
	if m.mtrue != nil {
		if m.mtrue.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.TrueMutations = m.mtrue.ops
	}
	if m.mfalse != nil {
		if m.mfalse.cond != nil {
			return errors.New("bigtable: conditional mutations cannot be nested")
		}
		req.FalseMutations = m.mfalse.ops
	}
	if mutationsAreRetryable(req.TrueMutations) && mutationsAreRetryable(req.FalseMutations) {
@ -531,7 +565,7 @@ func NewCondMutation(cond Filter, mtrue, mfalse *Mutation) *Mutation {
// The timestamp will be truncated to millisecond granularity.
// A timestamp of ServerTime means to use the server timestamp.
func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimestampMicros: int64(ts.TruncateToMilliseconds()),
@ -541,7 +575,7 @@ func (m *Mutation) Set(family, column string, ts Timestamp, value []byte) {

// DeleteCellsInColumn will delete all the cells whose columns are family:column.
func (m *Mutation) DeleteCellsInColumn(family, column string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
	}}})
@ -552,7 +586,7 @@ func (m *Mutation) DeleteCellsInColumn(family, column string) {
// If end is zero, it will be interpreted as infinity.
// The timestamps will be truncated to millisecond granularity.
func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{&btpb.Mutation_DeleteFromColumn{
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromColumn_{DeleteFromColumn: &btpb.Mutation_DeleteFromColumn{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		TimeRange: &btpb.TimestampRange{
@ -564,14 +598,14 @@ func (m *Mutation) DeleteTimestampRange(family, column string, start, end Timestamp) {

// DeleteCellsInFamily will delete all the cells whose columns are family:*.
func (m *Mutation) DeleteCellsInFamily(family string) {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{&btpb.Mutation_DeleteFromFamily{
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromFamily_{DeleteFromFamily: &btpb.Mutation_DeleteFromFamily{
		FamilyName: family,
	}}})
}

// DeleteRow deletes the entire row.
func (m *Mutation) DeleteRow() {
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}})
	m.ops = append(m.ops, &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}})
}
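The mutation edits above are mechanical but worth naming: each protobuf oneof wrapper moves from a positional composite literal to a keyed one, which quiets go vet's unkeyed-field warning and keeps the code compiling if the generated structs ever gain fields. The pattern in isolation, taken from the DeleteRow lines above:

	// Positional (old style): vet flags this as an unkeyed composite literal.
	before := &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{&btpb.Mutation_DeleteFromRow{}}}
	// Keyed (new style): the oneof field is named explicitly.
	after := &btpb.Mutation{Mutation: &btpb.Mutation_DeleteFromRow_{DeleteFromRow: &btpb.Mutation_DeleteFromRow{}}}
	_, _ = before, after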
// entryErr is a container that combines an entry with the error that was returned for it.
@ -670,8 +704,9 @@ func (t *Table) doApplyBulk(ctx context.Context, entryErrs []*entryErr, opts ...ApplyOption) error {
		entries[i] = entryErr.Entry
	}
	req := &btpb.MutateRowsRequest{
		TableName: t.c.fullTableName(t.table),
		Entries:   entries,
		TableName:    t.c.fullTableName(t.table),
		AppProfileId: t.c.appProfile,
		Entries:      entries,
	}
	stream, err := t.c.client.MutateRows(ctx, req)
	if err != nil {
@ -729,9 +764,10 @@ func (ts Timestamp) TruncateToMilliseconds() Timestamp {
func (t *Table) ApplyReadModifyWrite(ctx context.Context, row string, m *ReadModifyWrite) (Row, error) {
	ctx = mergeOutgoingMetadata(ctx, t.md)
	req := &btpb.ReadModifyWriteRowRequest{
		TableName: t.c.fullTableName(t.table),
		RowKey:    []byte(row),
		Rules:     m.ops,
		TableName:    t.c.fullTableName(t.table),
		AppProfileId: t.c.appProfile,
		RowKey:       []byte(row),
		Rules:        m.ops,
	}
	res, err := t.c.client.ReadModifyWriteRow(ctx, req)
	if err != nil {
@ -768,7 +804,7 @@ func (m *ReadModifyWrite) AppendValue(family, column string, v []byte) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_AppendValue{v},
		Rule:            &btpb.ReadModifyWriteRule_AppendValue{AppendValue: v},
	})
}

@ -780,7 +816,7 @@ func (m *ReadModifyWrite) Increment(family, column string, delta int64) {
	m.ops = append(m.ops, &btpb.ReadModifyWriteRule{
		FamilyName:      family,
		ColumnQualifier: []byte(column),
		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{delta},
		Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: delta},
	})
}
130
vendor/cloud.google.com/go/bigtable/bigtable_test.go
generated
vendored
@ -27,6 +27,8 @@ import (
	"cloud.google.com/go/internal/testutil"

	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/grpc"
)

func TestPrefix(t *testing.T) {
@ -54,6 +56,29 @@ func TestPrefix(t *testing.T) {
	}
}

func TestApplyErrors(t *testing.T) {
	ctx := context.Background()
	table := &Table{
		c: &Client{
			project:  "P",
			instance: "I",
		},
		table: "t",
	}
	f := ColumnFilter("C")
	m := NewMutation()
	m.DeleteRow()
	// Test nested conditional mutations.
	cm := NewCondMutation(f, NewCondMutation(f, m, nil), nil)
	if err := table.Apply(ctx, "x", cm); err == nil {
		t.Error("got nil, want error")
	}
	cm = NewCondMutation(f, nil, NewCondMutation(f, m, nil))
	if err := table.Apply(ctx, "x", cm); err == nil {
		t.Error("got nil, want error")
	}
}

func TestClientIntegration(t *testing.T) {
	start := time.Now()
	lastCheckpoint := start
@ -70,7 +95,7 @@ func TestClientIntegration(t *testing.T) {

	var timeout time.Duration
	if testEnv.Config().UseProd {
		timeout = 5 * time.Minute
		timeout = 10 * time.Minute
		t.Logf("Running test against production")
	} else {
		timeout = 1 * time.Minute
@ -129,6 +154,12 @@ func TestClientIntegration(t *testing.T) {
	}
	checkpoint("inserted initial data")

	// TODO(igorbernstein): re-enable this when ready
	//if err := adminClient.WaitForReplication(ctx, table); err != nil {
	//	t.Errorf("Waiting for replication for table %q: %v", table, err)
	//}
	//checkpoint("waited for replication")

	// Do a conditional mutation with a complex filter.
	mutTrue := NewMutation()
	mutTrue.Set("follows", "wmckinley", 0, []byte("1"))
@ -915,6 +946,103 @@ func TestClientIntegration(t *testing.T) {
	}
}

type requestCountingInterceptor struct {
	grpc.ClientStream
	requestCallback func()
}

func (i *requestCountingInterceptor) SendMsg(m interface{}) error {
	i.requestCallback()
	return i.ClientStream.SendMsg(m)
}

func (i *requestCountingInterceptor) RecvMsg(m interface{}) error {
	return i.ClientStream.RecvMsg(m)
}

func requestCallback(callback func()) func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	return func(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
		clientStream, err := streamer(ctx, desc, cc, method, opts...)
		return &requestCountingInterceptor{
			ClientStream:    clientStream,
			requestCallback: callback,
		}, err
	}
}

// TestReadRowsInvalidRowSet verifies that the client doesn't send ReadRows() requests with invalid RowSets.
func TestReadRowsInvalidRowSet(t *testing.T) {
	testEnv, err := NewEmulatedEnv(IntegrationTestConfig{})
	if err != nil {
		t.Fatalf("NewEmulatedEnv failed: %v", err)
	}
	var requestCount int
	incrementRequestCount := func() { requestCount++ }
	conn, err := grpc.Dial(testEnv.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
		grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)),
		grpc.WithStreamInterceptor(requestCallback(incrementRequestCount)),
	)
	if err != nil {
		t.Fatalf("grpc.Dial failed: %v", err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
	defer cancel()
	adminClient, err := NewAdminClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
	if err != nil {
		t.Fatalf("NewAdminClient failed: %v", err)
	}
	defer adminClient.Close()
	if err := adminClient.CreateTable(ctx, testEnv.config.Table); err != nil {
		t.Fatalf("CreateTable(%v) failed: %v", testEnv.config.Table, err)
	}
	client, err := NewClient(ctx, testEnv.config.Project, testEnv.config.Instance, option.WithGRPCConn(conn))
	if err != nil {
		t.Fatalf("NewClient failed: %v", err)
	}
	defer client.Close()
	table := client.Open(testEnv.config.Table)
	tests := []struct {
		rr    RowSet
		valid bool
	}{
		{
			rr:    RowRange{},
			valid: true,
		},
		{
			rr:    RowRange{start: "b"},
			valid: true,
		},
		{
			rr:    RowRange{start: "b", limit: "c"},
			valid: true,
		},
		{
			rr:    RowRange{start: "b", limit: "a"},
			valid: false,
		},
		{
			rr:    RowList{"a"},
			valid: true,
		},
		{
			rr:    RowList{},
			valid: false,
		},
	}
	for _, test := range tests {
		requestCount = 0
		err = table.ReadRows(ctx, test.rr, func(r Row) bool { return true })
		if err != nil {
			t.Fatalf("ReadRows(%v) failed: %v", test.rr, err)
		}
		requestValid := requestCount != 0
		if requestValid != test.valid {
			t.Errorf("%s: got %v, want %v", test.rr, requestValid, test.valid)
		}
	}
}

func formatReadItem(ri ReadItem) string {
	// Use the column qualifier only to make the test data briefer.
	col := ri.Column[strings.Index(ri.Column, ":")+1:]
2
vendor/cloud.google.com/go/bigtable/bttest/example_test.go
generated
vendored
@ -28,7 +28,7 @@ import (

func ExampleNewServer() {

	srv, err := bttest.NewServer("127.0.0.1:0")
	srv, err := bttest.NewServer("localhost:0")

	if err != nil {
		log.Fatalln(err)
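Putting the example together end to end, here is a hedged sketch of a full in-memory round trip; the project, instance, table, and family names are arbitrary, since the fake ignores the project/instance values:

	package main

	import (
		"log"

		"cloud.google.com/go/bigtable"
		"cloud.google.com/go/bigtable/bttest"
		"golang.org/x/net/context"
		"google.golang.org/api/option"
		"google.golang.org/grpc"
	)

	func main() {
		ctx := context.Background()
		srv, err := bttest.NewServer("localhost:0")
		if err != nil {
			log.Fatal(err)
		}
		defer srv.Close()

		conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
		if err != nil {
			log.Fatal(err)
		}

		admin, err := bigtable.NewAdminClient(ctx, "proj", "inst", option.WithGRPCConn(conn))
		if err != nil {
			log.Fatal(err)
		}
		if err := admin.CreateTable(ctx, "example"); err != nil {
			log.Fatal(err)
		}
		if err := admin.CreateColumnFamily(ctx, "example", "cf"); err != nil {
			log.Fatal(err)
		}

		client, err := bigtable.NewClient(ctx, "proj", "inst", option.WithGRPCConn(conn))
		if err != nil {
			log.Fatal(err)
		}
		tbl := client.Open("example")
		mut := bigtable.NewMutation()
		mut.Set("cf", "col", bigtable.Now(), []byte("v"))
		if err := tbl.Apply(ctx, "row", mut); err != nil {
			log.Fatal(err)
		}
	}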
229
vendor/cloud.google.com/go/bigtable/bttest/inmem.go
generated
vendored
@ -19,7 +19,7 @@ Package bttest contains test helpers for working with the bigtable package.

To use a Server, create it, and then connect to it with no security:
(The project/instance values are ignored.)
	srv, err := bttest.NewServer("127.0.0.1:0")
	srv, err := bttest.NewServer("localhost:0")
	...
	conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
	...
@ -45,12 +45,14 @@ import (

	emptypb "github.com/golang/protobuf/ptypes/empty"
	"github.com/golang/protobuf/ptypes/wrappers"
	"github.com/google/btree"
	"golang.org/x/net/context"
	btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
	btpb "google.golang.org/genproto/googleapis/bigtable/v2"
	statpb "google.golang.org/genproto/googleapis/rpc/status"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// Server is an in-memory Cloud Bigtable fake.
@ -162,7 +164,7 @@ func (s *server) DeleteTable(ctx context.Context, req *btapb.DeleteTableRequest) (*emptypb.Empty, error) {
	s.mu.Lock()
	defer s.mu.Unlock()
	if _, ok := s.tables[req.Name]; !ok {
		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}
	delete(s.tables, req.Name)
	return &emptypb.Empty{}, nil
@ -216,6 +218,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColumnFamiliesRequest) (*btapb.Table, error) {
	return &btapb.Table{
		Name:           tblName,
		ColumnFamilies: toColumnFamilies(tbl.families),
		Granularity:    btapb.Table_TimestampGranularity(btapb.Table_MILLIS),
	}, nil
}

@ -224,65 +227,103 @@ func (s *server) DropRowRange(ctx context.Context, req *btapb.DropRowRangeRequest) (*emptypb.Empty, error) {
	defer s.mu.Unlock()
	tbl, ok := s.tables[req.Name]
	if !ok {
		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.Name)
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}

	if req.GetDeleteAllDataFromTable() {
		tbl.rows = nil
		tbl.rowIndex = make(map[string]*row)
		tbl.rows = btree.New(btreeDegree)
	} else {
		// Delete rows by prefix.
		prefixBytes := req.GetRowKeyPrefix()
		if prefixBytes == nil {
			return nil, fmt.Errorf("missing row key prefix")
		}
		prefix := string(prefixBytes)

		start := -1
		end := 0
		for i, row := range tbl.rows {
			match := strings.HasPrefix(row.key, prefix)
			if match {
				// Delete the mapping. Row will be deleted from sorted range below.
				delete(tbl.rowIndex, row.key)
			}
			if match && start == -1 {
				start = i
			} else if !match && start != -1 {
				break
			}
			end++
		}
		if start != -1 {
			// Delete the range, using method from https://github.com/golang/go/wiki/SliceTricks
			copy(tbl.rows[start:], tbl.rows[end:])
			for k, n := len(tbl.rows)-end+start, len(tbl.rows); k < n; k++ {
				tbl.rows[k] = nil
			}
			tbl.rows = tbl.rows[:len(tbl.rows)-end+start]
		}

		// The BTree does not specify what happens if rows are deleted during
		// iteration, and it provides no "delete range" method.
		// So we collect the rows first, then delete them one by one.
		var rowsToDelete []*row
		tbl.rows.AscendGreaterOrEqual(btreeKey(prefix), func(i btree.Item) bool {
			r := i.(*row)
			if strings.HasPrefix(r.key, prefix) {
				rowsToDelete = append(rowsToDelete, r)
				return true
			} else {
				return false // stop iteration
			}
		})
		for _, r := range rowsToDelete {
			tbl.rows.Delete(r)
		}
	}

	return &emptypb.Empty{}, nil
}

// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (s *server) GenerateConsistencyToken(ctx context.Context, req *btapb.GenerateConsistencyTokenRequest) (*btapb.GenerateConsistencyTokenResponse, error) {
	// Check that the table exists.
	_, ok := s.tables[req.Name]
	if !ok {
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}

	return &btapb.GenerateConsistencyTokenResponse{
		ConsistencyToken: "TokenFor-" + req.Name,
	}, nil
}

// This is a private alpha release of Cloud Bigtable replication. This feature
// is not currently available to most Cloud Bigtable customers. This feature
// might be changed in backward-incompatible ways and is not recommended for
// production use. It is not subject to any SLA or deprecation policy.
func (s *server) CheckConsistency(ctx context.Context, req *btapb.CheckConsistencyRequest) (*btapb.CheckConsistencyResponse, error) {
	// Check that the table exists.
	_, ok := s.tables[req.Name]
	if !ok {
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.Name)
	}

	// Check this is the right token.
	if req.ConsistencyToken != "TokenFor-"+req.Name {
		return nil, status.Errorf(codes.InvalidArgument, "token %q not valid", req.ConsistencyToken)
	}

	// Single cluster instances are always consistent.
	return &btapb.CheckConsistencyResponse{
		Consistent: true,
	}, nil
}
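The pair of handlers above is the whole replication-consistency handshake this fake supports: mint a token for a table, then hand it back to CheckConsistency, which is trivially true for a single-cluster fake. A sketch of the wire-level flow, where admin is assumed to be a btapb.BigtableTableAdminClient dialed against this server and tableName an existing table name (both assumptions, not part of this file):

	genResp, err := admin.GenerateConsistencyToken(ctx, &btapb.GenerateConsistencyTokenRequest{
		Name: tableName,
	})
	if err != nil {
		log.Fatal(err)
	}
	chkResp, err := admin.CheckConsistency(ctx, &btapb.CheckConsistencyRequest{
		Name:             tableName,
		ConsistencyToken: genResp.ConsistencyToken,
	})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("consistent:", chkResp.Consistent) // always true for this single-cluster fake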
func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
	s.mu.Lock()
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}

	// Rows to read can be specified by a set of row keys and/or a set of row ranges.
	// Output is a stream of sorted, de-duped rows.
	tbl.mu.RLock()
	rowSet := make(map[string]*row)

	addRow := func(i btree.Item) bool {
		r := i.(*row)
		rowSet[r.key] = r
		return true
	}

	if req.Rows != nil {
		// Add the explicitly given keys
		for _, key := range req.Rows.RowKeys {
			start := string(key)
			addRows(start, start+"\x00", tbl, rowSet)
			k := string(key)
			if i := tbl.rows.Get(btreeKey(k)); i != nil {
				addRow(i)
			}
		}

		// Add keys from row ranges
@ -300,12 +341,20 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
			case *btpb.RowRange_EndKeyOpen:
				end = string(ek.EndKeyOpen)
			}

			addRows(start, end, tbl, rowSet)
			switch {
			case start == "" && end == "":
				tbl.rows.Ascend(addRow) // all rows
			case start == "":
				tbl.rows.AscendLessThan(btreeKey(end), addRow)
			case end == "":
				tbl.rows.AscendGreaterOrEqual(btreeKey(start), addRow)
			default:
				tbl.rows.AscendRange(btreeKey(start), btreeKey(end), addRow)
			}
		}
	} else {
		// Read all rows
		addRows("", "", tbl, rowSet)
		tbl.rows.Ascend(addRow)
	}
	tbl.mu.RUnlock()

@ -332,21 +381,6 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRowsServer) error {
	return nil
}

func addRows(start, end string, tbl *table, rowSet map[string]*row) {
	si, ei := 0, len(tbl.rows) // half-open interval
	if start != "" {
		si = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= start })
	}
	if end != "" {
		ei = sort.Search(len(tbl.rows), func(i int) bool { return tbl.rows[i].key >= end })
	}
	if si < ei {
		for _, row := range tbl.rows[si:ei] {
			rowSet[row.key] = row
		}
	}
}

// streamRow filters the given row and sends it via the given stream.
// Returns true if at least one cell matched the filter and was streamed, false otherwise.
func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) {
@ -382,7 +416,7 @@ func streamRow(stream btpb.Bigtable_ReadRowsServer, r *row, f *btpb.RowFilter) (bool, error) {
	// We can't have a cell with just COMMIT set, which would imply a new empty cell.
	// So modify the last cell to have the COMMIT flag set.
	if len(rrr.Chunks) > 0 {
		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{true}
		rrr.Chunks[len(rrr.Chunks)-1].RowStatus = &btpb.ReadRowsResponse_CellChunk_CommitRow{CommitRow: true}
	}

	return true, stream.Send(rrr)
@ -396,6 +430,10 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
	}
	// Handle filters that apply beyond just including/excluding cells.
	switch f := f.Filter.(type) {
	case *btpb.RowFilter_BlockAllFilter:
		return !f.BlockAllFilter
	case *btpb.RowFilter_PassAllFilter:
		return f.PassAllFilter
	case *btpb.RowFilter_Chain_:
		for _, sub := range f.Chain.Filters {
			if !filterRow(sub, r) {
@ -623,12 +661,11 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*btpb.MutateRowResponse, error) {
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}
	fs := tbl.columnFamilies()
	r, _ := tbl.mutableRow(string(req.RowKey))
	r := tbl.mutableRow(string(req.RowKey))
	r.mu.Lock()
	defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
	defer r.mu.Unlock()
	if err := applyMutations(tbl, r, req.Mutations, fs); err != nil {
		return nil, err
@ -641,15 +678,14 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_MutateRowsServer) error {
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}
	res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}

	fs := tbl.columnFamilies()

	defer tbl.resortRowIndex()
	for i, entry := range req.Entries {
		r, _ := tbl.mutableRow(string(entry.RowKey))
		r := tbl.mutableRow(string(entry.RowKey))
		r.mu.Lock()
		code, msg := int32(codes.OK), ""
		if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
@ -671,13 +707,13 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}
	res := &btpb.CheckAndMutateRowResponse{}

	fs := tbl.columnFamilies()

	r, _ := tbl.mutableRow(string(req.RowKey))
	r := tbl.mutableRow(string(req.RowKey))
	r.mu.Lock()
	defer r.mu.Unlock()

@ -699,7 +735,6 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutateRowRequest) (*btpb.CheckAndMutateRowResponse, error) {
		muts = req.TrueMutations
	}

	defer tbl.resortRowIndex()
	if err := applyMutations(tbl, r, muts, fs); err != nil {
		return nil, err
	}
@ -824,18 +859,15 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWriteRowRequest) (*btpb.ReadModifyWriteRowResponse, error) {
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return nil, status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}
	updates := make(map[string]cell) // copy of updated cells; keyed by full column name

	fs := tbl.columnFamilies()

	rowKey := string(req.RowKey)
	r, isNewRow := tbl.mutableRow(rowKey)
	r := tbl.mutableRow(rowKey)
	// This must be done before the row lock, acquired below, is released.
	if isNewRow {
		defer tbl.resortRowIndex()
	}
	r.mu.Lock()
	defer r.mu.Unlock()
	// Assume all mutations apply to the most recent version of the cell.
@ -920,7 +952,7 @@ func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error {
	tbl, ok := s.tables[req.TableName]
	s.mu.Unlock()
	if !ok {
		return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
		return status.Errorf(codes.NotFound, "table %q not found", req.TableName)
	}

	tbl.mu.RLock()
@ -929,20 +961,25 @@ func (s *server) SampleRowKeys(req *btpb.SampleRowKeysRequest, stream btpb.Bigtable_SampleRowKeysServer) error {
	// The return value of SampleRowKeys is very loosely defined. Return at least the
	// final row key in the table and choose other row keys randomly.
	var offset int64
	for i, row := range tbl.rows {
		if i == len(tbl.rows)-1 || rand.Int31n(100) == 0 {
			resp := &btpb.SampleRowKeysResponse{
				RowKey:      []byte(row.key),
				OffsetBytes: offset,
			}
			err := stream.Send(resp)
			if err != nil {
				return err
			}
		}
		offset += int64(row.size())
	}
	return nil

	var err error
	i := 0
	tbl.rows.Ascend(func(it btree.Item) bool {
		row := it.(*row)
		if i == tbl.rows.Len()-1 || rand.Int31n(100) == 0 {
			resp := &btpb.SampleRowKeysResponse{
				RowKey:      []byte(row.key),
				OffsetBytes: offset,
			}
			err = stream.Send(resp)
			if err != nil {
				return false
			}
		}
		offset += int64(row.size())
		i++
		return true
	})
	return err
}

// needGC is invoked whenever the server needs gcloop running.
@ -987,10 +1024,11 @@ type table struct {
	mu       sync.RWMutex
	counter  uint64                   // increment by 1 when a new family is created
	families map[string]*columnFamily // keyed by plain family name
	rows     []*row                   // sorted by row key
	rowIndex map[string]*row          // indexed by row key
	rows     *btree.BTree             // indexed by row key
}

const btreeDegree = 16

func newTable(ctr *btapb.CreateTableRequest) *table {
	fams := make(map[string]*columnFamily)
	c := uint64(0)
@ -1007,7 +1045,7 @@ func newTable(ctr *btapb.CreateTableRequest) *table {
	return &table{
		families: fams,
		counter:  c,
		rowIndex: make(map[string]*row),
		rows:     btree.New(btreeDegree),
	}
}

@ -1026,31 +1064,26 @@ func (t *table) columnFamilies() map[string]*columnFamily {
	return cp
}

func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
	// Try fast path first.
	t.mu.RLock()
	r := t.rowIndex[row]
	t.mu.RUnlock()
	if r != nil {
		return r, false
	}

	// We probably need to create the row.
	t.mu.Lock()
	r = t.rowIndex[row]
	if r == nil {
		r = newRow(row)
		t.rowIndex[row] = r
		t.rows = append(t.rows, r)
	}
	t.mu.Unlock()
	return r, true
}

func (t *table) resortRowIndex() {
	t.mu.Lock()
	sort.Sort(byRowKey(t.rows))
	t.mu.Unlock()
}

func (t *table) mutableRow(key string) *row {
	bkey := btreeKey(key)
	// Try fast path first.
	t.mu.RLock()
	i := t.rows.Get(bkey)
	t.mu.RUnlock()
	if i != nil {
		return i.(*row)
	}

	// We probably need to create the row.
	t.mu.Lock()
	defer t.mu.Unlock()
	i = t.rows.Get(bkey)
	if i != nil {
		return i.(*row)
	}
	r := newRow(key)
	t.rows.ReplaceOrInsert(r)
	return r
}

func (t *table) gc() {
@ -1069,11 +1102,13 @@ func (t *table) gc() {
		return
	}

	for _, r := range t.rows {
	t.rows.Ascend(func(i btree.Item) bool {
		r := i.(*row)
		r.mu.Lock()
		r.gc(rules)
		r.mu.Unlock()
	}
		return true
	})
}

type byRowKey []*row
@ -1177,6 +1212,14 @@ func (r *row) size() int {
	return size
}

// Less implements btree.Less.
func (r *row) Less(i btree.Item) bool {
	return r.key < i.(*row).key
}

// btreeKey returns a row for use as a key into the BTree.
func btreeKey(s string) *row { return &row{key: s} }

func (r *row) String() string {
	return r.key
}
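The structural change running through this file is the swap from a sorted []*row plus map index to a single github.com/google/btree index: Less on *row supplies the ordering, btreeKey builds a throwaway probe item, and scans become Ascend* calls, which removes the resortRowIndex step entirely. A standalone sketch of the same pattern, independent of the fake:

	package main

	import (
		"fmt"

		"github.com/google/btree"
	)

	type row struct{ key string }

	// Less orders rows by key, as the fake's index does.
	func (r *row) Less(than btree.Item) bool { return r.key < than.(*row).key }

	func main() {
		rows := btree.New(16) // same degree as btreeDegree above
		for _, k := range []string{"b", "a", "c", "ab"} {
			rows.ReplaceOrInsert(&row{key: k})
		}

		// Point lookup through a probe item (the btreeKey trick).
		if it := rows.Get(&row{key: "ab"}); it != nil {
			fmt.Println("found:", it.(*row).key)
		}

		// Half-open scan ["a", "b"), the shape of a RowRange read.
		rows.AscendRange(&row{key: "a"}, &row{key: "b"}, func(it btree.Item) bool {
			fmt.Println("in range:", it.(*row).key)
			return true
		})
	}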
215
vendor/cloud.google.com/go/bigtable/bttest/inmem_test.go
generated
vendored
@ -46,7 +46,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
		}},
	}
	_, err := s.ModifyColumnFamilies(ctx, req)
@ -57,8 +57,8 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
		Name: name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id: "cf",
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{&btapb.ColumnFamily{
				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}},
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Update{Update: &btapb.ColumnFamily{
				GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}},
			}},
		}},
	}
@ -70,7 +70,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
	var ts int64
	ms := func() []*btpb.Mutation {
		return []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte(`col`),
				TimestampMicros: atomic.AddInt64(&ts, 1000),
@ -85,7 +85,7 @@ func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {
		Rules: []*btpb.ReadModifyWriteRule{{
			FamilyName:      "cf",
			ColumnQualifier: []byte("col"),
			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
		}},
	}
}
@ -139,8 +139,8 @@ func TestCreateTableWithFamily(t *testing.T) {
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{123}}},
			"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{456}}},
			"cf1": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 123}}},
			"cf2": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 456}}},
		},
	}
	cTbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@ -184,7 +184,7 @@ func TestSampleRowKeys(t *testing.T) {
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@ -200,7 +200,7 @@ func TestSampleRowKeys(t *testing.T) {
		TableName: tbl.Name,
		RowKey:    []byte("row-" + strconv.Itoa(i)),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 0,
@ -235,7 +235,7 @@ func TestDropRowRange(t *testing.T) {
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@ -255,7 +255,7 @@ func TestDropRowRange(t *testing.T) {
		TableName: tblInfo.Name,
		RowKey:    []byte(prefix + strconv.Itoa(i)),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 0,
@ -271,51 +271,51 @@ func TestDropRowRange(t *testing.T) {
	}

	doWrite()
	tblSize := len(tbl.rows)
	tblSize := tbl.rows.Len()
	req := &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("AAA")},
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("AAA")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping first range: %v", err)
	}
	got, want := len(tbl.rows), tblSize-count
	got, want := tbl.rows.Len(), tblSize-count
	if got != want {
		t.Errorf("Row count after first drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("DDD")},
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("DDD")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping second range: %v", err)
	}
	got, want = len(tbl.rows), tblSize-(2*count)
	got, want = tbl.rows.Len(), tblSize-(2*count)
	if got != want {
		t.Errorf("Row count after second drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("XXX")},
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("XXX")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping invalid range: %v", err)
	}
	got, want = len(tbl.rows), tblSize-(2*count)
	got, want = tbl.rows.Len(), tblSize-(2*count)
	if got != want {
		t.Errorf("Row count after invalid drop: got %d (%v), want %d", got, tbl.rows, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping all data: %v", err)
	}
	got, want = len(tbl.rows), 0
	got, want = tbl.rows.Len(), 0
	if got != want {
		t.Errorf("Row count after drop all: got %d, want %d", got, want)
	}
@ -326,31 +326,31 @@ func TestDropRowRange(t *testing.T) {

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{true},
		Target: &btapb.DropRowRangeRequest_DeleteAllDataFromTable{DeleteAllDataFromTable: true},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping all data: %v", err)
	}
	got, want = len(tbl.rows), 0
	got, want = tbl.rows.Len(), 0
	if got != want {
		t.Errorf("Row count after drop all: got %d, want %d", got, want)
	}

	doWrite()
	got, want = len(tbl.rows), len(prefixes)
	got, want = tbl.rows.Len(), len(prefixes)
	if got != want {
		t.Errorf("Row count after rewrite: got %d, want %d", got, want)
	}

	req = &btapb.DropRowRangeRequest{
		Name:   tblInfo.Name,
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{[]byte("BBB")},
		Target: &btapb.DropRowRangeRequest_RowKeyPrefix{RowKeyPrefix: []byte("BBB")},
	}
	if _, err = s.DropRowRange(ctx, req); err != nil {
		t.Fatalf("Dropping range: %v", err)
	}
	doWrite()
	got, want = len(tbl.rows), len(prefixes)
	got, want = tbl.rows.Len(), len(prefixes)
	if got != want {
		t.Errorf("Row count after drop range: got %d, want %d", got, want)
	}
@ -366,6 +366,56 @@ func (s *MockReadRowsServer) Send(resp *btpb.ReadRowsResponse) error {
	return nil
}

func TestReadRows(t *testing.T) {
	ctx := context.Background()
	s := &server{
		tables: make(map[string]*table),
	}
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		t.Fatalf("Creating table: %v", err)
	}
	mreq := &btpb.MutateRowRequest{
		TableName: tblInfo.Name,
		RowKey:    []byte("row"),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf0",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 1000,
				Value:           []byte{},
			}},
		}},
	}
	if _, err := s.MutateRow(ctx, mreq); err != nil {
		t.Fatalf("Populating table: %v", err)
	}

	for _, rowset := range []*btpb.RowSet{
		{RowKeys: [][]byte{[]byte("row")}},
		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")}}}},
		{RowRanges: []*btpb.RowRange{{StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("r")}}}},
		{RowRanges: []*btpb.RowRange{{
			StartKey: &btpb.RowRange_StartKeyClosed{StartKeyClosed: []byte("")},
			EndKey:   &btpb.RowRange_EndKeyOpen{EndKeyOpen: []byte("s")},
		}}},
	} {
		mock := &MockReadRowsServer{}
		req := &btpb.ReadRowsRequest{TableName: tblInfo.Name, Rows: rowset}
		if err = s.ReadRows(req, mock); err != nil {
			t.Fatalf("ReadRows error: %v", err)
		}
		if got, want := len(mock.responses), 1; got != want {
			t.Errorf("%+v: response count: got %d, want %d", rowset, got, want)
		}
	}
}

func TestReadRowsOrder(t *testing.T) {
	s := &server{
		tables: make(map[string]*table),
@ -373,7 +423,7 @@ func TestReadRowsOrder(t *testing.T) {
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@ -386,7 +436,7 @@ func TestReadRowsOrder(t *testing.T) {
		Name: tblInfo.Name,
		Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
			Id:  "cf" + strconv.Itoa(i),
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
			Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{Create: &btapb.ColumnFamily{}},
		}},
	}
}
@ -404,7 +454,7 @@ func TestReadRowsOrder(t *testing.T) {
		TableName: tblInfo.Name,
		RowKey:    []byte("row"),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf" + strconv.Itoa(fc),
				ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
				TimestampMicros: int64((tc + 1) * 1000),
@ -462,16 +512,17 @@ func TestReadRowsOrder(t *testing.T) {

	// Read with interleave filter
	inter := &btpb.RowFilter_Interleave{}
	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{"1"}}
	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte("2")}}
	fnr := &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: "1"}}
	cqr := &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte("2")}}
	inter.Filters = append(inter.Filters, fnr, cqr)
	req = &btpb.ReadRowsRequest{
		TableName: tblInfo.Name,
		Rows:      &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
		Filter: &btpb.RowFilter{
			Filter: &btpb.RowFilter_Interleave_{inter},
			Filter: &btpb.RowFilter_Interleave_{Interleave: inter},
		},
	}

	mock = &MockReadRowsServer{}
	if err = s.ReadRows(req, mock); err != nil {
		t.Errorf("ReadRows error: %v", err)
@ -492,7 +543,7 @@ func TestReadRowsOrder(t *testing.T) {
		Rules: []*btpb.ReadModifyWriteRule{{
			FamilyName:      "cf3",
			ColumnQualifier: []byte("col" + strconv.Itoa(i)),
			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{1},
			Rule:            &btpb.ReadModifyWriteRule_IncrementAmount{IncrementAmount: 1},
		}},
	}
}
@ -523,7 +574,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
	ctx := context.Background()
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
			"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{MaxNumVersions: 1}}},
		},
	}
	tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
@ -537,7 +588,7 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
		TableName: tbl.Name,
		RowKey:    []byte("row-present"),
		Mutations: []*btpb.Mutation{{
			Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
			Mutation: &btpb.Mutation_SetCell_{SetCell: &btpb.Mutation_SetCell{
				FamilyName:      "cf",
				ColumnQualifier: []byte("col"),
				TimestampMicros: 0,
@ -569,3 +620,99 @@ func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
		t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
	}
}

// helper function to populate table data
func populateTable(ctx context.Context, s *server) (*btapb.Table, error) {
	newTbl := btapb.Table{
		ColumnFamilies: map[string]*btapb.ColumnFamily{
			"cf0": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
		},
	}
	tblInfo, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
	if err != nil {
		return nil, err
	}
	count := 3
	mcf := func(i int) *btapb.ModifyColumnFamiliesRequest {
		return &btapb.ModifyColumnFamiliesRequest{
			Name: tblInfo.Name,
			Modifications: []*btapb.ModifyColumnFamiliesRequest_Modification{{
				Id:  "cf" + strconv.Itoa(i),
				Mod: &btapb.ModifyColumnFamiliesRequest_Modification_Create{&btapb.ColumnFamily{}},
			}},
		}
	}
	for i := 1; i <= count; i++ {
		_, err = s.ModifyColumnFamilies(ctx, mcf(i))
		if err != nil {
			return nil, err
		}
	}
	// Populate the table
	for fc := 0; fc < count; fc++ {
		for cc := count; cc > 0; cc-- {
			for tc := 0; tc < count; tc++ {
				req := &btpb.MutateRowRequest{
					TableName: tblInfo.Name,
					RowKey:    []byte("row"),
					Mutations: []*btpb.Mutation{{
						Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
							FamilyName:      "cf" + strconv.Itoa(fc),
							ColumnQualifier: []byte("col" + strconv.Itoa(cc)),
							TimestampMicros: int64((tc + 1) * 1000),
							Value:           []byte{},
						}},
					}},
				}
				if _, err := s.MutateRow(ctx, req); err != nil {
					return nil, err
				}
			}
		}
	}

	return tblInfo, nil
}

func TestFilters(t *testing.T) {
	tests := []struct {
		in  *btpb.RowFilter
		out int
	}{
		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{true}}, out: 0},
		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_BlockAllFilter{false}}, out: 1},
		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{true}}, out: 1},
		{in: &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{false}}, out: 0},
	}

	ctx := context.Background()

	s := &server{
|
||||
tables: make(map[string]*table),
|
||||
}
|
||||
|
||||
tblInfo, err := populateTable(ctx, s)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
req := &btpb.ReadRowsRequest{
|
||||
TableName: tblInfo.Name,
|
||||
Rows: &btpb.RowSet{RowKeys: [][]byte{[]byte("row")}},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
req.Filter = tc.in
|
||||
|
||||
mock := &MockReadRowsServer{}
|
||||
if err = s.ReadRows(req, mock); err != nil {
|
||||
t.Errorf("ReadRows error: %v", err)
|
||||
continue
|
||||
}
|
||||
|
||||
if len(mock.responses) != tc.out {
|
||||
t.Errorf("Response count: got %d, want %d", len(mock.responses), tc.out)
|
||||
continue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
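The recurring change in this test file is mechanical: every protobuf oneof wrapper gains an explicit field name, so GcRule_MaxNumVersions{1} becomes GcRule_MaxNumVersions{MaxNumVersions: 1}. A minimal sketch of the pattern, assuming the btpb alias used above for the generated Bigtable v2 protos:

    // Positional form: relies on field order and is flagged by `go vet`
    // ("composite literal uses unkeyed fields").
    positional := &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{true}}
    // Keyed form: names the single oneof field explicitly and survives field reordering.
    keyed := &btpb.RowFilter{Filter: &btpb.RowFilter_PassAllFilter{PassAllFilter: true}}
    _, _ = positional, keyed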
564 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbt.go generated vendored
@ -34,9 +34,12 @@ import (
    "text/template"
    "time"

    "encoding/csv"

    "cloud.google.com/go/bigtable"
    "cloud.google.com/go/bigtable/internal/cbtconfig"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
    "google.golang.org/grpc"
)
@ -64,7 +67,7 @@ func getCredentialOpts(opts []option.ClientOption) []option.ClientOption {
    return opts
}

func getClient() *bigtable.Client {
func getClient(clientConf bigtable.ClientConfig) *bigtable.Client {
    if client == nil {
        var opts []option.ClientOption
        if ep := config.DataEndpoint; ep != "" {
@ -72,7 +75,7 @@ func getClient() *bigtable.Client {
        }
        opts = getCredentialOpts(opts)
        var err error
        client, err = bigtable.NewClient(context.Background(), config.Project, config.Instance, opts...)
        client, err = bigtable.NewClientWithConfig(context.Background(), config.Project, config.Instance, clientConf, opts...)
        if err != nil {
            log.Fatalf("Making bigtable.Client: %v", err)
        }
@ -170,10 +173,15 @@ func init() {
    }
    tw.Flush()
    buf.WriteString(configHelp)
    buf.WriteString("\ncbt ` + version + ` ` + revision + ` ` + revisionDate + `")
    cmdSummary = buf.String()
}

var configHelp = `
Alpha features are not currently available to most Cloud Bigtable customers. The
features might be changed in backward-incompatible ways and are not recommended
for production use. They are not subject to any SLA or deprecation policy.

For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
` + cbtconfig.Filename() + ` in this format:
@ -183,8 +191,6 @@ For convenience, values of the -project, -instance, -creds,
    admin-endpoint = hostname:port
    data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.

cbt ` + version + ` ` + revision + ` ` + revisionDate + `
`
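A complete config file following this format might look like the sketch below (project, instance, and credential path are placeholders; the endpoint values are illustrative and usually omitted):

    project = my-project-123
    instance = my-instance
    creds = /path/to/service-account-key.json
    admin-endpoint = bigtableadmin.googleapis.com:443
    data-endpoint = bigtable.googleapis.com:443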
var commands = []struct {
@ -200,6 +206,30 @@ var commands = []struct {
        Usage:    "cbt count <table>",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "createinstance",
        Desc: "Create an instance with an initial cluster",
        do:   doCreateInstance,
        Usage: "cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>\n" +
            "  instance-id    Permanent, unique id for the instance\n" +
            "  display-name   Description of the instance\n" +
            "  cluster-id     Permanent, unique id for the cluster in the instance\n" +
            "  zone           The zone in which to create the cluster\n" +
            "  num-nodes      The number of nodes to create\n" +
            "  storage-type   SSD or HDD\n",
        Required: cbtconfig.ProjectRequired,
    },
    {
        Name: "createcluster",
        Desc: "Create a cluster in the configured instance (replication alpha)",
        do:   doCreateCluster,
        Usage: "cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>\n" +
            "  cluster-id     Permanent, unique id for the cluster in the instance\n" +
            "  zone           The zone in which to create the cluster\n" +
            "  num-nodes      The number of nodes to create\n" +
            "  storage-type   SSD or HDD\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "createfamily",
        Desc: "Create a column family",
@ -211,16 +241,41 @@ var commands = []struct {
        Name: "createtable",
        Desc: "Create a table",
        do:   doCreateTable,
        Usage: "cbt createtable <table> [initial_splits...]\n" +
            "  initial_splits=row A row key to be used to initially split the table " +
            "into multiple tablets. Can be repeated to create multiple splits.",
        Usage: "cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]\n" +
            "  families: Column families and their associated GC policies. See \"setgcpolicy\".\n" +
            "            Example: families=family1:maxage=1w,family2:maxversions=1\n" +
            "  splits:   Row key to be used to initially split the table",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "deletecolumn",
        Desc: "Delete all cells in a column",
        do:   doDeleteColumn,
        Usage: "cbt deletecolumn <table> <row> <family> <column>",
        Name: "updatecluster",
        Desc: "Update a cluster in the configured instance",
        do:   doUpdateCluster,
        Usage: "cbt updatecluster <cluster-id> [num-nodes=num-nodes]\n" +
            "  cluster-id    Permanent, unique id for the cluster in the instance\n" +
            "  num-nodes     The number of nodes to update to",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:     "deleteinstance",
        Desc:     "Deletes an instance",
        do:       doDeleteInstance,
        Usage:    "cbt deleteinstance <instance>",
        Required: cbtconfig.ProjectRequired,
    },
    {
        Name:     "deletecluster",
        Desc:     "Deletes a cluster from the configured instance (replication alpha)",
        do:       doDeleteCluster,
        Usage:    "cbt deletecluster <cluster>",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "deletecolumn",
        Desc: "Delete all cells in a column",
        do:   doDeleteColumn,
        Usage: "cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]\n" +
            "  app-profile=<app profile id>  The app profile id to use for the request (replication alpha)\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
@ -231,10 +286,11 @@ var commands = []struct {
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:  "deleterow",
        Desc:  "Delete a row",
        do:    doDeleteRow,
        Usage: "cbt deleterow <table> <row>",
        Name: "deleterow",
        Desc: "Delete a row",
        do:   doDeleteRow,
        Usage: "cbt deleterow <table> <row> [app-profile=<app profile id>]\n" +
            "  app-profile=<app profile id>  The app profile id to use for the request (replication alpha)\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
@ -266,10 +322,18 @@ var commands = []struct {
        Required: cbtconfig.ProjectRequired,
    },
    {
        Name:  "lookup",
        Desc:  "Read from a single row",
        do:    doLookup,
        Usage: "cbt lookup <table> <row>",
        Name:     "listclusters",
        Desc:     "List clusters in an instance",
        do:       doListClusters,
        Usage:    "cbt listclusters",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "lookup",
        Desc: "Read from a single row",
        do:   doLookup,
        Usage: "cbt lookup <table> <row> [app-profile=<app profile id>]\n" +
            "  app-profile=<app profile id>  The app profile id to use for the request (replication alpha)\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
@ -292,19 +356,21 @@ var commands = []struct {
        Desc: "Read rows",
        do:   doRead,
        Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
            " [regex=<regex>] [count=<n>]\n" +
            " [regex=<regex>] [count=<n>] [app-profile=<app profile id>]\n" +
            "  start=<row>      Start reading at this row\n" +
            "  end=<row>        Stop reading before this row\n" +
            "  prefix=<prefix>  Read rows with this prefix\n" +
            "  regex=<regex>    Read rows with keys matching this regex\n" +
            "  count=<n>        Read only this many rows\n",
            "  count=<n>        Read only this many rows\n" +
            "  app-profile=<app profile id>  The app profile id to use for the request (replication alpha)\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "set",
        Desc: "Set value of a cell",
        do:   doSet,
        Usage: "cbt set <table> <row> family:column=val[@ts] ...\n" +
        Usage: "cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...\n" +
            "  app-profile=<app profile id>  The app profile id to use for the request (replication alpha)\n" +
            "  family:column=val[@ts] may be repeated to set multiple cells.\n" +
            "\n" +
            "  ts is an optional integer timestamp.\n" +
@ -322,6 +388,53 @@ var commands = []struct {
            "  maxversions=<n>  Maximum number of versions to preserve",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:     "waitforreplication",
        Desc:     "Blocks until all the completed writes have been replicated to all the clusters (replication alpha)",
        do:       doWaitForReplication,
        Usage:    "cbt waitforreplication <table>",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "createtablefromsnapshot",
        Desc: "Create a table from a snapshot (snapshots alpha)",
        do:   doCreateTableFromSnapshot,
        Usage: "cbt createtablefromsnapshot <table> <cluster> <snapshot>\n" +
            "  table      The name of the table to create\n" +
            "  cluster    The cluster where the snapshot is located\n" +
            "  snapshot   The snapshot to restore",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "createsnapshot",
        Desc: "Create a snapshot from a source table (snapshots alpha)",
        do:   doSnapshotTable,
        Usage: "cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]\n" +
            "\n" +
            `  [ttl=<d>]  Lifespan of the snapshot (e.g. "1h", "4d")` + "\n",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:     "listsnapshots",
        Desc:     "List snapshots in a cluster (snapshots alpha)",
        do:       doListSnapshots,
        Usage:    "cbt listsnapshots [<cluster>]",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:     "getsnapshot",
        Desc:     "Get snapshot info (snapshots alpha)",
        do:       doGetSnapshot,
        Usage:    "cbt getsnapshot <cluster> <snapshot>",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name:     "deletesnapshot",
        Desc:     "Delete snapshot in a cluster (snapshots alpha)",
        do:       doDeleteSnapshot,
        Usage:    "cbt deletesnapshot <cluster> <snapshot>",
        Required: cbtconfig.ProjectAndInstanceRequired,
    },
    {
        Name: "version",
        Desc: "Print the current cbt version",
@ -335,7 +448,7 @@ func doCount(ctx context.Context, args ...string) {
    if len(args) != 1 {
        log.Fatal("usage: cbt count <table>")
    }
    tbl := getClient().Open(args[0])
    tbl := getClient(bigtable.ClientConfig{}).Open(args[0])

    n := 0
    err := tbl.ReadRows(ctx, bigtable.InfiniteRange(""), func(_ bigtable.Row) bool {
@ -348,6 +461,51 @@ func doCount(ctx context.Context, args ...string) {
    fmt.Println(n)
}

func doCreateTable(ctx context.Context, args ...string) {
    if len(args) < 1 {
        log.Fatal("usage: cbt createtable <table> [families=family[:gcpolicy],...] [splits=split,...]")
    }

    tblConf := bigtable.TableConf{TableID: args[0]}
    for _, arg := range args[1:] {
        i := strings.Index(arg, "=")
        if i < 0 {
            log.Fatalf("Bad arg %q", arg)
        }
        key, val := arg[:i], arg[i+1:]
        chunks, err := csv.NewReader(strings.NewReader(val)).Read()
        if err != nil {
            log.Fatalf("Invalid families arg format: %v", err)
        }
        switch key {
        default:
            log.Fatalf("Unknown arg key %q", key)
        case "families":
            tblConf.Families = make(map[string]bigtable.GCPolicy)
            for _, family := range chunks {
                famPolicy := strings.Split(family, ":")
                var gcPolicy bigtable.GCPolicy
                if len(famPolicy) < 2 {
                    gcPolicy = bigtable.MaxVersionsPolicy(1)
                    log.Printf("Using default GC Policy of %v for family %v", gcPolicy, family)
                } else {
                    gcPolicy, err = parseGCPolicy(famPolicy[1])
                    if err != nil {
                        log.Fatal(err)
                    }
                }
                tblConf.Families[famPolicy[0]] = gcPolicy
            }
        case "splits":
            tblConf.SplitKeys = chunks
        }
    }

    if err := getAdminClient().CreateTableFromConf(ctx, &tblConf); err != nil {
        log.Fatalf("Creating table: %v", err)
    }
}

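A hedged example invocation of the new createtable form (table, family, and split names are placeholders):

    cbt createtable mytable "families=cf1:maxage=7d,cf2:maxversions=3" splits=row500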
func doCreateFamily(ctx context.Context, args ...string) {
    if len(args) != 2 {
        log.Fatal("usage: cbt createfamily <table> <family>")
@ -358,27 +516,131 @@ func doCreateFamily(ctx context.Context, args ...string) {
    }
}

func doCreateTable(ctx context.Context, args ...string) {
    if len(args) < 1 {
        log.Fatal("usage: cbt createtable <table> [initial_splits...]")
    }
    var err error
    if len(args) > 1 {
        splits := args[1:]
        err = getAdminClient().CreatePresplitTable(ctx, args[0], splits)
    } else {
        err = getAdminClient().CreateTable(ctx, args[0])
func doCreateInstance(ctx context.Context, args ...string) {
    if len(args) < 6 {
        log.Fatal("cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>")
    }

    numNodes, err := strconv.ParseInt(args[4], 0, 32)
    if err != nil {
        log.Fatalf("Creating table: %v", err)
        log.Fatalf("Bad num-nodes %q: %v", args[4], err)
    }

    sType, err := parseStorageType(args[5])
    if err != nil {
        log.Fatal(err)
    }

    ic := bigtable.InstanceWithClustersConfig{
        InstanceID:  args[0],
        DisplayName: args[1],
        Clusters: []bigtable.ClusterConfig{{
            ClusterID:   args[2],
            Zone:        args[3],
            NumNodes:    int32(numNodes),
            StorageType: sType,
        }},
    }
    err = getInstanceAdminClient().CreateInstanceWithClusters(ctx, &ic)
    if err != nil {
        log.Fatalf("Creating instance: %v", err)
    }
}

func doCreateCluster(ctx context.Context, args ...string) {
    if len(args) < 4 {
        log.Fatal("usage: cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>")
    }

    numNodes, err := strconv.ParseInt(args[2], 0, 32)
    if err != nil {
        log.Fatalf("Bad num_nodes %q: %v", args[2], err)
    }

    sType, err := parseStorageType(args[3])
    if err != nil {
        log.Fatal(err)
    }

    cc := bigtable.ClusterConfig{
        InstanceID:  config.Instance,
        ClusterID:   args[0],
        Zone:        args[1],
        NumNodes:    int32(numNodes),
        StorageType: sType,
    }
    err = getInstanceAdminClient().CreateCluster(ctx, &cc)
    if err != nil {
        log.Fatalf("Creating cluster: %v", err)
    }
}

func doUpdateCluster(ctx context.Context, args ...string) {
    if len(args) < 2 {
        log.Fatal("cbt updatecluster <cluster-id> [num-nodes=num-nodes]")
    }

    numNodes := int64(0)
    var err error
    for _, arg := range args[1:] {
        i := strings.Index(arg, "=")
        if i < 0 {
            log.Fatalf("Bad arg %q", arg)
        }
        key, val := arg[:i], arg[i+1:]
        switch key {
        default:
            log.Fatalf("Unknown arg key %q", key)
        case "num-nodes":
            numNodes, err = strconv.ParseInt(val, 0, 32)
            if err != nil {
                log.Fatalf("Bad num-nodes %q: %v", val, err)
            }
        }
    }
    if numNodes > 0 {
        err = getInstanceAdminClient().UpdateCluster(ctx, config.Instance, args[0], int32(numNodes))
        if err != nil {
            log.Fatalf("Updating cluster: %v", err)
        }
    } else {
        log.Fatal("Updating cluster: nothing to update")
    }
}

func doDeleteInstance(ctx context.Context, args ...string) {
    if len(args) != 1 {
        log.Fatal("usage: cbt deleteinstance <instance>")
    }
    err := getInstanceAdminClient().DeleteInstance(ctx, args[0])
    if err != nil {
        log.Fatalf("Deleting instance: %v", err)
    }
}

func doDeleteCluster(ctx context.Context, args ...string) {
    if len(args) != 1 {
        log.Fatal("usage: cbt deletecluster <cluster>")
    }
    err := getInstanceAdminClient().DeleteCluster(ctx, config.Instance, args[0])
    if err != nil {
        log.Fatalf("Deleting cluster: %v", err)
    }
}

func doDeleteColumn(ctx context.Context, args ...string) {
    if len(args) != 4 {
        log.Fatal("usage: cbt deletecolumn <table> <row> <family> <column>")
    usage := "usage: cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]"
    if len(args) != 4 && len(args) != 5 {
        log.Fatal(usage)
    }
    tbl := getClient().Open(args[0])
    var appProfile string
    if len(args) == 5 {
        if !strings.HasPrefix(args[4], "app-profile=") {
            log.Fatal(usage)
        }
        appProfile = strings.Split(args[4], "=")[1]
    }
    tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
    mut := bigtable.NewMutation()
    mut.DeleteCellsInColumn(args[2], args[3])
    if err := tbl.Apply(ctx, args[1], mut); err != nil {
@ -397,10 +659,18 @@ func doDeleteFamily(ctx context.Context, args ...string) {
}

func doDeleteRow(ctx context.Context, args ...string) {
    if len(args) != 2 {
        log.Fatal("usage: cbt deleterow <table> <row>")
    usage := "usage: cbt deleterow <table> <row> [app-profile=<app profile id>]"
    if len(args) != 2 && len(args) != 3 {
        log.Fatal(usage)
    }
    tbl := getClient().Open(args[0])
    var appProfile string
    if len(args) == 3 {
        if !strings.HasPrefix(args[2], "app-profile=") {
            log.Fatal(usage)
        }
        appProfile = strings.Split(args[2], "=")[1]
    }
    tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
    mut := bigtable.NewMutation()
    mut.DeleteRow()
    if err := tbl.Apply(ctx, args[1], mut); err != nil {
@ -450,8 +720,9 @@ func docFlags() []*flag.Flag {

func doDocReal(ctx context.Context, args ...string) {
    data := map[string]interface{}{
        "Commands": commands,
        "Flags":    docFlags(),
        "Commands":   commands,
        "Flags":      docFlags(),
        "ConfigHelp": configHelp,
    }
    var buf bytes.Buffer
    if err := docTemplate.Execute(&buf, data); err != nil {
@ -514,6 +785,8 @@ The options are:
    -{{.Name}} string
        {{.Usage}}{{end}}

{{.ConfigHelp}}

{{range .Commands}}
{{.Desc}}

@ -558,12 +831,37 @@ func doListInstances(ctx context.Context, args ...string) {
    tw.Flush()
}

func doListClusters(ctx context.Context, args ...string) {
    if len(args) != 0 {
        log.Fatalf("usage: cbt listclusters")
    }
    cis, err := getInstanceAdminClient().Clusters(ctx, config.Instance)
    if err != nil {
        log.Fatalf("Getting list of clusters: %v", err)
    }
    tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
    fmt.Fprintf(tw, "Cluster Name\tZone\tState\n")
    fmt.Fprintf(tw, "------------\t----\t----\n")
    for _, ci := range cis {
        fmt.Fprintf(tw, "%s\t%s\t%s (%d serve nodes)\n", ci.Name, ci.Zone, ci.State, ci.ServeNodes)
    }
    tw.Flush()
}

func doLookup(ctx context.Context, args ...string) {
    if len(args) != 2 {
        log.Fatalf("usage: cbt lookup <table> <row>")
    if len(args) < 2 {
        log.Fatalf("usage: cbt lookup <table> <row> [app-profile=<app profile id>]")
    }
    var appProfile string
    if len(args) > 2 {
        i := strings.Index(args[2], "=")
        if i < 0 {
            log.Fatalf("Bad arg %q", args[2])
        }
        appProfile = strings.Split(args[2], "=")[1]
    }
    table, row := args[0], args[1]
    tbl := getClient().Open(table)
    tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(table)
    r, err := tbl.ReadRow(ctx, row)
    if err != nil {
        log.Fatalf("Reading row: %v", err)
@ -635,8 +933,9 @@ func doLS(ctx context.Context, args ...string) {

func doMDDocReal(ctx context.Context, args ...string) {
    data := map[string]interface{}{
        "Commands": commands,
        "Flags":    docFlags(),
        "Commands":   commands,
        "Flags":      docFlags(),
        "ConfigHelp": configHelp,
    }
    var buf bytes.Buffer
    if err := mddocTemplate.Execute(&buf, data); err != nil {
@ -666,6 +965,8 @@ The options are:
    -{{.Name}} string
        {{.Usage}}{{end}}

{{.ConfigHelp}}

{{range .Commands}}
## {{.Desc}}

@ -680,7 +981,6 @@ func doRead(ctx context.Context, args ...string) {
    if len(args) < 1 {
        log.Fatalf("usage: cbt read <table> [args ...]")
    }
    tbl := getClient().Open(args[0])

    parsed := make(map[string]string)
    for _, arg := range args[1:] {
@ -695,7 +995,7 @@ func doRead(ctx context.Context, args ...string) {
        case "limit":
            // Be nicer; we used to support this, but renamed it to "end".
            log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
        case "start", "end", "prefix", "count", "regex":
        case "start", "end", "prefix", "count", "regex", "app-profile":
            parsed[key] = val
        }
    }
@ -726,6 +1026,7 @@ func doRead(ctx context.Context, args ...string) {
    }

    // TODO(dsymonds): Support filters.
    tbl := getClient(bigtable.ClientConfig{AppProfile: parsed["app-profile"]}).Open(args[0])
    err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {
        printRow(r)
        return true
@ -739,12 +1040,16 @@ var setArg = regexp.MustCompile(`([^:]+):([^=]*)=(.*)`)

func doSet(ctx context.Context, args ...string) {
    if len(args) < 3 {
        log.Fatalf("usage: cbt set <table> <row> family:[column]=val[@ts] ...")
        log.Fatalf("usage: cbt set <table> <row> [app-profile=<app profile id>] family:[column]=val[@ts] ...")
    }
    tbl := getClient().Open(args[0])
    var appProfile string
    row := args[1]
    mut := bigtable.NewMutation()
    for _, arg := range args[2:] {
        if strings.HasPrefix(arg, "app-profile=") {
            appProfile = strings.Split(arg, "=")[1]
            continue
        }
        m := setArg.FindStringSubmatch(arg)
        if m == nil {
            log.Fatalf("Bad set arg %q", arg)
@ -761,6 +1066,7 @@ func doSet(ctx context.Context, args ...string) {
        }
        mut.Set(m[1], m[2], ts, []byte(val))
    }
    tbl := getClient(bigtable.ClientConfig{AppProfile: appProfile}).Open(args[0])
    if err := tbl.Apply(ctx, row, mut); err != nil {
        log.Fatalf("Applying mutation: %v", err)
    }
@ -773,25 +1079,169 @@ func doSetGCPolicy(ctx context.Context, args ...string) {
    table := args[0]
    fam := args[1]

    pol, err := parseGCPolicy(args[2])
    if err != nil {
        log.Fatal(err)
    }
    if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
        log.Fatalf("Setting GC policy: %v", err)
    }
}

func doWaitForReplication(ctx context.Context, args ...string) {
    if len(args) != 1 {
        log.Fatalf("usage: cbt waitforreplication <table>")
    }
    table := args[0]

    fmt.Printf("Waiting for all writes up to %s to be replicated.\n", time.Now().Format("2006/01/02-15:04:05"))
    if err := getAdminClient().WaitForReplication(ctx, table); err != nil {
        log.Fatalf("Waiting for replication: %v", err)
    }
}

func parseGCPolicy(policyStr string) (bigtable.GCPolicy, error) {
    var pol bigtable.GCPolicy
    switch p := args[2]; {
    switch p := policyStr; {
    case strings.HasPrefix(p, "maxage="):
        d, err := parseDuration(p[7:])
        if err != nil {
            log.Fatal(err)
            return nil, err
        }
        pol = bigtable.MaxAgePolicy(d)
    case strings.HasPrefix(p, "maxversions="):
        n, err := strconv.ParseUint(p[12:], 10, 16)
        if err != nil {
            log.Fatal(err)
            return nil, err
        }
        pol = bigtable.MaxVersionsPolicy(int(n))
    default:
        log.Fatalf("Bad GC policy %q", p)
        return nil, fmt.Errorf("Bad GC policy %q", p)
    }
    if err := getAdminClient().SetGCPolicy(ctx, table, fam, pol); err != nil {
        log.Fatalf("Setting GC policy: %v", err)
    return pol, nil
}

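With parseGCPolicy returning an error instead of exiting, callers can decide how to fail. A minimal usage sketch (the policy strings follow the setgcpolicy syntax above):

    pol, err := parseGCPolicy("maxversions=3") // or e.g. "maxage=1h30m"
    if err != nil {
        log.Fatal(err) // the CLI exits; a library-style caller could return the error instead
    }
    fmt.Println(pol) // MaxVersionsPolicy prints as "versions() > 3"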
func parseStorageType(storageTypeStr string) (bigtable.StorageType, error) {
    switch storageTypeStr {
    case "SSD":
        return bigtable.SSD, nil
    case "HDD":
        return bigtable.HDD, nil
    }
    return -1, fmt.Errorf("Invalid storage type: %v, must be SSD or HDD", storageTypeStr)
}

func doCreateTableFromSnapshot(ctx context.Context, args ...string) {
    if len(args) != 3 {
        log.Fatal("usage: cbt createtablefromsnapshot <table> <cluster> <snapshot>")
    }
    tableName := args[0]
    clusterName := args[1]
    snapshotName := args[2]
    err := getAdminClient().CreateTableFromSnapshot(ctx, tableName, clusterName, snapshotName)

    if err != nil {
        log.Fatalf("Creating table: %v", err)
    }
}

func doSnapshotTable(ctx context.Context, args ...string) {
    if len(args) != 3 && len(args) != 4 {
        log.Fatal("usage: cbt createsnapshot <cluster> <snapshot> <table> [ttl=<d>]")
    }
    clusterName := args[0]
    snapshotName := args[1]
    tableName := args[2]
    ttl := bigtable.DefaultSnapshotDuration

    for _, arg := range args[3:] {
        i := strings.Index(arg, "=")
        if i < 0 {
            log.Fatalf("Bad arg %q", arg)
        }
        key, val := arg[:i], arg[i+1:]
        switch key {
        default:
            log.Fatalf("Unknown arg key %q", key)
        case "ttl":
            var err error
            ttl, err = parseDuration(val)
            if err != nil {
                log.Fatalf("Invalid snapshot ttl value %q: %v", val, err)
            }
        }
    }

    err := getAdminClient().SnapshotTable(ctx, tableName, clusterName, snapshotName, ttl)
    if err != nil {
        log.Fatalf("Failed to create Snapshot: %v", err)
    }
}

func doListSnapshots(ctx context.Context, args ...string) {
    if len(args) != 0 && len(args) != 1 {
        log.Fatal("usage: cbt listsnapshots [<cluster>]")
    }

    var cluster string

    if len(args) == 0 {
        cluster = "-"
    } else {
        cluster = args[0]
    }

    it := getAdminClient().ListSnapshots(ctx, cluster)

    tw := tabwriter.NewWriter(os.Stdout, 10, 8, 4, '\t', 0)
    fmt.Fprintf(tw, "Snapshot\tSource Table\tCreated At\tExpires At\n")
    fmt.Fprintf(tw, "--------\t------------\t----------\t----------\n")
    timeLayout := "2006-01-02 15:04 MST"

    for {
        snapshot, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            log.Fatalf("Failed to fetch snapshots %v", err)
        }
        fmt.Fprintf(tw, "%s\t%s\t%s\t%s\n", snapshot.Name, snapshot.SourceTable, snapshot.CreateTime.Format(timeLayout), snapshot.DeleteTime.Format(timeLayout))
    }
    tw.Flush()
}

func doGetSnapshot(ctx context.Context, args ...string) {
    if len(args) != 2 {
        log.Fatalf("usage: cbt getsnapshot <cluster> <snapshot>")
    }
    clusterName := args[0]
    snapshotName := args[1]

    snapshot, err := getAdminClient().SnapshotInfo(ctx, clusterName, snapshotName)
    if err != nil {
        log.Fatalf("Failed to get snapshot: %v", err)
    }

    timeLayout := "2006-01-02 15:04 MST"

    fmt.Printf("Name: %s\n", snapshot.Name)
    fmt.Printf("Source table: %s\n", snapshot.SourceTable)
    fmt.Printf("Created at: %s\n", snapshot.CreateTime.Format(timeLayout))
    fmt.Printf("Expires at: %s\n", snapshot.DeleteTime.Format(timeLayout))
}

func doDeleteSnapshot(ctx context.Context, args ...string) {
    if len(args) != 2 {
        log.Fatal("usage: cbt deletesnapshot <cluster> <snapshot>")
    }
    cluster := args[0]
    snapshot := args[1]

    err := getAdminClient().DeleteSnapshot(ctx, cluster, snapshot)

    if err != nil {
        log.Fatalf("Failed to delete snapshot: %v", err)
    }
}

117 vendor/cloud.google.com/go/bigtable/cmd/cbt/cbtdoc.go generated vendored
@ -28,8 +28,13 @@ Usage:
The commands are:

    count               Count rows in a table
    createinstance      Create an instance with an initial cluster
    createcluster       Create a cluster in the configured instance (replication alpha)
    createfamily        Create a column family
    createtable         Create a table
    updatecluster       Update a cluster in the configured instance
    deleteinstance      Deletes an instance
    deletecluster       Deletes a cluster from the configured instance (replication alpha)
    deletecolumn        Delete all cells in a column
    deletefamily        Delete a column family
    deleterow           Delete a row
@ -37,12 +42,14 @@ The commands are:
    doc                 Print godoc-suitable documentation for cbt
    help                Print help text
    listinstances       List instances in a project
    listclusters        List clusters in an instance
    lookup              Read from a single row
    ls                  List tables and column families
    mddoc               Print documentation for cbt in Markdown format
    read                Read rows
    set                 Set value of a cell
    setgcpolicy         Set the GC policy for a column family
    waitforreplication  Blocks until all the completed writes have been replicated to all the clusters (replication alpha)
    version             Print the current cbt version

Use "cbt help <command>" for more information about a command.
@ -57,6 +64,22 @@ The options are:
        if set, use application credentials in this file


Alpha features are not currently available to most Cloud Bigtable customers. The
features might be changed in backward-incompatible ways and are not recommended
for production use. They are not subject to any SLA or deprecation policy.

For convenience, values of the -project, -instance, -creds,
-admin-endpoint and -data-endpoint flags may be specified in
/usr/local/google/home/igorbernstein/.cbtrc in this format:
    project = my-project-123
    instance = my-instance
    creds = path-to-account-key.json
    admin-endpoint = hostname:port
    data-endpoint = hostname:port
All values are optional, and all will be overridden by flags.


Count rows in a table

Usage:
@ -65,6 +88,34 @@ Usage:


Create an instance with an initial cluster

Usage:
    cbt createinstance <instance-id> <display-name> <cluster-id> <zone> <num-nodes> <storage type>
      instance-id    Permanent, unique id for the instance
      display-name   Description of the instance
      cluster-id     Permanent, unique id for the cluster in the instance
      zone           The zone in which to create the cluster
      num-nodes      The number of nodes to create
      storage-type   SSD or HDD


Create a cluster in the configured instance (replication alpha)

Usage:
    cbt createcluster <cluster-id> <zone> <num-nodes> <storage type>
      cluster-id     Permanent, unique id for the cluster in the instance
      zone           The zone in which to create the cluster
      num-nodes      The number of nodes to create
      storage-type   SSD or HDD


Create a column family

Usage:
@ -76,8 +127,36 @@ Usage:
Create a table

Usage:
    cbt createtable <table> [initial_splits...]
      initial_splits=row A row key to be used to initially split the table into multiple tablets. Can be repeated to create multiple splits.
    cbt createtable <table> [families=family[:(maxage=<d> | maxversions=<n>)],...] [splits=split,...]
      families: Column families and their associated GC policies. See "setgcpolicy".
                Example: families=family1:maxage=1w,family2:maxversions=1
      splits:   Row key to be used to initially split the table


Update a cluster in the configured instance

Usage:
    cbt updatecluster <cluster-id> [num-nodes=num-nodes]
      cluster-id    Permanent, unique id for the cluster in the instance
      num-nodes     The number of nodes to update to


Deletes an instance

Usage:
    cbt deleteinstance <instance>


Deletes a cluster from the configured instance (replication alpha)

Usage:
    cbt deletecluster <cluster>


@ -85,7 +164,9 @@ Usage:
Delete all cells in a column

Usage:
    cbt deletecolumn <table> <row> <family> <column>
    cbt deletecolumn <table> <row> <family> <column> [app-profile=<app profile id>]
      app-profile=<app profile id>  The app profile id to use for the request (replication alpha)


@ -101,7 +182,9 @@ Usage:
Delete a row

Usage:
    cbt deleterow <table> <row>
    cbt deleterow <table> <row> [app-profile=<app profile id>]
      app-profile=<app profile id>  The app profile id to use for the request (replication alpha)


@ -138,10 +221,20 @@ Usage:


List clusters in an instance

Usage:
    cbt listclusters


Read from a single row

Usage:
    cbt lookup <table> <row>
    cbt lookup <table> <row> [app-profile=<app profile id>]
      app-profile=<app profile id>  The app profile id to use for the request (replication alpha)


@ -166,12 +259,13 @@ Usage:
Read rows

Usage:
    cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>]
    cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>] [app-profile=<app profile id>]
      start=<row>      Start reading at this row
      end=<row>        Stop reading before this row
      prefix=<prefix>  Read rows with this prefix
      regex=<regex>    Read rows with keys matching this regex
      count=<n>        Read only this many rows
      app-profile=<app profile id>  The app profile id to use for the request (replication alpha)

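For example, a single read combining these options might look like this (table name and app profile id are placeholders):

    cbt read mytable prefix=user- count=10 app-profile=my-app-profile
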
@ -180,7 +274,8 @@ Usage:
Set value of a cell

Usage:
    cbt set <table> <row> family:column=val[@ts] ...
    cbt set <table> <row> [app-profile=<app profile id>] family:column=val[@ts] ...
      app-profile=<app profile id>  The app profile id to use for the request (replication alpha)
      family:column=val[@ts] may be repeated to set multiple cells.

      ts is an optional integer timestamp.
@ -201,6 +296,14 @@ Usage:


Blocks until all the completed writes have been replicated to all the clusters (replication alpha)

Usage:
    cbt waitforreplication <table>


Print the current cbt version

Usage:

9 vendor/cloud.google.com/go/bigtable/cmd/loadtest/loadtest.go generated vendored
@ -107,11 +107,12 @@ func main() {

    // Create a scratch table.
    log.Printf("Setting up scratch table...")
    if err := adminClient.CreateTable(context.Background(), *scratchTable); err != nil {
        log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
    tblConf := bigtable.TableConf{
        TableID:  *scratchTable,
        Families: map[string]bigtable.GCPolicy{"f": bigtable.MaxVersionsPolicy(1)},
    }
    if err := adminClient.CreateColumnFamily(context.Background(), *scratchTable, "f"); err != nil {
        log.Fatalf("Making scratch table column family: %v", err)
    if err := adminClient.CreateTableFromConf(context.Background(), &tblConf); err != nil {
        log.Fatalf("Making scratch table %q: %v", *scratchTable, err)
    }
    // Upon a successful run, delete the table. Don't bother checking for errors.
    defer adminClient.DeleteTable(context.Background(), *scratchTable)

2 vendor/cloud.google.com/go/bigtable/export_test.go generated vendored
@ -98,7 +98,7 @@ type EmulatedEnv struct {

// NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
    srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
    srv, err := bttest.NewServer("localhost:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
    if err != nil {
        return nil, err
    }

49 vendor/cloud.google.com/go/bigtable/filter.go generated vendored
@ -51,7 +51,7 @@ func (cf chainFilter) proto() *btpb.RowFilter {
        chain.Filters = append(chain.Filters, sf.proto())
    }
    return &btpb.RowFilter{
        Filter: &btpb.RowFilter_Chain_{chain},
        Filter: &btpb.RowFilter_Chain_{Chain: chain},
    }
}

@ -77,7 +77,7 @@ func (ilf interleaveFilter) proto() *btpb.RowFilter {
        inter.Filters = append(inter.Filters, sf.proto())
    }
    return &btpb.RowFilter{
        Filter: &btpb.RowFilter_Interleave_{inter},
        Filter: &btpb.RowFilter_Interleave_{Interleave: inter},
    }
}

@ -91,7 +91,7 @@ type rowKeyFilter string
func (rkf rowKeyFilter) String() string { return fmt.Sprintf("row(%s)", string(rkf)) }

func (rkf rowKeyFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{[]byte(rkf)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_RowKeyRegexFilter{RowKeyRegexFilter: []byte(rkf)}}
}

// FamilyFilter returns a filter that matches cells whose family name
@ -104,7 +104,7 @@ type familyFilter string
func (ff familyFilter) String() string { return fmt.Sprintf("col(%s:)", string(ff)) }

func (ff familyFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{string(ff)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_FamilyNameRegexFilter{FamilyNameRegexFilter: string(ff)}}
}

// ColumnFilter returns a filter that matches cells whose column name
@ -117,7 +117,7 @@ type columnFilter string
func (cf columnFilter) String() string { return fmt.Sprintf("col(.*:%s)", string(cf)) }

func (cf columnFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{[]byte(cf)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnQualifierRegexFilter{ColumnQualifierRegexFilter: []byte(cf)}}
}

// ValueFilter returns a filter that matches cells whose value
@ -130,7 +130,7 @@ type valueFilter string
func (vf valueFilter) String() string { return fmt.Sprintf("value_match(%s)", string(vf)) }

func (vf valueFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{[]byte(vf)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRegexFilter{ValueRegexFilter: []byte(vf)}}
}

// LatestNFilter returns a filter that matches the most recent N cells in each column.
@ -141,7 +141,7 @@ type latestNFilter int32
func (lnf latestNFilter) String() string { return fmt.Sprintf("col(*,%d)", lnf) }

func (lnf latestNFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{int32(lnf)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerColumnLimitFilter{CellsPerColumnLimitFilter: int32(lnf)}}
}

// StripValueFilter returns a filter that replaces each value with the empty string.
@ -151,7 +151,7 @@ type stripValueFilter struct{}

func (stripValueFilter) String() string { return "strip_value()" }
func (stripValueFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{true}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_StripValueTransformer{StripValueTransformer: true}}
}

// TimestampRangeFilter returns a filter that matches any cells whose timestamp is within the given time bounds. A zero
@ -186,11 +186,10 @@ func (trf timestampRangeFilter) String() string {

func (trf timestampRangeFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{
        Filter: &btpb.RowFilter_TimestampRangeFilter{
            &btpb.TimestampRange{
                int64(trf.startTime.TruncateToMilliseconds()),
                int64(trf.endTime.TruncateToMilliseconds()),
            },
        Filter: &btpb.RowFilter_TimestampRangeFilter{TimestampRangeFilter: &btpb.TimestampRange{
            StartTimestampMicros: int64(trf.startTime.TruncateToMilliseconds()),
            EndTimestampMicros:   int64(trf.endTime.TruncateToMilliseconds()),
        },
    }}
}

@ -213,12 +212,12 @@ func (crf columnRangeFilter) String() string {
func (crf columnRangeFilter) proto() *btpb.RowFilter {
    r := &btpb.ColumnRange{FamilyName: crf.family}
    if crf.start != "" {
        r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{[]byte(crf.start)}
        r.StartQualifier = &btpb.ColumnRange_StartQualifierClosed{StartQualifierClosed: []byte(crf.start)}
    }
    if crf.end != "" {
        r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{[]byte(crf.end)}
        r.EndQualifier = &btpb.ColumnRange_EndQualifierOpen{EndQualifierOpen: []byte(crf.end)}
    }
    return &btpb.RowFilter{&btpb.RowFilter_ColumnRangeFilter{r}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ColumnRangeFilter{ColumnRangeFilter: r}}
}

// ValueRangeFilter returns a filter that matches cells with values that fall within
@ -239,12 +238,12 @@ func (vrf valueRangeFilter) String() string {
func (vrf valueRangeFilter) proto() *btpb.RowFilter {
    r := &btpb.ValueRange{}
    if vrf.start != nil {
        r.StartValue = &btpb.ValueRange_StartValueClosed{vrf.start}
        r.StartValue = &btpb.ValueRange_StartValueClosed{StartValueClosed: vrf.start}
    }
    if vrf.end != nil {
        r.EndValue = &btpb.ValueRange_EndValueOpen{vrf.end}
        r.EndValue = &btpb.ValueRange_EndValueOpen{EndValueOpen: vrf.end}
    }
    return &btpb.RowFilter{&btpb.RowFilter_ValueRangeFilter{r}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_ValueRangeFilter{ValueRangeFilter: r}}
}

// ConditionFilter returns a filter that evaluates to one of two possible filters depending
@ -278,10 +277,10 @@ func (cf conditionFilter) proto() *btpb.RowFilter {
        ff = cf.falseFilter.proto()
    }
    return &btpb.RowFilter{
        &btpb.RowFilter_Condition_{&btpb.RowFilter_Condition{
            cf.predicateFilter.proto(),
            tf,
            ff,
        Filter: &btpb.RowFilter_Condition_{Condition: &btpb.RowFilter_Condition{
            PredicateFilter: cf.predicateFilter.proto(),
            TrueFilter:      tf,
            FalseFilter:     ff,
        }}}
}

@ -297,7 +296,7 @@ func (cof cellsPerRowOffsetFilter) String() string {
}

func (cof cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{int32(cof)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowOffsetFilter{CellsPerRowOffsetFilter: int32(cof)}}
}

// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
@ -312,7 +311,7 @@ func (clf cellsPerRowLimitFilter) String() string {
}

func (clf cellsPerRowLimitFilter) proto() *btpb.RowFilter {
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{int32(clf)}}
    return &btpb.RowFilter{Filter: &btpb.RowFilter_CellsPerRowLimitFilter{CellsPerRowLimitFilter: int32(clf)}}
}

// TODO(dsymonds): More filters: sampling

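These unexported filter types back the package's exported constructors (ChainFilters, InterleaveFilters, FamilyFilter, ColumnFilter, LatestNFilter, and so on). A minimal sketch composing them in a read, assuming a ctx and a *bigtable.Table named tbl already exist:

    filter := bigtable.ChainFilters(
        bigtable.FamilyFilter("cf1"), // regex match on the family name
        bigtable.LatestNFilter(1),    // keep only the newest cell per column
    )
    err := tbl.ReadRows(ctx, bigtable.PrefixRange("user-"), func(r bigtable.Row) bool {
        return true // handle the row here
    }, bigtable.RowFilter(filter))
    if err != nil {
        log.Fatalf("Reading rows: %v", err)
    }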
8 vendor/cloud.google.com/go/bigtable/gc.go generated vendored
@ -52,7 +52,7 @@ func (ip intersectionPolicy) proto() *bttdpb.GcRule {
        inter.Rules = append(inter.Rules, sp.proto())
    }
    return &bttdpb.GcRule{
        Rule: &bttdpb.GcRule_Intersection_{inter},
        Rule: &bttdpb.GcRule_Intersection_{Intersection: inter},
    }
}

@ -77,7 +77,7 @@ func (up unionPolicy) proto() *bttdpb.GcRule {
        union.Rules = append(union.Rules, sp.proto())
    }
    return &bttdpb.GcRule{
        Rule: &bttdpb.GcRule_Union_{union},
        Rule: &bttdpb.GcRule_Union_{Union: union},
    }
}

@ -90,7 +90,7 @@ type maxVersionsPolicy int
func (mvp maxVersionsPolicy) String() string { return fmt.Sprintf("versions() > %d", int(mvp)) }

func (mvp maxVersionsPolicy) proto() *bttdpb.GcRule {
    return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{int32(mvp)}}
    return &bttdpb.GcRule{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: int32(mvp)}}
}

// MaxAgePolicy returns a GC policy that applies to all cells
@ -123,7 +123,7 @@ func (ma maxAgePolicy) proto() *bttdpb.GcRule {
    // Fix this if people care about GC policies over 290 years.
    ns := time.Duration(ma).Nanoseconds()
    return &bttdpb.GcRule{
        Rule: &bttdpb.GcRule_MaxAge{&durpb.Duration{
        Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{
            Seconds: ns / 1e9,
            Nanos:   int32(ns % 1e9),
        }},

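The same keyed-literal cleanup applies to the GC rule protos. These policies compose through the package's exported helpers; a minimal sketch, assuming a ctx, an AdminClient named adminClient, and a table "mytable" with family "cf1":

    policy := bigtable.UnionPolicy(
        bigtable.MaxVersionsPolicy(3),          // collect cells beyond the 3 newest versions...
        bigtable.MaxAgePolicy(30*24*time.Hour), // ...or cells older than 30 days
    )
    if err := adminClient.SetGCPolicy(ctx, "mytable", "cf1", policy); err != nil {
        log.Fatalf("Setting GC policy: %v", err)
    }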
2 vendor/cloud.google.com/go/bigtable/retry_test.go generated vendored
@ -33,7 +33,7 @@ import (
)

func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err error) {
    srv, err := bttest.NewServer("127.0.0.1:0", opt...)
    srv, err := bttest.NewServer("localhost:0", opt...)
    if err != nil {
        return nil, nil, err
    }

30 vendor/cloud.google.com/go/cloud.go generated vendored
@ -12,9 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cloud is the root of the packages used to access Google Cloud
// Services. See https://godoc.org/cloud.google.com/go for a full list
// of sub-packages.
//
// This package documents how to authorize and authenticate the sub packages.
/*
Package cloud is the root of the packages used to access Google Cloud
Services. See https://godoc.org/cloud.google.com/go for a full list
of sub-packages.

Examples in this package show ways to authorize and authenticate the
sub packages.

Connection Pooling

Connection pooling differs in clients based on their transport. Cloud
clients either rely on HTTP or gRPC transports to communicate
with Google Cloud.

Cloud clients that use HTTP (bigquery, compute, storage, and translate) rely on the
underlying HTTP transport to cache connections for later re-use. These are cached to
the default http.MaxIdleConns and http.MaxIdleConnsPerHost settings in
http.DefaultTransport.

For gRPC clients (all others in this repo), connection pooling is configurable. Users
of cloud client libraries may specify option.WithGRPCConnectionPool(n) as a client
option to NewClient calls. This configures the underlying gRPC connections to be
pooled and addressed in a round robin fashion.

*/
package cloud // import "cloud.google.com/go"

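A sketch of the pooling option described above at a call site, with placeholder project and instance ids:

    client, err := bigtable.NewClient(ctx, "my-project", "my-instance",
        option.WithGRPCConnectionPool(4)) // four pooled gRPC connections, addressed round-robin
    if err != nil {
        log.Fatalf("Making bigtable.Client: %v", err)
    }
    defer client.Close()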
@ -15,9 +15,9 @@
package breakpoints

import (
    "reflect"
    "testing"

    "cloud.google.com/go/internal/testutil"
    "golang.org/x/debug"
    cd "google.golang.org/api/clouddebugger/v2"
)
@ -63,7 +63,7 @@ func TestBreakpointStore(t *testing.T) {
    p := &Program{breakpointPCs: make(map[uint64]bool)}
    bs := NewBreakpointStore(p)
    checkPCs := func(expected map[uint64]bool) {
        if !reflect.DeepEqual(p.breakpointPCs, expected) {
        if !testutil.Equal(p.breakpointPCs, expected) {
            t.Errorf("got breakpoint map %v want %v", p.breakpointPCs, expected)
        }
    }
@ -83,7 +83,7 @@ func TestBreakpointStore(t *testing.T) {
        {testPC3, []*cd.Breakpoint{testBP2}},
        {testLogPC, []*cd.Breakpoint{testLogBP}},
    } {
        if bps := bs.BreakpointsAtPC(test.pc); !reflect.DeepEqual(bps, test.expected) {
        if bps := bs.BreakpointsAtPC(test.pc); !testutil.Equal(bps, test.expected) {
            t.Errorf("BreakpointsAtPC(%x): got %v want %v", test.pc, bps, test.expected)
        }
    }
|
@ -16,9 +16,9 @@ package valuecollector
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"golang.org/x/debug"
|
||||
cd "google.golang.org/api/clouddebugger/v2"
|
||||
)
|
||||
@ -43,15 +43,15 @@ func TestValueCollector(t *testing.T) {
|
||||
c := NewCollector(&Program{}, 26)
|
||||
// Add some variables of various types, whose values we want the collector to read.
|
||||
variablesToAdd := []debug.LocalVar{
|
||||
{Name: "a", Var: debug.Var{int16Type, 0x1}},
|
||||
{Name: "b", Var: debug.Var{stringType, 0x2}},
|
||||
{Name: "c", Var: debug.Var{structType, 0x3}},
|
||||
{Name: "d", Var: debug.Var{pointerType, 0x4}},
|
||||
{Name: "e", Var: debug.Var{arrayType, 0x5}},
|
||||
{Name: "f", Var: debug.Var{debugStringType, 0x6}},
|
||||
{Name: "g", Var: debug.Var{mapType, 0x7}},
|
||||
{Name: "h", Var: debug.Var{channelType, 0x8}},
|
||||
{Name: "i", Var: debug.Var{sliceType, 0x9}},
|
||||
{Name: "a", Var: debug.Var{TypeID: int16Type, Address: 0x1}},
|
||||
{Name: "b", Var: debug.Var{TypeID: stringType, Address: 0x2}},
|
||||
{Name: "c", Var: debug.Var{TypeID: structType, Address: 0x3}},
|
||||
{Name: "d", Var: debug.Var{TypeID: pointerType, Address: 0x4}},
|
||||
{Name: "e", Var: debug.Var{TypeID: arrayType, Address: 0x5}},
|
||||
{Name: "f", Var: debug.Var{TypeID: debugStringType, Address: 0x6}},
|
||||
{Name: "g", Var: debug.Var{TypeID: mapType, Address: 0x7}},
|
||||
{Name: "h", Var: debug.Var{TypeID: channelType, Address: 0x8}},
|
||||
{Name: "i", Var: debug.Var{TypeID: sliceType, Address: 0x9}},
|
||||
}
|
||||
expectedResults := []*cd.Variable{
|
||||
&cd.Variable{Name: "a", VarTableIndex: 1},
|
||||
@ -66,7 +66,7 @@ func TestValueCollector(t *testing.T) {
|
||||
}
|
||||
for i, v := range variablesToAdd {
|
||||
added := c.AddVariable(v)
|
||||
if !reflect.DeepEqual(added, expectedResults[i]) {
|
||||
if !testutil.Equal(added, expectedResults[i]) {
|
||||
t.Errorf("AddVariable: got %+v want %+v", *added, *expectedResults[i])
|
||||
}
|
||||
}
|
||||
@ -162,11 +162,11 @@ func TestValueCollector(t *testing.T) {
|
||||
&cd.Variable{Value: "1404"},
|
||||
&cd.Variable{Value: "2400"},
|
||||
}
|
||||
if !reflect.DeepEqual(v, expectedValues) {
|
||||
if !testutil.Equal(v, expectedValues) {
|
||||
t.Errorf("ReadValues: got %v want %v", v, expectedValues)
|
||||
// Do element-by-element comparisons, for more useful error messages.
|
||||
for i := range v {
|
||||
if i < len(expectedValues) && !reflect.DeepEqual(v[i], expectedValues[i]) {
|
||||
if i < len(expectedValues) && !testutil.Equal(v[i], expectedValues[i]) {
|
||||
t.Errorf("element %d: got %+v want %+v", i, *v[i], *expectedValues[i])
|
||||
}
|
||||
}
|
||||
@ -195,17 +195,17 @@ func (p *Program) Value(v debug.Var) (debug.Value, error) {
|
||||
Fields: []debug.StructField{
|
||||
{
|
||||
Name: "x",
|
||||
Var: debug.Var{int16Type, 0x1},
|
||||
Var: debug.Var{TypeID: int16Type, Address: 0x1},
|
||||
},
|
||||
{
|
||||
Name: "y",
|
||||
Var: debug.Var{stringType, 0x2},
|
||||
Var: debug.Var{TypeID: stringType, Address: 0x2},
|
||||
},
|
||||
},
|
||||
}, nil
|
||||
case pointerType:
|
||||
// A pointer to the first variable above.
|
||||
return debug.Pointer{int16Type, 0x1}, nil
|
||||
return debug.Pointer{TypeID: int16Type, Address: 0x1}, nil
|
||||
case arrayType:
|
||||
// An array of 4 32-bit-wide elements.
|
||||
return debug.Array{
|
||||
|
68
vendor/cloud.google.com/go/container/apiv1/ListClusters_smoke_test.go
generated
vendored
Normal file
@ -0,0 +1,68 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package container

import (
    containerpb "google.golang.org/genproto/googleapis/container/v1"
)

import (
    "fmt"
    "strconv"
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestClusterManagerSmoke(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping smoke test in short mode")
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
    if ts == nil {
        t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
    }

    projectId := testutil.ProjID()
    _ = projectId

    c, err := NewClusterManagerClient(ctx, option.WithTokenSource(ts))
    if err != nil {
        t.Fatal(err)
    }

    var projectId2 string = projectId
    var zone string = "us-central1-a"
    var request = &containerpb.ListClustersRequest{
        ProjectId: projectId2,
        Zone:      zone,
    }

    if _, err := c.ListClusters(ctx, request); err != nil {
        t.Error(err)
    }
}
4
vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go
generated
vendored
@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
64
vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go
generated
vendored
@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,

@ -40,7 +40,7 @@ func ExampleClusterManagerClient_ListClusters() {
    }

    req := &containerpb.ListClustersRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListClusters(ctx, req)
    if err != nil {

@ -58,7 +58,7 @@ func ExampleClusterManagerClient_GetCluster() {
    }

    req := &containerpb.GetClusterRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetCluster(ctx, req)
    if err != nil {

@ -76,7 +76,7 @@ func ExampleClusterManagerClient_CreateCluster() {
    }

    req := &containerpb.CreateClusterRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.CreateCluster(ctx, req)
    if err != nil {

@ -94,7 +94,7 @@ func ExampleClusterManagerClient_UpdateCluster() {
    }

    req := &containerpb.UpdateClusterRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateCluster(ctx, req)
    if err != nil {

@ -112,7 +112,7 @@ func ExampleClusterManagerClient_UpdateNodePool() {
    }

    req := &containerpb.UpdateNodePoolRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateNodePool(ctx, req)
    if err != nil {

@ -130,7 +130,7 @@ func ExampleClusterManagerClient_SetNodePoolAutoscaling() {
    }

    req := &containerpb.SetNodePoolAutoscalingRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetNodePoolAutoscaling(ctx, req)
    if err != nil {

@ -148,7 +148,7 @@ func ExampleClusterManagerClient_SetLoggingService() {
    }

    req := &containerpb.SetLoggingServiceRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetLoggingService(ctx, req)
    if err != nil {

@ -166,7 +166,7 @@ func ExampleClusterManagerClient_SetMonitoringService() {
    }

    req := &containerpb.SetMonitoringServiceRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetMonitoringService(ctx, req)
    if err != nil {

@ -184,7 +184,7 @@ func ExampleClusterManagerClient_SetAddonsConfig() {
    }

    req := &containerpb.SetAddonsConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetAddonsConfig(ctx, req)
    if err != nil {

@ -202,7 +202,7 @@ func ExampleClusterManagerClient_SetLocations() {
    }

    req := &containerpb.SetLocationsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetLocations(ctx, req)
    if err != nil {

@ -220,7 +220,7 @@ func ExampleClusterManagerClient_UpdateMaster() {
    }

    req := &containerpb.UpdateMasterRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateMaster(ctx, req)
    if err != nil {

@ -238,7 +238,7 @@ func ExampleClusterManagerClient_SetMasterAuth() {
    }

    req := &containerpb.SetMasterAuthRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetMasterAuth(ctx, req)
    if err != nil {

@ -256,7 +256,7 @@ func ExampleClusterManagerClient_DeleteCluster() {
    }

    req := &containerpb.DeleteClusterRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.DeleteCluster(ctx, req)
    if err != nil {

@ -274,7 +274,7 @@ func ExampleClusterManagerClient_ListOperations() {
    }

    req := &containerpb.ListOperationsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListOperations(ctx, req)
    if err != nil {

@ -292,7 +292,7 @@ func ExampleClusterManagerClient_GetOperation() {
    }

    req := &containerpb.GetOperationRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetOperation(ctx, req)
    if err != nil {

@ -310,7 +310,7 @@ func ExampleClusterManagerClient_CancelOperation() {
    }

    req := &containerpb.CancelOperationRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    err = c.CancelOperation(ctx, req)
    if err != nil {

@ -326,7 +326,7 @@ func ExampleClusterManagerClient_GetServerConfig() {
    }

    req := &containerpb.GetServerConfigRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetServerConfig(ctx, req)
    if err != nil {

@ -344,7 +344,7 @@ func ExampleClusterManagerClient_ListNodePools() {
    }

    req := &containerpb.ListNodePoolsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListNodePools(ctx, req)
    if err != nil {

@ -362,7 +362,7 @@ func ExampleClusterManagerClient_GetNodePool() {
    }

    req := &containerpb.GetNodePoolRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetNodePool(ctx, req)
    if err != nil {

@ -380,7 +380,7 @@ func ExampleClusterManagerClient_CreateNodePool() {
    }

    req := &containerpb.CreateNodePoolRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.CreateNodePool(ctx, req)
    if err != nil {

@ -398,7 +398,7 @@ func ExampleClusterManagerClient_DeleteNodePool() {
    }

    req := &containerpb.DeleteNodePoolRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.DeleteNodePool(ctx, req)
    if err != nil {

@ -416,7 +416,7 @@ func ExampleClusterManagerClient_RollbackNodePoolUpgrade() {
    }

    req := &containerpb.RollbackNodePoolUpgradeRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.RollbackNodePoolUpgrade(ctx, req)
    if err != nil {

@ -434,7 +434,7 @@ func ExampleClusterManagerClient_SetNodePoolManagement() {
    }

    req := &containerpb.SetNodePoolManagementRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetNodePoolManagement(ctx, req)
    if err != nil {

@ -452,7 +452,7 @@ func ExampleClusterManagerClient_SetLabels() {
    }

    req := &containerpb.SetLabelsRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetLabels(ctx, req)
    if err != nil {

@ -470,7 +470,7 @@ func ExampleClusterManagerClient_SetLegacyAbac() {
    }

    req := &containerpb.SetLegacyAbacRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetLegacyAbac(ctx, req)
    if err != nil {

@ -488,7 +488,7 @@ func ExampleClusterManagerClient_StartIPRotation() {
    }

    req := &containerpb.StartIPRotationRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.StartIPRotation(ctx, req)
    if err != nil {

@ -506,7 +506,7 @@ func ExampleClusterManagerClient_CompleteIPRotation() {
    }

    req := &containerpb.CompleteIPRotationRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.CompleteIPRotation(ctx, req)
    if err != nil {

@ -524,7 +524,7 @@ func ExampleClusterManagerClient_SetNodePoolSize() {
    }

    req := &containerpb.SetNodePoolSizeRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetNodePoolSize(ctx, req)
    if err != nil {

@ -542,7 +542,7 @@ func ExampleClusterManagerClient_SetNetworkPolicy() {
    }

    req := &containerpb.SetNetworkPolicyRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetNetworkPolicy(ctx, req)
    if err != nil {

@ -560,7 +560,7 @@ func ExampleClusterManagerClient_SetMaintenancePolicy() {
    }

    req := &containerpb.SetMaintenancePolicyRequest{
        // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetMaintenancePolicy(ctx, req)
    if err != nil {
4
vendor/cloud.google.com/go/container/apiv1/doc.go
generated
vendored
@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
4
vendor/cloud.google.com/go/container/apiv1/mock_test.go
generated
vendored
@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
2
vendor/cloud.google.com/go/container/container.go
generated
vendored
@ -14,7 +14,7 @@

// Package container contains a deprecated Google Container Engine client.
//
// Deprecated: Use google.golang.org/api/container instead.
// Deprecated: Use cloud.google.com/go/container/apiv1 instead.
package container // import "cloud.google.com/go/container"

import (
69
vendor/cloud.google.com/go/dataproc/apiv1/ListClusters_smoke_test.go
generated
vendored
Normal file
@ -0,0 +1,69 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dataproc

import (
    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

import (
    "fmt"
    "strconv"
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now

func TestClusterControllerSmoke(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping smoke test in short mode")
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
    if ts == nil {
        t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
    }

    projectId := testutil.ProjID()
    _ = projectId

    c, err := NewClusterControllerClient(ctx, option.WithTokenSource(ts))
    if err != nil {
        t.Fatal(err)
    }

    var projectId2 string = projectId
    var region string = "global"
    var request = &dataprocpb.ListClustersRequest{
        ProjectId: projectId2,
        Region:    region,
    }

    iter := c.ListClusters(ctx, request)
    if _, err := iter.Next(); err != nil && err != iterator.Done {
        t.Error(err)
    }
}
593
vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client.go
generated
vendored
Normal file
@ -0,0 +1,593 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dataproc

import (
    "math"
    "time"

    "cloud.google.com/go/internal/version"
    "cloud.google.com/go/longrunning"
    lroauto "cloud.google.com/go/longrunning/autogen"
    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
    longrunningpb "google.golang.org/genproto/googleapis/longrunning"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// ClusterControllerCallOptions contains the retry settings for each method of ClusterControllerClient.
type ClusterControllerCallOptions struct {
    CreateCluster   []gax.CallOption
    UpdateCluster   []gax.CallOption
    DeleteCluster   []gax.CallOption
    GetCluster      []gax.CallOption
    ListClusters    []gax.CallOption
    DiagnoseCluster []gax.CallOption
}

func defaultClusterControllerClientOptions() []option.ClientOption {
    return []option.ClientOption{
        option.WithEndpoint("dataproc.googleapis.com:443"),
        option.WithScopes(DefaultAuthScopes()...),
    }
}

func defaultClusterControllerCallOptions() *ClusterControllerCallOptions {
    retry := map[[2]string][]gax.CallOption{
        {"default", "idempotent"}: {
            gax.WithRetry(func() gax.Retryer {
                return gax.OnCodes([]codes.Code{
                    codes.DeadlineExceeded,
                    codes.Unavailable,
                }, gax.Backoff{
                    Initial:    100 * time.Millisecond,
                    Max:        60000 * time.Millisecond,
                    Multiplier: 1.3,
                })
            }),
        },
    }
    return &ClusterControllerCallOptions{
        CreateCluster:   retry[[2]string{"default", "non_idempotent"}],
        UpdateCluster:   retry[[2]string{"default", "non_idempotent"}],
        DeleteCluster:   retry[[2]string{"default", "idempotent"}],
        GetCluster:      retry[[2]string{"default", "idempotent"}],
        ListClusters:    retry[[2]string{"default", "idempotent"}],
        DiagnoseCluster: retry[[2]string{"default", "non_idempotent"}],
    }
}
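Because CallOptions is an exported field on the client (see the struct below),
callers can swap these defaults after construction. A minimal sketch, reusing
the gax, codes and time imports above; the backoff values are illustrative
assumptions, not defaults taken from this file, and c/ctx are assumed in scope:

    c, err := NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // Retry GetCluster only on Unavailable, with a slower, capped backoff.
    c.CallOptions.GetCluster = []gax.CallOption{
        gax.WithRetry(func() gax.Retryer {
            return gax.OnCodes([]codes.Code{
                codes.Unavailable,
            }, gax.Backoff{
                Initial:    250 * time.Millisecond,
                Max:        30 * time.Second,
                Multiplier: 2,
            })
        }),
    }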
// ClusterControllerClient is a client for interacting with Google Cloud Dataproc API.
type ClusterControllerClient struct {
    // The connection to the service.
    conn *grpc.ClientConn

    // The gRPC API client.
    clusterControllerClient dataprocpb.ClusterControllerClient

    // LROClient is used internally to handle longrunning operations.
    // It is exposed so that its CallOptions can be modified if required.
    // Users should not Close this client.
    LROClient *lroauto.OperationsClient

    // The call options for this service.
    CallOptions *ClusterControllerCallOptions

    // The x-goog-* metadata to be sent with each request.
    xGoogMetadata metadata.MD
}

// NewClusterControllerClient creates a new cluster controller client.
//
// The ClusterControllerService provides methods to manage clusters
// of Google Compute Engine instances.
func NewClusterControllerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterControllerClient, error) {
    conn, err := transport.DialGRPC(ctx, append(defaultClusterControllerClientOptions(), opts...)...)
    if err != nil {
        return nil, err
    }
    c := &ClusterControllerClient{
        conn:        conn,
        CallOptions: defaultClusterControllerCallOptions(),

        clusterControllerClient: dataprocpb.NewClusterControllerClient(conn),
    }
    c.setGoogleClientInfo()

    c.LROClient, err = lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
    if err != nil {
        // This error "should not happen", since we are just reusing old connection
        // and never actually need to dial.
        // If this does happen, we could leak conn. However, we cannot close conn:
        // If the user invoked the function with option.WithGRPCConn,
        // we would close a connection that's still in use.
        // TODO(pongad): investigate error conditions.
        return nil, err
    }
    return c, nil
}

// Connection returns the client's connection to the API service.
func (c *ClusterControllerClient) Connection() *grpc.ClientConn {
    return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *ClusterControllerClient) Close() error {
    return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *ClusterControllerClient) setGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", version.Go()}, keyval...)
    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}
// CreateCluster creates a cluster in a project.
func (c *ClusterControllerClient) CreateCluster(ctx context.Context, req *dataprocpb.CreateClusterRequest, opts ...gax.CallOption) (*CreateClusterOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.clusterControllerClient.CreateCluster(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &CreateClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// UpdateCluster updates a cluster in a project.
func (c *ClusterControllerClient) UpdateCluster(ctx context.Context, req *dataprocpb.UpdateClusterRequest, opts ...gax.CallOption) (*UpdateClusterOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.clusterControllerClient.UpdateCluster(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &UpdateClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// DeleteCluster deletes a cluster in a project.
func (c *ClusterControllerClient) DeleteCluster(ctx context.Context, req *dataprocpb.DeleteClusterRequest, opts ...gax.CallOption) (*DeleteClusterOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.clusterControllerClient.DeleteCluster(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &DeleteClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// GetCluster gets the resource representation for a cluster in a project.
func (c *ClusterControllerClient) GetCluster(ctx context.Context, req *dataprocpb.GetClusterRequest, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...)
    var resp *dataprocpb.Cluster
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.clusterControllerClient.GetCluster(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// ListClusters lists all regions/{region}/clusters in a project.
func (c *ClusterControllerClient) ListClusters(ctx context.Context, req *dataprocpb.ListClustersRequest, opts ...gax.CallOption) *ClusterIterator {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...)
    it := &ClusterIterator{}
    it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Cluster, string, error) {
        var resp *dataprocpb.ListClustersResponse
        req.PageToken = pageToken
        if pageSize > math.MaxInt32 {
            req.PageSize = math.MaxInt32
        } else {
            req.PageSize = int32(pageSize)
        }
        err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
            var err error
            resp, err = c.clusterControllerClient.ListClusters(ctx, req, settings.GRPC...)
            return err
        }, opts...)
        if err != nil {
            return nil, "", err
        }
        return resp.Clusters, resp.NextPageToken, nil
    }
    fetch := func(pageSize int, pageToken string) (string, error) {
        items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
        if err != nil {
            return "", err
        }
        it.items = append(it.items, items...)
        return nextPageToken, nil
    }
    it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
    return it
}

// DiagnoseCluster gets cluster diagnostic information.
// After the operation completes, the Operation.response field
// contains DiagnoseClusterOutputLocation.
func (c *ClusterControllerClient) DiagnoseCluster(ctx context.Context, req *dataprocpb.DiagnoseClusterRequest, opts ...gax.CallOption) (*DiagnoseClusterOperation, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.DiagnoseCluster[0:len(c.CallOptions.DiagnoseCluster):len(c.CallOptions.DiagnoseCluster)], opts...)
    var resp *longrunningpb.Operation
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.clusterControllerClient.DiagnoseCluster(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return &DiagnoseClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, resp),
    }, nil
}

// ClusterIterator manages a stream of *dataprocpb.Cluster.
type ClusterIterator struct {
    items    []*dataprocpb.Cluster
    pageInfo *iterator.PageInfo
    nextFunc func() error

    // InternalFetch is for use by the Google Cloud Libraries only.
    // It is not part of the stable interface of this package.
    //
    // InternalFetch returns results from a single call to the underlying RPC.
    // The number of results is no greater than pageSize.
    // If there are no more results, nextPageToken is empty and err is nil.
    InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Cluster, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *ClusterIterator) PageInfo() *iterator.PageInfo {
    return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *ClusterIterator) Next() (*dataprocpb.Cluster, error) {
    var item *dataprocpb.Cluster
    if err := it.nextFunc(); err != nil {
        return item, err
    }
    item = it.items[0]
    it.items = it.items[1:]
    return item, nil
}

func (it *ClusterIterator) bufLen() int {
    return len(it.items)
}

func (it *ClusterIterator) takeBuf() interface{} {
    b := it.items
    it.items = nil
    return b
}
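The iterator above is driven through Next until iterator.Done, and PageInfo
exposes paging knobs such as the page size. A brief sketch; the page size of
50 and the request field values are illustrative assumptions:

    it := c.ListClusters(ctx, &dataprocpb.ListClustersRequest{
        ProjectId: "my-project", // hypothetical values
        Region:    "global",
    })
    it.PageInfo().MaxSize = 50 // request at most 50 clusters per RPC
    for {
        cluster, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        _ = cluster // TODO: Use cluster.
    }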
// CreateClusterOperation manages a long-running operation from CreateCluster.
type CreateClusterOperation struct {
    lro *longrunning.Operation
}

// CreateClusterOperation returns a new CreateClusterOperation from a given name.
// The name must be that of a previously created CreateClusterOperation, possibly from a different process.
func (c *ClusterControllerClient) CreateClusterOperation(name string) *CreateClusterOperation {
    return &CreateClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *CreateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
    var resp dataprocpb.Cluster
    if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil {
        return nil, err
    }
    return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *CreateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
    var resp dataprocpb.Cluster
    if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
        return nil, err
    }
    if !op.Done() {
        return nil, nil
    }
    return &resp, nil
}
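Per the Poll contract above, a caller that wants to interleave other work can
poll instead of calling Wait. A sketch under those semantics; the sleep
interval is an assumption (Wait itself polls on a 10s interval), and op/ctx
are assumed in scope:

    for {
        resp, err := op.Poll(ctx)
        if err != nil {
            // TODO: Handle error. op.Done() reports whether the
            // operation itself completed with failure.
        }
        if op.Done() {
            _ = resp // TODO: Use resp.
            break
        }
        time.Sleep(10 * time.Second)
    }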
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
    var meta dataprocpb.ClusterOperationMetadata
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *CreateClusterOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateClusterOperation) Name() string {
    return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *CreateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Delete(ctx, opts...)
}

// DeleteClusterOperation manages a long-running operation from DeleteCluster.
type DeleteClusterOperation struct {
    lro *longrunning.Operation
}

// DeleteClusterOperation returns a new DeleteClusterOperation from a given name.
// The name must be that of a previously created DeleteClusterOperation, possibly from a different process.
func (c *ClusterControllerClient) DeleteClusterOperation(name string) *DeleteClusterOperation {
    return &DeleteClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *DeleteClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *DeleteClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *DeleteClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
    var meta dataprocpb.ClusterOperationMetadata
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *DeleteClusterOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *DeleteClusterOperation) Name() string {
    return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *DeleteClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Delete(ctx, opts...)
}

// DiagnoseClusterOperation manages a long-running operation from DiagnoseCluster.
type DiagnoseClusterOperation struct {
    lro *longrunning.Operation
}

// DiagnoseClusterOperation returns a new DiagnoseClusterOperation from a given name.
// The name must be that of a previously created DiagnoseClusterOperation, possibly from a different process.
func (c *ClusterControllerClient) DiagnoseClusterOperation(name string) *DiagnoseClusterOperation {
    return &DiagnoseClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning any error encountered.
//
// See documentation of Poll for error-handling information.
func (op *DiagnoseClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.WaitWithInterval(ctx, nil, 10000*time.Millisecond, opts...)
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully, op.Done will return true.
func (op *DiagnoseClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Poll(ctx, nil, opts...)
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *DiagnoseClusterOperation) Metadata() (*dataprocpb.DiagnoseClusterResults, error) {
    var meta dataprocpb.DiagnoseClusterResults
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *DiagnoseClusterOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *DiagnoseClusterOperation) Name() string {
    return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *DiagnoseClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Delete(ctx, opts...)
}

// UpdateClusterOperation manages a long-running operation from UpdateCluster.
type UpdateClusterOperation struct {
    lro *longrunning.Operation
}

// UpdateClusterOperation returns a new UpdateClusterOperation from a given name.
// The name must be that of a previously created UpdateClusterOperation, possibly from a different process.
func (c *ClusterControllerClient) UpdateClusterOperation(name string) *UpdateClusterOperation {
    return &UpdateClusterOperation{
        lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
    }
}

// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *UpdateClusterOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
    var resp dataprocpb.Cluster
    if err := op.lro.WaitWithInterval(ctx, &resp, 10000*time.Millisecond, opts...); err != nil {
        return nil, err
    }
    return &resp, nil
}

// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *UpdateClusterOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dataprocpb.Cluster, error) {
    var resp dataprocpb.Cluster
    if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
        return nil, err
    }
    if !op.Done() {
        return nil, nil
    }
    return &resp, nil
}

// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *UpdateClusterOperation) Metadata() (*dataprocpb.ClusterOperationMetadata, error) {
    var meta dataprocpb.ClusterOperationMetadata
    if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
        return nil, nil
    } else if err != nil {
        return nil, err
    }
    return &meta, nil
}

// Done reports whether the long-running operation has completed.
func (op *UpdateClusterOperation) Done() bool {
    return op.lro.Done()
}

// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *UpdateClusterOperation) Name() string {
    return op.lro.Name()
}

// Delete deletes a long-running operation.
// This method indicates that the client is no longer interested in the operation result.
// It does not cancel the operation.
func (op *UpdateClusterOperation) Delete(ctx context.Context, opts ...gax.CallOption) error {
    return op.lro.Delete(ctx, opts...)
}
160
vendor/cloud.google.com/go/dataproc/apiv1/cluster_controller_client_example_test.go
generated
vendored
Normal file
@ -0,0 +1,160 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dataproc_test

import (
    "cloud.google.com/go/dataproc/apiv1"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func ExampleNewClusterControllerClient() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use client.
    _ = c
}

func ExampleClusterControllerClient_CreateCluster() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.CreateClusterRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.CreateCluster(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    resp, err := op.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleClusterControllerClient_UpdateCluster() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.UpdateClusterRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.UpdateCluster(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    resp, err := op.Wait(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleClusterControllerClient_DeleteCluster() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.DeleteClusterRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.DeleteCluster(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    err = op.Wait(ctx)
    // TODO: Handle error.
}

func ExampleClusterControllerClient_GetCluster() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.GetClusterRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetCluster(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleClusterControllerClient_ListClusters() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.ListClustersRequest{
        // TODO: Fill request struct fields.
    }
    it := c.ListClusters(ctx, req)
    for {
        resp, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        // TODO: Use resp.
        _ = resp
    }
}

func ExampleClusterControllerClient_DiagnoseCluster() {
    ctx := context.Background()
    c, err := dataproc.NewClusterControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.DiagnoseClusterRequest{
        // TODO: Fill request struct fields.
    }
    op, err := c.DiagnoseCluster(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }

    err = op.Wait(ctx)
    // TODO: Handle error.
}
46
vendor/cloud.google.com/go/dataproc/apiv1/doc.go
generated
vendored
Normal file
@ -0,0 +1,46 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package dataproc is an auto-generated package for the
// Google Cloud Dataproc API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Manages Hadoop-based clusters and jobs on Google Cloud Platform.
package dataproc // import "cloud.google.com/go/dataproc/apiv1"

import (
    "golang.org/x/net/context"
    "google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
    out, _ := metadata.FromOutgoingContext(ctx)
    out = out.Copy()
    for _, md := range mds {
        for k, v := range md {
            out[k] = append(out[k], v...)
        }
    }
    return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
    return []string{
        "https://www.googleapis.com/auth/cloud-platform",
    }
}
285
vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client.go
generated
vendored
Normal file
@ -0,0 +1,285 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dataproc

import (
    "math"
    "time"

    "cloud.google.com/go/internal/version"
    gax "github.com/googleapis/gax-go"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    "google.golang.org/api/option"
    "google.golang.org/api/transport"
    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
    "google.golang.org/grpc"
    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/metadata"
)

// JobControllerCallOptions contains the retry settings for each method of JobControllerClient.
type JobControllerCallOptions struct {
    SubmitJob []gax.CallOption
    GetJob    []gax.CallOption
    ListJobs  []gax.CallOption
    UpdateJob []gax.CallOption
    CancelJob []gax.CallOption
    DeleteJob []gax.CallOption
}

func defaultJobControllerClientOptions() []option.ClientOption {
    return []option.ClientOption{
        option.WithEndpoint("dataproc.googleapis.com:443"),
        option.WithScopes(DefaultAuthScopes()...),
    }
}

func defaultJobControllerCallOptions() *JobControllerCallOptions {
    retry := map[[2]string][]gax.CallOption{
        {"default", "idempotent"}: {
            gax.WithRetry(func() gax.Retryer {
                return gax.OnCodes([]codes.Code{
                    codes.DeadlineExceeded,
                    codes.Unavailable,
                }, gax.Backoff{
                    Initial:    100 * time.Millisecond,
                    Max:        60000 * time.Millisecond,
                    Multiplier: 1.3,
                })
            }),
        },
    }
    return &JobControllerCallOptions{
        SubmitJob: retry[[2]string{"default", "non_idempotent"}],
        GetJob:    retry[[2]string{"default", "idempotent"}],
        ListJobs:  retry[[2]string{"default", "idempotent"}],
        UpdateJob: retry[[2]string{"default", "non_idempotent"}],
        CancelJob: retry[[2]string{"default", "non_idempotent"}],
        DeleteJob: retry[[2]string{"default", "idempotent"}],
    }
}

// JobControllerClient is a client for interacting with Google Cloud Dataproc API.
type JobControllerClient struct {
    // The connection to the service.
    conn *grpc.ClientConn

    // The gRPC API client.
    jobControllerClient dataprocpb.JobControllerClient

    // The call options for this service.
    CallOptions *JobControllerCallOptions

    // The x-goog-* metadata to be sent with each request.
    xGoogMetadata metadata.MD
}

// NewJobControllerClient creates a new job controller client.
//
// The JobController provides methods to manage jobs.
func NewJobControllerClient(ctx context.Context, opts ...option.ClientOption) (*JobControllerClient, error) {
    conn, err := transport.DialGRPC(ctx, append(defaultJobControllerClientOptions(), opts...)...)
    if err != nil {
        return nil, err
    }
    c := &JobControllerClient{
        conn:        conn,
        CallOptions: defaultJobControllerCallOptions(),

        jobControllerClient: dataprocpb.NewJobControllerClient(conn),
    }
    c.setGoogleClientInfo()
    return c, nil
}

// Connection returns the client's connection to the API service.
func (c *JobControllerClient) Connection() *grpc.ClientConn {
    return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *JobControllerClient) Close() error {
    return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *JobControllerClient) setGoogleClientInfo(keyval ...string) {
    kv := append([]string{"gl-go", version.Go()}, keyval...)
    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// SubmitJob submits a job to a cluster.
func (c *JobControllerClient) SubmitJob(ctx context.Context, req *dataprocpb.SubmitJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.SubmitJob[0:len(c.CallOptions.SubmitJob):len(c.CallOptions.SubmitJob)], opts...)
    var resp *dataprocpb.Job
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.jobControllerClient.SubmitJob(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// GetJob gets the resource representation for a job in a project.
func (c *JobControllerClient) GetJob(ctx context.Context, req *dataprocpb.GetJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.GetJob[0:len(c.CallOptions.GetJob):len(c.CallOptions.GetJob)], opts...)
    var resp *dataprocpb.Job
    err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        var err error
        resp, err = c.jobControllerClient.GetJob(ctx, req, settings.GRPC...)
        return err
    }, opts...)
    if err != nil {
        return nil, err
    }
    return resp, nil
}

// ListJobs lists regions/{region}/jobs in a project.
func (c *JobControllerClient) ListJobs(ctx context.Context, req *dataprocpb.ListJobsRequest, opts ...gax.CallOption) *JobIterator {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
    opts = append(c.CallOptions.ListJobs[0:len(c.CallOptions.ListJobs):len(c.CallOptions.ListJobs)], opts...)
    it := &JobIterator{}
    it.InternalFetch = func(pageSize int, pageToken string) ([]*dataprocpb.Job, string, error) {
        var resp *dataprocpb.ListJobsResponse
        req.PageToken = pageToken
        if pageSize > math.MaxInt32 {
            req.PageSize = math.MaxInt32
        } else {
            req.PageSize = int32(pageSize)
        }
        err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.jobControllerClient.ListJobs(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
}
|
||||
return resp.Jobs, resp.NextPageToken, nil
|
||||
}
|
||||
fetch := func(pageSize int, pageToken string) (string, error) {
|
||||
items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
it.items = append(it.items, items...)
|
||||
return nextPageToken, nil
|
||||
}
|
||||
it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
|
||||
return it
|
||||
}
|
||||
|
||||
// UpdateJob updates a job in a project.
|
||||
func (c *JobControllerClient) UpdateJob(ctx context.Context, req *dataprocpb.UpdateJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.UpdateJob[0:len(c.CallOptions.UpdateJob):len(c.CallOptions.UpdateJob)], opts...)
|
||||
var resp *dataprocpb.Job
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.jobControllerClient.UpdateJob(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CancelJob starts a job cancellation request. To access the job resource
|
||||
// after cancellation, call
|
||||
// regions/{region}/jobs.list (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or
|
||||
// regions/{region}/jobs.get (at /dataproc/docs/reference/rest/v1/projects.regions.jobs/get).
|
||||
func (c *JobControllerClient) CancelJob(ctx context.Context, req *dataprocpb.CancelJobRequest, opts ...gax.CallOption) (*dataprocpb.Job, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CancelJob[0:len(c.CallOptions.CancelJob):len(c.CallOptions.CancelJob)], opts...)
|
||||
var resp *dataprocpb.Job
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.jobControllerClient.CancelJob(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteJob deletes the job from the project. If the job is active, the delete fails,
|
||||
// and the response returns FAILED_PRECONDITION.
|
||||
func (c *JobControllerClient) DeleteJob(ctx context.Context, req *dataprocpb.DeleteJobRequest, opts ...gax.CallOption) error {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteJob[0:len(c.CallOptions.DeleteJob):len(c.CallOptions.DeleteJob)], opts...)
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
_, err = c.jobControllerClient.DeleteJob(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
return err
|
||||
}
|
||||
|
||||
// JobIterator manages a stream of *dataprocpb.Job.
|
||||
type JobIterator struct {
|
||||
items []*dataprocpb.Job
|
||||
pageInfo *iterator.PageInfo
|
||||
nextFunc func() error
|
||||
|
||||
// InternalFetch is for use by the Google Cloud Libraries only.
|
||||
// It is not part of the stable interface of this package.
|
||||
//
|
||||
// InternalFetch returns results from a single call to the underlying RPC.
|
||||
// The number of results is no greater than pageSize.
|
||||
// If there are no more results, nextPageToken is empty and err is nil.
|
||||
InternalFetch func(pageSize int, pageToken string) (results []*dataprocpb.Job, nextPageToken string, err error)
|
||||
}
|
||||
|
||||
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
|
||||
func (it *JobIterator) PageInfo() *iterator.PageInfo {
|
||||
return it.pageInfo
|
||||
}
|
||||
|
||||
// Next returns the next result. Its second return value is iterator.Done if there are no more
|
||||
// results. Once Next returns Done, all subsequent calls will return Done.
|
||||
func (it *JobIterator) Next() (*dataprocpb.Job, error) {
|
||||
var item *dataprocpb.Job
|
||||
if err := it.nextFunc(); err != nil {
|
||||
return item, err
|
||||
}
|
||||
item = it.items[0]
|
||||
it.items = it.items[1:]
|
||||
return item, nil
|
||||
}
|
||||
|
||||
func (it *JobIterator) bufLen() int {
|
||||
return len(it.items)
|
||||
}
|
||||
|
||||
func (it *JobIterator) takeBuf() interface{} {
|
||||
b := it.items
|
||||
it.items = nil
|
||||
return b
|
||||
}
|
146 vendor/cloud.google.com/go/dataproc/apiv1/job_controller_client_example_test.go generated vendored Normal file
@@ -0,0 +1,146 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package dataproc_test

import (
    "cloud.google.com/go/dataproc/apiv1"
    "golang.org/x/net/context"
    "google.golang.org/api/iterator"
    dataprocpb "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
)

func ExampleNewJobControllerClient() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use client.
    _ = c
}

func ExampleJobControllerClient_SubmitJob() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.SubmitJobRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.SubmitJob(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleJobControllerClient_GetJob() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.GetJobRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetJob(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleJobControllerClient_ListJobs() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.ListJobsRequest{
        // TODO: Fill request struct fields.
    }
    it := c.ListJobs(ctx, req)
    for {
        resp, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            // TODO: Handle error.
        }
        // TODO: Use resp.
        _ = resp
    }
}

func ExampleJobControllerClient_UpdateJob() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.UpdateJobRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateJob(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleJobControllerClient_CancelJob() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.CancelJobRequest{
        // TODO: Fill request struct fields.
    }
    resp, err := c.CancelJob(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
    // TODO: Use resp.
    _ = resp
}

func ExampleJobControllerClient_DeleteJob() {
    ctx := context.Background()
    c, err := dataproc.NewJobControllerClient(ctx)
    if err != nil {
        // TODO: Handle error.
    }

    req := &dataprocpb.DeleteJobRequest{
        // TODO: Fill request struct fields.
    }
    err = c.DeleteJob(ctx, req)
    if err != nil {
        // TODO: Handle error.
    }
}
1196 vendor/cloud.google.com/go/dataproc/apiv1/mock_test.go generated vendored Normal file
File diff suppressed because it is too large
37 vendor/cloud.google.com/go/datastore/datastore.go generated vendored
@@ -453,6 +453,7 @@ func (c *Client) Put(ctx context.Context, key *Key, src interface{}) (*Key, error) {
//
// src must satisfy the same conditions as the dst argument to GetMulti.
func (c *Client) PutMulti(ctx context.Context, keys []*Key, src interface{}) ([]*Key, error) {
    // TODO(jba): rewrite in terms of Mutate.
    mutations, err := putMutations(keys, src)
    if err != nil {
        return nil, err
@@ -541,6 +542,7 @@ func (c *Client) Delete(ctx context.Context, key *Key) error {

// DeleteMulti is a batch version of Delete.
func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
    // TODO(jba): rewrite in terms of Mutate.
    mutations, err := deleteMutations(keys)
    if err != nil {
        return err
@@ -572,3 +574,38 @@ func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
    }
    return mutations, nil
}

// Mutate applies one or more mutations atomically.
// It returns the keys of the argument Mutations, in the same order.
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
func (c *Client) Mutate(ctx context.Context, muts ...*Mutation) ([]*Key, error) {
    pmuts, err := mutationProtos(muts)
    if err != nil {
        return nil, err
    }
    req := &pb.CommitRequest{
        ProjectId: c.dataset,
        Mutations: pmuts,
        Mode:      pb.CommitRequest_NON_TRANSACTIONAL,
    }
    resp, err := c.client.Commit(ctx, req)
    if err != nil {
        return nil, err
    }
    // Copy any newly minted keys into the returned keys.
    ret := make([]*Key, len(muts))
    for i, mut := range muts {
        if mut.key.Incomplete() {
            // This key is in the mutation results.
            ret[i], err = protoToKey(resp.MutationResults[i].Key)
            if err != nil {
                return nil, errors.New("datastore: internal error: server returned an invalid key")
            }
        } else {
            ret[i] = mut.key
        }
    }
    return ret, nil
}
64 vendor/cloud.google.com/go/datastore/datastore_test.go generated vendored
@@ -263,6 +263,43 @@ type Y2 struct {
    F []int64
}

type Pointers struct {
    Pi *int
    Ps *string
    Pb *bool
    Pf *float64
    Pg *GeoPoint
    Pt *time.Time
}

type PointersOmitEmpty struct {
    Pi *int       `datastore:",omitempty"`
    Ps *string    `datastore:",omitempty"`
    Pb *bool      `datastore:",omitempty"`
    Pf *float64   `datastore:",omitempty"`
    Pg *GeoPoint  `datastore:",omitempty"`
    Pt *time.Time `datastore:",omitempty"`
}

func populatedPointers() *Pointers {
    var (
        i int
        s string
        b bool
        f float64
        g GeoPoint
        t time.Time
    )
    return &Pointers{
        Pi: &i,
        Ps: &s,
        Pb: &b,
        Pf: &f,
        Pg: &g,
        Pt: &t,
    }
}

type Tagged struct {
    A int   `datastore:"a,noindex"`
    B []int `datastore:"b"`
@@ -406,10 +443,6 @@ type PtrToStructField struct {

var two int = 2

type PtrToInt struct {
    I *int
}

type EmbeddedTime struct {
    time.Time
}
@@ -1645,15 +1678,6 @@ var testCases = []testCase{
    "",
    "",
},
{
    "save struct with pointer to int field",
    &PtrToInt{
        I: &two,
    },
    &PtrToInt{},
    "unsupported struct field",
    "",
},
{
    "struct with nil ptr to struct fields",
    &PtrToStructField{
@@ -1903,6 +1927,20 @@ var testCases = []testCase{
    "",
    "",
},
{
    "pointer fields: nil",
    &Pointers{},
    &Pointers{},
    "",
    "",
},
{
    "pointer fields: populated with zeroes",
    populatedPointers(),
    populatedPointers(),
    "",
    "",
},
}

// checkErr returns the empty string if either both want and err are zero,
30 vendor/cloud.google.com/go/datastore/doc.go generated vendored
@@ -15,8 +15,6 @@
/*
Package datastore provides a client for Google Cloud Datastore.

Note: This package is in beta. Some backwards-incompatible changes may occur.


Basic Operations

@@ -43,7 +41,8 @@ Valid value types are:
  - time.Time (stored with microsecond precision),
  - structs whose fields are all valid value types,
  - pointers to structs whose fields are all valid value types,
  - slices of any of the above.
  - slices of any of the above,
  - pointers to a signed integer, bool, string, float32, or float64.

Slices of structs are valid, as are structs that contain slices.

@@ -86,6 +85,10 @@ GetMulti, PutMulti and DeleteMulti are batch versions of the Get, Put and
Delete functions. They take a []*Key instead of a *Key, and may return a
datastore.MultiError when encountering partial failure.

Mutate generalizes PutMulti and DeleteMulti to a sequence of any Datastore mutations.
It takes a series of mutations created with NewInsert, NewUpdate, NewUpsert and
NewDelete and applies them atomically.


Properties

@@ -118,9 +121,10 @@ field name. A "-" tag name means that the datastore will ignore that field.

The only valid options are "omitempty", "noindex" and "flatten".

If the options include "omitempty" and the value of the field is empty, then the field will be omitted on Save.
The empty values are false, 0, any nil interface value, and any array, slice, map, or string of length zero.
Struct field values will never be empty.
If the options include "omitempty" and the value of the field is empty, then the
field will be omitted on Save. The empty values are false, 0, any nil pointer or
interface value, and any array, slice, map, or string of length zero. Struct field
values will never be empty, except for nil pointers.

If options include "noindex" then the field will not be indexed. All fields are indexed
by default. Strings or byte slices longer than 1500 bytes cannot be indexed;
@@ -154,6 +158,17 @@ Example code:
    }


Pointer Fields

A struct field can be a pointer to a signed integer, floating-point number, string or bool.
Putting a non-nil pointer will store its dereferenced value. Putting a nil pointer will
store a Datastore NULL, unless the field is marked omitempty, in which case no property
will be stored.

Getting a NULL into a pointer field sets the pointer to nil. Getting any other value
allocates new storage with the value, and sets the field to point to it.


Key Field

If the struct contains a *datastore.Key field tagged with the name "__key__",
@@ -436,6 +451,9 @@ Example code:
    fmt.Printf("Count=%d\n", count)
}

Pass the ReadOnly option to RunInTransaction if your transaction is used only for Get,
GetMulti or queries. Read-only transactions are more efficient.

Google Cloud Datastore Emulator

This package supports the Cloud Datastore emulator, which is useful for testing and
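Editor's note, not part of the vendored file: the doc.go change above describes the new ReadOnly option without a code sample, so here is a minimal sketch in the style of the package's other examples (hypothetical project ID and entity names; error handling elided).

    ctx := context.Background()
    client, err := datastore.NewClient(ctx, "my-project-id") // hypothetical project
    if err != nil {
        // TODO: Handle error.
    }
    var v struct{ N int }
    // A body that only reads may run as a read-only transaction,
    // which the service executes more efficiently.
    _, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
        return tx.Get(datastore.NameKey("Counter", "c1", nil), &v)
    }, datastore.ReadOnly)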
22 vendor/cloud.google.com/go/datastore/example_test.go generated vendored
@@ -396,6 +396,28 @@ func ExampleClient_GetAll() {
    }
}

func ExampleClient_Mutate() {
    ctx := context.Background()
    client, err := datastore.NewClient(ctx, "project-id")
    if err != nil {
        // TODO: Handle error.
    }

    key1 := datastore.NameKey("Post", "post1", nil)
    key2 := datastore.NameKey("Post", "post2", nil)
    key3 := datastore.NameKey("Post", "post3", nil)
    key4 := datastore.NameKey("Post", "post4", nil)

    _, err = client.Mutate(ctx,
        datastore.NewInsert(key1, Post{Title: "Post 1"}),
        datastore.NewUpsert(key2, Post{Title: "Post 2"}),
        datastore.NewUpdate(key3, Post{Title: "Post 3"}),
        datastore.NewDelete(key4))
    if err != nil {
        // TODO: Handle error.
    }
}

func ExampleCommit_Key() {
    ctx := context.Background()
    client, err := datastore.NewClient(ctx, "")
|
380
vendor/cloud.google.com/go/datastore/integration_test.go
generated
vendored
380
vendor/cloud.google.com/go/datastore/integration_test.go
generated
vendored
@ -15,8 +15,13 @@
|
||||
package datastore
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"flag"
|
||||
"fmt"
|
||||
"log"
|
||||
"net"
|
||||
"os"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strings"
|
||||
@ -25,25 +30,143 @@ import (
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
"cloud.google.com/go/rpcreplay"
|
||||
"golang.org/x/net/context"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/grpc"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
// TODO(djd): Make test entity clean up more robust: some test entities may
|
||||
// be left behind if tests are aborted, the transport fails, etc.
|
||||
|
||||
var timeNow = time.Now()
|
||||
|
||||
// suffix is a timestamp-based suffix which is appended to key names,
|
||||
// particularly for the root keys of entity groups. This reduces flakiness
|
||||
// when the tests are run in parallel.
|
||||
var suffix = fmt.Sprintf("-t%d", time.Now().UnixNano())
|
||||
var suffix string
|
||||
|
||||
func newClient(ctx context.Context, t *testing.T) *Client {
|
||||
const replayFilename = "datastore.replay"
|
||||
|
||||
type replayInfo struct {
|
||||
ProjectID string
|
||||
Time time.Time
|
||||
}
|
||||
|
||||
var (
|
||||
record = flag.Bool("record", false, "record RPCs")
|
||||
|
||||
newTestClient = func(ctx context.Context, t *testing.T) *Client {
|
||||
return newClient(ctx, t, nil)
|
||||
}
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
os.Exit(testMain(m))
|
||||
}
|
||||
|
||||
func testMain(m *testing.M) int {
|
||||
flag.Parse()
|
||||
if testing.Short() {
|
||||
if *record {
|
||||
log.Fatal("cannot combine -short and -record")
|
||||
}
|
||||
if _, err := os.Stat(replayFilename); err == nil {
|
||||
initReplay()
|
||||
}
|
||||
} else if *record {
|
||||
if testutil.ProjID() == "" {
|
||||
log.Fatal("must record with a project ID")
|
||||
}
|
||||
b, err := json.Marshal(replayInfo{
|
||||
ProjectID: testutil.ProjID(),
|
||||
Time: timeNow,
|
||||
})
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
rec, err := rpcreplay.NewRecorder(replayFilename, b)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer func() {
|
||||
if err := rec.Close(); err != nil {
|
||||
log.Fatalf("closing recorder: %v", err)
|
||||
}
|
||||
}()
|
||||
newTestClient = func(ctx context.Context, t *testing.T) *Client {
|
||||
return newClient(ctx, t, rec.DialOptions())
|
||||
}
|
||||
log.Printf("recording to %s", replayFilename)
|
||||
}
|
||||
suffix = fmt.Sprintf("-t%d", timeNow.UnixNano())
|
||||
return m.Run()
|
||||
}
|
||||
|
||||
func initReplay() {
|
||||
rep, err := rpcreplay.NewReplayer(replayFilename)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
defer rep.Close()
|
||||
|
||||
var ri replayInfo
|
||||
if err := json.Unmarshal(rep.Initial(), &ri); err != nil {
|
||||
log.Fatalf("unmarshaling initial replay info: %v", err)
|
||||
}
|
||||
timeNow = ri.Time.In(time.Local)
|
||||
|
||||
conn, err := replayConn(rep)
|
||||
if err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
newTestClient = func(ctx context.Context, t *testing.T) *Client {
|
||||
client, err := NewClient(ctx, ri.ProjectID, option.WithGRPCConn(conn))
|
||||
if err != nil {
|
||||
t.Fatalf("NewClient: %v", err)
|
||||
}
|
||||
return client
|
||||
}
|
||||
log.Printf("replaying from %s", replayFilename)
|
||||
}
|
||||
|
||||
func replayConn(rep *rpcreplay.Replayer) (*grpc.ClientConn, error) {
|
||||
// If we make a real connection we need creds from somewhere, and they
|
||||
// might not be available, for instance on Travis.
|
||||
// Replaying doesn't require a connection live at all, but we need
|
||||
// something to attach gRPC interceptors to.
|
||||
// So we start a local listener and connect to it, then close them down.
|
||||
// TODO(jba): build something like this into the replayer?
|
||||
l, err := net.Listen("tcp", "127.0.0.1:0")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn, err := grpc.Dial(l.Addr().String(),
|
||||
append([]grpc.DialOption{grpc.WithInsecure()}, rep.DialOptions()...)...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
conn.Close()
|
||||
l.Close()
|
||||
return conn, nil
|
||||
}
|
||||
|
||||
func newClient(ctx context.Context, t *testing.T, dialOpts []grpc.DialOption) *Client {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ts := testutil.TokenSource(ctx, ScopeDatastore)
|
||||
if ts == nil {
|
||||
t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
|
||||
}
|
||||
client, err := NewClient(ctx, testutil.ProjID(), option.WithTokenSource(ts))
|
||||
opts := []option.ClientOption{option.WithTokenSource(ts)}
|
||||
for _, opt := range dialOpts {
|
||||
opts = append(opts, option.WithGRPCDialOption(opt))
|
||||
}
|
||||
client, err := NewClient(ctx, testutil.ProjID(), opts...)
|
||||
if err != nil {
|
||||
t.Fatalf("NewClient: %v", err)
|
||||
}
|
||||
@ -51,11 +174,8 @@ func newClient(ctx context.Context, t *testing.T) *Client {
|
||||
}
|
||||
|
||||
func TestBasics(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*20)
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type X struct {
|
||||
@ -64,7 +184,7 @@ func TestBasics(t *testing.T) {
|
||||
T time.Time
|
||||
}
|
||||
|
||||
x0 := X{66, "99", time.Now().Truncate(time.Millisecond)}
|
||||
x0 := X{66, "99", timeNow.Truncate(time.Millisecond)}
|
||||
k, err := client.Put(ctx, IncompleteKey("BasicsX", nil), &x0)
|
||||
if err != nil {
|
||||
t.Fatalf("client.Put: %v", err)
|
||||
@ -84,12 +204,8 @@ func TestBasics(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestTopLevelKeyLoaded(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Second*20)
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
completeKey := NameKey("EntityWithKey", "myent", nil)
|
||||
@ -124,11 +240,8 @@ func TestTopLevelKeyLoaded(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestListValues(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
p0 := PropertyList{
|
||||
@ -151,11 +264,8 @@ func TestListValues(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetMulti(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type X struct {
|
||||
@ -225,11 +335,8 @@ func (z Z) String() string {
|
||||
}
|
||||
|
||||
func TestUnindexableValues(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
x1500 := strings.Repeat("x", 1500)
|
||||
@ -256,11 +363,8 @@ func TestUnindexableValues(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNilKey(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
testCases := []struct {
|
||||
@ -341,15 +445,12 @@ func testSmallQueries(t *testing.T, ctx context.Context, client *Client, parent
|
||||
}
|
||||
|
||||
func TestFilters(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
parent := NameKey("SQParent", "TestFilters"+suffix, nil)
|
||||
now := time.Now().Truncate(time.Millisecond).Unix()
|
||||
now := timeNow.Truncate(time.Millisecond).Unix()
|
||||
children := []*SQChild{
|
||||
{I: 0, T: now, U: now},
|
||||
{I: 1, T: now, U: now},
|
||||
@ -427,16 +528,15 @@ func TestFilters(t *testing.T) {
|
||||
})
|
||||
}
|
||||
|
||||
type ckey struct{}
|
||||
|
||||
func TestLargeQuery(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
parent := NameKey("LQParent", "TestFilters"+suffix, nil)
|
||||
now := time.Now().Truncate(time.Millisecond).Unix()
|
||||
now := timeNow.Truncate(time.Millisecond).Unix()
|
||||
|
||||
// Make a large number of children entities.
|
||||
const n = 800
|
||||
@ -552,6 +652,7 @@ func TestLargeQuery(t *testing.T) {
|
||||
go func(count, limit, offset, want int) {
|
||||
defer wg.Done()
|
||||
|
||||
ctx := context.WithValue(ctx, ckey{}, fmt.Sprintf("c=%d,l=%d,o=%d", count, limit, offset))
|
||||
// Run iterator through count calls to Next.
|
||||
it := client.Run(ctx, q.Limit(limit).Offset(offset).KeysOnly())
|
||||
for i := 0; i < count; i++ {
|
||||
@ -588,7 +689,6 @@ func TestLargeQuery(t *testing.T) {
|
||||
}
|
||||
}(tt.count, tt.limit, tt.offset, tt.want)
|
||||
}
|
||||
|
||||
wg.Wait()
|
||||
}
|
||||
|
||||
@ -596,15 +696,12 @@ func TestEventualConsistency(t *testing.T) {
|
||||
// TODO(jba): either make this actually test eventual consistency, or
|
||||
// delete it. Currently it behaves the same with or without the
|
||||
// EventualConsistency call.
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
parent := NameKey("SQParent", "TestEventualConsistency"+suffix, nil)
|
||||
now := time.Now().Truncate(time.Millisecond).Unix()
|
||||
now := timeNow.Truncate(time.Millisecond).Unix()
|
||||
children := []*SQChild{
|
||||
{I: 0, T: now, U: now},
|
||||
{I: 1, T: now, U: now},
|
||||
@ -623,15 +720,12 @@ func TestEventualConsistency(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestProjection(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
parent := NameKey("SQParent", "TestProjection"+suffix, nil)
|
||||
now := time.Now().Truncate(time.Millisecond).Unix()
|
||||
now := timeNow.Truncate(time.Millisecond).Unix()
|
||||
children := []*SQChild{
|
||||
{I: 1 << 0, J: 100, T: now, U: now},
|
||||
{I: 1 << 1, J: 100, T: now, U: now},
|
||||
@ -669,11 +763,8 @@ func TestProjection(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestAllocateIDs(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
keys := make([]*Key, 5)
|
||||
@ -695,11 +786,8 @@ func TestAllocateIDs(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestGetAllWithFieldMismatch(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type Fat struct {
|
||||
@ -742,11 +830,8 @@ func TestGetAllWithFieldMismatch(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestKindlessQueries(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type Dee struct {
|
||||
@ -866,11 +951,8 @@ loop:
|
||||
}
|
||||
|
||||
func TestTransaction(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type Counter struct {
|
||||
@ -914,7 +996,7 @@ func TestTransaction(t *testing.T) {
|
||||
|
||||
for i, tt := range tests {
|
||||
// Put a new counter.
|
||||
c := &Counter{N: 10, T: time.Now()}
|
||||
c := &Counter{N: 10, T: timeNow}
|
||||
key, err := client.Put(ctx, IncompleteKey("TransCounter", nil), c)
|
||||
if err != nil {
|
||||
t.Errorf("%s: client.Put: %v", tt.desc, err)
|
||||
@ -971,12 +1053,54 @@ func TestTransaction(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilPointers(t *testing.T) {
|
||||
func TestReadOnlyTransaction(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newClient(ctx, t, nil)
|
||||
defer client.Close()
|
||||
|
||||
type value struct{ N int }
|
||||
|
||||
// Put a value.
|
||||
const n = 5
|
||||
v := &value{N: n}
|
||||
key, err := client.Put(ctx, IncompleteKey("roTxn", nil), v)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
defer client.Delete(ctx, key)
|
||||
|
||||
// Read it from a read-only transaction.
|
||||
_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
|
||||
if err := tx.Get(key, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, ReadOnly)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if v.N != n {
|
||||
t.Fatalf("got %d, want %d", v.N, n)
|
||||
}
|
||||
|
||||
// Attempting to write from a read-only transaction is an error.
|
||||
_, err = client.RunInTransaction(ctx, func(tx *Transaction) error {
|
||||
if _, err := tx.Put(key, v); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}, ReadOnly)
|
||||
if err == nil {
|
||||
t.Fatal("got nil, want error")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNilPointers(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type X struct {
|
||||
@ -1012,11 +1136,8 @@ func TestNilPointers(t *testing.T) {
|
||||
}
|
||||
|
||||
func TestNestedRepeatedElementNoIndex(t *testing.T) {
|
||||
if testing.Short() {
|
||||
t.Skip("Integration tests skipped in short mode")
|
||||
}
|
||||
ctx := context.Background()
|
||||
client := newClient(ctx, t)
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type Inner struct {
|
||||
@ -1041,3 +1162,116 @@ func TestNestedRepeatedElementNoIndex(t *testing.T) {
|
||||
t.Fatalf("client.Delete: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestPointerFields(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
want := populatedPointers()
|
||||
key, err := client.Put(ctx, IncompleteKey("pointers", nil), want)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
var got Pointers
|
||||
if err := client.Get(ctx, key, &got); err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if got.Pi == nil || *got.Pi != *want.Pi {
|
||||
t.Errorf("Pi: got %v, want %v", got.Pi, *want.Pi)
|
||||
}
|
||||
if got.Ps == nil || *got.Ps != *want.Ps {
|
||||
t.Errorf("Ps: got %v, want %v", got.Ps, *want.Ps)
|
||||
}
|
||||
if got.Pb == nil || *got.Pb != *want.Pb {
|
||||
t.Errorf("Pb: got %v, want %v", got.Pb, *want.Pb)
|
||||
}
|
||||
if got.Pf == nil || *got.Pf != *want.Pf {
|
||||
t.Errorf("Pf: got %v, want %v", got.Pf, *want.Pf)
|
||||
}
|
||||
if got.Pg == nil || *got.Pg != *want.Pg {
|
||||
t.Errorf("Pg: got %v, want %v", got.Pg, *want.Pg)
|
||||
}
|
||||
if got.Pt == nil || !got.Pt.Equal(*want.Pt) {
|
||||
t.Errorf("Pt: got %v, want %v", got.Pt, *want.Pt)
|
||||
}
|
||||
}
|
||||
|
||||
func TestMutate(t *testing.T) {
|
||||
// test Client.Mutate
|
||||
testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
|
||||
return client.Mutate(ctx, muts...)
|
||||
})
|
||||
// test Transaction.Mutate
|
||||
testMutate(t, func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error) {
|
||||
var pkeys []*PendingKey
|
||||
commit, err := client.RunInTransaction(ctx, func(tx *Transaction) error {
|
||||
var err error
|
||||
pkeys, err = tx.Mutate(muts...)
|
||||
return err
|
||||
})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var keys []*Key
|
||||
for _, pk := range pkeys {
|
||||
keys = append(keys, commit.Key(pk))
|
||||
}
|
||||
return keys, nil
|
||||
})
|
||||
}
|
||||
|
||||
func testMutate(t *testing.T, mutate func(ctx context.Context, client *Client, muts ...*Mutation) ([]*Key, error)) {
|
||||
ctx := context.Background()
|
||||
client := newTestClient(ctx, t)
|
||||
defer client.Close()
|
||||
|
||||
type T struct{ I int }
|
||||
|
||||
check := func(k *Key, want interface{}) {
|
||||
var x T
|
||||
err := client.Get(ctx, k, &x)
|
||||
switch want := want.(type) {
|
||||
case error:
|
||||
if err != want {
|
||||
t.Errorf("key %s: got error %v, want %v", k, err, want)
|
||||
}
|
||||
case int:
|
||||
if err != nil {
|
||||
t.Fatalf("key %s: %v", k, err)
|
||||
}
|
||||
if x.I != want {
|
||||
t.Errorf("key %s: got %d, want %d", k, x.I, want)
|
||||
}
|
||||
default:
|
||||
panic("check: bad arg")
|
||||
}
|
||||
}
|
||||
|
||||
keys, err := mutate(ctx, client,
|
||||
NewInsert(IncompleteKey("t", nil), &T{1}),
|
||||
NewUpsert(IncompleteKey("t", nil), &T{2}),
|
||||
)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
check(keys[0], 1)
|
||||
check(keys[1], 2)
|
||||
|
||||
_, err = mutate(ctx, client,
|
||||
NewUpdate(keys[0], &T{3}),
|
||||
NewDelete(keys[1]),
|
||||
)
|
||||
check(keys[0], 3)
|
||||
check(keys[1], ErrNoSuchEntity)
|
||||
|
||||
_, err = mutate(ctx, client, NewInsert(keys[0], &T{4}))
|
||||
if got, want := status.Code(err), codes.AlreadyExists; got != want {
|
||||
t.Errorf("Insert existing key: got %s, want %s", got, want)
|
||||
}
|
||||
|
||||
_, err = mutate(ctx, client, NewUpdate(keys[1], &T{4}))
|
||||
if got, want := status.Code(err), codes.NotFound; got != want {
|
||||
t.Errorf("Update non-existing key: got %s, want %s", got, want)
|
||||
}
|
||||
}
|
||||
|
43 vendor/cloud.google.com/go/datastore/load.go generated vendored
@@ -60,6 +60,10 @@ func typeMismatchReason(p Property, v reflect.Value) string {
    return fmt.Sprintf("type mismatch: %s versus %v", entityType, v.Type())
}

func overflowReason(x interface{}, v reflect.Value) string {
    return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
}

type propertyLoader struct {
    // m holds the number of times a substruct field like "Foo.Bar.Baz" has
    // been seen so far. The map is constructed lazily.
@@ -243,7 +247,7 @@ func plsFieldLoad(v reflect.Value, p Property, subfields []string) (ok bool, err error) {
}

// setVal sets 'v' to the value of the Property 'p'.
func setVal(v reflect.Value, p Property) string {
func setVal(v reflect.Value, p Property) (s string) {
    pValue := p.Value
    switch v.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
@@ -252,7 +256,7 @@ func setVal(v reflect.Value, p Property) string {
            return typeMismatchReason(p, v)
        }
        if v.OverflowInt(x) {
            return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
            return overflowReason(x, v)
        }
        v.SetInt(x)
    case reflect.Bool:
@@ -273,12 +277,12 @@ func setVal(v reflect.Value, p Property) string {
            return typeMismatchReason(p, v)
        }
        if v.OverflowFloat(x) {
            return fmt.Sprintf("value %v overflows struct field of type %v", x, v.Type())
            return overflowReason(x, v)
        }
        v.SetFloat(x)
    case reflect.Ptr:
        // v must be either a pointer to a Key or Entity.
        if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct {
        // v must be a pointer to either a Key, an Entity, or one of the supported basic types.
        if v.Type() != typeOfKeyPtr && v.Type().Elem().Kind() != reflect.Struct && !isValidPointerType(v.Type().Elem()) {
            return typeMismatchReason(p, v)
        }

@@ -290,21 +294,38 @@ func setVal(v reflect.Value, p Property) string {
            return ""
        }

        switch x := pValue.(type) {
        case *Key:
        if x, ok := p.Value.(*Key); ok {
            if _, ok := v.Interface().(*Key); !ok {
                return typeMismatchReason(p, v)
            }
            v.Set(reflect.ValueOf(x))
            return ""
        }
        if v.IsNil() {
            v.Set(reflect.New(v.Type().Elem()))
        }
        switch x := pValue.(type) {
        case *Entity:
            if v.IsNil() {
                v.Set(reflect.New(v.Type().Elem()))
            }
            err := loadEntity(v.Interface(), x)
            if err != nil {
                return err.Error()
            }

        case int64:
            if v.Elem().OverflowInt(x) {
                return overflowReason(x, v.Elem())
            }
            v.Elem().SetInt(x)
        case float64:
            if v.Elem().OverflowFloat(x) {
                return overflowReason(x, v.Elem())
            }
            v.Elem().SetFloat(x)
        case bool:
            v.Elem().SetBool(x)
        case string:
            v.Elem().SetString(x)
        case GeoPoint, time.Time:
            v.Elem().Set(reflect.ValueOf(x))
        default:
            return typeMismatchReason(p, v)
        }
56 vendor/cloud.google.com/go/datastore/load_test.go generated vendored
@@ -17,6 +17,7 @@ package datastore
import (
    "reflect"
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"

@@ -755,3 +756,58 @@ func TestKeyLoader(t *testing.T) {
        }
    }
}

func TestLoadPointers(t *testing.T) {
    for _, test := range []struct {
        desc string
        in   []Property
        want Pointers
    }{
        {
            desc: "nil properties load as nil pointers",
            in: []Property{
                Property{Name: "Pi", Value: nil},
                Property{Name: "Ps", Value: nil},
                Property{Name: "Pb", Value: nil},
                Property{Name: "Pf", Value: nil},
                Property{Name: "Pg", Value: nil},
                Property{Name: "Pt", Value: nil},
            },
            want: Pointers{},
        },
        {
            desc: "missing properties load as nil pointers",
            in:   []Property(nil),
            want: Pointers{},
        },
        {
            desc: "non-nil properties load as the appropriate values",
            in: []Property{
                Property{Name: "Pi", Value: int64(1)},
                Property{Name: "Ps", Value: "x"},
                Property{Name: "Pb", Value: true},
                Property{Name: "Pf", Value: 3.14},
                Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}},
                Property{Name: "Pt", Value: time.Unix(100, 0)},
            },
            want: func() Pointers {
                p := populatedPointers()
                *p.Pi = 1
                *p.Ps = "x"
                *p.Pb = true
                *p.Pf = 3.14
                *p.Pg = GeoPoint{Lat: 1, Lng: 2}
                *p.Pt = time.Unix(100, 0)
                return *p
            }(),
        },
    } {
        var got Pointers
        if err := LoadStruct(&got, test.in); err != nil {
            t.Fatalf("%s: %v", test.desc, err)
        }
        if !testutil.Equal(got, test.want) {
            t.Errorf("%s:\ngot %+v\nwant %+v", test.desc, got, test.want)
        }
    }
}
129 vendor/cloud.google.com/go/datastore/mutation.go generated vendored Normal file
@@ -0,0 +1,129 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
    "fmt"

    pb "google.golang.org/genproto/googleapis/datastore/v1"
)

// A Mutation represents a change to a Datastore entity.
type Mutation struct {
    key *Key // needed for transaction PendingKeys and to dedup deletions
    mut *pb.Mutation
    err error
}

func (m *Mutation) isDelete() bool {
    _, ok := m.mut.Operation.(*pb.Mutation_Delete)
    return ok
}

// NewInsert creates a mutation that will save the entity src into the datastore with
// key k, returning an error if k already exists.
// See Client.Put for valid values of src.
func NewInsert(k *Key, src interface{}) *Mutation {
    if !k.valid() {
        return &Mutation{err: ErrInvalidKey}
    }
    p, err := saveEntity(k, src)
    if err != nil {
        return &Mutation{err: err}
    }
    return &Mutation{
        key: k,
        mut: &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: p}},
    }
}

// NewUpsert creates a mutation that saves the entity src into the datastore with key
// k, whether or not k exists. See Client.Put for valid values of src.
func NewUpsert(k *Key, src interface{}) *Mutation {
    if !k.valid() {
        return &Mutation{err: ErrInvalidKey}
    }
    p, err := saveEntity(k, src)
    if err != nil {
        return &Mutation{err: err}
    }
    return &Mutation{
        key: k,
        mut: &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: p}},
    }
}

// NewUpdate creates a mutation that replaces the entity in the datastore with key k,
// returning an error if k does not exist. See Client.Put for valid values of src.
func NewUpdate(k *Key, src interface{}) *Mutation {
    if !k.valid() {
        return &Mutation{err: ErrInvalidKey}
    }
    if k.Incomplete() {
        return &Mutation{err: fmt.Errorf("datastore: can't update the incomplete key: %v", k)}
    }
    p, err := saveEntity(k, src)
    if err != nil {
        return &Mutation{err: err}
    }
    return &Mutation{
        key: k,
        mut: &pb.Mutation{Operation: &pb.Mutation_Update{Update: p}},
    }
}

// NewDelete creates a mutation that deletes the entity with key k.
func NewDelete(k *Key) *Mutation {
    if !k.valid() {
        return &Mutation{err: ErrInvalidKey}
    }
    if k.Incomplete() {
        return &Mutation{err: fmt.Errorf("datastore: can't delete the incomplete key: %v", k)}
    }
    return &Mutation{
        key: k,
        mut: &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(k)}},
    }
}

func mutationProtos(muts []*Mutation) ([]*pb.Mutation, error) {
    // If any of the mutations have errors, collect and return them.
    var merr MultiError
    for i, m := range muts {
        if m.err != nil {
            if merr == nil {
                merr = make(MultiError, len(muts))
            }
            merr[i] = m.err
        }
    }
    if merr != nil {
        return nil, merr
    }
    var protos []*pb.Mutation
    // Collect protos. Remove duplicate deletions (see deleteMutations).
    seen := map[string]bool{}
    for _, m := range muts {
        if m.isDelete() {
            ks := m.key.String()
            if seen[ks] {
                continue
            }
            seen[ks] = true
        }
        protos = append(protos, m.mut)
    }
    return protos, nil
}
150 vendor/cloud.google.com/go/datastore/mutation_test.go generated vendored Normal file
@@ -0,0 +1,150 @@
// Copyright 2018 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
    "testing"

    "cloud.google.com/go/internal/testutil"
    pb "google.golang.org/genproto/googleapis/datastore/v1"
)

func TestMutationProtos(t *testing.T) {
    var keys []*Key
    for i := 1; i <= 4; i++ {
        k := IDKey("kind", int64(i), nil)
        keys = append(keys, k)
    }
    entity := &PropertyList{{Name: "n", Value: "v"}}
    entityForKey := func(k *Key) *pb.Entity {
        return &pb.Entity{
            Key: keyToProto(k),
            Properties: map[string]*pb.Value{
                "n": &pb.Value{ValueType: &pb.Value_StringValue{StringValue: "v"}},
            },
        }
    }
    for _, test := range []struct {
        desc string
        in   []*Mutation
        want []*pb.Mutation
    }{
        {
            desc: "nil",
            in:   nil,
            want: nil,
        },
        {
            desc: "empty",
            in:   []*Mutation{},
            want: nil,
        },
        {
            desc: "various",
            in: []*Mutation{
                NewInsert(keys[0], entity),
                NewUpsert(keys[1], entity),
                NewUpdate(keys[2], entity),
                NewDelete(keys[3]),
            },
            want: []*pb.Mutation{
                &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[0])}},
                &pb.Mutation{Operation: &pb.Mutation_Upsert{Upsert: entityForKey(keys[1])}},
                &pb.Mutation{Operation: &pb.Mutation_Update{Update: entityForKey(keys[2])}},
                &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[3])}},
            },
        },
        {
            desc: "duplicate deletes",
            in: []*Mutation{
                NewDelete(keys[0]),
                NewInsert(keys[1], entity),
                NewDelete(keys[0]),
                NewDelete(keys[2]),
                NewDelete(keys[0]),
            },
            want: []*pb.Mutation{
                &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[0])}},
                &pb.Mutation{Operation: &pb.Mutation_Insert{Insert: entityForKey(keys[1])}},
                &pb.Mutation{Operation: &pb.Mutation_Delete{Delete: keyToProto(keys[2])}},
            },
        },
    } {
        got, err := mutationProtos(test.in)
        if err != nil {
            t.Errorf("%s: %v", test.desc, err)
            continue
        }
        if diff := testutil.Diff(got, test.want); diff != "" {
            t.Errorf("%s: %s", test.desc, diff)
        }
    }
}

func TestMutationProtosErrors(t *testing.T) {
    entity := &PropertyList{{Name: "n", Value: "v"}}
    k := IDKey("kind", 1, nil)
    ik := IncompleteKey("kind", nil)
    for _, test := range []struct {
        desc string
        in   []*Mutation
        want []int // non-nil indexes of MultiError
    }{
        {
            desc: "invalid key",
            in: []*Mutation{
                NewInsert(nil, entity),
                NewUpdate(nil, entity),
                NewUpsert(nil, entity),
                NewDelete(nil),
            },
            want: []int{0, 1, 2, 3},
        },
        {
            desc: "incomplete key",
            in: []*Mutation{
                NewInsert(ik, entity),
                NewUpdate(ik, entity),
                NewUpsert(ik, entity),
                NewDelete(ik),
            },
            want: []int{1, 3},
        },
        {
            desc: "bad entity",
            in: []*Mutation{
                NewInsert(k, 1),
                NewUpdate(k, 2),
                NewUpsert(k, 3),
            },
            want: []int{0, 1, 2},
        },
    } {
        _, err := mutationProtos(test.in)
        if err == nil {
            t.Errorf("%s: got nil, want error", test.desc)
            continue
        }
        var got []int
        for i, err := range err.(MultiError) {
            if err != nil {
                got = append(got, i)
            }
        }
        if !testutil.Equal(got, test.want) {
            t.Errorf("%s: got errors at %v, want at %v", test.desc, got, test.want)
        }
    }
}
43 vendor/cloud.google.com/go/datastore/save.go generated vendored
@@ -88,9 +88,19 @@ func saveStructProperty(props *[]Property, name string, opts saveOpts, v reflect
            return saveSliceProperty(props, name, opts, v)
        }
    case reflect.Ptr:
        if isValidPointerType(v.Type().Elem()) {
            if v.IsNil() {
                // Nil pointer becomes a nil property value (unless omitempty, handled above).
                p.Value = nil
                *props = append(*props, p)
                return nil
            }
            return saveStructProperty(props, name, opts, v.Elem())
        }
        if v.Type().Elem().Kind() != reflect.Struct {
            return fmt.Errorf("datastore: unsupported struct field type: %s", v.Type())
        }
        // Pointer to struct is a special case.
        if v.IsNil() {
            return nil
        }
@@ -395,10 +405,18 @@ func interfaceToProto(iv interface{}, noIndex bool) (*pb.Value, error) {
        // than the top-level value.
        val.ExcludeFromIndexes = false
    default:
        if iv != nil {
            return nil, fmt.Errorf("invalid Value type %t", iv)
        rv := reflect.ValueOf(iv)
        if !rv.IsValid() {
            val.ValueType = &pb.Value_NullValue{}
        } else if rv.Kind() == reflect.Ptr { // non-nil pointer: dereference
            if rv.IsNil() {
                val.ValueType = &pb.Value_NullValue{}
                return val, nil
            }
            return interfaceToProto(rv.Elem().Interface(), noIndex)
        } else {
            return nil, fmt.Errorf("invalid Value type %T", iv)
        }
        val.ValueType = &pb.Value_NullValue{}
    }
    // TODO(jbd): Support EntityValue.
    return val, nil
@@ -423,3 +441,22 @@ func isEmptyValue(v reflect.Value) bool {
    }
    return false
}

// isValidPointerType reports whether a struct field can be a pointer to type t
// for the purposes of saving and loading.
func isValidPointerType(t reflect.Type) bool {
    if t == typeOfTime || t == typeOfGeoPoint {
        return true
    }
    switch t.Kind() {
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return true
    case reflect.Bool:
        return true
    case reflect.String:
        return true
    case reflect.Float32, reflect.Float64:
        return true
    }
    return false
}
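A small sketch of the pointer-field behavior the hunks above introduce, under the same rules the tests below exercise: a nil pointer saves as a nil-valued property, a non-nil pointer saves its pointee. The Profile type and field names are assumptions for illustration.

// Sketch only: nil vs. non-nil pointer fields through SaveStruct.
package main

import (
    "fmt"
    "log"

    "cloud.google.com/go/datastore"
)

type Profile struct {
    Age  *int64  // nil unless set
    Name *string // nil unless set
}

func main() {
    age := int64(30)
    props, err := datastore.SaveStruct(&Profile{Age: &age}) // Name stays nil
    if err != nil {
        log.Fatal(err)
    }
    for _, p := range props {
        fmt.Printf("%s = %v\n", p.Name, p.Value) // Age = 30, Name = <nil>
    }
}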
97 vendor/cloud.google.com/go/datastore/save_test.go generated vendored
@@ -16,22 +16,32 @@ package datastore

import (
    "testing"
    "time"

    "cloud.google.com/go/internal/testutil"

    pb "google.golang.org/genproto/googleapis/datastore/v1"
)

func TestInterfaceToProtoNilKey(t *testing.T) {
    var iv *Key
    pv, err := interfaceToProto(iv, false)
    if err != nil {
        t.Fatalf("nil key: interfaceToProto: %v", err)
    }

    _, ok := pv.ValueType.(*pb.Value_NullValue)
    if !ok {
        t.Errorf("nil key: type:\ngot: %T\nwant: %T", pv.ValueType, &pb.Value_NullValue{})
func TestInterfaceToProtoNil(t *testing.T) {
    // A nil *Key, or a nil value of any other pointer type, should convert to a NullValue.
    for _, in := range []interface{}{
        (*Key)(nil),
        (*int)(nil),
        (*string)(nil),
        (*bool)(nil),
        (*float64)(nil),
        (*GeoPoint)(nil),
        (*time.Time)(nil),
    } {
        got, err := interfaceToProto(in, false)
        if err != nil {
            t.Fatalf("%T: %v", in, err)
        }
        _, ok := got.ValueType.(*pb.Value_NullValue)
        if !ok {
            t.Errorf("%T: got: %T\nwant: %T", in, got.ValueType, &pb.Value_NullValue{})
        }
    }
}

@@ -193,3 +203,70 @@ func TestSaveEntityNested(t *testing.T) {
        }
    }
}

func TestSavePointers(t *testing.T) {
    for _, test := range []struct {
        desc string
        in   interface{}
        want []Property
    }{
        {
            desc: "nil pointers save as nil-valued properties",
            in:   &Pointers{},
            want: []Property{
                Property{Name: "Pi", Value: nil},
                Property{Name: "Ps", Value: nil},
                Property{Name: "Pb", Value: nil},
                Property{Name: "Pf", Value: nil},
                Property{Name: "Pg", Value: nil},
                Property{Name: "Pt", Value: nil},
            },
        },
        {
            desc: "nil omitempty pointers not saved",
            in:   &PointersOmitEmpty{},
            want: []Property(nil),
        },
        {
            desc: "non-nil zero-valued pointers save as zero values",
            in:   populatedPointers(),
            want: []Property{
                Property{Name: "Pi", Value: int64(0)},
                Property{Name: "Ps", Value: ""},
                Property{Name: "Pb", Value: false},
                Property{Name: "Pf", Value: 0.0},
                Property{Name: "Pg", Value: GeoPoint{}},
                Property{Name: "Pt", Value: time.Time{}},
            },
        },
        {
            desc: "non-nil non-zero-valued pointers save as the appropriate values",
            in: func() *Pointers {
                p := populatedPointers()
                *p.Pi = 1
                *p.Ps = "x"
                *p.Pb = true
                *p.Pf = 3.14
                *p.Pg = GeoPoint{Lat: 1, Lng: 2}
                *p.Pt = time.Unix(100, 0)
                return p
            }(),
            want: []Property{
                Property{Name: "Pi", Value: int64(1)},
                Property{Name: "Ps", Value: "x"},
                Property{Name: "Pb", Value: true},
                Property{Name: "Pf", Value: 3.14},
                Property{Name: "Pg", Value: GeoPoint{Lat: 1, Lng: 2}},
                Property{Name: "Pt", Value: time.Unix(100, 0)},
            },
        },
    } {
        got, err := SaveStruct(test.in)
        if err != nil {
            t.Fatalf("%s: %v", test.desc, err)
        }
        if !testutil.Equal(got, test.want) {
            t.Errorf("%s\ngot %#v\nwant %#v\n", test.desc, got, test.want)
        }
    }
}
85 vendor/cloud.google.com/go/datastore/transaction.go generated vendored
@@ -32,6 +32,8 @@ var errExpiredTransaction = errors.New("datastore: transaction expired")

type transactionSettings struct {
    attempts int
    readOnly bool
    prevID   []byte // ID of the transaction to retry
}

// newTransactionSettings creates a transactionSettings with a given TransactionOption slice.
@@ -62,6 +64,19 @@ func (w maxAttempts) apply(s *transactionSettings) {
    }
}

// ReadOnly is a TransactionOption that marks the transaction as read-only.
var ReadOnly TransactionOption

func init() {
    ReadOnly = readOnly{}
}

type readOnly struct{}

func (readOnly) apply(s *transactionSettings) {
    s.readOnly = true
}
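A minimal sketch of the new ReadOnly option defined above: passing it to RunInTransaction begins the transaction in read-only mode, so only reads belong inside the function. The project ID and Task entity type are assumptions for illustration.

// Sketch only: a read-only transaction via the ReadOnly TransactionOption.
package main

import (
    "log"

    "cloud.google.com/go/datastore"
    "golang.org/x/net/context"
)

type Task struct{ Note string } // assumed entity type

func main() {
    ctx := context.Background()
    client, err := datastore.NewClient(ctx, "my-project") // assumed project ID
    if err != nil {
        log.Fatal(err)
    }
    var task Task
    _, err = client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
        return tx.Get(datastore.IDKey("Task", 1, nil), &task) // reads only
    }, datastore.ReadOnly)
    if err != nil {
        log.Fatal(err)
    }
}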
// Transaction represents a set of datastore operations to be committed atomically.
//
// Operations are enqueued by calling the Put and Delete methods on Transaction
@@ -86,14 +101,26 @@ func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption)
            return nil, errors.New("datastore: NewTransaction does not accept MaxAttempts option")
        }
    }
    req := &pb.BeginTransactionRequest{
        ProjectId: c.dataset,
    return c.newTransaction(ctx, newTransactionSettings(opts))
}

func (c *Client) newTransaction(ctx context.Context, s *transactionSettings) (*Transaction, error) {
    req := &pb.BeginTransactionRequest{ProjectId: c.dataset}
    if s.readOnly {
        req.TransactionOptions = &pb.TransactionOptions{
            Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}},
        }
    } else if s.prevID != nil {
        req.TransactionOptions = &pb.TransactionOptions{
            Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{
                PreviousTransaction: s.prevID,
            }},
        }
    }
    resp, err := c.client.BeginTransaction(ctx, req)
    if err != nil {
        return nil, err
    }

    return &Transaction{
        id:  resp.Transaction,
        ctx: ctx,
@@ -128,7 +155,7 @@ func (c *Client) NewTransaction(ctx context.Context, opts ...TransactionOption)
func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) error, opts ...TransactionOption) (*Commit, error) {
    settings := newTransactionSettings(opts)
    for n := 0; n < settings.attempts; n++ {
        tx, err := c.NewTransaction(ctx)
        tx, err := c.newTransaction(ctx, settings)
        if err != nil {
            return nil, err
        }
@@ -139,6 +166,11 @@ func (c *Client) RunInTransaction(ctx context.Context, f func(tx *Transaction) e
        if cmt, err := tx.Commit(); err != ErrConcurrentTransaction {
            return cmt, err
        }
        // Pass this transaction's ID to the retry transaction to preserve
        // transaction priority.
        if !settings.readOnly {
            settings.prevID = tx.id
        }
    }
    return nil, ErrConcurrentTransaction
}
@@ -241,6 +273,7 @@ func (t *Transaction) Put(key *Key, src interface{}) (*PendingKey, error) {
// PutMulti is a batch version of Put. One PendingKey is returned for each
// element of src in the same order.
func (t *Transaction) PutMulti(keys []*Key, src interface{}) ([]*PendingKey, error) {
    // TODO(jba): rewrite in terms of Mutate.
    if t.id == nil {
        return nil, errExpiredTransaction
    }
@@ -280,6 +313,7 @@ func (t *Transaction) Delete(key *Key) error {

// DeleteMulti is a batch version of Delete.
func (t *Transaction) DeleteMulti(keys []*Key) error {
    // TODO(jba): rewrite in terms of Mutate.
    if t.id == nil {
        return errExpiredTransaction
    }
@@ -291,12 +325,53 @@ func (t *Transaction) DeleteMulti(keys []*Key) error {
    return nil
}

// Mutate adds the mutations to the transaction. They will all be applied atomically
// upon calling Commit. Mutate returns a PendingKey for each Mutation in the argument
// list, in the same order. PendingKeys for Delete mutations are always nil.
//
// If any of the mutations are invalid, Mutate returns a MultiError with the errors.
// Mutate returns a MultiError in this case even if there is only one Mutation.
//
// For an example, see Client.Mutate.
func (t *Transaction) Mutate(muts ...*Mutation) ([]*PendingKey, error) {
    if t.id == nil {
        return nil, errExpiredTransaction
    }
    pmuts, err := mutationProtos(muts)
    if err != nil {
        return nil, err
    }
    origin := len(t.mutations)
    t.mutations = append(t.mutations, pmuts...)
    // Prepare the returned handles, pre-populating where possible.
    ret := make([]*PendingKey, len(muts))
    for i, mut := range muts {
        if mut.isDelete() {
            continue
        }
        p := &PendingKey{}
        if mut.key.Incomplete() {
            // This key will be in the final commit result.
            t.pending[origin+i] = p
        } else {
            p.key = mut.key
        }
        ret[i] = p
    }
    return ret, nil
}
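A hedged sketch of Transaction.Mutate as documented above: PendingKeys for inserts with incomplete keys resolve through Commit.Key after the transaction commits, and Delete mutations always yield a nil PendingKey. The project ID, Task type, and entity IDs are assumptions for illustration.

// Sketch only: Transaction.Mutate and PendingKey resolution via Commit.Key.
package main

import (
    "fmt"
    "log"

    "cloud.google.com/go/datastore"
    "golang.org/x/net/context"
)

type Task struct{ Note string } // assumed entity type

func main() {
    ctx := context.Background()
    client, err := datastore.NewClient(ctx, "my-project") // assumed project ID
    if err != nil {
        log.Fatal(err)
    }
    var pending []*datastore.PendingKey
    cmt, err := client.RunInTransaction(ctx, func(tx *datastore.Transaction) error {
        pks, err := tx.Mutate(
            datastore.NewInsert(datastore.IncompleteKey("Task", nil), &Task{Note: "new"}),
            datastore.NewDelete(datastore.IDKey("Task", 2, nil)),
        )
        pending = pks
        return err
    })
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(cmt.Key(pending[0])) // final key allocated for the insert
    // pending[1] is nil: Delete mutations never produce a PendingKey.
}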
// Commit represents the result of a committed transaction.
type Commit struct{}

// Key resolves a pending key handle into a final key.
func (c *Commit) Key(p *PendingKey) *Key {
    if c != p.commit {
    if p == nil { // if called on a *PendingKey from a Delete mutation
        return nil
    }
    // If p.commit is nil, the PendingKey did not come from an incomplete key,
    // so p.key is valid.
    if p.commit != nil && c != p.commit {
        panic("PendingKey was not created by corresponding transaction")
    }
    return p.key
78 vendor/cloud.google.com/go/datastore/transaction_test.go generated vendored Normal file
@@ -0,0 +1,78 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package datastore

import (
    "testing"

    "github.com/golang/protobuf/proto"
    "golang.org/x/net/context"

    pb "google.golang.org/genproto/googleapis/datastore/v1"
)

func TestNewTransaction(t *testing.T) {
    var got *pb.BeginTransactionRequest
    client := &Client{
        dataset: "project",
        client: &fakeDatastoreClient{
            beginTransaction: func(req *pb.BeginTransactionRequest) (*pb.BeginTransactionResponse, error) {
                got = req
                return &pb.BeginTransactionResponse{
                    Transaction: []byte("tid"),
                }, nil
            },
        },
    }
    ctx := context.Background()
    for _, test := range []struct {
        settings *transactionSettings
        want     *pb.BeginTransactionRequest
    }{
        {
            &transactionSettings{},
            &pb.BeginTransactionRequest{ProjectId: "project"},
        },
        {
            &transactionSettings{readOnly: true},
            &pb.BeginTransactionRequest{
                ProjectId: "project",
                TransactionOptions: &pb.TransactionOptions{
                    Mode: &pb.TransactionOptions_ReadOnly_{ReadOnly: &pb.TransactionOptions_ReadOnly{}},
                },
            },
        },
        {
            &transactionSettings{prevID: []byte("tid")},
            &pb.BeginTransactionRequest{
                ProjectId: "project",
                TransactionOptions: &pb.TransactionOptions{
                    Mode: &pb.TransactionOptions_ReadWrite_{ReadWrite: &pb.TransactionOptions_ReadWrite{
                        PreviousTransaction: []byte("tid"),
                    },
                    },
                },
            },
        },
    } {
        _, err := client.newTransaction(ctx, test.settings)
        if err != nil {
            t.Fatal(err)
        }
        if !proto.Equal(got, test.want) {
            t.Errorf("%+v:\ngot %+v\nwant %+v", test.settings, got, test.want)
        }
    }
}
4 vendor/cloud.google.com/go/debugger/apiv2/controller2_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
10 vendor/cloud.google.com/go/debugger/apiv2/controller2_client_example_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@ func ExampleController2Client_RegisterDebuggee() {
    }

    req := &clouddebuggerpb.RegisterDebuggeeRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.RegisterDebuggee(ctx, req)
    if err != nil {
@@ -58,7 +58,7 @@ func ExampleController2Client_ListActiveBreakpoints() {
    }

    req := &clouddebuggerpb.ListActiveBreakpointsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListActiveBreakpoints(ctx, req)
    if err != nil {
@@ -76,7 +76,7 @@ func ExampleController2Client_UpdateActiveBreakpoint() {
    }

    req := &clouddebuggerpb.UpdateActiveBreakpointRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateActiveBreakpoint(ctx, req)
    if err != nil {
4 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
14 vendor/cloud.google.com/go/debugger/apiv2/debugger2_client_example_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@ func ExampleDebugger2Client_SetBreakpoint() {
    }

    req := &clouddebuggerpb.SetBreakpointRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.SetBreakpoint(ctx, req)
    if err != nil {
@@ -58,7 +58,7 @@ func ExampleDebugger2Client_GetBreakpoint() {
    }

    req := &clouddebuggerpb.GetBreakpointRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetBreakpoint(ctx, req)
    if err != nil {
@@ -76,7 +76,7 @@ func ExampleDebugger2Client_DeleteBreakpoint() {
    }

    req := &clouddebuggerpb.DeleteBreakpointRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    err = c.DeleteBreakpoint(ctx, req)
    if err != nil {
@@ -92,7 +92,7 @@ func ExampleDebugger2Client_ListBreakpoints() {
    }

    req := &clouddebuggerpb.ListBreakpointsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListBreakpoints(ctx, req)
    if err != nil {
@@ -110,7 +110,7 @@ func ExampleDebugger2Client_ListDebuggees() {
    }

    req := &clouddebuggerpb.ListDebuggeesRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListDebuggees(ctx, req)
    if err != nil {
4 vendor/cloud.google.com/go/debugger/apiv2/doc.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
4 vendor/cloud.google.com/go/debugger/apiv2/mock_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
6 vendor/cloud.google.com/go/dlp/apiv2beta1/InspectContent_smoke_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,6 +21,7 @@ import (
)

import (
    "fmt"
    "strconv"
    "testing"
    "time"
@@ -31,6 +32,7 @@ import (
    "google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
12 vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -153,14 +153,6 @@ func (c *Client) setGoogleClientInfo(keyval ...string) {
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ResultPath returns the path for the result resource.
func ResultPath(result string) string {
    return "" +
        "inspect/results/" +
        result +
        ""
}

// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
20 vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client_example_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@ func ExampleClient_InspectContent() {
    }

    req := &dlppb.InspectContentRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.InspectContent(ctx, req)
    if err != nil {
@@ -58,7 +58,7 @@ func ExampleClient_RedactContent() {
    }

    req := &dlppb.RedactContentRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.RedactContent(ctx, req)
    if err != nil {
@@ -76,7 +76,7 @@ func ExampleClient_DeidentifyContent() {
    }

    req := &dlppb.DeidentifyContentRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.DeidentifyContent(ctx, req)
    if err != nil {
@@ -94,7 +94,7 @@ func ExampleClient_AnalyzeDataSourceRisk() {
    }

    req := &dlppb.AnalyzeDataSourceRiskRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    op, err := c.AnalyzeDataSourceRisk(ctx, req)
    if err != nil {
@@ -117,7 +117,7 @@ func ExampleClient_CreateInspectOperation() {
    }

    req := &dlppb.CreateInspectOperationRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    op, err := c.CreateInspectOperation(ctx, req)
    if err != nil {
@@ -140,7 +140,7 @@ func ExampleClient_ListInspectFindings() {
    }

    req := &dlppb.ListInspectFindingsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListInspectFindings(ctx, req)
    if err != nil {
@@ -158,7 +158,7 @@ func ExampleClient_ListInfoTypes() {
    }

    req := &dlppb.ListInfoTypesRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListInfoTypes(ctx, req)
    if err != nil {
@@ -176,7 +176,7 @@ func ExampleClient_ListRootCategories() {
    }

    req := &dlppb.ListRootCategoriesRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ListRootCategories(ctx, req)
    if err != nil {
4 vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
38 vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -301,9 +301,20 @@ func TestDlpServiceRedactContent(t *testing.T) {
        },
    }
    var items = []*dlppb.ContentItem{itemsElement}
    var name2 string = "EMAIL_ADDRESS"
    var infoType = &dlppb.InfoType{
        Name: name2,
    }
    var replaceWith string = "REDACTED"
    var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
        InfoType:    infoType,
        ReplaceWith: replaceWith,
    }
    var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
    var request = &dlppb.RedactContentRequest{
        InspectConfig: inspectConfig,
        Items:         items,
        InspectConfig:  inspectConfig,
        Items:          items,
        ReplaceConfigs: replaceConfigs,
    }

    c, err := NewClient(context.Background(), clientOpt)
@@ -347,9 +358,20 @@ func TestDlpServiceRedactContentError(t *testing.T) {
        },
    }
    var items = []*dlppb.ContentItem{itemsElement}
    var name2 string = "EMAIL_ADDRESS"
    var infoType = &dlppb.InfoType{
        Name: name2,
    }
    var replaceWith string = "REDACTED"
    var replaceConfigsElement = &dlppb.RedactContentRequest_ReplaceConfig{
        InfoType:    infoType,
        ReplaceWith: replaceWith,
    }
    var replaceConfigs = []*dlppb.RedactContentRequest_ReplaceConfig{replaceConfigsElement}
    var request = &dlppb.RedactContentRequest{
        InspectConfig: inspectConfig,
        Items:         items,
        InspectConfig:  inspectConfig,
        Items:          items,
        ReplaceConfigs: replaceConfigs,
    }

    c, err := NewClient(context.Background(), clientOpt)
@@ -656,7 +678,7 @@ func TestDlpServiceListInspectFindings(t *testing.T) {

    mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

    var formattedName string = ResultPath("[RESULT]")
    var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
    var request = &dlppb.ListInspectFindingsRequest{
        Name: formattedName,
    }
@@ -685,7 +707,7 @@ func TestDlpServiceListInspectFindingsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockDlp.err = gstatus.Error(errCode, "test error")

    var formattedName string = ResultPath("[RESULT]")
    var formattedName string = fmt.Sprintf("inspect/results/%s", "[RESULT]")
    var request = &dlppb.ListInspectFindingsRequest{
        Name: formattedName,
    }
27 vendor/cloud.google.com/go/dlp/apiv2beta1/path_funcs.go generated vendored Normal file
@@ -0,0 +1,27 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package dlp

// ResultPath returns the path for the result resource.
//
// Deprecated: Use
// fmt.Sprintf("inspect/results/%s", result)
// instead.
func ResultPath(result string) string {
    return "" +
        "inspect/results/" +
        result +
        ""
}
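The deprecation notice above maps one-to-one onto fmt.Sprintf; a short before/after sketch, assuming the dlp and fmt packages are imported and using an arbitrary result ID:

oldPath := dlp.ResultPath("my-result")                    // deprecated helper
newPath := fmt.Sprintf("inspect/results/%s", "my-result") // documented replacement
fmt.Println(oldPath == newPath)                           // true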
8 vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -21,6 +21,7 @@ import (
)

import (
    "fmt"
    "strconv"
    "testing"
    "time"
@@ -31,6 +32,7 @@ import (
    "google.golang.org/api/option"
)

var _ = fmt.Sprintf
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
@@ -53,7 +55,7 @@ func TestReportErrorsServiceSmoke(t *testing.T) {
        t.Fatal(err)
    }

    var formattedProjectName string = ReportErrorsProjectPath(projectId)
    var formattedProjectName string = fmt.Sprintf("projects/%s", projectId)
    var message string = "[MESSAGE]"
    var service string = "[SERVICE]"
    var serviceContext = &clouderrorreportingpb.ServiceContext{
4 vendor/cloud.google.com/go/errorreporting/apiv1beta1/doc.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
14 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_group_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -117,16 +117,6 @@ func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorGroupGroupPath returns the path for the group resource.
func ErrorGroupGroupPath(project, group string) string {
    return "" +
        "projects/" +
        project +
        "/groups/" +
        group +
        ""
}

// GetGroup get the specified group.
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@ func ExampleErrorGroupClient_GetGroup() {
    }

    req := &clouderrorreportingpb.GetGroupRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.GetGroup(ctx, req)
    if err != nil {
@@ -58,7 +58,7 @@ func ExampleErrorGroupClient_UpdateGroup() {
    }

    req := &clouderrorreportingpb.UpdateGroupRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.UpdateGroup(ctx, req)
    if err != nil {
12 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -122,14 +122,6 @@ func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorStatsProjectPath returns the path for the project resource.
func ErrorStatsProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}

// ListGroupStats lists the specified groups.
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
    ctx = insertMetadata(ctx, c.xGoogMetadata)
10 vendor/cloud.google.com/go/errorreporting/apiv1beta1/error_stats_client_example_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -41,7 +41,7 @@ func ExampleErrorStatsClient_ListGroupStats() {
    }

    req := &clouderrorreportingpb.ListGroupStatsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListGroupStats(ctx, req)
    for {
@@ -65,7 +65,7 @@ func ExampleErrorStatsClient_ListEvents() {
    }

    req := &clouderrorreportingpb.ListEventsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    it := c.ListEvents(ctx, req)
    for {
@@ -89,7 +89,7 @@ func ExampleErrorStatsClient_DeleteEvents() {
    }

    req := &clouderrorreportingpb.DeleteEventsRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.DeleteEvents(ctx, req)
    if err != nil {
24 vendor/cloud.google.com/go/errorreporting/apiv1beta1/mock_test.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -208,7 +208,7 @@ func TestErrorGroupServiceGetGroup(t *testing.T) {

    mockErrorGroup.resps = append(mockErrorGroup.resps[:0], expectedResponse)

    var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
    var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]")
    var request = &clouderrorreportingpb.GetGroupRequest{
        GroupName: formattedGroupName,
    }
@@ -237,7 +237,7 @@ func TestErrorGroupServiceGetGroupError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockErrorGroup.err = gstatus.Error(errCode, "test error")

    var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
    var formattedGroupName string = fmt.Sprintf("projects/%s/groups/%s", "[PROJECT]", "[GROUP]")
    var request = &clouderrorreportingpb.GetGroupRequest{
        GroupName: formattedGroupName,
    }
@@ -331,7 +331,7 @@ func TestErrorStatsServiceListGroupStats(t *testing.T) {

    mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
    var request = &clouderrorreportingpb.ListGroupStatsRequest{
        ProjectName: formattedProjectName,
@@ -372,7 +372,7 @@ func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockErrorStats.err = gstatus.Error(errCode, "test error")

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
    var request = &clouderrorreportingpb.ListGroupStatsRequest{
        ProjectName: formattedProjectName,
@@ -407,7 +407,7 @@ func TestErrorStatsServiceListEvents(t *testing.T) {

    mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var groupId string = "groupId506361563"
    var request = &clouderrorreportingpb.ListEventsRequest{
        ProjectName: formattedProjectName,
@@ -448,7 +448,7 @@ func TestErrorStatsServiceListEventsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockErrorStats.err = gstatus.Error(errCode, "test error")

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var groupId string = "groupId506361563"
    var request = &clouderrorreportingpb.ListEventsRequest{
        ProjectName: formattedProjectName,
@@ -477,7 +477,7 @@ func TestErrorStatsServiceDeleteEvents(t *testing.T) {

    mockErrorStats.resps = append(mockErrorStats.resps[:0], expectedResponse)

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var request = &clouderrorreportingpb.DeleteEventsRequest{
        ProjectName: formattedProjectName,
    }
@@ -506,7 +506,7 @@ func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockErrorStats.err = gstatus.Error(errCode, "test error")

    var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var request = &clouderrorreportingpb.DeleteEventsRequest{
        ProjectName: formattedProjectName,
    }
@@ -533,7 +533,7 @@ func TestReportErrorsServiceReportErrorEvent(t *testing.T) {

    mockReportErrors.resps = append(mockReportErrors.resps[:0], expectedResponse)

    var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
    var request = &clouderrorreportingpb.ReportErrorEventRequest{
        ProjectName: formattedProjectName,
@@ -564,7 +564,7 @@ func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
    errCode := codes.PermissionDenied
    mockReportErrors.err = gstatus.Error(errCode, "test error")

    var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
    var formattedProjectName string = fmt.Sprintf("projects/%s", "[PROJECT]")
    var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
    var request = &clouderrorreportingpb.ReportErrorEventRequest{
        ProjectName: formattedProjectName,
51 vendor/cloud.google.com/go/errorreporting/apiv1beta1/path_funcs.go generated vendored Normal file
@@ -0,0 +1,51 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errorreporting

// ResultPath returns the path for the result resource.
//
// Deprecated: Use
// fmt.Sprintf("inspect/results/%s", result)
// instead.
func ResultPath(result string) string {
    return "" +
        "inspect/results/" +
        result +
        ""
}

// ErrorStatsProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ErrorStatsProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}

// ReportErrorsProjectPath returns the path for the project resource.
//
// Deprecated: Use
// fmt.Sprintf("projects/%s", project)
// instead.
func ReportErrorsProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}
12 vendor/cloud.google.com/go/errorreporting/apiv1beta1/report_errors_client.go generated vendored
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -99,14 +99,6 @@ func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
    c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ReportErrorsProjectPath returns the path for the project resource.
func ReportErrorsProjectPath(project string) string {
    return "" +
        "projects/" +
        project +
        ""
}

// ReportErrorEvent report an individual error event.
//
// This endpoint accepts <strong>either</strong> an OAuth token,
@@ -1,10 +1,10 @@
// Copyright 2017, Google LLC All rights reserved.
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
@@ -40,7 +40,7 @@ func ExampleReportErrorsClient_ReportErrorEvent() {
    }

    req := &clouderrorreportingpb.ReportErrorEventRequest{
    // TODO: Fill request struct fields.
        // TODO: Fill request struct fields.
    }
    resp, err := c.ReportErrorEvent(ctx, req)
    if err != nil {
Some files were not shown because too many files have changed in this diff.