vendor: update all dependencies

This commit is contained in:
Nick Craig-Wood
2018-06-17 17:59:12 +01:00
parent 3f0789e2db
commit 08021c4636
2474 changed files with 435818 additions and 282709 deletions

View File

@ -60,11 +60,6 @@
"location": "query",
"type": "string"
},
"bearer_token": {
"description": "OAuth bearer token.",
"location": "query",
"type": "string"
},
"callback": {
"description": "JSONP",
"location": "query",
@ -85,12 +80,6 @@
"location": "query",
"type": "string"
},
"pp": {
"default": "true",
"description": "Pretty-print response.",
"location": "query",
"type": "boolean"
},
"prettyPrint": {
"default": "true",
"description": "Returns response with indentations and line breaks.",
@ -992,7 +981,7 @@
}
}
},
"revision": "20180420",
"revision": "20180605",
"rootUrl": "https://ml.googleapis.com/",
"schemas": {
"GoogleApi__HttpBody": {
@ -1044,7 +1033,7 @@
"id": "GoogleCloudMlV1__AutoScaling",
"properties": {
"minNodes": {
"description": "Optional. The minimum number of nodes to allocate for this model. These\nnodes are always up, starting from the time the model is deployed, so the\ncost of operating this model will be at least\n`rate` * `min_nodes` * number of hours since last billing cycle,\nwhere `rate` is the cost per node-hour as documented in the\n[pricing guide](/ml-engine/docs/pricing),\neven if no predictions are performed. There is additional cost for each\nprediction performed.\n\nUnlike manual scaling, if the load gets too heavy for the nodes\nthat are up, the service will automatically add nodes to handle the\nincreased load as well as scale back as traffic drops, always maintaining\nat least `min_nodes`. You will be charged for the time in which additional\nnodes are used.\n\nIf not specified, `min_nodes` defaults to 0, in which case, when traffic\nto a model stops (and after a cool-down period), nodes will be shut down\nand no charges will be incurred until traffic to the model resumes.",
"description": "Optional. The minimum number of nodes to allocate for this model. These\nnodes are always up, starting from the time the model is deployed.\nTherefore, the cost of operating this model will be at least\n`rate` * `min_nodes` * number of hours since last billing cycle,\nwhere `rate` is the cost per node-hour as documented in the\n[pricing guide](/ml-engine/docs/pricing),\neven if no predictions are performed. There is additional cost for each\nprediction performed.\n\nUnlike manual scaling, if the load gets too heavy for the nodes\nthat are up, the service will automatically add nodes to handle the\nincreased load as well as scale back as traffic drops, always maintaining\nat least `min_nodes`. You will be charged for the time in which additional\nnodes are used.\n\nIf not specified, `min_nodes` defaults to 0, in which case, when traffic\nto a model stops (and after a cool-down period), nodes will be shut down\nand no charges will be incurred until traffic to the model resumes.\n\nYou can set `min_nodes` when creating the model version, and you can also\nupdate `min_nodes` for an existing version:\n\u003cpre\u003e\nupdate_body.json:\n{\n 'autoScaling': {\n 'minNodes': 5\n }\n}\n\u003c/pre\u003e\nHTTP request:\n\u003cpre\u003e\nPATCH https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes -d @./update_body.json\n\u003c/pre\u003e",
"format": "int32",
"type": "integer"
}
@ -1065,13 +1054,15 @@
"enumDescriptions": [
"Unspecified accelerator type. Default to no GPU.",
"Nvidia tesla k80 GPU.",
"Nvidia tesla P100 GPU."
"Nvidia tesla P100 GPU.",
"Nvidia tesla V100 GPU."
],
"items": {
"enum": [
"ACCELERATOR_TYPE_UNSPECIFIED",
"NVIDIA_TESLA_K80",
"NVIDIA_TESLA_P100"
"NVIDIA_TESLA_P100",
"NVIDIA_TESLA_V100"
],
"type": "string"
},
@ -1684,7 +1675,7 @@
"type": "string"
},
"masterType": {
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n  \u003cdt\u003estandard\u003c/dt\u003e\n  \u003cdd\u003e\n  A basic machine configuration suitable for training simple models with\n  small to moderate datasets.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with a lot of memory, specially suited for parameter servers\n  when your model is large (having many hidden layers or layers with very\n  large numbers of nodes).\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine suitable for the master and workers of the cluster when your\n  model requires more computation than the standard machine can handle\n  satisfactorily.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_s\u003c/code\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ccode suppresswarning=\"true\"\u003ecomplex_model_m\u003c/code\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ccode suppresswarning=\"true\"\u003estandard\u003c/code\u003e that\n  also includes a single NVIDIA Tesla K80 GPU. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-gpus\"\u003eusing GPUs to\n  train your model\u003c/a\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to\n  \u003ccode suppresswarning=\"true\"\u003ecomplex_model_m\u003c/code\u003e that also includes\n  four NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to\n  \u003ccode suppresswarning=\"true\"\u003ecomplex_model_l\u003c/code\u003e that also includes\n  eight NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ccode suppresswarning=\"true\"\u003estandard\u003c/code\u003e that\n  also includes a single NVIDIA Tesla P100 GPU. The availability of these\n  GPUs is in the Beta launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to\n  \u003ccode suppresswarning=\"true\"\u003ecomplex_model_m\u003c/code\u003e that also includes\n  four NVIDIA Tesla P100 GPUs. The availability of these GPUs is in\n  the Beta launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_tpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A TPU VM including one Cloud TPU. The availability of Cloud TPU is in\n  \u003ci\u003eBeta\u003c/i\u003e launch stage. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-tpus\"\u003eusing TPUs to train\n  your model\u003c/a\u003e.\n  \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.",
"description": "Optional. Specifies the type of virtual machine to use for your training\njob's master worker.\n\nThe following types are supported:\n\n\u003cdl\u003e\n  \u003cdt\u003estandard\u003c/dt\u003e\n  \u003cdd\u003e\n  A basic machine configuration suitable for training simple models with\n  small to moderate datasets.\n  \u003c/dd\u003e\n  \u003cdt\u003elarge_model\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with a lot of memory, specially suited for parameter servers\n  when your model is large (having many hidden layers or layers with very\n  large numbers of nodes).\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_s\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine suitable for the master and workers of the cluster when your\n  model requires more computation than the standard machine can handle\n  satisfactorily.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_s\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine with roughly twice the number of cores and roughly double the\n  memory of \u003ci\u003ecomplex_model_m\u003c/i\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla K80 GPU. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-gpus\"\u003eusing GPUs to\n  train your model\u003c/a\u003e.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_l_gpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_l\u003c/i\u003e that also includes\n  eight NVIDIA Tesla K80 GPUs.\n  \u003c/dd\u003e\n  \u003cdt\u003estandard_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003estandard\u003c/i\u003e that\n  also includes a single NVIDIA Tesla P100 GPU. The availability of these\n  GPUs is in the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecomplex_model_m_p100\u003c/dt\u003e\n  \u003cdd\u003e\n  A machine equivalent to \u003ci\u003ecomplex_model_m\u003c/i\u003e that also includes\n  four NVIDIA Tesla P100 GPUs. The availability of these GPUs is in\n  the \u003ci\u003eBeta\u003c/i\u003e launch stage.\n  \u003c/dd\u003e\n  \u003cdt\u003ecloud_tpu\u003c/dt\u003e\n  \u003cdd\u003e\n  A TPU VM including one Cloud TPU. The availability of Cloud TPU is in\n  \u003ci\u003eBeta\u003c/i\u003e launch stage. See more about\n  \u003ca href=\"/ml-engine/docs/tensorflow/using-tpus\"\u003eusing TPUs to train\n  your model\u003c/a\u003e.\n  \u003c/dd\u003e\n\u003c/dl\u003e\n\nYou must set this value when `scaleTier` is set to `CUSTOM`.",
"type": "string"
},
"packageUris": {
@ -1813,10 +1804,10 @@
"XGBOOST"
],
"enumDescriptions": [
"",
"",
"",
""
"Unspecified framework. Defaults to TensorFlow.",
"Tensorflow framework.",
"Scikit-learn framework.",
"XGBoost framework."
],
"type": "string"
},

View File

@ -282,9 +282,9 @@ func (s *GoogleCloudMlV1HyperparameterOutputHyperparameterMetric) UnmarshalJSON(
type GoogleCloudMlV1__AutoScaling struct {
// MinNodes: Optional. The minimum number of nodes to allocate for this
// model. These
// nodes are always up, starting from the time the model is deployed, so
// the
// cost of operating this model will be at least
// nodes are always up, starting from the time the model is
// deployed.
// Therefore, the cost of operating this model will be at least
// `rate` * `min_nodes` * number of hours since last billing
// cycle,
// where `rate` is the cost per node-hour as documented in the
@ -306,7 +306,27 @@ type GoogleCloudMlV1__AutoScaling struct {
// traffic
// to a model stops (and after a cool-down period), nodes will be shut
// down
// and no charges will be incurred until traffic to the model resumes.
// and no charges will be incurred until traffic to the model
// resumes.
//
// You can set `min_nodes` when creating the model version, and you can
// also
// update `min_nodes` for an existing
// version:
// <pre>
// update_body.json:
// {
// 'autoScaling': {
// 'minNodes': 5
// }
// }
// </pre>
// HTTP request:
// <pre>
// PATCH
// https://ml.googleapis.com/v1/{name=projects/*/models/*/versions/*}?update_mask=autoScaling.minNodes -d
// @./update_body.json
// </pre>
MinNodes int64 `json:"minNodes,omitempty"`
// ForceSendFields is a list of field names (e.g. "MinNodes") to
@ -345,6 +365,7 @@ type GoogleCloudMlV1__Capability struct {
// Default to no GPU.
// "NVIDIA_TESLA_K80" - Nvidia tesla k80 GPU.
// "NVIDIA_TESLA_P100" - Nvidia tesla P100 GPU.
// "NVIDIA_TESLA_V100" - Nvidia tesla V100 GPU.
AvailableAccelerators []string `json:"availableAccelerators,omitempty"`
// Possible values:
@ -1408,53 +1429,45 @@ type GoogleCloudMlV1__TrainingInput struct {
// <dd>
// A machine with roughly twice the number of cores and roughly double
// the
// memory of <code suppresswarning="true">complex_model_s</code>.
// memory of <i>complex_model_s</i>.
// </dd>
// <dt>complex_model_l</dt>
// <dd>
// A machine with roughly twice the number of cores and roughly double
// the
// memory of <code suppresswarning="true">complex_model_m</code>.
// memory of <i>complex_model_m</i>.
// </dd>
// <dt>standard_gpu</dt>
// <dd>
// A machine equivalent to <code
// suppresswarning="true">standard</code> that
// A machine equivalent to <i>standard</i> that
// also includes a single NVIDIA Tesla K80 GPU. See more about
// <a href="/ml-engine/docs/tensorflow/using-gpus">using GPUs to
// train your model</a>.
// </dd>
// <dt>complex_model_m_gpu</dt>
// <dd>
// A machine equivalent to
// <code suppresswarning="true">complex_model_m</code> that also
// includes
// A machine equivalent to <i>complex_model_m</i> that also includes
// four NVIDIA Tesla K80 GPUs.
// </dd>
// <dt>complex_model_l_gpu</dt>
// <dd>
// A machine equivalent to
// <code suppresswarning="true">complex_model_l</code> that also
// includes
// A machine equivalent to <i>complex_model_l</i> that also includes
// eight NVIDIA Tesla K80 GPUs.
// </dd>
// <dt>standard_p100</dt>
// <dd>
// A machine equivalent to <code
// suppresswarning="true">standard</code> that
// A machine equivalent to <i>standard</i> that
// also includes a single NVIDIA Tesla P100 GPU. The availability of
// these
// GPUs is in the Beta launch stage.
// GPUs is in the <i>Beta</i> launch stage.
// </dd>
// <dt>complex_model_m_p100</dt>
// <dd>
// A machine equivalent to
// <code suppresswarning="true">complex_model_m</code> that also
// includes
// A machine equivalent to <i>complex_model_m</i> that also includes
// four NVIDIA Tesla P100 GPUs. The availability of these GPUs is in
// the Beta launch stage.
// the <i>Beta</i> launch stage.
// </dd>
// <dt>standard_tpu</dt>
// <dt>cloud_tpu</dt>
// <dd>
// A TPU VM including one Cloud TPU. The availability of Cloud TPU is
// in
@ -1741,10 +1754,11 @@ type GoogleCloudMlV1__Version struct {
// the runtime version of the model to 1.4 or greater.
//
// Possible values:
// "FRAMEWORK_UNSPECIFIED"
// "TENSORFLOW"
// "SCIKIT_LEARN"
// "XGBOOST"
// "FRAMEWORK_UNSPECIFIED" - Unspecified framework. Defaults to
// TensorFlow.
// "TENSORFLOW" - Tensorflow framework.
// "SCIKIT_LEARN" - Scikit-learn framework.
// "XGBOOST" - XGBoost framework.
Framework string `json:"framework,omitempty"`
// IsDefault: Output only. If true, this version will be used to handle