diff --git a/.flake8 b/.flake8 new file mode 100644 index 00000000..55118b3f --- /dev/null +++ b/.flake8 @@ -0,0 +1,3 @@ +[flake8] +max-line-length = 120 +exclude = zrok_api, build \ No newline at end of file diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 00000000..6313b56c --- /dev/null +++ b/.gitattributes @@ -0,0 +1 @@ +* text=auto eol=lf diff --git a/.github/workflows/build-wheels.yml b/.github/workflows/build-wheels.yml index bd852b42..1d5009cc 100644 --- a/.github/workflows/build-wheels.yml +++ b/.github/workflows/build-wheels.yml @@ -35,6 +35,7 @@ jobs: - name: Build distro env: ZROK_VERSION: ${{ github.event.release.tag_name }} + ZROK_PY_NAME: ${{ vars.ZROK_PY_NAME || null }} run: | python setup.py sdist diff --git a/.github/workflows/ci-build.yml b/.github/workflows/ci-build.yml index b51535eb..18fe8e36 100644 --- a/.github/workflows/ci-build.yml +++ b/.github/workflows/ci-build.yml @@ -51,6 +51,19 @@ jobs: shell: bash run: go test -v ./... + - name: setup python + uses: actions/setup-python@v3 + with: + python-version: '3.10' + + - name: python deps + shell: bash + run: python -m pip install -U pip flake8 + + - name: python lint + shell: bash + run: flake8 sdk/python/sdk/zrok + - name: solve GOBIN id: solve_go_bin shell: bash diff --git a/.github/workflows/deploy-doc-site.yml b/.github/workflows/deploy-doc-site.yml index 01482a52..7b0d81cd 100644 --- a/.github/workflows/deploy-doc-site.yml +++ b/.github/workflows/deploy-doc-site.yml @@ -1,47 +1,47 @@ -name: Deploy Doc Site - -on: - push: - branches: - - main - -# allow GITHUB_TOKEN to be used by the peaceiris/actions-gh-pages action to push to gh-pages branch -permissions: - contents: write - -jobs: - build: - runs-on: ubuntu-latest - - steps: - - uses: actions/checkout@v3 - - - uses: actions/setup-node@v3 - name: setup npm - with: - node-version: 18 - check-latest: true - cache: "npm" - cache-dependency-path: website/package-lock.json - - - name: Run a multi-line script - run: | - npm install - npm run build - working-directory: website - - # Popular action to deploy to GitHub Pages: - # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus - - name: Deploy to GitHub Pages - uses: peaceiris/actions-gh-pages@v3 - with: - github_token: ${{ secrets.GITHUB_TOKEN }} - # Build output to publish to the `gh-pages` branch: - publish_dir: ./website/build - # The following lines assign commit authorship to the official - # GH-Actions bot for deploys to `gh-pages` branch: - # https://github.com/actions/checkout/issues/13#issuecomment-724415212 - # The GH actions bot is used by default if you didn't specify the two fields. - # You can swap them out with your own user credentials. 
- #user_name: github-actions[bot] - #user_email: 41898282+github-actions[bot]@users.noreply.github.com +name: Deploy Doc Site + +on: + push: + branches: + - main + +# allow GITHUB_TOKEN to be used by the peaceiris/actions-gh-pages action to push to gh-pages branch +permissions: + contents: write + +jobs: + build: + runs-on: ubuntu-latest + + steps: + - uses: actions/checkout@v3 + + - uses: actions/setup-node@v3 + name: setup npm + with: + node-version: 18 + check-latest: true + cache: "npm" + cache-dependency-path: website/package-lock.json + + - name: Run a multi-line script + run: | + npm install + npm run build + working-directory: website + + # Popular action to deploy to GitHub Pages: + # Docs: https://github.com/peaceiris/actions-gh-pages#%EF%B8%8F-docusaurus + - name: Deploy to GitHub Pages + uses: peaceiris/actions-gh-pages@v3 + with: + github_token: ${{ secrets.GITHUB_TOKEN }} + # Build output to publish to the `gh-pages` branch: + publish_dir: ./website/build + # The following lines assign commit authorship to the official + # GH-Actions bot for deploys to `gh-pages` branch: + # https://github.com/actions/checkout/issues/13#issuecomment-724415212 + # The GH actions bot is used by default if you didn't specify the two fields. + # You can swap them out with your own user credentials. + #user_name: github-actions[bot] + #user_email: 41898282+github-actions[bot]@users.noreply.github.com diff --git a/.github/workflows/publish-docker-images.yml b/.github/workflows/publish-docker-images.yml index 80f671e8..4471dcf0 100644 --- a/.github/workflows/publish-docker-images.yml +++ b/.github/workflows/publish-docker-images.yml @@ -72,11 +72,12 @@ jobs: - name: Set Up Container Image Tags for zrok CLI Container env: - RELEASE_REPO: openziti/zrok - ZROK_VERSION: ${{ steps.semver.outputs.zrok_semver }} + ZROK_CONTAINER_IMAGE_REPO: ${{ vars.ZROK_CONTAINER_IMAGE_REPO || 'openziti/zrok' }} + ZROK_CONTAINER_IMAGE_TAG: ${{ steps.semver.outputs.zrok_semver }} id: tagprep_cli run: | - echo DOCKER_TAGS="${RELEASE_REPO}:${ZROK_VERSION},${RELEASE_REPO}:latest" | tee -a $GITHUB_OUTPUT + echo DOCKER_TAGS="${ZROK_CONTAINER_IMAGE_REPO}:${ZROK_CONTAINER_IMAGE_TAG},${ZROK_CONTAINER_IMAGE_REPO}:latest" \ + | tee -a $GITHUB_OUTPUT # this is the CLI image with the Linux binary for each # arch that was downloaded in ./dist/ diff --git a/.gitignore b/.gitignore index 147cebcd..7464f12b 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,8 @@ go.work go.work.sum zrok-venv +# cache used by local dev cross-build script +/.npm npm-debug.log* yarn-debug.log* yarn-error.log* diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 00000000..6b8a9acb --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,256 @@ +# Example markdownlint YAML configuration with all properties set to their default value + +# Default state for all rules +default: true + +# Path to configuration file to extend +extends: null + +# MD001/heading-increment/header-increment - Heading levels should only increment by one level at a time +MD001: true + +# deprecated in favor of MD041 +# MD002/first-heading-h1/first-header-h1 - First heading should be a top-level heading + +# MD003/heading-style/header-style - Heading style +MD003: + # Heading style + style: "consistent" + +# MD004/ul-style - Unordered list style +MD004: + # List style + style: "consistent" + +# MD005/list-indent - Inconsistent indentation for list items at the same level +MD005: true + +# MD006/ul-start-left - Consider starting bulleted lists at the beginning of the 
line +MD006: true + +# MD007/ul-indent - Unordered list indentation +MD007: + # Spaces for indent + indent: 2 + # Whether to indent the first level of the list + start_indented: false + # Spaces for first level indent (when start_indented is set) + start_indent: 2 + +# MD009/no-trailing-spaces - Trailing spaces +MD009: false + # # Spaces for line break + # br_spaces: 2 + # # Allow spaces for empty lines in list items + # list_item_empty_lines: false + # # Include unnecessary breaks + # strict: false + +# MD010/no-hard-tabs - Hard tabs +MD010: + # Include code blocks + code_blocks: true + # Fenced code languages to ignore + ignore_code_languages: [] + # Number of spaces for each hard tab + spaces_per_tab: 1 + +# MD011/no-reversed-links - Reversed link syntax +MD011: true + +# MD012/no-multiple-blanks - Multiple consecutive blank lines +MD012: + # Consecutive blank lines + maximum: 1 + +# MD013/line-length - Line length +line_length: false +# MD013: +# # Number of characters +# line_length: 80 +# # Number of characters for headings +# heading_line_length: 80 +# # Number of characters for code blocks +# code_block_line_length: 80 +# # Include code blocks +# code_blocks: true +# # Include tables +# tables: true +# # Include headings +# headings: true +# # Include headings +# headers: true +# # Strict length checking +# strict: false +# # Stern length checking +# stern: false + +# MD014/commands-show-output - Dollar signs used before commands without showing output +MD014: true + +# MD018/no-missing-space-atx - No space after hash on atx style heading +MD018: true + +# MD019/no-multiple-space-atx - Multiple spaces after hash on atx style heading +MD019: true + +# MD020/no-missing-space-closed-atx - No space inside hashes on closed atx style heading +MD020: true + +# MD021/no-multiple-space-closed-atx - Multiple spaces inside hashes on closed atx style heading +MD021: true + +# MD022/blanks-around-headings/blanks-around-headers - Headings should be surrounded by blank lines +MD022: + # Blank lines above heading + lines_above: 1 + # Blank lines below heading + lines_below: 1 + +# MD023/heading-start-left/header-start-left - Headings must start at the beginning of the line +MD023: true + +# MD024/no-duplicate-heading/no-duplicate-header - Multiple headings with the same content +MD024: + # Only check sibling headings + allow_different_nesting: false + # Only check sibling headings + siblings_only: false + +# MD025/single-title/single-h1 - Multiple top-level headings in the same document +MD025: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD026/no-trailing-punctuation - Trailing punctuation in heading +MD026: + # Punctuation characters + punctuation: ".,;:!。,;:!" 
+ +# MD027/no-multiple-space-blockquote - Multiple spaces after blockquote symbol +MD027: true + +# MD028/no-blanks-blockquote - Blank line inside blockquote +MD028: true + +# MD029/ol-prefix - Ordered list item prefix +MD029: + # List style + style: "one_or_ordered" + +# MD030/list-marker-space - Spaces after list markers +MD030: + # Spaces for single-line unordered list items + ul_single: 1 + # Spaces for single-line ordered list items + ol_single: 1 + # Spaces for multi-line unordered list items + ul_multi: 1 + # Spaces for multi-line ordered list items + ol_multi: 1 + +# MD031/blanks-around-fences - Fenced code blocks should be surrounded by blank lines +MD031: + # Include list items + list_items: true + +# MD032/blanks-around-lists - Lists should be surrounded by blank lines +MD032: true + +# MD033/no-inline-html - Inline HTML +MD033: + # Allowed elements + allowed_elements: [] + +# MD034/no-bare-urls - Bare URL used +MD034: true + +# MD035/hr-style - Horizontal rule style +MD035: + # Horizontal rule style + style: "consistent" + +# MD036/no-emphasis-as-heading/no-emphasis-as-header - Emphasis used instead of a heading +MD036: + # Punctuation characters + punctuation: ".,;:!?。,;:!?" + +# MD037/no-space-in-emphasis - Spaces inside emphasis markers +MD037: true + +# MD038/no-space-in-code - Spaces inside code span elements +MD038: true + +# MD039/no-space-in-links - Spaces inside link text +MD039: true + +# MD040/fenced-code-language - Fenced code blocks should have a language specified +MD040: true + +# MD041/first-line-heading/first-line-h1 - First line in a file should be a top-level heading +MD041: + # Heading level + level: 1 + # RegExp for matching title in front matter + front_matter_title: "^\\s*title\\s*[:=]" + +# MD042/no-empty-links - No empty links +MD042: true + +# disabled because many Markdown files are partials without headings and intended for transclusion +# MD043/required-headings/required-headers - Required heading structure +# MD043: +# # List of headings +# headings: [] +# # List of headings +# headers: [] + +# MD044/proper-names - Proper names should have the correct capitalization +MD044: + # List of proper names + names: [] + # Include code blocks + code_blocks: true + # Include HTML elements + html_elements: true + +# MD045/no-alt-text - Images should have alternate text (alt text) +MD045: true + +# MD046/code-block-style - Code block style +MD046: + # Block style + style: "consistent" + +# MD047/single-trailing-newline - Files should end with a single newline character +MD047: true + +# MD048/code-fence-style - Code fence style +MD048: + # Code fence style + style: "consistent" + +# MD049/emphasis-style - Emphasis style should be consistent +MD049: + # Emphasis style should be consistent + style: "consistent" + +# MD050/strong-style - Strong style should be consistent +MD050: + # Strong style should be consistent + style: "consistent" + +# MD051/link-fragments - Link fragments should be valid +MD051: true + +# MD052/reference-links-images - Reference links and images should use a label that is defined +MD052: true + +# MD053/link-image-reference-definitions - Link and image reference definitions should be needed +MD053: + # Ignored definitions + ignored_definitions: [ + "//" + ] diff --git a/ACKNOWLEDGEMENTS.md b/ACKNOWLEDGEMENTS.md new file mode 100644 index 00000000..690eacb9 --- /dev/null +++ b/ACKNOWLEDGEMENTS.md @@ -0,0 +1,92 @@ +# ACKNOWLEDGEMENTS + +## github.com/openziti/zrok/endpoints/socks + +Portions of the `socks` package is based on code from 
`https://github.com/tailscale/tailscale/blob/v1.58.2/net/socks5/socks5.go`, which included the following license: + +> BSD 3-Clause License +> +> Copyright (c) 2020 Tailscale Inc & AUTHORS. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions are met: +> +> 1. Redistributions of source code must retain the above copyright notice, this +> list of conditions and the following disclaimer. +> +> 2. Redistributions in binary form must reproduce the above copyright notice, +> this list of conditions and the following disclaimer in the documentation +> and/or other materials provided with the distribution. +> +> 3. Neither the name of the copyright holder nor the names of its +> contributors may be used to endorse or promote products derived from +> this software without specific prior written permission. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +> AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +> IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +> DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +> FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +> DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +> SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +> CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +> OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +## github.com/openziti/zrok/drives/davServer + +The `davServer` package is based on code from `https://cs.opensource.google/go/go/`, which included the following license: + +> Copyright (c) 2009 The Go Authors. All rights reserved. +> +> Redistribution and use in source and binary forms, with or without +> modification, are permitted provided that the following conditions are +> met: +> +> * Redistributions of source code must retain the above copyright +> notice, this list of conditions and the following disclaimer. +> * Redistributions in binary form must reproduce the above +> copyright notice, this list of conditions and the following disclaimer +> in the documentation and/or other materials provided with the +> distribution. +> * Neither the name of Google Inc. nor the names of its +> contributors may be used to endorse or promote products derived from +> this software without specific prior written permission. +> +> THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +> "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +> LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +> A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+> OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+> SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+> LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+> DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+> THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+> (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+> OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE
+
+## github.com/openziti/zrok/drives/davClient
+
+The `davClient` package is based on code from `github.com/emersion/go-webdav`, which included the following license:
+
+> The MIT License (MIT)
+>
+> Copyright (c) 2020 Simon Ser
+>
+> Permission is hereby granted, free of charge, to any person obtaining a copy
+> of this software and associated documentation files (the "Software"), to deal
+> in the Software without restriction, including without limitation the rights
+> to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+> copies of the Software, and to permit persons to whom the Software is
+> furnished to do so, subject to the following conditions:
+>
+> The above copyright notice and this permission notice shall be included in all
+> copies or substantial portions of the Software.
+>
+> THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+> IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+> FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+> AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+> LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+> OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+> SOFTWARE.
\ No newline at end of file
diff --git a/CHANGELOG.md b/CHANGELOG.md
index acfe7ad5..f65bc57b 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,83 @@
 # CHANGELOG
 
+## v0.4.25
+
+FEATURE: New action in the web console that allows changing the password of the logged-in account (https://github.com/openziti/zrok/issues/148)
+
+FEATURE: The web console now supports revoking your current account token and generating a new one (https://github.com/openziti/zrok/issues/191)
+
+CHANGE: When specifying OAuth configuration for public shares from the `zrok share public` or `zrok reserve public` commands, the flags and functionality for restricting the allowed email addresses of the authenticating users have changed. The old flag was `--oauth-email-domains`, which took a string value that needed to be contained in the user's email address. The new flag is `--oauth-email-address-patterns`, which accepts a glob-style filter, using https://github.com/gobwas/glob (https://github.com/openziti/zrok/issues/413)
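To illustrate the new matching behavior, here is a minimal sketch using `github.com/gobwas/glob`; the pattern and email addresses are hypothetical, and the same pattern would be passed as the value of `--oauth-email-address-patterns`:

```go
package main

import (
	"fmt"

	"github.com/gobwas/glob"
)

func main() {
	// Hypothetical pattern: allow any address at example.com.
	pattern, err := glob.Compile("*@example.com")
	if err != nil {
		panic(err)
	}
	fmt.Println(pattern.Match("alice@example.com")) // true
	fmt.Println(pattern.Match("alice@example.org")) // false
}
```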
+
+CHANGE: Creating a reserved share checks for token collision and returns a more appropriate error message (https://github.com/openziti/zrok/issues/531)
+
+CHANGE: Update UI to add a 'true' value on the `reserved` boolean (https://github.com/openziti/zrok/issues/443)
+
+FIX: Fixed a bug where a second password reset request for any account would fail (https://github.com/openziti/zrok/issues/452)
+
+## v0.4.24
+
+FEATURE: New `socks` backend mode for use with private sharing. Use `zrok share private --backend-mode socks` and then use `zrok access private` to access that share from somewhere else... very lightweight VPN-like functionality (https://github.com/openziti/zrok/issues/558)
+
+FEATURE: New `zrok admin create account` command that allows populating accounts directly into the underlying controller database (https://github.com/openziti/zrok/issues/551)
+
+CHANGE: The `zrok test loopback public` utility has been updated to report non-`200` errors and also to ensure that the listening side of the test is fully established before starting loopback testing.
+
+CHANGE: The OpenZiti SDK for golang (https://github.com/openziti/sdk-golang) has been updated to version `v0.22.28`
+
+## v0.4.23
+
+FEATURE: New CLI commands have been implemented for working with the `drive` share backend mode (part of the "zrok Drives" functionality). These commands include `zrok cp`, `zrok mkdir`, `zrok mv`, `zrok ls`, and `zrok rm`. These are initial, minimal versions of these commands and very likely contain bugs and ergonomic annoyances. There is a guide available at `docs/guides/drives/cli.md` that explains how to work with these tools in detail (https://github.com/openziti/zrok/issues/438)
+
+FEATURE: The Python SDK now has a decorator for integrating with various server-side frameworks. See the `http-server` example.
+
+FEATURE: Python SDK share and access handling now supports context management.
+
+FEATURE: TLS for the `zrok` controller and frontends. Add the `tls:` stanza to your controller configuration (see `etc/ctrl.yml`) to enable TLS support for the controller API. Add the `tls:` stanza to your frontend configuration (see `etc/frontend.yml`) to enable TLS support for frontends (be sure to check your `public` frontend template) (#24)(https://github.com/openziti/zrok/issues/24)
+
+CHANGE: Improved OpenZiti resource cleanup resilience. Previously, resource cleanup would stop when an error was encountered at any stage of the cleanup process (serps, sps, config, service). The new cleanup implementation logs errors but continues to clean up anything that it can (https://github.com/openziti/zrok/issues/533)
+
+CHANGE: Instead of setting the `ListenOptions.MaxConnections` property to `64`, use the default value of `3`. This property actually controls the number of terminators created on the underlying OpenZiti network. This property is getting renamed to `ListenOptions.MaxTerminators` in an upcoming release of `github.com/openziti/sdk-golang` (https://github.com/openziti/zrok/issues/535)
+
+CHANGE: Versioning for the Python SDK has been updated to use versioneer for management.
+
+CHANGE: The Python SDK package name has been renamed to `zrok`, dropping the `-sdk` suffix. [pypi](https://pypi.org/project/zrok).
+
+## v0.4.22
+
+FIX: The goreleaser action was not updated to work with the latest golang build. Modified `go.mod` to comply with what goreleaser expects.
+
+## v0.4.21
+
+FEATURE: The web console now supports deleting `zrok access` frontends (https://github.com/openziti/zrok/issues/504)
+
+CHANGE: The web console now displays the frontend token as the label for any `zrok access` frontends throughout the user interface (https://github.com/openziti/zrok/issues/504)
+
+CHANGE: Updated `github.com/rubenv/sql-migrate` to `v1.6.0`
+
+CHANGE: Updated `github.com/openziti/sdk-golang` to `v0.22.6`
+
+FIX: The migration `sqlite3/015_v0_4_19_share_unique_name_constraint.sql` has been adjusted to delete the old `shares_old` table as the last step of the migration process. Not sure exactly why, but SQLite is unhappy otherwise (https://github.com/openziti/zrok/issues/504)
+
+FIX: Email addresses have been made case-insensitive. Please note that there is a migration included in this release (`016_v0_4_21_lowercase_email.sql`) which will attempt to ensure that all email addresses in your existing database are stored in lowercase; **if this migration fails you will need to manually remediate the duplicate account entries** (https://github.com/openziti/zrok/issues/517)
+
+FIX: Stop sending authentication cookies to non-authenticated shares (https://github.com/openziti/zrok/issues/512)
+
+## v0.4.20
+
+CHANGE: OpenZiti SDK updated to `v0.21.2`. All `ziti.ListenOptions` listener options are configured to use `WaitForNEstablishedListeners: 1`. When a `zrok share` client or an `sdk.Share` client is connected to an OpenZiti router that supports "listener established" events, listen calls will not return until the listener is fully established on the OpenZiti network. Previously, a `zrok share` client could report that it was fully operational and listening before the listener was fully established on the OpenZiti network; in practice this produced a very small window of time when the share would not be ready to accept requests. This change eliminates that window of time (https://github.com/openziti/zrok/issues/490)
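As a minimal sketch of what that listener setup looks like against the OpenZiti SDK (mirroring the loop-test code elsewhere in this changeset), where the identity file path and share token are placeholders:

```go
package main

import (
	"net/http"
	"time"

	"github.com/openziti/sdk-golang/ziti"
	"github.com/sirupsen/logrus"
)

func main() {
	// Placeholder identity file; a real zrok environment manages this for you.
	zcfg, err := ziti.NewConfigFromFile("identity.json")
	if err != nil {
		logrus.Fatalf("error loading ziti config: %v", err)
	}
	zctx, err := ziti.NewContext(zcfg)
	if err != nil {
		logrus.Fatalf("error loading ziti context: %v", err)
	}
	options := ziti.ListenOptions{
		ConnectTimeout:               5 * time.Minute,
		WaitForNEstablishedListeners: 1,
	}
	// With WaitForNEstablishedListeners set, this call does not return until the
	// listener is fully established on the OpenZiti network.
	listener, err := zctx.ListenWithOptions("shareToken", &options)
	if err != nil {
		logrus.Fatalf("error listening: %v", err)
	}
	if err := http.Serve(listener, http.NotFoundHandler()); err != nil {
		logrus.Errorf("error serving: %v", err)
	}
}
```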
+
+FIX: Require the JWT in a zrok OAuth cookie to have an audience claim that matches the public share hostname. This prevents a cookie from one share from being used to log in to another share.
+
+## v0.4.19
+
+FEATURE: Reserved shares now support unique names ("vanity tokens"). This allows for the creation of reserved shares with identifiable names rather than generated share tokens. Includes basic support for profanity checking (https://github.com/openziti/zrok/issues/401)
+
+CHANGE: The `publicProxy` endpoint implementation used in the `zrok access public` frontend has been updated to use the new `RefreshService(serviceName)` call instead of `RefreshServices()`. This should greatly improve the performance of requests against missing or non-responsive zrok shares (https://github.com/openziti/zrok/issues/487)
+
+CHANGE: The Python SDK has been updated to properly support the "reserved" flag on the `ShareRequest` passed to `CreateShare`
+
+CHANGE: Dependency updates; `github.com/openziti/sdk-golang@v0.20.145`; `github.com/caddyserver/caddy/v2@2.7.6`; indirect dependencies
+
 ## v0.4.18
 
 FEATURE: Python SDK added. Can be found on [pypi](https://test.pypi.org/project/zrok-sdk). `pastebin` example illustrates basic SDK usage (see `sdk/python/examples/README.md` for details) (https://github.com/openziti/zrok/issues/401)
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 336dd084..515bb29f 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -4,3 +4,52 @@ NetFoundry welcomes all and any contributions. All open source projects managed
 [guide for contributions](https://netfoundry.github.io/policies/CONTRIBUTING.html).
 If you are eager to contribute to a NetFoundry-managed open source project
 please read and act accordingly.
+
+## Project Styles
+
+This project uses several programming languages. Each language has its own style guide specifying any language-specific
+conventions and idioms. The log formatting examples for Go are applicable to all languages.
+
+### Markdown
+
++ This project uses [GitHub Flavored Markdown](https://github.github.com/gfm/).
++ Wrap lines at 120 characters if the audience is expected to read the source.
++ Do not wrap lines if the audience is expected to read the rendered output as HTML.
++ Use [Markdownlint](https://github.com/DavidAnson/markdownlint) with [the configuration file](.markdownlint.yaml) committed to this repository to find formatting problems. + +### Go + ++ This project uses [Go](https://golang.org/) language conventions. ++ Organize imports as a single block in alphabetical order without empty lines. ++ Begin log messages with a lowercase letter and do not end with punctuation. This log formatting guidance applies to all languages, not only Go. + + Format log messages that report errors. + + ```go + logrus.Errorf("tried a thing and failed: %v", err) + ``` + + Format in-line information in informational log messages. + + ```go + logrus.Infof("the expected value '%v' arrived as '%v'", expected, actual) + ``` + + Format in-line information in error log messages. + + ```go + logrus.Errorf("the expected value '%v did not compute: %v", value, err) + ``` + ++ Format log messages with format strings and arguments like 'tried a thing and failed: %s'. + +### Python + ++ This project uses [Python](https://www.python.org/) language conventions PEP-8. ++ Use [flake8](https://flake8.pycqa.org/en/latest/) with [the configuration file](.flake8) committed to this repository to find formatting problems. ++ Use Go log formatting guidance for Python too. + +### Docusaurus + ++ This project uses [Docusaurus](https://docusaurus.io/) with NodeJS 18 to build static content for docs.zrok.io. ++ Use `npm` to manage Node modules, not `yarn` (Ken plans to switch from `npm` to `yarn` if no one else does). diff --git a/cmd/zrok/accessPrivate.go b/cmd/zrok/accessPrivate.go index e478c8a1..8bb1402f 100644 --- a/cmd/zrok/accessPrivate.go +++ b/cmd/zrok/accessPrivate.go @@ -143,6 +143,28 @@ func (cmd *accessPrivateCommand) run(_ *cobra.Command, args []string) { } }() + case "socks": + fe, err := tcpTunnel.NewFrontend(&tcpTunnel.FrontendConfig{ + BindAddress: cmd.bindAddress, + IdentityName: env.EnvironmentIdentityName(), + ShrToken: args[0], + RequestsChan: requests, + }) + if err != nil { + if !panicInstead { + tui.Error("unable to create private access", err) + } + panic(err) + } + go func() { + if err := fe.Run(); err != nil { + if !panicInstead { + tui.Error("error starting access", err) + } + panic(err) + } + }() + default: cfg := proxy.DefaultFrontendConfig(env.EnvironmentIdentityName()) cfg.ShrToken = shrToken diff --git a/cmd/zrok/adminCreateAccount.go b/cmd/zrok/adminCreateAccount.go new file mode 100644 index 00000000..d6b5bc2c --- /dev/null +++ b/cmd/zrok/adminCreateAccount.go @@ -0,0 +1,66 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/controller" + "github.com/openziti/zrok/controller/config" + "github.com/openziti/zrok/controller/store" + "github.com/spf13/cobra" +) + +func init() { + adminCreateCmd.AddCommand(newAdminCreateAccount().cmd) +} + +type adminCreateAccount struct { + cmd *cobra.Command +} + +func newAdminCreateAccount() *adminCreateAccount { + cmd := &cobra.Command{ + Use: "account ", + Short: "Pre-populate an account in the database; returns an enable token for the account", + Args: cobra.ExactArgs(3), + } + command := &adminCreateAccount{cmd: cmd} + cmd.Run = command.run + return command +} + +func (cmd *adminCreateAccount) run(_ *cobra.Command, args []string) { + cfg, err := config.LoadConfig(args[0]) + if err != nil { + panic(err) + } + str, err := store.Open(cfg.Store) + if err != nil { + panic(err) + } + token, err := controller.CreateToken() + if err != nil { + panic(err) + } + hpwd, err := controller.HashPassword(args[2]) + if err != nil 
{ + panic(err) + } + trx, err := str.Begin() + if err != nil { + panic(err) + } + defer func() { + if err := trx.Commit(); err != nil { + panic(err) + } + }() + a := &store.Account{ + Email: args[1], + Salt: hpwd.Salt, + Password: hpwd.Password, + Token: token, + } + if _, err := str.CreateAccount(a, trx); err != nil { + panic(err) + } + fmt.Println(token) +} diff --git a/cmd/zrok/copy.go b/cmd/zrok/copy.go new file mode 100644 index 00000000..80528b16 --- /dev/null +++ b/cmd/zrok/copy.go @@ -0,0 +1,106 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/drives/sync" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/tui" + "github.com/spf13/cobra" + "net/url" + "os" +) + +func init() { + rootCmd.AddCommand(newCopyCommand().cmd) +} + +type copyCommand struct { + cmd *cobra.Command + sync bool + basicAuth string +} + +func newCopyCommand() *copyCommand { + cmd := &cobra.Command{ + Use: "copy [] ( defaults to 'file://.`)", + Short: "Copy (unidirectional sync) zrok drive contents from to ('http://', 'file://', and 'zrok://' supported)", + Aliases: []string{"cp"}, + Args: cobra.RangeArgs(1, 2), + } + command := ©Command{cmd: cmd} + cmd.Run = command.run + cmd.Flags().BoolVarP(&command.sync, "sync", "s", false, "Only copy modified files (one-way synchronize)") + cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication ") + return command +} + +func (cmd *copyCommand) run(_ *cobra.Command, args []string) { + if cmd.basicAuth == "" { + cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH") + } + + sourceUrl, err := url.Parse(args[0]) + if err != nil { + tui.Error(fmt.Sprintf("invalid source '%v'", args[0]), err) + } + if sourceUrl.Scheme == "" { + sourceUrl.Scheme = "file" + } + + targetStr := "." 
+ if len(args) == 2 { + targetStr = args[1] + } + targetUrl, err := url.Parse(targetStr) + if err != nil { + tui.Error(fmt.Sprintf("invalid target '%v'", targetStr), err) + } + if targetUrl.Scheme == "" { + targetUrl.Scheme = "file" + } + + root, err := environment.LoadRoot() + if err != nil { + tui.Error("error loading root", err) + } + + var allocatedAccesses []*sdk.Access + if sourceUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: sourceUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + allocatedAccesses = append(allocatedAccesses, access) + } + if targetUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + allocatedAccesses = append(allocatedAccesses, access) + } + defer func() { + for _, access := range allocatedAccesses { + err := sdk.DeleteAccess(root, access) + if err != nil { + tui.Warning("error deleting target access", err) + } + } + }() + + source, err := sync.TargetForURL(sourceUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", sourceUrl), err) + } + target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err) + } + + if err := sync.OneWay(source, target, cmd.sync); err != nil { + tui.Error("error copying", err) + } + + fmt.Println("copy complete!") +} diff --git a/cmd/zrok/ls.go b/cmd/zrok/ls.go new file mode 100644 index 00000000..b76ab8e5 --- /dev/null +++ b/cmd/zrok/ls.go @@ -0,0 +1,95 @@ +package main + +import ( + "fmt" + "github.com/jedib0t/go-pretty/v6/table" + "github.com/openziti/zrok/drives/sync" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/tui" + "github.com/openziti/zrok/util" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "net/url" + "os" + "sort" +) + +func init() { + rootCmd.AddCommand(newLsCommand().cmd) +} + +type lsCommand struct { + cmd *cobra.Command + basicAuth string +} + +func newLsCommand() *lsCommand { + cmd := &cobra.Command{ + Use: "ls ", + Short: "List the contents of drive ('http://', 'zrok://','file://')", + Aliases: []string{"dir"}, + Args: cobra.ExactArgs(1), + } + command := &lsCommand{cmd: cmd} + cmd.Run = command.run + cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication ") + return command +} + +func (cmd *lsCommand) run(_ *cobra.Command, args []string) { + if cmd.basicAuth == "" { + cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH") + } + + targetUrl, err := url.Parse(args[0]) + if err != nil { + tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err) + } + if targetUrl.Scheme == "" { + targetUrl.Scheme = "file" + } + + root, err := environment.LoadRoot() + if err != nil { + tui.Error("error loading root", err) + } + + if targetUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + defer func() { + if err := sdk.DeleteAccess(root, access); err != nil { + logrus.Warningf("error freeing access: %v", err) + } + }() + } + + target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err) + } + + objects, err := target.Dir("/") + if err != nil { + tui.Error("error listing 
directory", err) + } + sort.Slice(objects, func(i, j int) bool { + return objects[i].Path < objects[j].Path + }) + + tw := table.NewWriter() + tw.SetOutputMirror(os.Stdout) + tw.SetStyle(table.StyleLight) + tw.AppendHeader(table.Row{"type", "Name", "Size", "Modified"}) + for _, object := range objects { + if object.IsDir { + tw.AppendRow(table.Row{"DIR", object.Path, "", ""}) + } else { + tw.AppendRow(table.Row{"", object.Path, util.BytesToSize(object.Size), object.Modified.Local()}) + } + } + tw.Render() +} diff --git a/cmd/zrok/md.go b/cmd/zrok/md.go new file mode 100644 index 00000000..02e36c3c --- /dev/null +++ b/cmd/zrok/md.go @@ -0,0 +1,75 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/drives/sync" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/tui" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "net/url" + "os" +) + +func init() { + rootCmd.AddCommand(newMdCommand().cmd) +} + +type mdCommand struct { + cmd *cobra.Command + basicAuth string +} + +func newMdCommand() *mdCommand { + cmd := &cobra.Command{ + Use: "md ", + Short: "Make directory at ('http://', 'zrok://', 'file://')", + Aliases: []string{"mkdir"}, + Args: cobra.ExactArgs(1), + } + command := &mdCommand{cmd: cmd} + cmd.Run = command.run + cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication ") + return command +} + +func (cmd *mdCommand) run(_ *cobra.Command, args []string) { + if cmd.basicAuth == "" { + cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH") + } + + targetUrl, err := url.Parse(args[0]) + if err != nil { + tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err) + } + if targetUrl.Scheme == "" { + targetUrl.Scheme = "file" + } + + root, err := environment.LoadRoot() + if err != nil { + tui.Error("error loading root", err) + } + + if targetUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + defer func() { + if err := sdk.DeleteAccess(root, access); err != nil { + logrus.Warningf("error freeing access: %v", err) + } + }() + } + + target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err) + } + + if err := target.Mkdir("/"); err != nil { + tui.Error("error creating directory", err) + } +} diff --git a/cmd/zrok/mv.go b/cmd/zrok/mv.go new file mode 100644 index 00000000..963239c2 --- /dev/null +++ b/cmd/zrok/mv.go @@ -0,0 +1,75 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/drives/sync" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/tui" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "net/url" + "os" +) + +func init() { + rootCmd.AddCommand(newMvCommand().cmd) +} + +type mvCommand struct { + cmd *cobra.Command + basicAuth string +} + +func newMvCommand() *mvCommand { + cmd := &cobra.Command{ + Use: "mv ", + Short: "Move the drive to ('http://', 'zrok://', 'file://')", + Aliases: []string{"move"}, + Args: cobra.ExactArgs(2), + } + command := &mvCommand{cmd: cmd} + cmd.Run = command.run + cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication ") + return command +} + +func (cmd *mvCommand) run(_ *cobra.Command, args []string) { + if cmd.basicAuth == "" { + cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH") + } + + targetUrl, err := 
url.Parse(args[0]) + if err != nil { + tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err) + } + if targetUrl.Scheme == "" { + targetUrl.Scheme = "file" + } + + root, err := environment.LoadRoot() + if err != nil { + tui.Error("error loading root", err) + } + + if targetUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + defer func() { + if err := sdk.DeleteAccess(root, access); err != nil { + logrus.Warningf("error freeing access: %v", err) + } + }() + } + + target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err) + } + + if err := target.Move("/", args[1]); err != nil { + tui.Error("error moving", err) + } +} diff --git a/cmd/zrok/reserve.go b/cmd/zrok/reserve.go index 5eab436b..4fb76a42 100644 --- a/cmd/zrok/reserve.go +++ b/cmd/zrok/reserve.go @@ -6,6 +6,7 @@ import ( "github.com/openziti/zrok/environment" "github.com/openziti/zrok/sdk/golang/sdk" "github.com/openziti/zrok/tui" + "github.com/openziti/zrok/util" "github.com/sirupsen/logrus" "github.com/spf13/cobra" "slices" @@ -17,29 +18,31 @@ func init() { } type reserveCommand struct { - basicAuth []string - frontendSelection []string - backendMode string - jsonOutput bool - oauthProvider string - oauthEmailDomains []string - oauthCheckInterval time.Duration - cmd *cobra.Command + uniqueName string + basicAuth []string + frontendSelection []string + backendMode string + jsonOutput bool + oauthProvider string + oauthEmailAddressPatterns []string + oauthCheckInterval time.Duration + cmd *cobra.Command } func newReserveCommand() *reserveCommand { cmd := &cobra.Command{ - Use: "reserve ", + Use: "reserve []", Short: "Create a reserved share", - Args: cobra.ExactArgs(2), + Args: cobra.RangeArgs(1, 2), } command := &reserveCommand{cmd: cmd} + cmd.Flags().StringVarP(&command.uniqueName, "unique-name", "n", "", "A unique name for the reserved share (defaults to generated identifier)") cmd.Flags().StringArrayVar(&command.frontendSelection, "frontends", []string{"public"}, "Selected frontends to use for the share") - cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel)") + cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode (public|private: proxy, web, caddy, drive) (private: tcpTunnel, udpTunnel, socks)") cmd.Flags().BoolVarP(&command.jsonOutput, "json-output", "j", false, "Emit JSON describing the created reserved share") cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (,...)") cmd.Flags().StringVar(&command.oauthProvider, "oauth-provider", "", "Enable OAuth provider [google, github]") - cmd.Flags().StringArrayVar(&command.oauthEmailDomains, "oauth-email-domains", []string{}, "Allow only these email domains to authenticate via OAuth") + cmd.Flags().StringArrayVar(&command.oauthEmailAddressPatterns, "oauth-email-address-patterns", []string{}, "Allow only these email domains to authenticate via OAuth") cmd.Flags().DurationVar(&command.oauthCheckInterval, "oauth-check-interval", 3*time.Hour, "Maximum lifetime for OAuth authentication; reauthenticate after expiry") cmd.MarkFlagsMutuallyExclusive("basic-auth", "oauth-provider") @@ -49,16 +52,23 @@ func newReserveCommand() *reserveCommand { func (cmd 
*reserveCommand) run(_ *cobra.Command, args []string) { shareMode := sdk.ShareMode(args[0]) - privateOnlyModes := []string{"tcpTunnel", "udpTunnel"} + privateOnlyModes := []string{"tcpTunnel", "udpTunnel", "socks"} if shareMode != sdk.PublicShareMode && shareMode != sdk.PrivateShareMode { tui.Error("invalid sharing mode; expecting 'public' or 'private'", nil) } else if shareMode == sdk.PublicShareMode && slices.Contains(privateOnlyModes, cmd.backendMode) { tui.Error(fmt.Sprintf("invalid sharing mode for a %s share: %s", sdk.PublicShareMode, cmd.backendMode), nil) } + if cmd.uniqueName != "" && !util.IsValidUniqueName(cmd.uniqueName) { + tui.Error("invalid unique name; must be lowercase alphanumeric, between 4 and 32 characters in length, screened for profanity", nil) + } + var target string switch cmd.backendMode { case "proxy": + if len(args) != 2 { + tui.Error("the 'proxy' backend mode expects a ", nil) + } v, err := parseUrl(args[1]) if err != nil { tui.Error("invalid target endpoint URL", err) @@ -66,22 +76,42 @@ func (cmd *reserveCommand) run(_ *cobra.Command, args []string) { target = v case "web": + if len(args) != 2 { + tui.Error("the 'web' backend mode expects a ", nil) + } target = args[1] case "tcpTunnel": + if len(args) != 2 { + tui.Error("the 'tcpTunnel' backend mode expects a ", nil) + } target = args[1] case "udpTunnel": + if len(args) != 2 { + tui.Error("the 'udpTunnel' backend mode expects a ", nil) + } target = args[1] case "caddy": + if len(args) != 2 { + tui.Error("the 'caddy' backend mode expects a ", nil) + } target = args[1] case "drive": + if len(args) != 2 { + tui.Error("the 'drive' backend mode expects a ", nil) + } target = args[1] + case "socks": + if len(args) != 1 { + tui.Error("the 'socks' backend mode does not expect ", nil) + } + default: - tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive}", cmd.backendMode), nil) + tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive, socks}", cmd.backendMode), nil) } env, err := environment.LoadRoot() @@ -95,6 +125,7 @@ func (cmd *reserveCommand) run(_ *cobra.Command, args []string) { req := &sdk.ShareRequest{ Reserved: true, + UniqueName: cmd.uniqueName, BackendMode: sdk.BackendMode(cmd.backendMode), ShareMode: shareMode, BasicAuth: cmd.basicAuth, @@ -108,7 +139,7 @@ func (cmd *reserveCommand) run(_ *cobra.Command, args []string) { tui.Error("--oauth-provider only supported for public shares", nil) } req.OauthProvider = cmd.oauthProvider - req.OauthEmailDomains = cmd.oauthEmailDomains + req.OauthEmailAddressPatterns = cmd.oauthEmailAddressPatterns req.OauthAuthorizationCheckInterval = cmd.oauthCheckInterval } shr, err := sdk.CreateShare(env, req) diff --git a/cmd/zrok/rm.go b/cmd/zrok/rm.go new file mode 100644 index 00000000..25d4d5b8 --- /dev/null +++ b/cmd/zrok/rm.go @@ -0,0 +1,75 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/drives/sync" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/tui" + "github.com/sirupsen/logrus" + "github.com/spf13/cobra" + "net/url" + "os" +) + +func init() { + rootCmd.AddCommand(newRmCommand().cmd) +} + +type rmCommand struct { + cmd *cobra.Command + basicAuth string +} + +func newRmCommand() *rmCommand { + cmd := &cobra.Command{ + Use: "rm ", + Short: "Remove (delete) the contents of drive ('http://', 'zrok://', 'file://')", + Aliases: []string{"del"}, + Args: cobra.ExactArgs(1), + } + command := 
&rmCommand{cmd: cmd} + cmd.Run = command.run + cmd.Flags().StringVarP(&command.basicAuth, "basic-auth", "a", "", "Basic authentication ") + return command +} + +func (cmd *rmCommand) run(_ *cobra.Command, args []string) { + if cmd.basicAuth == "" { + cmd.basicAuth = os.Getenv("ZROK_DRIVES_BASIC_AUTH") + } + + targetUrl, err := url.Parse(args[0]) + if err != nil { + tui.Error(fmt.Sprintf("invalid target '%v'", args[0]), err) + } + if targetUrl.Scheme == "" { + targetUrl.Scheme = "file" + } + + root, err := environment.LoadRoot() + if err != nil { + tui.Error("error loading root", err) + } + + if targetUrl.Scheme == "zrok" { + access, err := sdk.CreateAccess(root, &sdk.AccessRequest{ShareToken: targetUrl.Host}) + if err != nil { + tui.Error("error creating access", err) + } + defer func() { + if err := sdk.DeleteAccess(root, access); err != nil { + logrus.Warningf("error freeing access: %v", err) + } + }() + } + + target, err := sync.TargetForURL(targetUrl, root, cmd.basicAuth) + if err != nil { + tui.Error(fmt.Sprintf("error creating target for '%v'", targetUrl), err) + } + + if err := target.Rm("/"); err != nil { + tui.Error("error removing", err) + } +} diff --git a/cmd/zrok/sharePrivate.go b/cmd/zrok/sharePrivate.go index 15275b50..7c01e31a 100644 --- a/cmd/zrok/sharePrivate.go +++ b/cmd/zrok/sharePrivate.go @@ -6,6 +6,7 @@ import ( "github.com/openziti/zrok/endpoints" "github.com/openziti/zrok/endpoints/drive" "github.com/openziti/zrok/endpoints/proxy" + "github.com/openziti/zrok/endpoints/socks" "github.com/openziti/zrok/endpoints/tcpTunnel" "github.com/openziti/zrok/endpoints/udpTunnel" "github.com/openziti/zrok/environment" @@ -33,13 +34,13 @@ type sharePrivateCommand struct { func newSharePrivateCommand() *sharePrivateCommand { cmd := &cobra.Command{ - Use: "private ", + Use: "private []", Short: "Share a target resource privately", - Args: cobra.ExactArgs(1), + Args: cobra.RangeArgs(0, 1), } command := &sharePrivateCommand{cmd: cmd} cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (,...") - cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, tcpTunnel, udpTunnel, caddy}") + cmd.Flags().StringVarP(&command.backendMode, "backend-mode", "b", "proxy", "The backend mode {proxy, web, tcpTunnel, udpTunnel, caddy, drive, socks}") cmd.Flags().BoolVar(&command.headless, "headless", false, "Disable TUI and run headless") cmd.Flags().BoolVar(&command.insecure, "insecure", false, "Enable insecure TLS certificate validation for ") cmd.Run = command.run @@ -51,6 +52,9 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) { switch cmd.backendMode { case "proxy": + if len(args) != 1 { + tui.Error("the 'proxy' backend mode expects a ", nil) + } v, err := parseUrl(args[0]) if err != nil { if !panicInstead { @@ -61,21 +65,41 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) { target = v case "web": + if len(args) != 1 { + tui.Error("the 'web' backend mode expects a ", nil) + } target = args[0] case "tcpTunnel": + if len(args) != 1 { + tui.Error("the 'tcpTunnel' backend mode expects a ", nil) + } target = args[0] case "udpTunnel": + if len(args) != 1 { + tui.Error("the 'udpTunnel' backend mode expects a ", nil) + } target = args[0] case "caddy": + if len(args) != 1 { + tui.Error("the 'caddy' backend mode expects a ", nil) + } target = args[0] cmd.headless = true case "drive": + if len(args) != 1 { + tui.Error("the 'drive' backend mode expects a ", nil) + } 
target = args[0] + case "socks": + if len(args) != 0 { + tui.Error("the 'socks' backend mode does not expect ", nil) + } + default: tui.Error(fmt.Sprintf("invalid backend mode '%v'; expected {proxy, web, tcpTunnel, udpTunnel, caddy, drive}", cmd.backendMode), nil) } @@ -264,6 +288,27 @@ func (cmd *sharePrivateCommand) run(_ *cobra.Command, args []string) { } }() + case "socks": + cfg := &socks.BackendConfig{ + IdentityPath: zif, + ShrToken: shr.Token, + Requests: requests, + } + + be, err := socks.NewBackend(cfg) + if err != nil { + if !panicInstead { + tui.Error("error creating socks backend", err) + } + panic(err) + } + + go func() { + if err := be.Run(); err != nil { + logrus.Errorf("error running socks backend: %v", err) + } + }() + default: tui.Error("invalid backend mode", nil) } diff --git a/cmd/zrok/sharePublic.go b/cmd/zrok/sharePublic.go index 10045510..80e859fb 100644 --- a/cmd/zrok/sharePublic.go +++ b/cmd/zrok/sharePublic.go @@ -3,6 +3,7 @@ package main import ( "fmt" tea "github.com/charmbracelet/bubbletea" + "github.com/gobwas/glob" "github.com/openziti/zrok/endpoints" drive "github.com/openziti/zrok/endpoints/drive" "github.com/openziti/zrok/endpoints/proxy" @@ -24,15 +25,15 @@ func init() { } type sharePublicCommand struct { - basicAuth []string - frontendSelection []string - backendMode string - headless bool - insecure bool - oauthProvider string - oauthEmailDomains []string - oauthCheckInterval time.Duration - cmd *cobra.Command + basicAuth []string + frontendSelection []string + backendMode string + headless bool + insecure bool + oauthProvider string + oauthEmailAddressPatterns []string + oauthCheckInterval time.Duration + cmd *cobra.Command } func newSharePublicCommand() *sharePublicCommand { @@ -49,7 +50,7 @@ func newSharePublicCommand() *sharePublicCommand { cmd.Flags().StringArrayVar(&command.basicAuth, "basic-auth", []string{}, "Basic authentication users (,...)") cmd.Flags().StringVar(&command.oauthProvider, "oauth-provider", "", "Enable OAuth provider [google, github]") - cmd.Flags().StringArrayVar(&command.oauthEmailDomains, "oauth-email-domains", []string{}, "Allow only these email domains to authenticate via OAuth") + cmd.Flags().StringArrayVar(&command.oauthEmailAddressPatterns, "oauth-email-address-patterns", []string{}, "Allow only these email domain globs to authenticate via OAuth") cmd.Flags().DurationVar(&command.oauthCheckInterval, "oauth-check-interval", 3*time.Hour, "Maximum lifetime for OAuth authentication; reauthenticate after expiry") cmd.MarkFlagsMutuallyExclusive("basic-auth", "oauth-provider") @@ -114,8 +115,18 @@ func (cmd *sharePublicCommand) run(_ *cobra.Command, args []string) { } if cmd.oauthProvider != "" { req.OauthProvider = cmd.oauthProvider - req.OauthEmailDomains = cmd.oauthEmailDomains + req.OauthEmailAddressPatterns = cmd.oauthEmailAddressPatterns req.OauthAuthorizationCheckInterval = cmd.oauthCheckInterval + + for _, g := range cmd.oauthEmailAddressPatterns { + _, err := glob.Compile(g) + if err != nil { + if !panicInstead { + tui.Error(fmt.Sprintf("unable to create share, invalid oauth email glob (%v)", g), err) + } + panic(err) + } + } } shr, err := sdk.CreateShare(root, req) if err != nil { diff --git a/cmd/zrok/shareReserved.go b/cmd/zrok/shareReserved.go index b3c20d84..8275f198 100644 --- a/cmd/zrok/shareReserved.go +++ b/cmd/zrok/shareReserved.go @@ -7,6 +7,7 @@ import ( "github.com/openziti/zrok/endpoints" "github.com/openziti/zrok/endpoints/drive" "github.com/openziti/zrok/endpoints/proxy" + 
"github.com/openziti/zrok/endpoints/socks" "github.com/openziti/zrok/endpoints/tcpTunnel" "github.com/openziti/zrok/endpoints/udpTunnel" "github.com/openziti/zrok/environment" @@ -92,23 +93,25 @@ func (cmd *shareReservedCommand) run(_ *cobra.Command, args []string) { panic(err) } - logrus.Infof("sharing target: '%v'", target) + if resp.Payload.BackendMode != "socks" { + logrus.Infof("sharing target: '%v'", target) - if resp.Payload.BackendProxyEndpoint != target { - upReq := share.NewUpdateShareParams() - upReq.Body = &rest_model_zrok.UpdateShareRequest{ - ShrToken: shrToken, - BackendProxyEndpoint: target, - } - if _, err := zrok.Share.UpdateShare(upReq, auth); err != nil { - if !panicInstead { - tui.Error("unable to update backend proxy endpoint", err) + if resp.Payload.BackendProxyEndpoint != target { + upReq := share.NewUpdateShareParams() + upReq.Body = &rest_model_zrok.UpdateShareRequest{ + ShrToken: shrToken, + BackendProxyEndpoint: target, } - panic(err) + if _, err := zrok.Share.UpdateShare(upReq, auth); err != nil { + if !panicInstead { + tui.Error("unable to update backend proxy endpoint", err) + } + panic(err) + } + logrus.Infof("updated backend proxy endpoint to: %v", target) + } else { + logrus.Infof("using existing backend proxy endpoint: %v", target) } - logrus.Infof("updated backend proxy endpoint to: %v", target) - } else { - logrus.Infof("using existing backend proxy endpoint: %v", target) } var shareDescription string @@ -258,6 +261,27 @@ func (cmd *shareReservedCommand) run(_ *cobra.Command, args []string) { } }() + case "socks": + cfg := &socks.BackendConfig{ + IdentityPath: zif, + ShrToken: shrToken, + Requests: requests, + } + + be, err := socks.NewBackend(cfg) + if err != nil { + if !panicInstead { + tui.Error("error creating socks backend", err) + } + panic(err) + } + + go func() { + if err := be.Run(); err != nil { + logrus.Errorf("error running socks backend: %v", err) + } + }() + default: tui.Error("invalid backend mode", nil) } diff --git a/cmd/zrok/testLoopPublic.go b/cmd/zrok/testLoopPublic.go index 14e442f3..a47ef721 100644 --- a/cmd/zrok/testLoopPublic.go +++ b/cmd/zrok/testLoopPublic.go @@ -136,35 +136,42 @@ func (l *looper) run() { l.startup() logrus.Infof("looper #%d, shrToken: %v, frontend: %v", l.id, l.shrToken, l.proxyEndpoint) - go l.serviceListener() - l.dwell() - l.iterate() + if l.serviceListener() { + l.dwell() + l.iterate() + } logrus.Infof("looper #%d: complete", l.id) l.shutdown() } -func (l *looper) serviceListener() { +func (l *looper) serviceListener() bool { zcfg, err := ziti.NewConfigFromFile(l.zif) if err != nil { logrus.Errorf("error opening ziti config '%v': %v", l.zif, err) - return + return false } - opts := ziti.ListenOptions{ - ConnectTimeout: 5 * time.Minute, - MaxConnections: 10, + options := ziti.ListenOptions{ + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, } zctx, err := ziti.NewContext(zcfg) if err != nil { logrus.Errorf("error loading ziti context: %v", err) - return + return false } - if l.listener, err = zctx.ListenWithOptions(l.shrToken, &opts); err == nil { + + if l.listener, err = zctx.ListenWithOptions(l.shrToken, &options); err != nil { + logrus.Errorf("looper #%d, error listening: %v", l.id, err) + return false + } + + go func() { if err := http.Serve(l.listener, l); err != nil { logrus.Errorf("looper #%d, error serving: %v", l.id, err) } - } else { - logrus.Errorf("looper #%d, error listening: %v", l.id, err) - } + }() + + return true } func (l *looper) ServeHTTP(w http.ResponseWriter, r 
*http.Request) { @@ -239,6 +246,9 @@ func (l *looper) iterate() { if req, err := http.NewRequest("POST", l.proxyEndpoint, bytes.NewBufferString(outbase64)); err == nil { client := &http.Client{Timeout: time.Second * time.Duration(l.cmd.timeoutSeconds)} if resp, err := client.Do(req); err == nil { + if resp.StatusCode != 200 { + logrus.Errorf("looper #%d unexpected response status code %v!", l.id, resp.StatusCode) + } inpayload := new(bytes.Buffer) io.Copy(inpayload, resp.Body) inbase64 := inpayload.String() diff --git a/cmd/zrok/testWebsocket.go b/cmd/zrok/testWebsocket.go index ee45cb88..57789e95 100644 --- a/cmd/zrok/testWebsocket.go +++ b/cmd/zrok/testWebsocket.go @@ -87,7 +87,7 @@ func (cmd *testWebsocketCommand) run(_ *cobra.Command, args []string) { addr = cmd.serviceName } else { if len(args) == 0 { - logrus.Error("Address required if not using ziti") + logrus.Error("address required if not using ziti") flag.Usage() os.Exit(1) } @@ -102,13 +102,13 @@ func (cmd *testWebsocketCommand) run(_ *cobra.Command, args []string) { } defer c.Close(websocket.StatusInternalError, "the sky is falling") - logrus.Info("Writing to server...") + logrus.Info("writing to server...") err = wsjson.Write(ctx, c, "hi") if err != nil { logrus.Error(err) return } - logrus.Info("Reading response...") + logrus.Info("reading response...") typ, dat, err := c.Read(ctx) if err != nil { logrus.Error(err) diff --git a/controller/access.go b/controller/access.go index 56f9c8c5..3d84323a 100644 --- a/controller/access.go +++ b/controller/access.go @@ -62,7 +62,7 @@ func (h *accessHandler) Handle(params share.AccessParams, principal *rest_model_ return share.NewAccessNotFound() } - feToken, err := createToken() + feToken, err := CreateToken() if err != nil { logrus.Error(err) return share.NewAccessInternalServerError() diff --git a/controller/changePassword.go b/controller/changePassword.go new file mode 100644 index 00000000..ab05a494 --- /dev/null +++ b/controller/changePassword.go @@ -0,0 +1,75 @@ +package controller + +import ( + "github.com/go-openapi/runtime/middleware" + "github.com/openziti/zrok/controller/config" + "github.com/openziti/zrok/rest_model_zrok" + "github.com/openziti/zrok/rest_server_zrok/operations/account" + "github.com/sirupsen/logrus" +) + +type changePasswordHandler struct { + cfg *config.Config +} + +func newChangePasswordHandler(cfg *config.Config) *changePasswordHandler { + return &changePasswordHandler{ + cfg: cfg, + } +} + +func (handler *changePasswordHandler) Handle(params account.ChangePasswordParams, principal *rest_model_zrok.Principal) middleware.Responder { + if params.Body == nil || params.Body.Email == "" || params.Body.OldPassword == "" || params.Body.NewPassword == "" { + logrus.Error("missing email, old, or new password") + return account.NewChangePasswordUnauthorized() + } + logrus.Infof("received change password request for email '%v'", params.Body.Email) + + tx, err := str.Begin() + if err != nil { + logrus.Errorf("error starting transaction: %v", err) + return account.NewChangePasswordUnauthorized() + } + defer func() { _ = tx.Rollback() }() + + a, err := str.FindAccountWithEmail(params.Body.Email, tx) + if err != nil { + logrus.Errorf("error finding account '%v': %v", params.Body.Email, err) + return account.NewChangePasswordUnauthorized() + } + ohpwd, err := rehashPassword(params.Body.OldPassword, a.Salt) + if err != nil { + logrus.Errorf("error hashing password for '%v': %v", params.Body.Email, err) + return account.NewChangePasswordUnauthorized() + } + if a.Password 
!= ohpwd.Password { + logrus.Errorf("password mismatch for account '%v'", params.Body.Email) + return account.NewChangePasswordUnauthorized() + } + + if err := validatePassword(handler.cfg, params.Body.NewPassword); err != nil { + logrus.Errorf("password not valid for request '%v': %v", a.Email, err) + return account.NewChangePasswordUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error())) + } + + nhpwd, err := HashPassword(params.Body.NewPassword) + if err != nil { + logrus.Errorf("error hashing password for '%v': %v", a.Email, err) + return account.NewChangePasswordInternalServerError() + } + a.Salt = nhpwd.Salt + a.Password = nhpwd.Password + + if _, err := str.UpdateAccount(a, tx); err != nil { + logrus.Errorf("error updating for '%v': %v", a.Email, err) + return account.NewChangePasswordInternalServerError() + } + + if err := tx.Commit(); err != nil { + logrus.Errorf("error committing '%v': %v", a.Email, err) + return account.NewChangePasswordInternalServerError() + } + + logrus.Infof("change password for '%v'", a.Email) + return account.NewChangePasswordOK() +} diff --git a/controller/config/config.go b/controller/config/config.go index 396bd3d1..8e67f8c0 100644 --- a/controller/config/config.go +++ b/controller/config/config.go @@ -31,6 +31,7 @@ type Config struct { ResetPassword *ResetPasswordConfig Store *store.Config Ziti *zrokEdgeSdk.Config + Tls *TlsConfig } type AdminConfig struct { @@ -83,6 +84,11 @@ type ResetPasswordMaintenanceConfig struct { BatchLimit int } +type TlsConfig struct { + CertPath string + KeyPath string +} + func DefaultConfig() *Config { return &Config{ Limits: limits.DefaultConfig(), diff --git a/controller/controller.go b/controller/controller.go index 5c41ab4e..e706749a 100644 --- a/controller/controller.go +++ b/controller/controller.go @@ -2,6 +2,7 @@ package controller import ( "context" + "github.com/jessevdk/go-flags" "github.com/openziti/zrok/controller/config" "github.com/openziti/zrok/controller/limits" "github.com/openziti/zrok/controller/metrics" @@ -43,11 +44,13 @@ func Run(inCfg *config.Config) error { api := operations.NewZrokAPI(swaggerSpec) api.KeyAuth = newZrokAuthenticator(cfg).authenticate + api.AccountChangePasswordHandler = newChangePasswordHandler(cfg) api.AccountInviteHandler = newInviteHandler(cfg) api.AccountLoginHandler = account.LoginHandlerFunc(loginHandler) api.AccountRegisterHandler = newRegisterHandler(cfg) api.AccountResetPasswordHandler = newResetPasswordHandler(cfg) api.AccountResetPasswordRequestHandler = newResetPasswordRequestHandler() + api.AccountResetTokenHandler = newResetTokenHandler() api.AccountVerifyHandler = newVerifyHandler() api.AdminCreateFrontendHandler = newCreateFrontendHandler() api.AdminCreateIdentityHandler = newCreateIdentityHandler() @@ -128,8 +131,16 @@ func Run(inCfg *config.Config) error { server := rest_server_zrok.NewServer(api) defer func() { _ = server.Shutdown() }() - server.Host = cfg.Endpoint.Host - server.Port = cfg.Endpoint.Port + if cfg.Tls != nil { + server.TLSHost = cfg.Endpoint.Host + server.TLSPort = cfg.Endpoint.Port + server.TLSCertificate = flags.Filename(cfg.Tls.CertPath) + server.TLSCertificateKey = flags.Filename(cfg.Tls.KeyPath) + server.EnabledListeners = []string{"https"} + } else { + server.Host = cfg.Endpoint.Host + server.Port = cfg.Endpoint.Port + } rest_server_zrok.HealthCheck = HealthCheckHTTP server.ConfigureAPI() if err := server.Serve(); err != nil { diff --git a/controller/createFrontend.go b/controller/createFrontend.go index 7f037117..45767387 
100644 --- a/controller/createFrontend.go +++ b/controller/createFrontend.go @@ -50,7 +50,7 @@ func (h *createFrontendHandler) Handle(params admin.CreateFrontendParams, princi } defer func() { _ = tx.Rollback() }() - feToken, err := createToken() + feToken, err := CreateToken() if err != nil { logrus.Errorf("error creating frontend token: %v", err) return admin.NewCreateFrontendInternalServerError() diff --git a/controller/frontendDetail.go b/controller/frontendDetail.go index b8a7f28b..7d8fa2db 100644 --- a/controller/frontendDetail.go +++ b/controller/frontendDetail.go @@ -47,6 +47,7 @@ func (h *getFrontendDetailHandler) Handle(params metadata.GetFrontendDetailParam } payload := &rest_model_zrok.Frontend{ ID: int64(fe.Id), + Token: fe.Token, ZID: fe.ZId, CreatedAt: fe.CreatedAt.UnixMilli(), UpdatedAt: fe.UpdatedAt.UnixMilli(), diff --git a/controller/invite.go b/controller/invite.go index dabd02c1..b9835cd6 100644 --- a/controller/invite.go +++ b/controller/invite.go @@ -55,7 +55,7 @@ func (h *inviteHandler) Handle(params account.InviteParams) middleware.Responder logrus.Infof("using invite token '%v' to process invite request for '%v'", inviteToken.Token, params.Body.Email) } - token, err = createToken() + token, err = CreateToken() if err != nil { logrus.Error(err) return account.NewInviteInternalServerError() diff --git a/controller/overview.go b/controller/overview.go index 5c3bb6f8..a942cdaa 100644 --- a/controller/overview.go +++ b/controller/overview.go @@ -95,6 +95,7 @@ func (h *overviewHandler) Handle(_ metadata.OverviewParams, principal *rest_mode for _, fe := range fes { envFe := &rest_model_zrok.Frontend{ ID: int64(fe.Id), + Token: fe.Token, ZID: fe.ZId, CreatedAt: fe.CreatedAt.UnixMilli(), UpdatedAt: fe.UpdatedAt.UnixMilli(), diff --git a/controller/passwords.go b/controller/passwords.go index 9e87e92c..7feac74e 100644 --- a/controller/passwords.go +++ b/controller/passwords.go @@ -24,7 +24,7 @@ func salt() string { return base64.StdEncoding.EncodeToString(buf) } -func hashPassword(password string) (*hashedPassword, error) { +func HashPassword(password string) (*hashedPassword, error) { return rehashPassword(password, salt()) } diff --git a/controller/register.go b/controller/register.go index 8bfb1fe9..7067c356 100644 --- a/controller/register.go +++ b/controller/register.go @@ -38,7 +38,7 @@ func (h *registerHandler) Handle(params account.RegisterParams) middleware.Respo return account.NewRegisterNotFound() } - token, err := createToken() + token, err := CreateToken() if err != nil { logrus.Errorf("error creating token for request '%v' (%v): %v", params.Body.Token, ar.Email, err) return account.NewRegisterInternalServerError() @@ -49,7 +49,7 @@ func (h *registerHandler) Handle(params account.RegisterParams) middleware.Respo return account.NewRegisterUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error())) } - hpwd, err := hashPassword(params.Body.Password) + hpwd, err := HashPassword(params.Body.Password) if err != nil { logrus.Errorf("error hashing password for request '%v' (%v): %v", params.Body.Token, ar.Email, err) return account.NewRegisterInternalServerError() diff --git a/controller/resetPassword.go b/controller/resetPassword.go index 5327e3c7..fb66ed77 100644 --- a/controller/resetPassword.go +++ b/controller/resetPassword.go @@ -53,7 +53,7 @@ func (handler *resetPasswordHandler) Handle(params account.ResetPasswordParams) return account.NewResetPasswordUnprocessableEntity().WithPayload(rest_model_zrok.ErrorMessage(err.Error())) } - hpwd, err := 
hashPassword(params.Body.Password) + hpwd, err := HashPassword(params.Body.Password) if err != nil { logrus.Errorf("error hashing password for '%v' (%v): %v", params.Body.Token, a.Email, err) return account.NewResetPasswordRequestInternalServerError() diff --git a/controller/resetPasswordRequest.go b/controller/resetPasswordRequest.go index 7913d359..c73c860a 100644 --- a/controller/resetPasswordRequest.go +++ b/controller/resetPasswordRequest.go @@ -34,7 +34,7 @@ func (handler *resetPasswordRequestHandler) Handle(params account.ResetPasswordR } defer func() { _ = tx.Rollback() }() - token, err = createToken() + token, err = CreateToken() if err != nil { logrus.Errorf("error creating token for '%v': %v", params.Body.EmailAddress, err) return account.NewResetPasswordRequestInternalServerError() diff --git a/controller/resetToken.go b/controller/resetToken.go new file mode 100644 index 00000000..f00a3ba3 --- /dev/null +++ b/controller/resetToken.go @@ -0,0 +1,63 @@ +package controller + +import ( + "github.com/go-openapi/runtime/middleware" + "github.com/openziti/zrok/rest_model_zrok" + "github.com/openziti/zrok/rest_server_zrok/operations/account" + "github.com/sirupsen/logrus" +) + +type resetTokenHandler struct{} + +func newResetTokenHandler() *resetTokenHandler { + return &resetTokenHandler{} +} + +func (handler *resetTokenHandler) Handle(params account.ResetTokenParams, principal *rest_model_zrok.Principal) middleware.Responder { + logrus.Infof("received token regeneration request for email '%v'", principal.Email) + + if params.Body.EmailAddress != principal.Email { + logrus.Errorf("mismatched account '%v' for '%v'", params.Body.EmailAddress, principal.Email) + return account.NewResetTokenNotFound() + } + + tx, err := str.Begin() + if err != nil { + logrus.Errorf("error starting transaction for '%v': %v", params.Body.EmailAddress, err) + return account.NewResetTokenInternalServerError() + } + defer tx.Rollback() + + a, err := str.FindAccountWithEmail(params.Body.EmailAddress, tx) + if err != nil { + logrus.Errorf("error finding account for '%v': %v", params.Body.EmailAddress, err) + return account.NewResetTokenNotFound() + } + if a.Deleted { + logrus.Errorf("account '%v' for '%v' deleted", a.Email, a.Token) + return account.NewResetTokenNotFound() + } + + // Need to create new token and invalidate all other resources + token, err := CreateToken() + if err != nil { + logrus.Errorf("error creating token for request '%v': %v", params.Body.EmailAddress, err) + return account.NewResetTokenInternalServerError() + } + + a.Token = token + + if _, err := str.UpdateAccount(a, tx); err != nil { + logrus.Errorf("error updating account for request '%v': %v", params.Body.EmailAddress, err) + return account.NewResetTokenInternalServerError() + } + + if err := tx.Commit(); err != nil { + logrus.Errorf("error committing '%v' (%v): %v", params.Body.EmailAddress, a.Email, err) + return account.NewResetTokenInternalServerError() + } + + logrus.Infof("regenerated token '%v' for '%v'", a.Token, a.Email) + + return account.NewResetTokenOK().WithPayload(&account.ResetTokenOKBody{Token: token}) +} diff --git a/controller/share.go b/controller/share.go index bcf36360..f2b73758 100644 --- a/controller/share.go +++ b/controller/share.go @@ -8,6 +8,7 @@ import ( "github.com/openziti/zrok/rest_model_zrok" "github.com/openziti/zrok/rest_server_zrok/operations/share" "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/openziti/zrok/util" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -33,14 +34,14 @@ 
func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr found := false for _, env := range envs { if env.ZId == envZId { - logrus.Debugf("found identity '%v' for user '%v'", envZId, principal.Email) + logrus.Debugf("found identity '%v' for account '%v'", envZId, principal.Email) envId = env.Id found = true break } } if !found { - logrus.Errorf("environment '%v' not found for user '%v'", envZId, principal.Email) + logrus.Errorf("environment '%v' not found for account '%v'", envZId, principal.Email) return share.NewShareUnauthorized() } } else { @@ -58,11 +59,30 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr logrus.Error(err) return share.NewShareInternalServerError() } + + reserved := params.Body.Reserved + uniqueName := params.Body.UniqueName shrToken, err := createShareToken() if err != nil { logrus.Error(err) return share.NewShareInternalServerError() } + if reserved && uniqueName != "" { + if !util.IsValidUniqueName(uniqueName) { + logrus.Errorf("invalid unique name '%v' for account '%v'", uniqueName, principal.Email) + return share.NewShareUnprocessableEntity() + } + shareExists, err := str.ShareWithTokenExists(uniqueName, trx) + if err != nil { + logrus.Errorf("error checking share for token collision: %v", err) + return share.NewUpdateShareInternalServerError() + } + if shareExists { + logrus.Errorf("token '%v' already exists; cannot create share", uniqueName) + return share.NewShareConflict() + } + shrToken = uniqueName + } var shrZId string var frontendEndpoints []string @@ -94,7 +114,6 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr } case string(sdk.PrivateShareMode): - logrus.Info("doing private") shrZId, frontendEndpoints, err = newPrivateResourceAllocator().allocate(envZId, shrToken, params, edge) if err != nil { logrus.Error(err) @@ -108,7 +127,6 @@ func (h *shareHandler) Handle(params share.ShareParams, principal *rest_model_zr logrus.Debugf("allocated share '%v'", shrToken) - reserved := params.Body.Reserved sshr := &store.Share{ ZId: shrZId, Token: shrToken, diff --git a/controller/store/account.go b/controller/store/account.go index 4ccb7a94..7f1d683d 100644 --- a/controller/store/account.go +++ b/controller/store/account.go @@ -16,7 +16,7 @@ type Account struct { } func (str *Store) CreateAccount(a *Account, tx *sqlx.Tx) (int, error) { - stmt, err := tx.Prepare("insert into accounts (email, salt, password, token, limitless) values ($1, $2, $3, $4, $5) returning id") + stmt, err := tx.Prepare("insert into accounts (email, salt, password, token, limitless) values (lower($1), $2, $3, $4, $5) returning id") if err != nil { return 0, errors.Wrap(err, "error preparing accounts insert statement") } @@ -37,7 +37,7 @@ func (str *Store) GetAccount(id int, tx *sqlx.Tx) (*Account, error) { func (str *Store) FindAccountWithEmail(email string, tx *sqlx.Tx) (*Account, error) { a := &Account{} - if err := tx.QueryRowx("select * from accounts where email = $1 and not deleted", email).StructScan(a); err != nil { + if err := tx.QueryRowx("select * from accounts where email = lower($1) and not deleted", email).StructScan(a); err != nil { return nil, errors.Wrap(err, "error selecting account by email") } return a, nil @@ -45,7 +45,7 @@ func (str *Store) FindAccountWithEmail(email string, tx *sqlx.Tx) (*Account, err func (str *Store) FindAccountWithEmailAndDeleted(email string, tx *sqlx.Tx) (*Account, error) { a := &Account{} - if err := tx.QueryRowx("select * from accounts where email = $1", 
email).StructScan(a); err != nil { + if err := tx.QueryRowx("select * from accounts where email = lower($1)", email).StructScan(a); err != nil { return nil, errors.Wrap(err, "error selecting acount by email") } return a, nil @@ -60,7 +60,7 @@ func (str *Store) FindAccountWithToken(token string, tx *sqlx.Tx) (*Account, err } func (str *Store) UpdateAccount(a *Account, tx *sqlx.Tx) (int, error) { - stmt, err := tx.Prepare("update accounts set email=$1, salt=$2, password=$3, token=$4, limitless=$5 where id = $6") + stmt, err := tx.Prepare("update accounts set email=lower($1), salt=$2, password=$3, token=$4, limitless=$5 where id = $6") if err != nil { return 0, errors.Wrap(err, "error preparing accounts update statement") } diff --git a/controller/store/password_reset_request.go b/controller/store/password_reset_request.go index a6a7b60d..2b14ce5f 100644 --- a/controller/store/password_reset_request.go +++ b/controller/store/password_reset_request.go @@ -7,6 +7,7 @@ import ( "github.com/jmoiron/sqlx" "github.com/pkg/errors" + "github.com/sirupsen/logrus" ) type PasswordResetRequest struct { @@ -17,7 +18,11 @@ type PasswordResetRequest struct { } func (str *Store) CreatePasswordResetRequest(prr *PasswordResetRequest, tx *sqlx.Tx) (int, error) { - stmt, err := tx.Prepare("insert into password_reset_requests (account_id, token) values ($1, $2) ON CONFLICT(account_id) DO UPDATE SET token=$2 returning id") + if err := str.DeletePasswordResetRequestsByAccountId(prr.AccountId, tx); err != nil { + logrus.Errorf("unable to delete old password reset requests for account '%v', but continuing: %v", prr.AccountId, err) + } + + stmt, err := tx.Prepare("insert into password_reset_requests (account_id, token) values ($1, $2) returning id") if err != nil { return 0, errors.Wrap(err, "error preparing password_reset_requests insert statement") } @@ -98,3 +103,15 @@ func (str *Store) DeleteMultiplePasswordResetRequests(ids []int, tx *sqlx.Tx) er } return nil } + +func (str *Store) DeletePasswordResetRequestsByAccountId(accountId int, tx *sqlx.Tx) error { + stmt, err := tx.Prepare("update password_reset_requests set updated_at = current_timestamp, deleted = true where account_id = $1") + if err != nil { + return errors.Wrap(err, "error preparing password_reset_requests delete by account_id statement") + } + _, err = stmt.Exec(accountId) + if err != nil { + return errors.Wrap(err, "error executing password_reset_requests delete by account_id statement") + } + return nil +} diff --git a/controller/store/share.go b/controller/store/share.go index 8161fc2d..365e5e03 100644 --- a/controller/store/share.go +++ b/controller/store/share.go @@ -63,6 +63,14 @@ func (str *Store) FindShareWithToken(shrToken string, tx *sqlx.Tx) (*Share, erro return shr, nil } +func (str *Store) ShareWithTokenExists(shrToken string, tx *sqlx.Tx) (bool, error) { + count := 0 + if err := tx.QueryRowx("select count(0) from shares where token = $1 and not deleted", shrToken).Scan(&count); err != nil { + return true, errors.Wrap(err, "error selecting share count by token") + } + return count > 0, nil +} + func (str *Store) FindShareWithZIdAndDeleted(zId string, tx *sqlx.Tx) (*Share, error) { shr := &Share{} if err := tx.QueryRowx("select * from shares where z_id = $1", zId).StructScan(shr); err != nil { diff --git a/controller/store/sql/postgresql/015_v0_4_19_share_unique_name_constraint.sql b/controller/store/sql/postgresql/015_v0_4_19_share_unique_name_constraint.sql new file mode 100644 index 00000000..42f01743 --- /dev/null +++ 
b/controller/store/sql/postgresql/015_v0_4_19_share_unique_name_constraint.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +-- remove the old unique index (which did not respect the deleted flag) +ALTER TABLE shares DROP CONSTRAINT shares_token_key; + +-- add a new unique index which only constrains uniqueness for not-deleted rows +CREATE UNIQUE INDEX shares_token_idx ON shares(token) WHERE deleted is false; diff --git a/controller/store/sql/postgresql/016_v0_4_21_lowercase_email.sql b/controller/store/sql/postgresql/016_v0_4_21_lowercase_email.sql new file mode 100644 index 00000000..772a3163 --- /dev/null +++ b/controller/store/sql/postgresql/016_v0_4_21_lowercase_email.sql @@ -0,0 +1,3 @@ +-- +migrate Up + +update accounts set email = lower(email); \ No newline at end of file diff --git a/controller/store/sql/postgresql/017_v0_4_24_backend_mode_socks.sql b/controller/store/sql/postgresql/017_v0_4_24_backend_mode_socks.sql new file mode 100644 index 00000000..501dcf11 --- /dev/null +++ b/controller/store/sql/postgresql/017_v0_4_24_backend_mode_socks.sql @@ -0,0 +1,3 @@ +-- +migrate Up + +alter type backend_mode add value 'socks'; \ No newline at end of file diff --git a/controller/store/sql/postgresql/018_v0_4_23_password_reset_request_unique.sql b/controller/store/sql/postgresql/018_v0_4_23_password_reset_request_unique.sql new file mode 100644 index 00000000..9606e67a --- /dev/null +++ b/controller/store/sql/postgresql/018_v0_4_23_password_reset_request_unique.sql @@ -0,0 +1,7 @@ +-- +migrate Up + +-- remove the old unique index (users might need multiple password resets) +ALTER TABLE password_reset_requests DROP CONSTRAINT password_reset_requests_account_id_key; + +-- add new constraint which doesnt mind having multiple resets for account ids +ALTER TABLE password_reset_requests ADD CONSTRAINT password_reset_requests_account_id_key FOREIGN KEY (account_id) REFERENCES accounts (id); diff --git a/controller/store/sql/sqlite3/015_v0_4_19_share_unique_name_constraint.sql b/controller/store/sql/sqlite3/015_v0_4_19_share_unique_name_constraint.sql new file mode 100644 index 00000000..a4d6a733 --- /dev/null +++ b/controller/store/sql/sqlite3/015_v0_4_19_share_unique_name_constraint.sql @@ -0,0 +1,57 @@ +-- +migrate Up + +alter table shares rename to shares_old; +create table shares ( + id integer primary key, + environment_id integer constraint fk_environments_shares references environments on delete cascade, + z_id string not null unique, + token string not null, + share_mode string not null, + backend_mode string not null, + frontend_selection string, + frontend_endpoint string, + backend_proxy_endpoint string, + reserved boolean not null default(false), + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + deleted boolean not null default(false), + + constraint chk_z_id check (z_id <> ''), + constraint chk_token check (token <> ''), + constraint chk_share_mode check (share_mode == 'public' or share_mode == 'private'), + constraint chk_backend_mode check (backend_mode == 'proxy' or backend_mode == 'web' or backend_mode == 'tcpTunnel' or backend_mode == 'udpTunnel' or backend_mode == 'caddy' or backend_mode == 'drive') +); +insert into shares select * from shares_old; +create unique index shares_token_idx ON shares(token) WHERE deleted is false; + +alter table frontends rename to frontends_old; +create table frontends ( + id integer primary key, + environment_id integer references environments(id), 
+ token varchar(32) not null unique, + z_id varchar(32) not null, + public_name varchar(64) unique, + url_template varchar(1024), + reserved boolean not null default(false), + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + deleted boolean not null default(false), + private_share_id integer references shares(id) +); +insert into frontends select * from frontends_old; +drop table frontends_old; + +alter table share_limit_journal rename to share_limit_journal_old; +create table share_limit_journal ( + id integer primary key, + share_id integer references shares(id), + rx_bytes bigint not null, + tx_bytes bigint not null, + action limit_action_type not null, + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')) +); +insert into share_limit_journal select * from share_limit_journal_old; +drop table share_limit_journal_old; + +drop table shares_old; \ No newline at end of file diff --git a/controller/store/sql/sqlite3/016_v0_4_21_lowercase_email.sql b/controller/store/sql/sqlite3/016_v0_4_21_lowercase_email.sql new file mode 100644 index 00000000..772a3163 --- /dev/null +++ b/controller/store/sql/sqlite3/016_v0_4_21_lowercase_email.sql @@ -0,0 +1,3 @@ +-- +migrate Up + +update accounts set email = lower(email); \ No newline at end of file diff --git a/controller/store/sql/sqlite3/017_v0_4_24_backend_mode_socks.sql b/controller/store/sql/sqlite3/017_v0_4_24_backend_mode_socks.sql new file mode 100644 index 00000000..20d50b82 --- /dev/null +++ b/controller/store/sql/sqlite3/017_v0_4_24_backend_mode_socks.sql @@ -0,0 +1,58 @@ +-- +migrate Up + +alter table shares rename to shares_old; +create table shares ( + id integer primary key, + environment_id integer constraint fk_environments_shares references environments on delete cascade, + z_id string not null unique, + token string not null, + share_mode string not null, + backend_mode string not null, + frontend_selection string, + frontend_endpoint string, + backend_proxy_endpoint string, + reserved boolean not null default(false), + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + deleted boolean not null default(false), + + constraint chk_z_id check (z_id <> ''), + constraint chk_token check (token <> ''), + constraint chk_share_mode check (share_mode == 'public' or share_mode == 'private'), + constraint chk_backend_mode check (backend_mode == 'proxy' or backend_mode == 'web' or backend_mode == 'tcpTunnel' or backend_mode == 'udpTunnel' or backend_mode == 'caddy' or backend_mode == 'drive' or backend_mode == 'socks') +); +insert into shares select * from shares_old; +drop index shares_token_idx; +create unique index shares_token_idx ON shares(token) WHERE deleted is false; + +alter table frontends rename to frontends_old; +create table frontends ( + id integer primary key, + environment_id integer references environments(id), + token varchar(32) not null unique, + z_id varchar(32) not null, + public_name varchar(64) unique, + url_template varchar(1024), + reserved boolean not null default(false), + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + deleted boolean not null default(false), + private_share_id integer references 
shares(id) +); +insert into frontends select * from frontends_old; +drop table frontends_old; + +alter table share_limit_journal rename to share_limit_journal_old; +create table share_limit_journal ( + id integer primary key, + share_id integer references shares(id), + rx_bytes bigint not null, + tx_bytes bigint not null, + action limit_action_type not null, + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')) +); +insert into share_limit_journal select * from share_limit_journal_old; +drop table share_limit_journal_old; + +drop table shares_old; \ No newline at end of file diff --git a/controller/store/sql/sqlite3/018_v0_4_23_password_reset_request_unique.sql b/controller/store/sql/sqlite3/018_v0_4_23_password_reset_request_unique.sql new file mode 100644 index 00000000..0e9850d6 --- /dev/null +++ b/controller/store/sql/sqlite3/018_v0_4_23_password_reset_request_unique.sql @@ -0,0 +1,17 @@ +-- +migrate Up + +alter table password_reset_requests rename to password_reset_requests_old; + +CREATE TABLE password_reset_requests ( + id integer primary key, + token string not null unique, + created_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + updated_at datetime not null default(strftime('%Y-%m-%d %H:%M:%f', 'now')), + account_id integer not null constraint fk_accounts_password_reset_requests references accounts, + deleted boolean not null default(false), + + constraint chk_token check(token <> '') +); + +insert into password_reset_requests select * from password_reset_requests_old; +drop table password_reset_requests_old; \ No newline at end of file diff --git a/controller/unshare.go b/controller/unshare.go index 8d7fde96..eab9a346 100644 --- a/controller/unshare.go +++ b/controller/unshare.go @@ -76,11 +76,7 @@ func (h *unshareHandler) Handle(params share.UnshareParams, principal *rest_mode if sshr.Reserved == params.Body.Reserved { // single tag-based share deallocator; should work regardless of sharing mode - if err := h.deallocateResources(senv, shrToken, shrZId, edge); err != nil { - logrus.Errorf("error unsharing ziti resources for '%v': %v", sshr, err) - return share.NewUnshareInternalServerError() - } - + h.deallocateResources(senv, shrToken, shrZId, edge) logrus.Debugf("deallocated share '%v'", shrToken) if err := str.DeleteShare(sshr.Id, tx); err != nil { @@ -120,21 +116,20 @@ func (h *unshareHandler) findShareZId(shrToken string, edge *rest_management_api return "", errors.Errorf("share '%v' not found", shrToken) } -func (h *unshareHandler) deallocateResources(senv *store.Environment, shrToken, shrZId string, edge *rest_management_api_client.ZitiEdgeManagement) error { +func (h *unshareHandler) deallocateResources(senv *store.Environment, shrToken, shrZId string, edge *rest_management_api_client.ZitiEdgeManagement) { if err := zrokEdgeSdk.DeleteServiceEdgeRouterPolicy(senv.ZId, shrToken, edge); err != nil { - return err + logrus.Warnf("error deleting service edge router policies for share '%v' in environment '%v': %v", shrToken, senv.ZId, err) } if err := zrokEdgeSdk.DeleteServicePoliciesDial(senv.ZId, shrToken, edge); err != nil { - return err + logrus.Warnf("error deleting dial service policies for share '%v' in environment '%v': %v", shrToken, senv.ZId, err) } if err := zrokEdgeSdk.DeleteServicePoliciesBind(senv.ZId, shrToken, edge); err != nil { - return err + logrus.Warnf("error deleting bind service policies for share '%v' in environment '%v': %v", 
shrToken, senv.ZId, err) } if err := zrokEdgeSdk.DeleteConfig(senv.ZId, shrToken, edge); err != nil { - return err + logrus.Warnf("error deleting config for share '%v' in environment '%v': %v", shrToken, senv.ZId, err) } if err := zrokEdgeSdk.DeleteService(senv.ZId, shrZId, edge); err != nil { - return err + logrus.Warnf("error deleting service '%v' for share '%v' in environment '%v': %v", shrZId, shrToken, senv.ZId, err) } - return nil } diff --git a/controller/util.go b/controller/util.go index c490fde2..cf70f982 100644 --- a/controller/util.go +++ b/controller/util.go @@ -65,7 +65,7 @@ func createShareToken() (string, error) { return gen(), nil } -func createToken() (string, error) { +func CreateToken() (string, error) { gen, err := nanoid.CustomASCII("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789", 12) if err != nil { return "", err diff --git a/docker/compose/.gitignore b/docker/compose/.gitignore index 84939203..f19fff7f 100644 --- a/docker/compose/.gitignore +++ b/docker/compose/.gitignore @@ -1,2 +1,2 @@ .env -compose.override.yml \ No newline at end of file +*compose.override.yml \ No newline at end of file diff --git a/docker/compose/zrok-public-reserved/compose.yml b/docker/compose/zrok-public-reserved/compose.yml index a2167d72..a72f704c 100644 --- a/docker/compose/zrok-public-reserved/compose.yml +++ b/docker/compose/zrok-public-reserved/compose.yml @@ -38,11 +38,12 @@ services: STATE_DIRECTORY: /mnt # zrok homedir in container # most relevant options + ZROK_UNIQUE_NAME: # name is used to construct frontend domain name, e.g. "myapp" in "myapp.share.zrok.io" ZROK_BACKEND_MODE: # web, caddy, drive, proxy ZROK_TARGET: # backend target, is a path in container filesystem unless proxy mode ZROK_INSECURE: # "--insecure" if proxy target has unverifiable TLS server certificate ZROK_OAUTH_PROVIDER: # google, github - ZROK_OATH_EMAILS: # allow space-separated list of OAuth email addresses or @domain.tld + ZROK_OAUTH_EMAILS: # allow space-separated list of OAuth email addresses or @domain.tld ZROK_BASIC_AUTH: # username:password, mutually-exclusive with ZROK_OAUTH_PROVIDER # least relevant options diff --git a/docker/compose/zrok-public-share/compose.yml b/docker/compose/zrok-public-share/compose.yml index a57b3ea7..86665be9 100644 --- a/docker/compose/zrok-public-share/compose.yml +++ b/docker/compose/zrok-public-share/compose.yml @@ -44,7 +44,7 @@ services: ZROK_TARGET: http://zrok-test:9090 # backend target, is a path in container filesystem unless proxy mode ZROK_INSECURE: # "--insecure" if proxy target has unverifiable TLS server certificate ZROK_OAUTH_PROVIDER: # google, github - ZROK_OATH_EMAILS: # space-separated list of OAuth email addresses or @domain.tld to allow + ZROK_OAUTH_EMAILS: # space-separated list of OAuth email addresses or @domain.tld to allow ZROK_BASIC_AUTH: # username:password, mutually-exclusive with ZROK_OAUTH_PROVIDER # least relevant options diff --git a/docker/images/cross-build/Dockerfile b/docker/images/cross-build/Dockerfile index 4670025f..2f5f0670 100644 --- a/docker/images/cross-build/Dockerfile +++ b/docker/images/cross-build/Dockerfile @@ -1,3 +1,4 @@ +# Stage 1: Install Node.js with nvm FROM debian:bullseye-slim # # this file mirrors the build params used in the GitHub Actions and enables @@ -12,23 +13,31 @@ ARG go_root=/usr/local/go ARG go_cache=/usr/share/go_cache ARG uid=1000 ARG gid=1000 -RUN apt-get -y update -RUN apt-get -y install gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gcc-aarch64-linux-gnu -RUN apt-get -y install 
wget build-essential +RUN apt-get -y update \ + && apt-get -y install \ + gcc-arm-linux-gnueabihf g++-arm-linux-gnueabihf gcc-aarch64-linux-gnu \ + wget build-essential -COPY ./linux-build.sh /usr/local/bin/ RUN wget -q https://go.dev/dl/${go_distribution_file} RUN tar -xzf ${go_distribution_file} -C /usr/local/ +RUN wget -qO- https://deb.nodesource.com/setup_18.x | bash \ + && apt-get -y update \ + && apt-get -y install \ + nodejs + RUN mkdir ${go_path} ${go_cache} RUN chown -R ${uid}:${gid} ${go_path} ${go_cache} +COPY ./linux-build.sh /usr/local/bin/ + USER ${uid}:${gid} ENV TARGETARCH=${TARGETARCH} ENV GOPATH=${go_path} ENV GOROOT=${go_root} ENV GOCACHE=${go_cache} ENV PATH=${go_path}/bin:${go_root}/bin:$PATH + RUN go install github.com/mitchellh/gox@latest WORKDIR /mnt ENTRYPOINT ["linux-build.sh"] diff --git a/docker/images/cross-build/linux-build.sh b/docker/images/cross-build/linux-build.sh index 9d31a8ff..447106a8 100755 --- a/docker/images/cross-build/linux-build.sh +++ b/docker/images/cross-build/linux-build.sh @@ -6,7 +6,10 @@ # # -set -o pipefail -e -u +set -o errexit +set -o nounset +set -o pipefail +set -o xtrace # if no architectures supplied then default list of three if (( ${#} )); then @@ -31,6 +34,16 @@ else PROCS_PER_JOB=0 # invokes gox default to use all CPUs-1 fi +( + HOME=/tmp/builder + # Navigate to the "ui" directory and run npm commands + npm config set cache /mnt/.npm + cd ./ui/ + mkdir -p $HOME + npm install + npm run build +) + for ARCH in ${JOBS[@]}; do GOX_CMD=" gox \ diff --git a/docker/images/zrok/Dockerfile b/docker/images/zrok/Dockerfile index bb6b2b8f..f83038ab 100644 --- a/docker/images/zrok/Dockerfile +++ b/docker/images/zrok/Dockerfile @@ -1,5 +1,5 @@ # this builds docker.io/openziti/zrok -ARG ZITI_CLI_TAG="0.31.0" +ARG ZITI_CLI_TAG="0.32.1" ARG ZITI_CLI_IMAGE="docker.io/openziti/ziti-cli" # this builds docker.io/openziti/ziti-controller FROM ${ZITI_CLI_IMAGE}:${ZITI_CLI_TAG} @@ -20,8 +20,9 @@ LABEL name="openziti/zrok" \ USER root -### install packages (jq introduced in source image in next release 0.30.6) -RUN INSTALL_PKGS="jq" && \ +### install packages: findutils provides xargs which is used by the zrok Helm chart's controller bootstrapping script to +#create the default account enable token +RUN INSTALL_PKGS="findutils" && \ microdnf -y update --setopt=install_weak_deps=0 --setopt=tsflags=nodocs && \ microdnf -y install --setopt=install_weak_deps=0 --setopt=tsflags=nodocs ${INSTALL_PKGS} diff --git a/docs/_attic/network/_category_.json b/docs/_attic/network/_category_.json index 771a75a0..4acef57b 100644 --- a/docs/_attic/network/_category_.json +++ b/docs/_attic/network/_category_.json @@ -1,7 +1,7 @@ -{ - "label": "Network", - "position": 60, - "link": { - "type": "generated-index", - } +{ + "label": "Network", + "position": 60, + "link": { + "type": "generated-index", + } } \ No newline at end of file diff --git a/docs/_attic/sharing/_category_.json b/docs/_attic/sharing/_category_.json index 8c046ccc..e6277e75 100644 --- a/docs/_attic/sharing/_category_.json +++ b/docs/_attic/sharing/_category_.json @@ -1,7 +1,7 @@ -{ - "label": "Sharing", - "position": 20, - "link": { - "type": "generated-index", - } +{ + "label": "Sharing", + "position": 20, + "link": { + "type": "generated-index", + } } \ No newline at end of file diff --git a/docs/concepts/_category_.json b/docs/concepts/_category_.json index ce039221..430f3573 100644 --- a/docs/concepts/_category_.json +++ b/docs/concepts/_category_.json @@ -1,8 +1,8 @@ -{ - "label": "Concepts", - 
"position": 30, - "link": { - "type": "doc", - "id": "concepts/index" - } -} +{ + "label": "Concepts", + "position": 30, + "link": { + "type": "doc", + "id": "concepts/index" + } +} diff --git a/docs/concepts/files.md b/docs/concepts/files.md index 2c1aa620..9b436618 100644 --- a/docs/concepts/files.md +++ b/docs/concepts/files.md @@ -1,52 +1,52 @@ ---- -title: Sharing Websites and Files -sidebar_position: 30 ---- - -With `zrok` it is possible to share files quickly and easily as well. To share files using `zrok` use -the `--backend-mode web`, for example: `zrok share private . --backend-mode web`. - -Running with this mode will make it trivially easy to share files from the directory which the command -was run from. - -For example if you have a directory with a structure like this: - -```shell --rw-r--r--+ 1 Michael None 7090 Apr 17 12:53 CHANGELOG.md --rw-r--r--+ 1 Michael None 11346 Apr 17 12:53 LICENSE --rw-r--r--+ 1 Michael None 2885 Apr 17 12:53 README.md --rwxr-xr-x+ 1 Michael None 44250624 Apr 17 13:00 zrok.exe* -``` - -The files can be shared using a command such as: - -```shell -zrok share public --backend-mode web . -``` - -Then the files can be access with a `private` or `public` share, for example as shown: - -![zrok_share_web_files](../images/zrok_share_web_files.png) - -`zrok` will automatically provide a stock website, which will allow the accessing user to browse and navigate the file tree. Clicking the files allows the user to download them. - -`zrok` can also share a pre-rendered static HTML website. If you have a directory like this: - -```shell --rw-rw-r--+ 1 Michael None 56 Jun 26 13:23 index.html -``` - -If `index.html` contains valid HTML, like this: - -```html - - -

Hello zrok

- -``` - -Sharing the directory will result in the following when you access the share in a web browser: - -![zrok_share_web_website](../images/zrok_share_web_website.png) - +--- +title: Sharing Websites and Files +sidebar_position: 30 +--- + +With `zrok` it is possible to share files quickly and easily as well. To share files using `zrok` use +the `--backend-mode web`, for example: `zrok share private . --backend-mode web`. + +Running with this mode will make it trivially easy to share files from the directory which the command +was run from. + +For example if you have a directory with a structure like this: + +```shell +-rw-r--r--+ 1 Michael None 7090 Apr 17 12:53 CHANGELOG.md +-rw-r--r--+ 1 Michael None 11346 Apr 17 12:53 LICENSE +-rw-r--r--+ 1 Michael None 2885 Apr 17 12:53 README.md +-rwxr-xr-x+ 1 Michael None 44250624 Apr 17 13:00 zrok.exe* +``` + +The files can be shared using a command such as: + +```shell +zrok share public --backend-mode web . +``` + +Then the files can be access with a `private` or `public` share, for example as shown: + +![zrok_share_web_files](../images/zrok_share_web_files.png) + +`zrok` will automatically provide a stock website, which will allow the accessing user to browse and navigate the file tree. Clicking the files allows the user to download them. + +`zrok` can also share a pre-rendered static HTML website. If you have a directory like this: + +```shell +-rw-rw-r--+ 1 Michael None 56 Jun 26 13:23 index.html +``` + +If `index.html` contains valid HTML, like this: + +```html + + +

Hello zrok

+ +``` + +Sharing the directory will result in the following when you access the share in a web browser: + +![zrok_share_web_website](../images/zrok_share_web_website.png) + `zrok` contains a built-in web server, which you can use to serve static websites as a share. \ No newline at end of file diff --git a/docs/concepts/hosting.md b/docs/concepts/hosting.md index 08c28a31..95f3dc42 100644 --- a/docs/concepts/hosting.md +++ b/docs/concepts/hosting.md @@ -1,16 +1,16 @@ ---- -sidebar_position: 200 ---- - -# Hosting - -## Self-Hosted - -`zrok` is not limited to a managed offering. You can [host your own](../guides/self-hosting/self_hosting_guide.md) instance of `zrok` as well. `zrok` is -also freely available as open source software hosted by GitHub under a very permissive Apache v2 license. - -## Managed Service - -`zrok` is also offered as a cloud service, making it instantly accessible to a large population immediately. -NetFoundry provides a manged version of `zrok` at https://zrok.io. This provides the easy-to-use, -quick to demonstrate features of `zrok` without needing to deploy and host `zrok` yourself. +--- +sidebar_position: 200 +--- + +# Hosting + +## Self-Hosted + +`zrok` is not limited to a managed offering. You can [host your own](../guides/self-hosting/self_hosting_guide.md) instance of `zrok` as well. `zrok` is +also freely available as open source software hosted by GitHub under a very permissive Apache v2 license. + +## Managed Service + +`zrok` is also offered as a cloud service, making it instantly accessible to a large population immediately. +NetFoundry provides a manged version of `zrok` at https://zrok.io. This provides the easy-to-use, +quick to demonstrate features of `zrok` without needing to deploy and host `zrok` yourself. diff --git a/docs/concepts/index.md b/docs/concepts/index.md index fa87094d..6d6c1b00 100644 --- a/docs/concepts/index.md +++ b/docs/concepts/index.md @@ -1,13 +1,13 @@ ---- -sidebar_title: Core Features -sidebar_position: 25 ---- - -# Concepts - -`zrok` was designed to make sharing local resources both secure and easy. In this section of the `zrok` documentation, we'll tour through all of the most important features. - -Sharing with `zrok` can be either [`public`](./sharing-public.md) or [`private`](./sharing-private.md). -Naturally, regular web-based resources can be shared but `zrok` also includes support for sharing raw [TCP](./tunnels.md) and [UDP](./tunnels.md) network connections, and also includes a [website and file sharing](./files.md) feature. - -Learn about `zrok` [hosting here](./hosting.md), including instructions on how to [install your own `zrok` instance](../guides/self-hosting/self_hosting_guide.md). +--- +sidebar_title: Core Features +sidebar_position: 25 +--- + +# Concepts + +`zrok` was designed to make sharing local resources both secure and easy. In this section of the `zrok` documentation, we'll tour through all of the most important features. + +Sharing with `zrok` can be either [`public`](./sharing-public.md) or [`private`](./sharing-private.md). +Naturally, regular web-based resources can be shared but `zrok` also includes support for sharing raw [TCP](./tunnels.md) and [UDP](./tunnels.md) network connections, and also includes a [website and file sharing](./files.md) feature. + +Learn about `zrok` [hosting here](./hosting.md), including instructions on how to [install your own `zrok` instance](../guides/self-hosting/self_hosting_guide.md). 
diff --git a/docs/concepts/opensource.md b/docs/concepts/opensource.md index c6925d6e..b313af23 100644 --- a/docs/concepts/opensource.md +++ b/docs/concepts/opensource.md @@ -1,24 +1,24 @@ ---- -sidebar_position: 100 ---- - -# Open Source - -It's important to the `zrok` project that it remain free and open source software. The code is available on [GitHub](https://github.com/openziti/zrok) -for the world to use, inspect, and build upon! - -Check out the repository over on GitHub at [https://github.com/openziti/zrok](https://github.com/openziti/zrok). If you find `zrok` to be useful, and -you want to help spread the word of `zrok` give the project a star. It really does help get the word out about the -project. - -The project also uses a very permissive license: Apache v2. We encourage people to fork the repo and use `zrok` for your own purposes how you see fit or contribute back to the project. - -## Built on OpenZiti - -The power of `zrok` really lies in `private` sharing. It's increasingly clear that security needs to be a first-class -member of any organization. To enable `private` sharing, `zrok` was built on top of another excellent open source project named OpenZiti. - -OpenZiti is a secure overlay network focusing on bringing zero trust to applications. It is the __backbone__ of `zrok`. -In fact, `zrok` proudly proclaims itself as an Ziti _native_ application. - -If you are interested in learning more about OpenZiti head over to [the docs](https://docs.openziti.io/docs/learn/introduction/), try the quickstart, and don't forget to star that project too. We couldn't build `zrok` without OpenZiti! +--- +sidebar_position: 100 +--- + +# Open Source + +It's important to the `zrok` project that it remain free and open source software. The code is available on [GitHub](https://github.com/openziti/zrok) +for the world to use, inspect, and build upon! + +Check out the repository over on GitHub at [https://github.com/openziti/zrok](https://github.com/openziti/zrok). If you find `zrok` to be useful, and +you want to help spread the word of `zrok`, give the project a star. It really does help get the word out about the +project. + +The project also uses a very permissive license: Apache v2. We encourage people to fork the repo and use `zrok` for your own purposes how you see fit or contribute back to the project. + +## Built on OpenZiti + +The power of `zrok` really lies in `private` sharing. It's increasingly clear that security needs to be a first-class +member of any organization. To enable `private` sharing, `zrok` was built on top of another excellent open source project named OpenZiti. + +OpenZiti is a secure overlay network focusing on bringing zero trust to applications. It is the __backbone__ of `zrok`. +In fact, `zrok` proudly proclaims itself as a Ziti _native_ application. + +If you are interested in learning more about OpenZiti head over to [the docs](https://docs.openziti.io/docs/learn/introduction/), try the quickstart, and don't forget to star that project too. We couldn't build `zrok` without OpenZiti! diff --git a/docs/concepts/sharing-private.md b/docs/concepts/sharing-private.md index 642c22ae..5858f463 100644 --- a/docs/concepts/sharing-private.md +++ b/docs/concepts/sharing-private.md @@ -1,29 +1,29 @@ ---- -sidebar_position: 0 ---- -# Private Shares - -`zrok` was built to share and access digital resources. A `private` share allows a resource to be -accessed on another user's system as if it were local to them. 
Privately shared resources can only be accessed by another `zrok` user who has the details of your unique share. You are in control of who can access your `private` shares by sharing the the share token. - -Peer-to-peer private resource sharing is one of the things that makes `zrok` unique. - -`zrok` also provides `public` sharing of resources with non-`zrok` users. Public resource sharing is limited to only resources that can be accessed over `HTTP` or `HTTPS`. `private` sharing works with all of the resources types that `zrok` supports. - -Here's how private sharing works: - -# Peer to Peer - -![zrok_public_share](../images/zrok_private_share.png) - -`private` shares are accessed using the `zrok access` command, and require the accessing user to have a `zrok enable`-d account on the same service instance where the share was created. - -The `private` share is identified by a _share token_. The accessing user will use the share token, along with the `zrok access` command to create a local endpoint on their system, which lets them use the shared resource as if it were local to their system. - -`zrok` does not require you to open any firewall ports or otherwise compromise the security of your local system; there is never an attack surface open to the public internet. As soon as you terminate the `zrok share` process, you immediately terminate any possible access to your shared resource. - -The shared resource can be a development web server to share with friends and colleagues, a webhook from a server running in the cloud which has `zrok` running and has been instructed to `access` the private resource. `zrok` can also share files, websites, and low-level TCP and UDP network connections using the `tunnel` backend. What matters is that the access to the shared resource is not done in a public way, and can only be accessed by other `zrok` users that have access to your share token. - -The peer-to-peer capabilities of `zrok` are an important property of the underlying [OpenZiti](https://docs.openziti.io/docs/learn/introduction/) network that `zrok` uses to provide connectivity between users and resources. - +--- +sidebar_position: 0 +--- +# Private Shares + +`zrok` was built to share and access digital resources. A `private` share allows a resource to be +accessed on another user's system as if it were local to them. Privately shared resources can only be accessed by another `zrok` user who has the details of your unique share. You are in control of who can access your `private` shares by sharing the share token. + +Peer-to-peer private resource sharing is one of the things that makes `zrok` unique. + +`zrok` also provides `public` sharing of resources with non-`zrok` users. Public resource sharing is limited to only resources that can be accessed over `HTTP` or `HTTPS`. `private` sharing works with all of the resource types that `zrok` supports. + +Here's how private sharing works: + +# Peer to Peer + +![zrok_public_share](../images/zrok_private_share.png) + +`private` shares are accessed using the `zrok access` command, and require the accessing user to have a `zrok enable`-d account on the same service instance where the share was created. + +The `private` share is identified by a _share token_. The accessing user will use the share token, along with the `zrok access` command to create a local endpoint on their system, which lets them use the shared resource as if it were local to their system. 
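As a quick illustration of the flow described above, here is a minimal sketch, assuming a `zrok enable`-d environment on both machines and the default proxy backend mode; the target URL and share token are placeholders, and your own values will differ:

```bash
# on the sharing side: expose a local web server as an ephemeral private share
zrok share private --headless http://localhost:8080

# on the accessing side: bind the share to a local endpoint using the
# share token printed by the sharing command (placeholder token shown)
zrok access private wkcfb58vj51l
```
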
+ +`zrok` does not require you to open any firewall ports or otherwise compromise the security of your local system; there is never an attack surface open to the public internet. As soon as you terminate the `zrok share` process, you immediately terminate any possible access to your shared resource. + +The shared resource can be a development web server to share with friends and colleagues, a webhook from a server running in the cloud which has `zrok` running and has been instructed to `access` the private resource. `zrok` can also share files, websites, and low-level TCP and UDP network connections using the `tunnel` backend. What matters is that the access to the shared resource is not done in a public way, and can only be accessed by other `zrok` users that have access to your share token. + +The peer-to-peer capabilities of `zrok` are an important property of the underlying [OpenZiti](https://docs.openziti.io/docs/learn/introduction/) network that `zrok` uses to provide connectivity between users and resources. + Creating `private` shares is easy and is accomplished using the `zrok share private` command. Run `zrok share private` to see the usage output and to further learn how to use the command. \ No newline at end of file diff --git a/docs/concepts/sharing-public.md b/docs/concepts/sharing-public.md index fc403bbc..858b78bf 100644 --- a/docs/concepts/sharing-public.md +++ b/docs/concepts/sharing-public.md @@ -1,16 +1,16 @@ ---- -sidebar_position: 10 ---- -# Public Shares - -`zrok` supports `public` sharing for web-based (HTTP and HTTPS) resources. These resources are easily shared with the general internet through public access points. - -## Peer to Public - -![zrok_public_share](../images/zrok_public_share.png) - -`public` sharing is most useful when the person or service accessing your resources does not have `zrok` running locally and cannot make use of the `private` sharing mode built into `zrok`. Many users share development web servers, webhooks, and other HTTP/HTTPS resources. - -As with `private` sharing, `public` sharing does not require you to open any firewall ports or otherwise compromise the security of your local environments. A `public` share goes away as soon as you terminate the `zrok share` command. - +--- +sidebar_position: 10 +--- +# Public Shares + +`zrok` supports `public` sharing for web-based (HTTP and HTTPS) resources. These resources are easily shared with the general internet through public access points. + +## Peer to Public + +![zrok_public_share](../images/zrok_public_share.png) + +`public` sharing is most useful when the person or service accessing your resources does not have `zrok` running locally and cannot make use of the `private` sharing mode built into `zrok`. Many users share development web servers, webhooks, and other HTTP/HTTPS resources. + +As with `private` sharing, `public` sharing does not require you to open any firewall ports or otherwise compromise the security of your local environments. A `public` share goes away as soon as you terminate the `zrok share` command. + Using `public` shares is easy and is accomplished using the `zrok share public` command. Run `zrok share public` to see the command-line help and to learn how to use `public` shares. 
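A minimal sketch of the `public` counterpart, again assuming a `zrok enable`-d environment; the target URL is a placeholder, and the public URL allocated for the share appears in the command's output:

```bash
# share a local web server with the general internet through a public frontend;
# zrok prints the public URL it allocates for the share
zrok share public http://localhost:8080
```
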
\ No newline at end of file diff --git a/docs/concepts/sharing-reserved.md b/docs/concepts/sharing-reserved.md index 5f0410fb..4fec2360 100644 --- a/docs/concepts/sharing-reserved.md +++ b/docs/concepts/sharing-reserved.md @@ -1,14 +1,14 @@ ---- -sidebar_position: 10 ---- -# Reserved Shares - -By default a `public` or `private` share is assigned a _share token_ when you create a share using the `zrok share` command. The `zrok share` command is the bridge between your local environment and the users you are sharing with. When you terminate the `zrok share`, the bridge is eliminated and the _share token_ is deleted. If you run `zrok share` again, you will be allocated a brand new _share token_. - -You can use a `reserved` share to persist your _share token_ across multiple runs of the `zrok share` bridge. When you use a `reserved` share, the share token will not be deleted between multiple runs of `zrok share`. - -To use a `reserved` share, you will first run the `zrok reserve` command to create the reserved share (see `zrok reserve --help` for details). Once you've created your `reserved` share, you will use the `zrok share reserved` command (see `--help` for details) to run the bridge for the shared resource. - -This pattern works for both `public` and `private` shares, and for all resource types supported by `zrok`. - -To delete your `reserved` share use the `zrok release` command or click the delete button in the share's _Actions_ tab in the web console. +--- +sidebar_position: 10 +--- +# Reserved Shares + +By default a `public` or `private` share is assigned a _share token_ when you create a share using the `zrok share` command. The `zrok share` command is the bridge between your local environment and the users you are sharing with. When you terminate the `zrok share`, the bridge is eliminated and the _share token_ is deleted. If you run `zrok share` again, you will be allocated a brand new _share token_. + +You can use a `reserved` share to persist your _share token_ across multiple runs of the `zrok share` bridge. When you use a `reserved` share, the share token will not be deleted between multiple runs of `zrok share`. + +To use a `reserved` share, you will first run the `zrok reserve` command to create the reserved share (see `zrok reserve --help` for details). Once you've created your `reserved` share, you will use the `zrok share reserved` command (see `--help` for details) to run the bridge for the shared resource. + +This pattern works for both `public` and `private` shares, and for all resource types supported by `zrok`. + +To delete your `reserved` share use the `zrok release` command or click the delete button in the share's _Actions_ tab in the web console. diff --git a/docs/guides/_category_.json b/docs/guides/_category_.json index 48ba0588..06389212 100644 --- a/docs/guides/_category_.json +++ b/docs/guides/_category_.json @@ -1,7 +1,7 @@ -{ - "label": "Guides", - "position": 50, - "link": { - "type": "generated-index" - } -} +{ + "label": "Guides", + "position": 50, + "link": { + "type": "generated-index" + } +} diff --git a/docs/guides/_frontdoor-docker.mdx b/docs/guides/_frontdoor-docker.mdx index 4bb9cb8e..cb7d8a36 100644 --- a/docs/guides/_frontdoor-docker.mdx +++ b/docs/guides/_frontdoor-docker.mdx @@ -19,25 +19,32 @@ When the project runs it will: 1. Download [the reserved public share `compose.yml` project file](pathname:///zrok-public-reserved/compose.yml) into the same directory. 1. 
Copy your zrok account's enable token from the zrok web console to your clipboard and paste it in a file named `.env` in the same folder like this: - ```bash title=".env" - ZROK_ENABLE_TOKEN="8UL9-48rN0ua" - ``` + ```bash title=".env" + ZROK_ENABLE_TOKEN="8UL9-48rN0ua" + ``` +1. Name the Share + + This unique name becomes part of the domain name of the share, e.g. `https://my-prod-app.in.zrok.io`. A random name is generated if you don't specify one. + + ```bash title=".env" + ZROK_UNIQUE_NAME="my-prod-app" + ``` 1. Run the Compose project to start sharing the built-in demo web server. Be sure to `--detach` so the project runs in the background if you want it to auto-restart when your computer reboots. - ```bash - docker compose up --detach - ``` + ```bash + docker compose up --detach + ``` 1. Get the public share URL from the output of the `zrok-share` service or by peeking in the zrok console where the share will appear in the graph. - ```bash - docker compose logs zrok-share - ``` + ```bash + docker compose logs zrok-share + ``` - ```buttonless title="Output" - zrok-public-share-1 | https://w6r1vesearkj.in.zrok.io/ - ``` + ```buttonless title="Output" + zrok-public-share-1 | https://w6r1vesearkj.in.zrok.io/ + ``` This concludes the minimum steps to begin sharing the demo web server. Read on to learn how to pivot to sharing any website or web service by leveraging additional zrok backend modes. @@ -78,50 +85,50 @@ With Caddy, you can balance the workload for websites or web services or share s 1. Create a Caddyfile. This example demonstrates proxying two HTTP servers with a weighted round-robin load balancer. - ```console title="Caddyfile" - http:// { - # zrok requires this bind address template - bind {{ .ZrokBindAddress }} - reverse_proxy /* { - to http://httpbin1:8080 http://httpbin2:8080 - lb_policy weighted_round_robin 3 2 + ```console title="Caddyfile" + http:// { + # zrok requires this bind address template + bind {{ .ZrokBindAddress }} + reverse_proxy /* { + to http://httpbin1:8080 http://httpbin2:8080 + lb_policy weighted_round_robin 3 2 + } } - } - ``` + ``` 1. Create a file `compose.override.yml`. This example adds two `httpbin` containers for load balancing, and mounts the Caddyfile into the container. - ```yaml title="compose.override.yml" - services: - httpbin1: - image: mccutchen/go-httpbin # 8080/tcp - httpbin2: - image: mccutchen/go-httpbin # 8080/tcp - zrok-share: - volumes: - - ./Caddyfile:/mnt/.zrok/Caddyfile - ``` + ```yaml title="compose.override.yml" + services: + httpbin1: + image: mccutchen/go-httpbin # 8080/tcp + httpbin2: + image: mccutchen/go-httpbin # 8080/tcp + zrok-share: + volumes: + - ./Caddyfile:/mnt/.zrok/Caddyfile + ``` 1. Start a new Docker Compose project or delete the existing state volume. - ```bash - docker compose down --volumes - ``` + ```bash + docker compose down --volumes + ``` If you prefer to keep using the same zrok environment with the new share then delete `/mnt/.zrok/reserved.json` instead of the entire volume. 1. Run the project to load the new configuration. - ```bash - docker compose up --detach - ``` + ```bash + docker compose up --detach + ``` 1. Note the new reserved share URL from the log. 
- ```bash - docker compose logs zrok-share - ``` + ```bash + docker compose logs zrok-share + ``` - ```buttonless title="Output" - INFO: zrok public URL: https://88s803f2qvao.in.zrok.io/ - ``` + ```buttonless title="Output" + INFO: zrok public URL: https://88s803f2qvao.in.zrok.io/ + ``` diff --git a/docs/guides/_frontdoor-linux.mdx b/docs/guides/_frontdoor-linux.mdx index 8f9fca76..e6f4ebff 100644 --- a/docs/guides/_frontdoor-linux.mdx +++ b/docs/guides/_frontdoor-linux.mdx @@ -31,7 +31,7 @@ When the service starts it will: 1. If you set up the repository by following the guide, then also install the `zrok-share` package. This package provides the systemd service. ```bash title="Ubuntu, Debian" - sudo sudo apt install zrok-share + sudo apt install zrok-share ``` ```bash title="Fedora, Rocky" @@ -89,6 +89,14 @@ Save the enable token from the zrok console in the configuration file. ZROK_ENABLE_TOKEN="14cbfca9772f" ``` +## Name your Share + +This unique name becomes part of the domain name of the share, e.g. `https://my-prod-app.in.zrok.io`. A random name is generated if you don't specify one. + +```bash title="/opt/openziti/etc/zrok/zrok-share.env" +ZROK_UNIQUE_NAME="my-prod-app" +``` + ## Use Cases You may change the target for the current backend mode, e.g. `proxy`, by editing the configuration file and restarting the service. The reserved subdomain will remain the same. diff --git a/docs/guides/drives/cli.md b/docs/guides/drives/cli.md new file mode 100644 index 00000000..c4e00c6c --- /dev/null +++ b/docs/guides/drives/cli.md @@ -0,0 +1,314 @@ +# The Drives CLI + +The zrok drives CLI tools allow for simple, ergonomic management and synchronization of local and remote files. + +## Sharing a Drive + +Virtual drives are shared with the `zrok share` command by passing the `--backend-mode drive` flag, using either the `public` or `private` sharing modes. We'll use the `private` sharing mode for this example: + +``` +$ mkdir /tmp/junk +$ zrok share private --headless --backend-mode drive /tmp/junk +[ 0.124] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[cf640aac-2706-49ae-9cc9-9a497d67d9c5]} new service session +[ 0.145] INFO main.(*sharePrivateCommand).run: allow other to access your share with the following command: +zrok access private wkcfb58vj51l +``` + +The command shown above creates an ephemeral, `private` drive share pointed at the local `/tmp/junk` folder. + +Notice that the share token allocated by `zrok` is `wkcfb58vj51l`. We'll use that share token to identify our virtual drive in the following operations. + +## Working with a Private Drive Share + +First, let's copy a file into our virtual drive using the `zrok copy` command: + +``` +$ zrok copy LICENSE zrok://wkcfb58vj51l +[ 0.119] INFO zrok/drives/sync.OneWay: => /LICENSE +copy complete! +``` + +We used the URL scheme `zrok://` to refer to the private virtual drive we allocated above using the `zrok share private` command. Use `zrok://` URLs with the drives CLI tools to refer to contents of private virtual drives.
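Because the share above is ephemeral, the `wkcfb58vj51l` token is deleted when that `zrok share` process exits. If you want a share token that survives restarts, the reserved-share workflow applies to drives as well; as a quick preview, these are the same commands shown in the "Unique Names and Reserved Shares" section later in this guide:

```
$ zrok reserve private -b drive --unique-name mydrive /tmp/junk
$ zrok share reserved --headless mydrive
$ zrok ls zrok://mydrive
```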
+ +Next, let's get a directory listing of the virtual drive: + +``` +$ zrok ls zrok://wkcfb58vj51l +┌──────┬─────────┬─────────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼─────────┼─────────┼───────────────────────────────┤ +│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │ +└──────┴─────────┴─────────┴───────────────────────────────┘ +``` + +We can make directories on the virtual drive: + +``` +$ zrok mkdir zrok://wkcfb58vj51l/stuff +$ zrok ls zrok://wkcfb58vj51l +┌──────┬─────────┬─────────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼─────────┼─────────┼───────────────────────────────┤ +│ │ LICENSE │ 11.3 kB │ 2024-01-19 12:16:46 -0500 EST │ +│ DIR │ stuff │ │ │ +└──────┴─────────┴─────────┴───────────────────────────────┘ +``` + +We can copy the contents of a local directory into the new directory on the virtual drive: + +``` +$ ls -l util/ +total 20 +-rw-rw-r-- 1 michael michael 329 Jul 21 13:17 email.go +-rw-rw-r-- 1 michael michael 456 Jul 21 13:17 headers.go +-rw-rw-r-- 1 michael michael 609 Jul 21 13:17 proxy.go +-rw-rw-r-- 1 michael michael 361 Jul 21 13:17 size.go +-rw-rw-r-- 1 michael michael 423 Jan 2 11:57 uniqueName.go +$ zrok copy util/ zrok://wkcfb58vj51l/stuff +[ 0.123] INFO zrok/drives/sync.OneWay: => /email.go +[ 0.194] INFO zrok/drives/sync.OneWay: => /headers.go +[ 0.267] INFO zrok/drives/sync.OneWay: => /proxy.go +[ 0.337] INFO zrok/drives/sync.OneWay: => /size.go +[ 0.408] INFO zrok/drives/sync.OneWay: => /uniqueName.go +copy complete! +$ zrok ls zrok://wkcfb58vj51l/stuff +┌──────┬───────────────┬───────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼───────────────┼───────┼───────────────────────────────┤ +│ │ email.go │ 329 B │ 2024-01-19 12:26:45 -0500 EST │ +│ │ headers.go │ 456 B │ 2024-01-19 12:26:45 -0500 EST │ +│ │ proxy.go │ 609 B │ 2024-01-19 12:26:45 -0500 EST │ +│ │ size.go │ 361 B │ 2024-01-19 12:26:45 -0500 EST │ +│ │ uniqueName.go │ 423 B │ 2024-01-19 12:26:45 -0500 EST │ +└──────┴───────────────┴───────┴───────────────────────────────┘ +``` + +And we can remove files and directories from the virtual drive: + +``` +$ zrok rm zrok://wkcfb58vj51l/LICENSE +$ zrok ls zrok://wkcfb58vj51l +┌──────┬───────┬──────┬──────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼───────┼──────┼──────────┤ +│ DIR │ stuff │ │ │ +└──────┴───────┴──────┴──────────┘ +$ zrok rm zrok://wkcfb58vj51l/stuff +$ zrok ls zrok://wkcfb58vj51l +┌──────┬──────┬──────┬──────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼──────┼──────┼──────────┤ +└──────┴──────┴──────┴──────────┘ +``` + +## Working with Public Shares + +Public shares work very similarly to private shares, they just use a different URL scheme: + +``` +$ zrok share public --headless --backend-mode drive /tmp/junk +[ 0.708] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[05e0f48b-242b-4fd9-8edb-259488535c47]} new service session +[ 0.878] INFO main.(*sharePublicCommand).run: access your zrok share at the following endpoints: + https://6kiww4bn7iok.share.zrok.io +``` + +The same commands, with a different URL scheme work with the `zrok` drives CLI: + +``` +$ zrok copy util/ https://6kiww4bn7iok.share.zrok.io +[ 0.268] INFO zrok/drives/sync.OneWay: => /email.go +[ 0.406] INFO zrok/drives/sync.OneWay: => /headers.go +[ 0.530] INFO zrok/drives/sync.OneWay: => /proxy.go +[ 0.655] INFO zrok/drives/sync.OneWay: => /size.go +[ 0.714] INFO zrok/drives/sync.OneWay: => /uniqueName.go +copy 
complete! +michael@fourtyfour Fri Jan 19 12:42:52 ~/Repos/nf/zrok +$ zrok ls https://6kiww4bn7iok.share.zrok.io +┌──────┬───────────────┬───────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼───────────────┼───────┼───────────────────────────────┤ +│ │ email.go │ 329 B │ 2023-07-21 13:17:56 -0400 EDT │ +│ │ headers.go │ 456 B │ 2023-07-21 13:17:56 -0400 EDT │ +│ │ proxy.go │ 609 B │ 2023-07-21 13:17:56 -0400 EDT │ +│ │ size.go │ 361 B │ 2023-07-21 13:17:56 -0400 EDT │ +│ │ uniqueName.go │ 423 B │ 2024-01-02 11:57:14 -0500 EST │ +└──────┴───────────────┴───────┴───────────────────────────────┘ +``` + +For basic authentication provided by public shares, the `zrok` drives CLI offers the `--basic-auth` flag, which accepts a `<username>:<password>` parameter to specify the authentication for the public virtual drive (if it's required). + +Alternatively, the authentication can be set using the `ZROK_DRIVES_BASIC_AUTH` environment variable: + +``` +$ export ZROK_DRIVES_BASIC_AUTH=username:password +``` + +## One-way Synchronization + +The `zrok copy` command includes a `--sync` flag, which only copies files detected as _modified_. `zrok` considers a file with the same modification timestamp and size to be the same. Of course, this is not a strong guarantee that the files are equivalent. Future `zrok` drives versions will provide a cryptographically strong mechanism (à la `rsync` and friends) to guarantee that files and trees of files are synchronized. + +For now, the `--sync` flag provides a convenience mechanism to allow resuming copies of large file trees and provide a reasonable guarantee that the trees are in sync. + +Let's take a look at `zrok copy --sync` in action: + +``` +$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io +[ 0.636] INFO zrok/drives/sync.OneWay: => /_attic/ +[ 0.760] INFO zrok/drives/sync.OneWay: => /_attic/network/ +[ 0.816] INFO zrok/drives/sync.OneWay: => /_attic/network/_category_.json +[ 0.928] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ +[ 0.987] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.service +[ 1.048] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-ctrl.yml +[ 1.107] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.service +[ 1.167] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/ziti-router0.yml +[ 1.218] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-access-public.service +[ 1.273] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.service +[ 1.328] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok-ctrl.yml +[ 1.382] INFO zrok/drives/sync.OneWay: => /_attic/network/prod/zrok.io-network-skeleton.md +[ 1.447] INFO zrok/drives/sync.OneWay: => /_attic/overview.md +[ 1.572] INFO zrok/drives/sync.OneWay: => /_attic/sharing/ +[ 1.622] INFO zrok/drives/sync.OneWay: => /_attic/sharing/_category_.json +[ 1.673] INFO zrok/drives/sync.OneWay: => /_attic/sharing/reserved_services.md +[ 1.737] INFO zrok/drives/sync.OneWay: => /_attic/sharing/sharing_modes.md +[ 1.793] INFO zrok/drives/sync.OneWay: => /_attic/v0.2_account_requests.md +[ 1.902] INFO zrok/drives/sync.OneWay: => /_attic/v0.4_limits.md +... +[ 9.691] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png +[ 9.812] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png +[ 9.870] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png +copy complete!
+``` + +Because the target drive was empty, `zrok copy --sync` copied the entire contents of the local `docs/` tree into the virtual drive. However, if we run that command again, we get: + +``` +$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io +copy complete! +``` + +The virtual drive contents are already in sync with the local filesystem tree, so there is nothing for it to copy. + +Let's alter the contents of the drive and run the `--sync` again: + +``` +$ zrok rm https://glmv049c62p7.share.zrok.io/images +$ zrok copy --sync docs/ https://glmv049c62p7.share.zrok.io +[ 0.364] INFO zrok/drives/sync.OneWay: => /images/ +[ 0.456] INFO zrok/drives/sync.OneWay: => /images/zrok.png +[ 0.795] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png +[ 0.866] INFO zrok/drives/sync.OneWay: => /images/zrok_deployment.drawio +... +[ 2.254] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_empty_shares.png +[ 2.340] INFO zrok/drives/sync.OneWay: => /images/zrok_web_ui_new_environment.png +[ 2.391] INFO zrok/drives/sync.OneWay: => /images/zrok_zoom_to_fit.png +copy complete! +``` + +Because we removed the `images/` tree from the virtual drive, `zrok copy --sync` detected this and copied the local `images/` tree back onto the virtual drive. + +## Drive-to-Drive Copies and Synchronization + +The `zrok copy` CLI can operate on pairs of virtual drives remotely, without ever having to store files locally. This allows for drive-to-drive copies and synchronization. + +Here are a couple of examples: + +``` +$ zrok copy --sync https://glmv049c62p7.share.zrok.io https://glmv049c62p7.share.zrok.io +copy complete! +``` + +Specifying the same URL for both the source and the target of a `--sync` operation should always result in nothing being copied... they are the same drive with the same state. + +We can copy files between two virtual drives with a single command: + +``` +$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf +[ 1.396] INFO zrok/drives/sync.OneWay: => /_attic/ +[ 2.083] INFO zrok/drives/sync.OneWay: => /_attic/overview.md +[ 2.704] INFO zrok/drives/sync.OneWay: => /_attic/sharing/ +... +[ 118.240] INFO zrok/drives/sync.OneWay: => /images/zrok_web_console_empty.png +[ 118.920] INFO zrok/drives/sync.OneWay: => /images/zrok_enable_modal.png +[ 119.589] INFO zrok/drives/sync.OneWay: => /images/zrok_cover.png +[ 120.214] INFO zrok/drives/sync.OneWay: => /getting-started.mdx +copy complete! +$ zrok copy --sync https://glmv049c62p7.share.zrok.io zrok://hsml272j3xzf +copy complete! +``` + +## Copying from Drives to the Local Filesystem + +In the current version of the drives CLI, `zrok copy` always assumes the destination is a directory. There is currently no way to do: + +``` +$ zrok copy somefile someotherfile +``` + +What you'll end up with on the local filesystem is: + +``` +somefile +someotherfile/somefile +``` + +It's in the backlog to support file destinations in a future release of `zrok`. So, when using `zrok copy`, always take note of the destination.
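To make that behavior concrete, here is a small sketch (hypothetical file names, same semantics as the `somefile` example above) of how destinations are interpreted today:

```
# hypothetical file names; the destination is always treated as a folder for now
$ zrok copy report.txt backups    # arrives as backups/report.txt
$ zrok copy report.txt copy.txt   # arrives as copy.txt/report.txt, not a renamed file
```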
+ +`zrok copy` supports a default destination of `file://.`, so you can do single-parameter `zrok copy` commands like this: + +``` +$ zrok ls https://azc47r3cwjds.share.zrok.io +┌──────┬─────────┬─────────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼─────────┼─────────┼───────────────────────────────┤ +│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │ +└──────┴─────────┴─────────┴───────────────────────────────┘ +$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE +[ 0.260] INFO zrok/drives/sync.OneWay: => /LICENSE +copy complete! +$ ls -l +total 12 +-rw-rw-r-- 1 michael michael 11346 Jan 19 13:29 LICENSE +``` + +You can also specify a local folder as the destination for your copy: + +``` +$ zrok copy https://azc47r3cwjds.share.zrok.io/LICENSE /tmp/inbox +[ 0.221] INFO zrok/drives/sync.OneWay: => /LICENSE +copy complete! +$ l /tmp/inbox +total 12 +-rw-rw-r-- 1 michael michael 11346 Jan 19 13:30 LICENSE +``` + +## Unique Names and Reserved Shares + +Private reserved shares with unique names can be particularly useful with the drives CLI: + +``` +$ zrok reserve private -b drive --unique-name mydrive /tmp/junk +[ 0.315] INFO main.(*reserveCommand).run: your reserved share token is 'mydrive' +$ zrok share reserved --headless mydrive +[ 0.289] INFO main.(*shareReservedCommand).run: sharing target: '/tmp/junk' +[ 0.289] INFO main.(*shareReservedCommand).run: using existing backend proxy endpoint: /tmp/junk +[ 0.767] INFO sdk-golang/ziti.(*listenerManager).createSessionWithBackoff: {session token=[d519a436-9fb5-4207-afd5-7cbc28fb779a]} new service session +[ 0.927] INFO main.(*shareReservedCommand).run: use this command to access your zrok share: 'zrok access private mydrive' +``` + +This makes working with `zrok://` URLs particularly convenient: + +``` +$ zrok ls zrok://mydrive +┌──────┬─────────┬─────────┬───────────────────────────────┐ +│ TYPE │ NAME │ SIZE │ MODIFIED │ +├──────┼─────────┼─────────┼───────────────────────────────┤ +│ │ LICENSE │ 11.3 kB │ 2023-07-21 13:17:56 -0400 EDT │ +└──────┴─────────┴─────────┴───────────────────────────────┘ +``` + +## Future Enhancements + +Coming in a future release of `zrok` drives are features like: + +* two-way synchronization between multiple hosts... allowing for shared "dropbox-like" usage scenarios between multiple environments +* better ergonomics for single-file destinations diff --git a/docs/guides/frontdoor.mdx b/docs/guides/frontdoor.mdx index b6a72338..88ef00b3 100644 --- a/docs/guides/frontdoor.mdx +++ b/docs/guides/frontdoor.mdx @@ -16,7 +16,7 @@ import useBaseUrl from '@docusaurus/useBaseUrl'; - ## Overview +## Overview zrok frontends are the parts of zrok that proxy incoming public web traffic to zrok backend shares via OpenZiti. When you use zrok with a `zrok.io` frontend, you're using **zrok frontdoor**. `zrok.io` is zrok-as-a-service by NetFoundry, the team behind OpenZiti. You need a free account to use **zrok frontdoor**.
diff --git a/docs/guides/install/_category_.json b/docs/guides/install/_category_.json index 5f5e9739..b7acc861 100644 --- a/docs/guides/install/_category_.json +++ b/docs/guides/install/_category_.json @@ -1,8 +1,8 @@ -{ - "label": "Install", - "position": 10, - "link": { - "type": "doc", - "id": "guides/install/index" - } -} +{ + "label": "Install", + "position": 10, + "link": { + "type": "doc", + "id": "guides/install/index" + } +} diff --git a/docs/guides/install/windows.mdx b/docs/guides/install/windows.mdx index b7eda1fe..54a14e34 100644 --- a/docs/guides/install/windows.mdx +++ b/docs/guides/install/windows.mdx @@ -29,7 +29,7 @@ import styles from '@site/src/css/download-card.module.css'; ```text $source = Join-Path -Path $env:TEMP -ChildPath "zrok\zrok.exe" - $destination = Join-Path -Path $env:HOME -ChildPath "bin\zrok.exe" + $destination = Join-Path -Path $env:USERPROFILE -ChildPath "bin\zrok.exe" New-Item -Path $destination -ItemType Directory -ErrorAction SilentlyContinue Copy-Item -Path $source -Destination $destination $env:path += ";"+$destination diff --git a/drives/davClient/client.go b/drives/davClient/client.go new file mode 100644 index 00000000..bf00c2f1 --- /dev/null +++ b/drives/davClient/client.go @@ -0,0 +1,305 @@ +package davClient + +import ( + "context" + "fmt" + "github.com/openziti/zrok/drives/davClient/internal" + "io" + "net/http" + "time" +) + +// HTTPClient performs HTTP requests. It's implemented by *http.Client. +type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type basicAuthHTTPClient struct { + c HTTPClient + username, password string +} + +func (c *basicAuthHTTPClient) Do(req *http.Request) (*http.Response, error) { + req.SetBasicAuth(c.username, c.password) + return c.c.Do(req) +} + +// HTTPClientWithBasicAuth returns an HTTP client that adds basic +// authentication to all outgoing requests. If c is nil, http.DefaultClient is +// used. +func HTTPClientWithBasicAuth(c HTTPClient, username, password string) HTTPClient { + if c == nil { + c = http.DefaultClient + } + return &basicAuthHTTPClient{c, username, password} +} + +// Client provides access to a remote WebDAV filesystem. +type Client struct { + ic *internal.Client +} + +func NewClient(c HTTPClient, endpoint string) (*Client, error) { + ic, err := internal.NewClient(c, endpoint) + if err != nil { + return nil, err + } + return &Client{ic}, nil +} + +func (c *Client) FindCurrentUserPrincipal(ctx context.Context) (string, error) { + propfind := internal.NewPropNamePropFind(internal.CurrentUserPrincipalName) + + // TODO: consider retrying on the root URI "/" if this fails, as suggested + // by the RFC? 
+ resp, err := c.ic.PropFindFlat(ctx, "", propfind) + if err != nil { + return "", err + } + + var prop internal.CurrentUserPrincipal + if err := resp.DecodeProp(&prop); err != nil { + return "", err + } + if prop.Unauthenticated != nil { + return "", fmt.Errorf("webdav: unauthenticated") + } + + return prop.Href.Path, nil +} + +var fileInfoPropFind = internal.NewPropNamePropFind( + internal.ResourceTypeName, + internal.GetContentLengthName, + internal.GetLastModifiedName, + internal.GetContentTypeName, + internal.GetETagName, +) + +func fileInfoFromResponse(resp *internal.Response) (*FileInfo, error) { + path, err := resp.Path() + if err != nil { + return nil, err + } + + fi := &FileInfo{Path: path} + + var resType internal.ResourceType + if err := resp.DecodeProp(&resType); err != nil { + return nil, err + } + + if resType.Is(internal.CollectionName) { + fi.IsDir = true + } else { + var getLen internal.GetContentLength + if err := resp.DecodeProp(&getLen); err != nil { + return nil, err + } + + var getType internal.GetContentType + if err := resp.DecodeProp(&getType); err != nil && !internal.IsNotFound(err) { + return nil, err + } + + var getETag internal.GetETag + if err := resp.DecodeProp(&getETag); err != nil && !internal.IsNotFound(err) { + return nil, err + } + + fi.Size = getLen.Length + fi.MIMEType = getType.Type + fi.ETag = string(getETag.ETag) + } + + var getMod internal.GetLastModified + if err := resp.DecodeProp(&getMod); err != nil && !internal.IsNotFound(err) { + return nil, err + } + fi.ModTime = time.Time(getMod.LastModified) + + return fi, nil +} + +func (c *Client) Stat(ctx context.Context, name string) (*FileInfo, error) { + resp, err := c.ic.PropFindFlat(ctx, name, fileInfoPropFind) + if err != nil { + return nil, err + } + return fileInfoFromResponse(resp) +} + +func (c *Client) Open(ctx context.Context, name string) (io.ReadCloser, error) { + req, err := c.ic.NewRequest(http.MethodGet, name, nil) + if err != nil { + return nil, err + } + + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + return nil, err + } + + return resp.Body, nil +} + +func (c *Client) Readdir(ctx context.Context, name string, recursive bool) ([]FileInfo, error) { + depth := internal.DepthOne + if recursive { + depth = internal.DepthInfinity + } + + ms, err := c.ic.PropFind(ctx, name, depth, fileInfoPropFind) + if err != nil { + return nil, err + } + + l := make([]FileInfo, 0, len(ms.Responses)) + for _, resp := range ms.Responses { + fi, err := fileInfoFromResponse(&resp) + if err != nil { + return l, err + } + l = append(l, *fi) + } + + return l, nil +} + +type fileWriter struct { + pw *io.PipeWriter + done <-chan error +} + +func (fw *fileWriter) Write(b []byte) (int, error) { + return fw.pw.Write(b) +} + +func (fw *fileWriter) Close() error { + if err := fw.pw.Close(); err != nil { + return err + } + return <-fw.done +} + +func (c *Client) Create(ctx context.Context, name string) (io.WriteCloser, error) { + pr, pw := io.Pipe() + + req, err := c.ic.NewRequest(http.MethodPut, name, pr) + if err != nil { + pw.Close() + return nil, err + } + + done := make(chan error, 1) + go func() { + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + done <- err + return + } + resp.Body.Close() + done <- nil + }() + + return &fileWriter{pw, done}, nil +} + +func (c *Client) CreateWithModTime(ctx context.Context, name string, modTime time.Time) (io.WriteCloser, error) { + pr, pw := io.Pipe() + + req, err := c.ic.NewRequest(http.MethodPut, name, pr) + if err != nil { + pw.Close() + 
return nil, err + } + req.Header.Set("Zrok-Modtime", fmt.Sprintf("%d", modTime.Unix())) + + done := make(chan error, 1) + go func() { + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + done <- err + return + } + resp.Body.Close() + done <- nil + }() + + return &fileWriter{pw, done}, nil +} + +func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) error { + status, err := c.ic.Touch(ctx, path, mtime) + if err != nil { + return err + } + for _, resp := range status.Responses { + if resp.Err() != nil { + return resp.Err() + } + } + return nil +} + +func (c *Client) RemoveAll(ctx context.Context, name string) error { + req, err := c.ic.NewRequest(http.MethodDelete, name, nil) + if err != nil { + return err + } + + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +func (c *Client) Mkdir(ctx context.Context, name string) error { + req, err := c.ic.NewRequest("MKCOL", name, nil) + if err != nil { + return err + } + + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +func (c *Client) CopyAll(ctx context.Context, name, dest string, overwrite bool) error { + req, err := c.ic.NewRequest("COPY", name, nil) + if err != nil { + return err + } + + req.Header.Set("Destination", c.ic.ResolveHref(dest).String()) + req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite)) + + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} + +func (c *Client) MoveAll(ctx context.Context, name, dest string, overwrite bool) error { + req, err := c.ic.NewRequest("MOVE", name, nil) + if err != nil { + return err + } + + req.Header.Set("Destination", c.ic.ResolveHref(dest).String()) + req.Header.Set("Overwrite", internal.FormatOverwrite(overwrite)) + + resp, err := c.ic.Do(req.WithContext(ctx)) + if err != nil { + return err + } + resp.Body.Close() + return nil +} diff --git a/drives/davClient/internal/client.go b/drives/davClient/internal/client.go new file mode 100644 index 00000000..78a2b3aa --- /dev/null +++ b/drives/davClient/internal/client.go @@ -0,0 +1,286 @@ +package internal + +import ( + "bytes" + "context" + "encoding/xml" + "fmt" + "io" + "mime" + "net" + "net/http" + "net/url" + "path" + "strings" + "time" + "unicode" +) + +// DiscoverContextURL performs a DNS-based CardDAV/CalDAV service discovery as +// described in RFC 6352 section 11. It returns the URL to the CardDAV server. +func DiscoverContextURL(ctx context.Context, service, domain string) (string, error) { + var resolver net.Resolver + + // Only lookup TLS records, plaintext connections are insecure + _, addrs, err := resolver.LookupSRV(ctx, service+"s", "tcp", domain) + if dnsErr, ok := err.(*net.DNSError); ok { + if dnsErr.IsTemporary { + return "", err + } + } else if err != nil { + return "", err + } + + if len(addrs) == 0 { + return "", fmt.Errorf("webdav: domain doesn't have an SRV record") + } + addr := addrs[0] + + target := strings.TrimSuffix(addr.Target, ".") + if target == "" { + return "", fmt.Errorf("webdav: empty target in SRV record") + } + + // TODO: perform a TXT lookup, check for a "path" key in the response + u := url.URL{Scheme: "https"} + if addr.Port == 443 { + u.Host = target + } else { + u.Host = fmt.Sprintf("%v:%v", target, addr.Port) + } + u.Path = "/.well-known/" + service + return u.String(), nil +} + +// HTTPClient performs HTTP requests. It's implemented by *http.Client. 
+type HTTPClient interface { + Do(req *http.Request) (*http.Response, error) +} + +type Client struct { + http HTTPClient + endpoint *url.URL +} + +func NewClient(c HTTPClient, endpoint string) (*Client, error) { + if c == nil { + c = http.DefaultClient + } + + u, err := url.Parse(endpoint) + if err != nil { + return nil, err + } + if u.Path == "" { + // This is important to avoid issues with path.Join + u.Path = "/" + } + return &Client{http: c, endpoint: u}, nil +} + +func (c *Client) ResolveHref(p string) *url.URL { + if !strings.HasPrefix(p, "/") { + p = path.Join(c.endpoint.Path, p) + } + return &url.URL{ + Scheme: c.endpoint.Scheme, + User: c.endpoint.User, + Host: c.endpoint.Host, + Path: p, + } +} + +func (c *Client) NewRequest(method string, path string, body io.Reader) (*http.Request, error) { + return http.NewRequest(method, c.ResolveHref(path).String(), body) +} + +func (c *Client) NewXMLRequest(method string, path string, v interface{}) (*http.Request, error) { + var buf bytes.Buffer + buf.WriteString(xml.Header) + if err := xml.NewEncoder(&buf).Encode(v); err != nil { + return nil, err + } + + req, err := c.NewRequest(method, path, &buf) + if err != nil { + return nil, err + } + + req.Header.Add("Content-Type", "text/xml; charset=\"utf-8\"") + + return req, nil +} + +func (c *Client) Do(req *http.Request) (*http.Response, error) { + resp, err := c.http.Do(req) + if err != nil { + return nil, err + } + if resp.StatusCode/100 != 2 { + defer resp.Body.Close() + + contentType := resp.Header.Get("Content-Type") + if contentType == "" { + contentType = "text/plain" + } + + var wrappedErr error + t, _, _ := mime.ParseMediaType(contentType) + if t == "application/xml" || t == "text/xml" { + var davErr Error + if err := xml.NewDecoder(resp.Body).Decode(&davErr); err != nil { + wrappedErr = err + } else { + wrappedErr = &davErr + } + } else if strings.HasPrefix(t, "text/") { + lr := io.LimitedReader{R: resp.Body, N: 1024} + var buf bytes.Buffer + io.Copy(&buf, &lr) + resp.Body.Close() + if s := strings.TrimSpace(buf.String()); s != "" { + if lr.N == 0 { + s += " […]" + } + wrappedErr = fmt.Errorf("%v", s) + } + } + return nil, &HTTPError{Code: resp.StatusCode, Err: wrappedErr} + } + return resp, nil +} + +func (c *Client) DoMultiStatus(req *http.Request) (*MultiStatus, error) { + resp, err := c.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusMultiStatus { + return nil, fmt.Errorf("HTTP multi-status request failed: %v", resp.Status) + } + + // TODO: the response can be quite large, support streaming Response elements + var ms MultiStatus + if err := xml.NewDecoder(resp.Body).Decode(&ms); err != nil { + return nil, err + } + + return &ms, nil +} + +func (c *Client) PropFind(ctx context.Context, path string, depth Depth, propfind *PropFind) (*MultiStatus, error) { + req, err := c.NewXMLRequest("PROPFIND", path, propfind) + if err != nil { + return nil, err + } + + req.Header.Add("Depth", depth.String()) + + return c.DoMultiStatus(req.WithContext(ctx)) +} + +func (c *Client) Touch(ctx context.Context, path string, mtime time.Time) (*MultiStatus, error) { + tstr := fmt.Sprintf("%d", mtime.Unix()) + var v []RawXMLValue + for _, c := range tstr { + v = append(v, RawXMLValue{tok: xml.CharData{byte(c)}}) + } + pup := &PropertyUpdate{ + Set: []Set{ + { + Prop: Prop{ + Raw: []RawXMLValue{ + *NewRawXMLElement(xml.Name{Space: "zrok:", Local: "lastmodified"}, nil, v), + }, + }, + }, + }, + } + status, err := c.PropUpdate(ctx, path, pup) 
+ return status, err +} + +func (c *Client) PropUpdate(ctx context.Context, path string, propupd *PropertyUpdate) (*MultiStatus, error) { + req, err := c.NewXMLRequest("PROPPATCH", path, propupd) + if err != nil { + return nil, err + } + return c.DoMultiStatus(req.WithContext(ctx)) +} + +// PropfindFlat performs a PROPFIND request with a zero depth. +func (c *Client) PropFindFlat(ctx context.Context, path string, propfind *PropFind) (*Response, error) { + ms, err := c.PropFind(ctx, path, DepthZero, propfind) + if err != nil { + return nil, err + } + + // If the client followed a redirect, the Href might be different from the request path + if len(ms.Responses) != 1 { + return nil, fmt.Errorf("PROPFIND with Depth: 0 returned %d responses", len(ms.Responses)) + } + return &ms.Responses[0], nil +} + +func parseCommaSeparatedSet(values []string, upper bool) map[string]bool { + m := make(map[string]bool) + for _, v := range values { + fields := strings.FieldsFunc(v, func(r rune) bool { + return unicode.IsSpace(r) || r == ',' + }) + for _, f := range fields { + if upper { + f = strings.ToUpper(f) + } else { + f = strings.ToLower(f) + } + m[f] = true + } + } + return m +} + +func (c *Client) Options(ctx context.Context, path string) (classes map[string]bool, methods map[string]bool, err error) { + req, err := c.NewRequest(http.MethodOptions, path, nil) + if err != nil { + return nil, nil, err + } + + resp, err := c.Do(req.WithContext(ctx)) + if err != nil { + return nil, nil, err + } + resp.Body.Close() + + classes = parseCommaSeparatedSet(resp.Header["Dav"], false) + if !classes["1"] { + return nil, nil, fmt.Errorf("webdav: server doesn't support DAV class 1") + } + + methods = parseCommaSeparatedSet(resp.Header["Allow"], true) + return classes, methods, nil +} + +// SyncCollection perform a `sync-collection` REPORT operation on a resource +func (c *Client) SyncCollection(ctx context.Context, path, syncToken string, level Depth, limit *Limit, prop *Prop) (*MultiStatus, error) { + q := SyncCollectionQuery{ + SyncToken: syncToken, + SyncLevel: level.String(), + Limit: limit, + Prop: prop, + } + + req, err := c.NewXMLRequest("REPORT", path, &q) + if err != nil { + return nil, err + } + + ms, err := c.DoMultiStatus(req.WithContext(ctx)) + if err != nil { + return nil, err + } + + return ms, nil +} diff --git a/drives/davClient/internal/elements.go b/drives/davClient/internal/elements.go new file mode 100644 index 00000000..db7d9603 --- /dev/null +++ b/drives/davClient/internal/elements.go @@ -0,0 +1,452 @@ +package internal + +import ( + "encoding/xml" + "errors" + "fmt" + "net/http" + "net/url" + "strconv" + "strings" + "time" +) + +const Namespace = "DAV:" + +var ( + ResourceTypeName = xml.Name{Namespace, "resourcetype"} + DisplayNameName = xml.Name{Namespace, "displayname"} + GetContentLengthName = xml.Name{Namespace, "getcontentlength"} + GetContentTypeName = xml.Name{Namespace, "getcontenttype"} + GetLastModifiedName = xml.Name{Namespace, "getlastmodified"} + GetETagName = xml.Name{Namespace, "getetag"} + + CurrentUserPrincipalName = xml.Name{Namespace, "current-user-principal"} +) + +type Status struct { + Code int + Text string +} + +func (s *Status) MarshalText() ([]byte, error) { + text := s.Text + if text == "" { + text = http.StatusText(s.Code) + } + return []byte(fmt.Sprintf("HTTP/1.1 %v %v", s.Code, text)), nil +} + +func (s *Status) UnmarshalText(b []byte) error { + if len(b) == 0 { + return nil + } + + parts := strings.SplitN(string(b), " ", 3) + if len(parts) != 3 { + return 
fmt.Errorf("webdav: invalid HTTP status %q: expected 3 fields", s) + } + code, err := strconv.Atoi(parts[1]) + if err != nil { + return fmt.Errorf("webdav: invalid HTTP status %q: failed to parse code: %v", s, err) + } + + s.Code = code + s.Text = parts[2] + return nil +} + +func (s *Status) Err() error { + if s == nil { + return nil + } + + // TODO: handle 2xx, 3xx + if s.Code != http.StatusOK { + return &HTTPError{Code: s.Code} + } + return nil +} + +type Href url.URL + +func (h *Href) String() string { + u := (*url.URL)(h) + return u.String() +} + +func (h *Href) MarshalText() ([]byte, error) { + return []byte(h.String()), nil +} + +func (h *Href) UnmarshalText(b []byte) error { + u, err := url.Parse(string(b)) + if err != nil { + return err + } + *h = Href(*u) + return nil +} + +// https://tools.ietf.org/html/rfc4918#section-14.16 +type MultiStatus struct { + XMLName xml.Name `xml:"DAV: multistatus"` + Responses []Response `xml:"response"` + ResponseDescription string `xml:"responsedescription,omitempty"` + SyncToken string `xml:"sync-token,omitempty"` +} + +func NewMultiStatus(resps ...Response) *MultiStatus { + return &MultiStatus{Responses: resps} +} + +// https://tools.ietf.org/html/rfc4918#section-14.24 +type Response struct { + XMLName xml.Name `xml:"DAV: response"` + Hrefs []Href `xml:"href"` + PropStats []PropStat `xml:"propstat,omitempty"` + ResponseDescription string `xml:"responsedescription,omitempty"` + Status *Status `xml:"status,omitempty"` + Error *Error `xml:"error,omitempty"` + Location *Location `xml:"location,omitempty"` +} + +func NewOKResponse(path string) *Response { + href := Href{Path: path} + return &Response{ + Hrefs: []Href{href}, + Status: &Status{Code: http.StatusOK}, + } +} + +func NewErrorResponse(path string, err error) *Response { + code := http.StatusInternalServerError + var httpErr *HTTPError + if errors.As(err, &httpErr) { + code = httpErr.Code + } + + var errElt *Error + errors.As(err, &errElt) + + href := Href{Path: path} + return &Response{ + Hrefs: []Href{href}, + Status: &Status{Code: code}, + ResponseDescription: err.Error(), + Error: errElt, + } +} + +func (resp *Response) Err() error { + if resp.Status == nil || resp.Status.Code/100 == 2 { + return nil + } + + var err error + if resp.Error != nil { + err = resp.Error + } + if resp.ResponseDescription != "" { + if err != nil { + err = fmt.Errorf("%v (%w)", resp.ResponseDescription, err) + } else { + err = fmt.Errorf("%v", resp.ResponseDescription) + } + } + + return &HTTPError{ + Code: resp.Status.Code, + Err: err, + } +} + +func (resp *Response) Path() (string, error) { + err := resp.Err() + var path string + if len(resp.Hrefs) == 1 { + path = resp.Hrefs[0].Path + } else if err == nil { + err = fmt.Errorf("webdav: malformed response: expected exactly one href element, got %v", len(resp.Hrefs)) + } + return path, err +} + +func (resp *Response) DecodeProp(values ...interface{}) error { + for _, v := range values { + // TODO wrap errors with more context (XML name) + name, err := valueXMLName(v) + if err != nil { + return err + } + if err := resp.Err(); err != nil { + return newPropError(name, err) + } + for _, propstat := range resp.PropStats { + raw := propstat.Prop.Get(name) + if raw == nil { + continue + } + if err := propstat.Status.Err(); err != nil { + return newPropError(name, err) + } + if err := raw.Decode(v); err != nil { + return newPropError(name, err) + } + return nil + } + return newPropError(name, &HTTPError{ + Code: http.StatusNotFound, + Err: fmt.Errorf("missing property"), 
+ }) + } + + return nil +} + +func newPropError(name xml.Name, err error) error { + return fmt.Errorf("property <%v %v>: %w", name.Space, name.Local, err) +} + +func (resp *Response) EncodeProp(code int, v interface{}) error { + raw, err := EncodeRawXMLElement(v) + if err != nil { + return err + } + + for i := range resp.PropStats { + propstat := &resp.PropStats[i] + if propstat.Status.Code == code { + propstat.Prop.Raw = append(propstat.Prop.Raw, *raw) + return nil + } + } + + resp.PropStats = append(resp.PropStats, PropStat{ + Status: Status{Code: code}, + Prop: Prop{Raw: []RawXMLValue{*raw}}, + }) + return nil +} + +// https://tools.ietf.org/html/rfc4918#section-14.9 +type Location struct { + XMLName xml.Name `xml:"DAV: location"` + Href Href `xml:"href"` +} + +// https://tools.ietf.org/html/rfc4918#section-14.22 +type PropStat struct { + XMLName xml.Name `xml:"DAV: propstat"` + Prop Prop `xml:"prop"` + Status Status `xml:"status"` + ResponseDescription string `xml:"responsedescription,omitempty"` + Error *Error `xml:"error,omitempty"` +} + +// https://tools.ietf.org/html/rfc4918#section-14.18 +type Prop struct { + XMLName xml.Name `xml:"DAV: prop"` + Raw []RawXMLValue `xml:",any"` +} + +func EncodeProp(values ...interface{}) (*Prop, error) { + l := make([]RawXMLValue, len(values)) + for i, v := range values { + raw, err := EncodeRawXMLElement(v) + if err != nil { + return nil, err + } + l[i] = *raw + } + return &Prop{Raw: l}, nil +} + +func (p *Prop) Get(name xml.Name) *RawXMLValue { + for i := range p.Raw { + raw := &p.Raw[i] + if n, ok := raw.XMLName(); ok && name == n { + return raw + } + } + return nil +} + +func (p *Prop) Decode(v interface{}) error { + name, err := valueXMLName(v) + if err != nil { + return err + } + + raw := p.Get(name) + if raw == nil { + return HTTPErrorf(http.StatusNotFound, "missing property %s", name) + } + + return raw.Decode(v) +} + +// https://tools.ietf.org/html/rfc4918#section-14.20 +type PropFind struct { + XMLName xml.Name `xml:"DAV: propfind"` + Prop *Prop `xml:"prop,omitempty"` + AllProp *struct{} `xml:"allprop,omitempty"` + Include *Include `xml:"include,omitempty"` + PropName *struct{} `xml:"propname,omitempty"` +} + +func xmlNamesToRaw(names []xml.Name) []RawXMLValue { + l := make([]RawXMLValue, len(names)) + for i, name := range names { + l[i] = *NewRawXMLElement(name, nil, nil) + } + return l +} + +func NewPropNamePropFind(names ...xml.Name) *PropFind { + return &PropFind{Prop: &Prop{Raw: xmlNamesToRaw(names)}} +} + +// https://tools.ietf.org/html/rfc4918#section-14.8 +type Include struct { + XMLName xml.Name `xml:"DAV: include"` + Raw []RawXMLValue `xml:",any"` +} + +// https://tools.ietf.org/html/rfc4918#section-15.9 +type ResourceType struct { + XMLName xml.Name `xml:"DAV: resourcetype"` + Raw []RawXMLValue `xml:",any"` +} + +func NewResourceType(names ...xml.Name) *ResourceType { + return &ResourceType{Raw: xmlNamesToRaw(names)} +} + +func (t *ResourceType) Is(name xml.Name) bool { + for _, raw := range t.Raw { + if n, ok := raw.XMLName(); ok && name == n { + return true + } + } + return false +} + +var CollectionName = xml.Name{Namespace, "collection"} + +// https://tools.ietf.org/html/rfc4918#section-15.4 +type GetContentLength struct { + XMLName xml.Name `xml:"DAV: getcontentlength"` + Length int64 `xml:",chardata"` +} + +// https://tools.ietf.org/html/rfc4918#section-15.5 +type GetContentType struct { + XMLName xml.Name `xml:"DAV: getcontenttype"` + Type string `xml:",chardata"` +} + +type Time time.Time + +func (t *Time) 
UnmarshalText(b []byte) error { + tt, err := http.ParseTime(string(b)) + if err != nil { + return err + } + *t = Time(tt) + return nil +} + +func (t *Time) MarshalText() ([]byte, error) { + s := time.Time(*t).UTC().Format(http.TimeFormat) + return []byte(s), nil +} + +// https://tools.ietf.org/html/rfc4918#section-15.7 +type GetLastModified struct { + XMLName xml.Name `xml:"DAV: getlastmodified"` + LastModified Time `xml:",chardata"` +} + +// https://tools.ietf.org/html/rfc4918#section-15.6 +type GetETag struct { + XMLName xml.Name `xml:"DAV: getetag"` + ETag ETag `xml:",chardata"` +} + +type ETag string + +func (etag *ETag) UnmarshalText(b []byte) error { + s, err := strconv.Unquote(string(b)) + if err != nil { + return fmt.Errorf("webdav: failed to unquote ETag: %v", err) + } + *etag = ETag(s) + return nil +} + +func (etag ETag) MarshalText() ([]byte, error) { + return []byte(etag.String()), nil +} + +func (etag ETag) String() string { + return fmt.Sprintf("%q", string(etag)) +} + +// https://tools.ietf.org/html/rfc4918#section-14.5 +type Error struct { + XMLName xml.Name `xml:"DAV: error"` + Raw []RawXMLValue `xml:",any"` +} + +func (err *Error) Error() string { + b, _ := xml.Marshal(err) + return string(b) +} + +// https://tools.ietf.org/html/rfc4918#section-15.2 +type DisplayName struct { + XMLName xml.Name `xml:"DAV: displayname"` + Name string `xml:",chardata"` +} + +// https://tools.ietf.org/html/rfc5397#section-3 +type CurrentUserPrincipal struct { + XMLName xml.Name `xml:"DAV: current-user-principal"` + Href Href `xml:"href,omitempty"` + Unauthenticated *struct{} `xml:"unauthenticated,omitempty"` +} + +// https://tools.ietf.org/html/rfc4918#section-14.19 +type PropertyUpdate struct { + XMLName xml.Name `xml:"DAV: propertyupdate"` + Remove []Remove `xml:"remove"` + Set []Set `xml:"set"` +} + +// https://tools.ietf.org/html/rfc4918#section-14.23 +type Remove struct { + XMLName xml.Name `xml:"DAV: remove"` + Prop Prop `xml:"prop"` +} + +// https://tools.ietf.org/html/rfc4918#section-14.26 +type Set struct { + XMLName xml.Name `xml:"DAV: set"` + Prop Prop `xml:"prop"` +} + +// https://tools.ietf.org/html/rfc6578#section-6.1 +type SyncCollectionQuery struct { + XMLName xml.Name `xml:"DAV: sync-collection"` + SyncToken string `xml:"sync-token"` + Limit *Limit `xml:"limit,omitempty"` + SyncLevel string `xml:"sync-level"` + Prop *Prop `xml:"prop"` +} + +// https://tools.ietf.org/html/rfc5323#section-5.17 +type Limit struct { + XMLName xml.Name `xml:"DAV: limit"` + NResults uint `xml:"nresults"` +} diff --git a/drives/davClient/internal/internal.go b/drives/davClient/internal/internal.go new file mode 100644 index 00000000..1f3e0e52 --- /dev/null +++ b/drives/davClient/internal/internal.go @@ -0,0 +1,108 @@ +package internal // Package internal provides low-level helpers for WebDAV clients and servers. +import ( + "errors" + "fmt" + "net/http" +) + +// Depth indicates whether a request applies to the resource's members. It's +// defined in RFC 4918 section 10.2. +type Depth int + +const ( + // DepthZero indicates that the request applies only to the resource. + DepthZero Depth = 0 + // DepthOne indicates that the request applies to the resource and its + // internal members only. + DepthOne Depth = 1 + // DepthInfinity indicates that the request applies to the resource and all + // of its members. + DepthInfinity Depth = -1 +) + +// ParseDepth parses a Depth header. 
+func ParseDepth(s string) (Depth, error) { + switch s { + case "0": + return DepthZero, nil + case "1": + return DepthOne, nil + case "infinity": + return DepthInfinity, nil + } + return 0, fmt.Errorf("webdav: invalid Depth value") +} + +// String formats the depth. +func (d Depth) String() string { + switch d { + case DepthZero: + return "0" + case DepthOne: + return "1" + case DepthInfinity: + return "infinity" + } + panic("webdav: invalid Depth value") +} + +// ParseOverwrite parses an Overwrite header. +func ParseOverwrite(s string) (bool, error) { + switch s { + case "T": + return true, nil + case "F": + return false, nil + } + return false, fmt.Errorf("webdav: invalid Overwrite value") +} + +// FormatOverwrite formats an Overwrite header. +func FormatOverwrite(overwrite bool) string { + if overwrite { + return "T" + } else { + return "F" + } +} + +type HTTPError struct { + Code int + Err error +} + +func HTTPErrorFromError(err error) *HTTPError { + if err == nil { + return nil + } + if httpErr, ok := err.(*HTTPError); ok { + return httpErr + } else { + return &HTTPError{http.StatusInternalServerError, err} + } +} + +func IsNotFound(err error) bool { + var httpErr *HTTPError + if errors.As(err, &httpErr) { + return httpErr.Code == http.StatusNotFound + } + return false +} + +func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError { + return &HTTPError{code, fmt.Errorf(format, a...)} +} + +func (err *HTTPError) Error() string { + s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code)) + if err.Err != nil { + return fmt.Sprintf("%v: %v", s, err.Err) + } else { + return s + } +} + +func (err *HTTPError) Unwrap() error { + return err.Err +} diff --git a/drives/davClient/internal/xml.go b/drives/davClient/internal/xml.go new file mode 100644 index 00000000..2f4c61ca --- /dev/null +++ b/drives/davClient/internal/xml.go @@ -0,0 +1,175 @@ +package internal + +import ( + "encoding/xml" + "fmt" + "io" + "reflect" + "strings" +) + +// RawXMLValue is a raw XML value. It implements xml.Unmarshaler and +// xml.Marshaler and can be used to delay XML decoding or precompute an XML +// encoding. +type RawXMLValue struct { + tok xml.Token // guaranteed not to be xml.EndElement + children []RawXMLValue + + // Unfortunately encoding/xml doesn't offer TokenWriter, so we need to + // cache outgoing data. + out interface{} +} + +// NewRawXMLElement creates a new RawXMLValue for an element. +func NewRawXMLElement(name xml.Name, attr []xml.Attr, children []RawXMLValue) *RawXMLValue { + return &RawXMLValue{tok: xml.StartElement{name, attr}, children: children} +} + +// EncodeRawXMLElement encodes a value into a new RawXMLValue. The XML value +// can only be used for marshalling. +func EncodeRawXMLElement(v interface{}) (*RawXMLValue, error) { + return &RawXMLValue{out: v}, nil +} + +// UnmarshalXML implements xml.Unmarshaler. +func (val *RawXMLValue) UnmarshalXML(d *xml.Decoder, start xml.StartElement) error { + val.tok = start + val.children = nil + val.out = nil + + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok := tok.(type) { + case xml.StartElement: + child := RawXMLValue{} + if err := child.UnmarshalXML(d, tok); err != nil { + return err + } + val.children = append(val.children, child) + case xml.EndElement: + return nil + default: + val.children = append(val.children, RawXMLValue{tok: xml.CopyToken(tok)}) + } + } +} + +// MarshalXML implements xml.Marshaler. 
+func (val *RawXMLValue) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + if val.out != nil { + return e.Encode(val.out) + } + + switch tok := val.tok.(type) { + case xml.StartElement: + if err := e.EncodeToken(tok); err != nil { + return err + } + for _, child := range val.children { + // TODO: find a sensible value for the start argument? + if err := child.MarshalXML(e, xml.StartElement{}); err != nil { + return err + } + } + return e.EncodeToken(tok.End()) + case xml.EndElement: + panic("unexpected end element") + default: + return e.EncodeToken(tok) + } +} + +var _ xml.Marshaler = (*RawXMLValue)(nil) +var _ xml.Unmarshaler = (*RawXMLValue)(nil) + +func (val *RawXMLValue) Decode(v interface{}) error { + return xml.NewTokenDecoder(val.TokenReader()).Decode(&v) +} + +func (val *RawXMLValue) XMLName() (name xml.Name, ok bool) { + if start, ok := val.tok.(xml.StartElement); ok { + return start.Name, true + } + return xml.Name{}, false +} + +// TokenReader returns a stream of tokens for the XML value. +func (val *RawXMLValue) TokenReader() xml.TokenReader { + if val.out != nil { + panic("webdav: called RawXMLValue.TokenReader on a marshal-only XML value") + } + return &rawXMLValueReader{val: val} +} + +type rawXMLValueReader struct { + val *RawXMLValue + start, end bool + child int + childReader xml.TokenReader +} + +func (tr *rawXMLValueReader) Token() (xml.Token, error) { + if tr.end { + return nil, io.EOF + } + + start, ok := tr.val.tok.(xml.StartElement) + if !ok { + tr.end = true + return tr.val.tok, nil + } + + if !tr.start { + tr.start = true + return start, nil + } + + for tr.child < len(tr.val.children) { + if tr.childReader == nil { + tr.childReader = tr.val.children[tr.child].TokenReader() + } + + tok, err := tr.childReader.Token() + if err == io.EOF { + tr.childReader = nil + tr.child++ + } else { + return tok, err + } + } + + tr.end = true + return start.End(), nil +} + +var _ xml.TokenReader = (*rawXMLValueReader)(nil) + +func valueXMLName(v interface{}) (xml.Name, error) { + t := reflect.TypeOf(v) + for t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() != reflect.Struct { + return xml.Name{}, fmt.Errorf("webdav: %T is not a struct", v) + } + nameField, ok := t.FieldByName("XMLName") + if !ok { + return xml.Name{}, fmt.Errorf("webdav: %T is missing an XMLName struct field", v) + } + if nameField.Type != reflect.TypeOf(xml.Name{}) { + return xml.Name{}, fmt.Errorf("webdav: %T.XMLName isn't an xml.Name", v) + } + tag := nameField.Tag.Get("xml") + if tag == "" { + return xml.Name{}, fmt.Errorf(`webdav: %T.XMLName is missing an "xml" tag`, v) + } + name := strings.Split(tag, ",")[0] + nameParts := strings.Split(name, " ") + if len(nameParts) != 2 { + return xml.Name{}, fmt.Errorf("webdav: expected a namespace and local name in %T.XMLName's xml tag", v) + } + return xml.Name{nameParts[0], nameParts[1]}, nil +} diff --git a/drives/davClient/model.go b/drives/davClient/model.go new file mode 100644 index 00000000..d66627f0 --- /dev/null +++ b/drives/davClient/model.go @@ -0,0 +1,119 @@ +package davClient + +import ( + "errors" + "fmt" + "net/http" + "time" +) + +// Depth indicates whether a request applies to the resource's members. It's +// defined in RFC 4918 section 10.2. +type Depth int + +const ( + // DepthZero indicates that the request applies only to the resource. + DepthZero Depth = 0 + // DepthOne indicates that the request applies to the resource and its + // internal members only. 
+ DepthOne Depth = 1 + // DepthInfinity indicates that the request applies to the resource and all + // of its members. + DepthInfinity Depth = -1 +) + +// ParseDepth parses a Depth header. +func ParseDepth(s string) (Depth, error) { + switch s { + case "0": + return DepthZero, nil + case "1": + return DepthOne, nil + case "infinity": + return DepthInfinity, nil + } + return 0, fmt.Errorf("webdav: invalid Depth value") +} + +// String formats the depth. +func (d Depth) String() string { + switch d { + case DepthZero: + return "0" + case DepthOne: + return "1" + case DepthInfinity: + return "infinity" + } + panic("webdav: invalid Depth value") +} + +// ParseOverwrite parses an Overwrite header. +func ParseOverwrite(s string) (bool, error) { + switch s { + case "T": + return true, nil + case "F": + return false, nil + } + return false, fmt.Errorf("webdav: invalid Overwrite value") +} + +// FormatOverwrite formats an Overwrite header. +func FormatOverwrite(overwrite bool) string { + if overwrite { + return "T" + } else { + return "F" + } +} + +type HTTPError struct { + Code int + Err error +} + +func HTTPErrorFromError(err error) *HTTPError { + if err == nil { + return nil + } + if httpErr, ok := err.(*HTTPError); ok { + return httpErr + } else { + return &HTTPError{http.StatusInternalServerError, err} + } +} + +func IsNotFound(err error) bool { + var httpErr *HTTPError + if errors.As(err, &httpErr) { + return httpErr.Code == http.StatusNotFound + } + return false +} + +func HTTPErrorf(code int, format string, a ...interface{}) *HTTPError { + return &HTTPError{code, fmt.Errorf(format, a...)} +} + +func (err *HTTPError) Error() string { + s := fmt.Sprintf("%v %v", err.Code, http.StatusText(err.Code)) + if err.Err != nil { + return fmt.Sprintf("%v: %v", s, err.Err) + } else { + return s + } +} + +func (err *HTTPError) Unwrap() error { + return err.Err +} + +type FileInfo struct { + Path string + Size int64 + ModTime time.Time + IsDir bool + MIMEType string + ETag string +} diff --git a/drives/davServer/file.go b/drives/davServer/file.go new file mode 100644 index 00000000..762e605f --- /dev/null +++ b/drives/davServer/file.go @@ -0,0 +1,855 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "context" + "encoding/xml" + "io" + "io/fs" + "net/http" + "os" + "path" + "path/filepath" + "runtime" + "strconv" + "strings" + "sync" + "time" +) + +// slashClean is equivalent to but slightly more efficient than +// path.Clean("/" + name). +func slashClean(name string) string { + if name == "" || name[0] != '/' { + name = "/" + name + } + return path.Clean(name) +} + +// A FileSystem implements access to a collection of named files. The elements +// in a file path are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +// +// Each method has the same semantics as the os package's function of the same +// name. +// +// Note that the os.Rename documentation says that "OS-specific restrictions +// might apply". In particular, whether or not renaming a file or directory +// overwriting another existing file or directory is an error is OS-dependent. 
+type FileSystem interface { + Mkdir(ctx context.Context, name string, perm os.FileMode) error + OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) + RemoveAll(ctx context.Context, name string) error + Rename(ctx context.Context, oldName, newName string) error + Stat(ctx context.Context, name string) (os.FileInfo, error) +} + +// A File is returned by a FileSystem's OpenFile method and can be served by a +// Handler. +// +// A File may optionally implement the DeadPropsHolder interface, if it can +// load and save dead properties. +type File interface { + http.File + io.Writer +} + +type webdavFile struct { + File + name string +} + +func (f *webdavFile) DeadProps() (map[xml.Name]Property, error) { + var ( + xmlName xml.Name + property Property + properties = make(map[xml.Name]Property) + ) + var stat fs.FileInfo + stat, err := f.Stat() + if err == nil { + xmlName.Space = "zrok:" + xmlName.Local = "lastmodified" + property.XMLName = xmlName + property.InnerXML = strconv.AppendInt(nil, stat.ModTime().Unix(), 10) + properties[xmlName] = property + } + + return properties, nil +} + +func (f *webdavFile) Patch(patches []Proppatch) ([]Propstat, error) { + var stat Propstat + stat.Status = http.StatusOK + for _, patch := range patches { + for _, prop := range patch.Props { + if prop.XMLName.Space == "zrok:" && prop.XMLName.Local == "lastmodified" { + modtimeUnix, err := strconv.ParseInt(string(prop.InnerXML), 10, 64) + if err != nil { + return nil, err + } + if err := f.updateModtime(f.name, time.Unix(modtimeUnix, 0)); err != nil { + return nil, err + } + } + } + } + return []Propstat{stat}, nil +} + +func (f *webdavFile) updateModtime(path string, modtime time.Time) error { + if err := os.Chtimes(f.name, time.Now(), modtime); err != nil { + return err + } + return nil +} + +// A Dir implements FileSystem using the native file system restricted to a +// specific directory tree. +// +// While the FileSystem.OpenFile method takes '/'-separated paths, a Dir's +// string value is a filename on the native file system, not a URL, so it is +// separated by filepath.Separator, which isn't necessarily '/'. +// +// An empty Dir is treated as ".". +type Dir string + +func (d Dir) resolve(name string) string { + // This implementation is based on Dir.Open's code in the standard net/http package. + if filepath.Separator != '/' && strings.IndexRune(name, filepath.Separator) >= 0 || + strings.Contains(name, "\x00") { + return "" + } + dir := string(d) + if dir == "" { + dir = "." + } + return filepath.Join(dir, filepath.FromSlash(slashClean(name))) +} + +func (d Dir) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + return os.Mkdir(name, perm) +} + +func (d Dir) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + f, err := os.OpenFile(name, flag, perm) + if err != nil { + return nil, err + } + return &webdavFile{f, name}, nil +} + +func (d Dir) RemoveAll(ctx context.Context, name string) error { + if name = d.resolve(name); name == "" { + return os.ErrNotExist + } + if name == filepath.Clean(string(d)) { + // Prohibit removing the virtual root directory. 
+ return os.ErrInvalid + } + return os.RemoveAll(name) +} + +func (d Dir) Rename(ctx context.Context, oldName, newName string) error { + if oldName = d.resolve(oldName); oldName == "" { + return os.ErrNotExist + } + if newName = d.resolve(newName); newName == "" { + return os.ErrNotExist + } + if root := filepath.Clean(string(d)); root == oldName || root == newName { + // Prohibit renaming from or to the virtual root directory. + return os.ErrInvalid + } + return os.Rename(oldName, newName) +} + +func (d Dir) Stat(ctx context.Context, name string) (os.FileInfo, error) { + if name = d.resolve(name); name == "" { + return nil, os.ErrNotExist + } + return os.Stat(name) +} + +// NewMemFS returns a new in-memory FileSystem implementation. +func NewMemFS() FileSystem { + return &memFS{ + root: memFSNode{ + children: make(map[string]*memFSNode), + mode: 0660 | os.ModeDir, + modTime: time.Now(), + }, + } +} + +// A memFS implements FileSystem, storing all metadata and actual file data +// in-memory. No limits on filesystem size are used, so it is not recommended +// this be used where the clients are untrusted. +// +// Concurrent access is permitted. The tree structure is protected by a mutex, +// and each node's contents and metadata are protected by a per-node mutex. +// +// TODO: Enforce file permissions. +type memFS struct { + mu sync.Mutex + root memFSNode +} + +// TODO: clean up and rationalize the walk/find code. + +// walk walks the directory tree for the fullname, calling f at each step. If f +// returns an error, the walk will be aborted and return that same error. +// +// dir is the directory at that step, frag is the name fragment, and final is +// whether it is the final step. For example, walking "/foo/bar/x" will result +// in 3 calls to f: +// - "/", "foo", false +// - "/foo/", "bar", false +// - "/foo/bar/", "x", true +// +// The frag argument will be empty only if dir is the root node and the walk +// ends at that root node. +func (fs *memFS) walk(op, fullname string, f func(dir *memFSNode, frag string, final bool) error) error { + original := fullname + fullname = slashClean(fullname) + + // Strip any leading "/"s to make fullname a relative path, as the walk + // starts at fs.root. + if fullname[0] == '/' { + fullname = fullname[1:] + } + dir := &fs.root + + for { + frag, remaining := fullname, "" + i := strings.IndexRune(fullname, '/') + final := i < 0 + if !final { + frag, remaining = fullname[:i], fullname[i+1:] + } + if frag == "" && dir != &fs.root { + panic("webdav: empty path fragment for a clean path") + } + if err := f(dir, frag, final); err != nil { + return &os.PathError{ + Op: op, + Path: original, + Err: err, + } + } + if final { + break + } + child := dir.children[frag] + if child == nil { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrNotExist, + } + } + if !child.mode.IsDir() { + return &os.PathError{ + Op: op, + Path: original, + Err: os.ErrInvalid, + } + } + dir, fullname = child, remaining + } + return nil +} + +// find returns the parent of the named node and the relative name fragment +// from the parent to the child. For example, if finding "/foo/bar/baz" then +// parent will be the node for "/foo/bar" and frag will be "baz". +// +// If the fullname names the root node, then parent, frag and err will be zero. +// +// find returns an error if the parent does not already exist or the parent +// isn't a directory, but it will not return an error per se if the child does +// not already exist. 
The error returned is either nil or an *os.PathError +// whose Op is op. +func (fs *memFS) find(op, fullname string) (parent *memFSNode, frag string, err error) { + err = fs.walk(op, fullname, func(parent0 *memFSNode, frag0 string, final bool) error { + if !final { + return nil + } + if frag0 != "" { + parent, frag = parent0, frag0 + } + return nil + }) + return parent, frag, err +} + +func (fs *memFS) Mkdir(ctx context.Context, name string, perm os.FileMode) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("mkdir", name) + if err != nil { + return err + } + if dir == nil { + // We can't create the root. + return os.ErrInvalid + } + if _, ok := dir.children[frag]; ok { + return os.ErrExist + } + dir.children[frag] = &memFSNode{ + children: make(map[string]*memFSNode), + mode: perm.Perm() | os.ModeDir, + modTime: time.Now(), + } + return nil +} + +func (fs *memFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("open", name) + if err != nil { + return nil, err + } + var n *memFSNode + if dir == nil { + // We're opening the root. + if runtime.GOOS == "zos" { + if flag&os.O_WRONLY != 0 { + return nil, os.ErrPermission + } + } else { + if flag&(os.O_WRONLY|os.O_RDWR) != 0 { + return nil, os.ErrPermission + } + } + n, frag = &fs.root, "/" + + } else { + n = dir.children[frag] + if flag&(os.O_SYNC|os.O_APPEND) != 0 { + // memFile doesn't support these flags yet. + return nil, os.ErrInvalid + } + if flag&os.O_CREATE != 0 { + if flag&os.O_EXCL != 0 && n != nil { + return nil, os.ErrExist + } + if n == nil { + n = &memFSNode{ + mode: perm.Perm(), + } + dir.children[frag] = n + } + } + if n == nil { + return nil, os.ErrNotExist + } + if flag&(os.O_WRONLY|os.O_RDWR) != 0 && flag&os.O_TRUNC != 0 { + n.mu.Lock() + n.data = nil + n.mu.Unlock() + } + } + + children := make([]os.FileInfo, 0, len(n.children)) + for cName, c := range n.children { + children = append(children, c.stat(cName)) + } + return &memFile{ + n: n, + nameSnapshot: frag, + childrenSnapshot: children, + }, nil +} + +func (fs *memFS) RemoveAll(ctx context.Context, name string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("remove", name) + if err != nil { + return err + } + if dir == nil { + // We can't remove the root. + return os.ErrInvalid + } + delete(dir.children, frag) + return nil +} + +func (fs *memFS) Rename(ctx context.Context, oldName, newName string) error { + fs.mu.Lock() + defer fs.mu.Unlock() + + oldName = slashClean(oldName) + newName = slashClean(newName) + if oldName == newName { + return nil + } + if strings.HasPrefix(newName, oldName+"/") { + // We can't rename oldName to be a sub-directory of itself. + return os.ErrInvalid + } + + oDir, oFrag, err := fs.find("rename", oldName) + if err != nil { + return err + } + if oDir == nil { + // We can't rename from the root. + return os.ErrInvalid + } + + nDir, nFrag, err := fs.find("rename", newName) + if err != nil { + return err + } + if nDir == nil { + // We can't rename to the root. 
+ return os.ErrInvalid + } + + oNode, ok := oDir.children[oFrag] + if !ok { + return os.ErrNotExist + } + if oNode.children != nil { + if nNode, ok := nDir.children[nFrag]; ok { + if nNode.children == nil { + return errNotADirectory + } + if len(nNode.children) != 0 { + return errDirectoryNotEmpty + } + } + } + delete(oDir.children, oFrag) + nDir.children[nFrag] = oNode + return nil +} + +func (fs *memFS) Stat(ctx context.Context, name string) (os.FileInfo, error) { + fs.mu.Lock() + defer fs.mu.Unlock() + + dir, frag, err := fs.find("stat", name) + if err != nil { + return nil, err + } + if dir == nil { + // We're stat'ting the root. + return fs.root.stat("/"), nil + } + if n, ok := dir.children[frag]; ok { + return n.stat(path.Base(name)), nil + } + return nil, os.ErrNotExist +} + +// A memFSNode represents a single entry in the in-memory filesystem and also +// implements os.FileInfo. +type memFSNode struct { + // children is protected by memFS.mu. + children map[string]*memFSNode + + mu sync.Mutex + data []byte + mode os.FileMode + modTime time.Time + deadProps map[xml.Name]Property +} + +func (n *memFSNode) stat(name string) *memFileInfo { + n.mu.Lock() + defer n.mu.Unlock() + return &memFileInfo{ + name: name, + size: int64(len(n.data)), + mode: n.mode, + modTime: n.modTime, + } +} + +func (n *memFSNode) DeadProps() (map[xml.Name]Property, error) { + n.mu.Lock() + defer n.mu.Unlock() + if len(n.deadProps) == 0 { + return nil, nil + } + ret := make(map[xml.Name]Property, len(n.deadProps)) + for k, v := range n.deadProps { + ret[k] = v + } + return ret, nil +} + +func (n *memFSNode) Patch(patches []Proppatch) ([]Propstat, error) { + n.mu.Lock() + defer n.mu.Unlock() + pstat := Propstat{Status: http.StatusOK} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + if patch.Remove { + delete(n.deadProps, p.XMLName) + continue + } + if n.deadProps == nil { + n.deadProps = map[xml.Name]Property{} + } + n.deadProps[p.XMLName] = p + } + } + return []Propstat{pstat}, nil +} + +type memFileInfo struct { + name string + size int64 + mode os.FileMode + modTime time.Time +} + +func (f *memFileInfo) Name() string { return f.name } +func (f *memFileInfo) Size() int64 { return f.size } +func (f *memFileInfo) Mode() os.FileMode { return f.mode } +func (f *memFileInfo) ModTime() time.Time { return f.modTime } +func (f *memFileInfo) IsDir() bool { return f.mode.IsDir() } +func (f *memFileInfo) Sys() interface{} { return nil } + +// A memFile is a File implementation for a memFSNode. It is a per-file (not +// per-node) read/write position, and a snapshot of the memFS' tree structure +// (a node's name and children) for that node. +type memFile struct { + n *memFSNode + nameSnapshot string + childrenSnapshot []os.FileInfo + // pos is protected by n.mu. + pos int +} + +// A *memFile implements the optional DeadPropsHolder interface. 
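A sketch of the FileSystem surface defined above, exercised against NewMemFS; the same calls work against a Dir rooted at a real directory. The helper name, paths, and file contents below are illustrative only.

// Sketch only: create a directory and a file, then stat it, using the
// FileSystem interface and the in-memory implementation defined above.
package davServer

import (
    "context"
    "fmt"
    "os"
)

func fileSystemSketch() error {
    ctx := context.Background()

    fs := NewMemFS() // or: fs := Dir("/some/root")
    if err := fs.Mkdir(ctx, "/notes", 0755); err != nil {
        return err
    }
    f, err := fs.OpenFile(ctx, "/notes/todo.txt", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
    if err != nil {
        return err
    }
    if _, err := f.Write([]byte("ship the drives feature")); err != nil {
        f.Close()
        return err
    }
    if err := f.Close(); err != nil {
        return err
    }

    info, err := fs.Stat(ctx, "/notes/todo.txt")
    if err != nil {
        return err
    }
    fmt.Println(info.Name(), info.Size()) // prints the base name and byte count
    return nil
}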
+var _ DeadPropsHolder = (*memFile)(nil) + +func (f *memFile) DeadProps() (map[xml.Name]Property, error) { return f.n.DeadProps() } +func (f *memFile) Patch(patches []Proppatch) ([]Propstat, error) { return f.n.Patch(patches) } + +func (f *memFile) Close() error { + return nil +} + +func (f *memFile) Read(p []byte) (int, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos >= len(f.n.data) { + return 0, io.EOF + } + n := copy(p, f.n.data[f.pos:]) + f.pos += n + return n, nil +} + +func (f *memFile) Readdir(count int) ([]os.FileInfo, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + if !f.n.mode.IsDir() { + return nil, os.ErrInvalid + } + old := f.pos + if old >= len(f.childrenSnapshot) { + // The os.File Readdir docs say that at the end of a directory, + // the error is io.EOF if count > 0 and nil if count <= 0. + if count > 0 { + return nil, io.EOF + } + return nil, nil + } + if count > 0 { + f.pos += count + if f.pos > len(f.childrenSnapshot) { + f.pos = len(f.childrenSnapshot) + } + } else { + f.pos = len(f.childrenSnapshot) + old = 0 + } + return f.childrenSnapshot[old:f.pos], nil +} + +func (f *memFile) Seek(offset int64, whence int) (int64, error) { + f.n.mu.Lock() + defer f.n.mu.Unlock() + npos := f.pos + // TODO: How to handle offsets greater than the size of system int? + switch whence { + case io.SeekStart: + npos = int(offset) + case io.SeekCurrent: + npos += int(offset) + case io.SeekEnd: + npos = len(f.n.data) + int(offset) + default: + npos = -1 + } + if npos < 0 { + return 0, os.ErrInvalid + } + f.pos = npos + return int64(f.pos), nil +} + +func (f *memFile) Stat() (os.FileInfo, error) { + return f.n.stat(f.nameSnapshot), nil +} + +func (f *memFile) Write(p []byte) (int, error) { + lenp := len(p) + f.n.mu.Lock() + defer f.n.mu.Unlock() + + if f.n.mode.IsDir() { + return 0, os.ErrInvalid + } + if f.pos < len(f.n.data) { + n := copy(f.n.data[f.pos:], p) + f.pos += n + p = p[n:] + } else if f.pos > len(f.n.data) { + // Write permits the creation of holes, if we've seek'ed past the + // existing end of file. + if f.pos <= cap(f.n.data) { + oldLen := len(f.n.data) + f.n.data = f.n.data[:f.pos] + hole := f.n.data[oldLen:] + for i := range hole { + hole[i] = 0 + } + } else { + d := make([]byte, f.pos, f.pos+len(p)) + copy(d, f.n.data) + f.n.data = d + } + } + + if len(p) > 0 { + // We should only get here if f.pos == len(f.n.data). + f.n.data = append(f.n.data, p...) + f.pos = len(f.n.data) + } + f.n.modTime = time.Now() + return lenp, nil +} + +// moveFiles moves files and/or directories from src to dst. +// +// See section 9.9.4 for when various HTTP status codes apply. +func moveFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool) (status int, err error) { + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if !os.IsNotExist(err) { + return http.StatusForbidden, err + } + created = true + } else if overwrite { + // Section 9.9.3 says that "If a resource exists at the destination + // and the Overwrite header is "T", then prior to performing the move, + // the server must perform a DELETE with "Depth: infinity" on the + // destination resource. 
+ if err := fs.RemoveAll(ctx, dst); err != nil { + return http.StatusForbidden, err + } + } else { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.Rename(ctx, src, dst); err != nil { + return http.StatusForbidden, err + } + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +func copyProps(dst, src File) error { + d, ok := dst.(DeadPropsHolder) + if !ok { + return nil + } + s, ok := src.(DeadPropsHolder) + if !ok { + return nil + } + m, err := s.DeadProps() + if err != nil { + return err + } + props := make([]Property, 0, len(m)) + for _, prop := range m { + props = append(props, prop) + } + _, err = d.Patch([]Proppatch{{Props: props}}) + return err +} + +// copyFiles copies files and/or directories from src to dst. +// +// See section 9.8.5 for when various HTTP status codes apply. +func copyFiles(ctx context.Context, fs FileSystem, src, dst string, overwrite bool, depth int, recursion int) (status int, err error) { + if recursion == 1000 { + return http.StatusInternalServerError, errRecursionTooDeep + } + recursion++ + + // TODO: section 9.8.3 says that "Note that an infinite-depth COPY of /A/ + // into /A/B/ could lead to infinite recursion if not handled correctly." + + srcFile, err := fs.OpenFile(ctx, src, os.O_RDONLY, 0) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + defer srcFile.Close() + srcStat, err := srcFile.Stat() + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusInternalServerError, err + } + srcPerm := srcStat.Mode() & os.ModePerm + + created := false + if _, err := fs.Stat(ctx, dst); err != nil { + if os.IsNotExist(err) { + created = true + } else { + return http.StatusForbidden, err + } + } else { + if !overwrite { + return http.StatusPreconditionFailed, os.ErrExist + } + if err := fs.RemoveAll(ctx, dst); err != nil && !os.IsNotExist(err) { + return http.StatusForbidden, err + } + } + + if srcStat.IsDir() { + if err := fs.Mkdir(ctx, dst, srcPerm); err != nil { + return http.StatusForbidden, err + } + if depth == infiniteDepth { + children, err := srcFile.Readdir(-1) + if err != nil { + return http.StatusForbidden, err + } + for _, c := range children { + name := c.Name() + s := path.Join(src, name) + d := path.Join(dst, name) + cStatus, cErr := copyFiles(ctx, fs, s, d, overwrite, depth, recursion) + if cErr != nil { + // TODO: MultiStatus. + return cStatus, cErr + } + } + } + + } else { + dstFile, err := fs.OpenFile(ctx, dst, os.O_RDWR|os.O_CREATE|os.O_TRUNC, srcPerm) + if err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusForbidden, err + + } + _, copyErr := io.Copy(dstFile, srcFile) + propsErr := copyProps(dstFile, srcFile) + closeErr := dstFile.Close() + if copyErr != nil { + return http.StatusInternalServerError, copyErr + } + if propsErr != nil { + return http.StatusInternalServerError, propsErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + } + + if created { + return http.StatusCreated, nil + } + return http.StatusNoContent, nil +} + +// walkFS traverses filesystem fs starting at name up to depth levels. +// +// Allowed values for depth are 0, 1 or infiniteDepth. For each visited node, +// walkFS calls walkFn. If a visited file system node is a directory and +// walkFn returns filepath.SkipDir, walkFS will skip traversal of this node. 
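A usage sketch for walkFS as documented above (the function itself follows). The helper name is illustrative; depth 1 visits a node and its immediate children, while the package's infiniteDepth constant, defined elsewhere in this package, removes the bound.

// Sketch only: collect the names of a node and its immediate children
// by handing walkFS a filepath.WalkFunc-shaped callback.
package davServer

import (
    "context"
    "os"
)

func listShallow(ctx context.Context, fs FileSystem, root string) ([]string, error) {
    info, err := fs.Stat(ctx, root)
    if err != nil {
        return nil, err
    }
    var names []string
    walkFn := func(name string, info os.FileInfo, err error) error {
        if err != nil {
            return err
        }
        names = append(names, name)
        return nil
    }
    if err := walkFS(ctx, fs, 1, root, info, walkFn); err != nil {
        return nil, err
    }
    return names, nil
}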
+func walkFS(ctx context.Context, fs FileSystem, depth int, name string, info os.FileInfo, walkFn filepath.WalkFunc) error { + // This implementation is based on Walk's code in the standard path/filepath package. + err := walkFn(name, info, nil) + if err != nil { + if info.IsDir() && err == filepath.SkipDir { + return nil + } + return err + } + if !info.IsDir() || depth == 0 { + return nil + } + if depth == 1 { + depth = 0 + } + + // Read directory names. + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return walkFn(name, info, err) + } + fileInfos, err := f.Readdir(0) + f.Close() + if err != nil { + return walkFn(name, info, err) + } + + for _, fileInfo := range fileInfos { + filename := path.Join(name, fileInfo.Name()) + fileInfo, err := fs.Stat(ctx, filename) + if err != nil { + if err := walkFn(filename, fileInfo, err); err != nil && err != filepath.SkipDir { + return err + } + } else { + err = walkFS(ctx, fs, depth, filename, fileInfo, walkFn) + if err != nil { + if !fileInfo.IsDir() || err != filepath.SkipDir { + return err + } + } + } + } + return nil +} diff --git a/drives/davServer/file_test.go b/drives/davServer/file_test.go new file mode 100644 index 00000000..c935d5a6 --- /dev/null +++ b/drives/davServer/file_test.go @@ -0,0 +1,1183 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "context" + "encoding/xml" + "fmt" + "io" + "io/ioutil" + "os" + "path" + "path/filepath" + "reflect" + "runtime" + "sort" + "strconv" + "strings" + "testing" +) + +func TestSlashClean(t *testing.T) { + testCases := []string{ + "", + ".", + "/", + "/./", + "//", + "//.", + "//a", + "/a", + "/a/b/c", + "/a//b/./../c/d/", + "a", + "a/b/c", + } + for _, tc := range testCases { + got := slashClean(tc) + want := path.Clean("/" + tc) + if got != want { + t.Errorf("tc=%q: got %q, want %q", tc, got, want) + } + } +} + +func TestDirResolve(t *testing.T) { + testCases := []struct { + dir, name, want string + }{ + {"/", "", "/"}, + {"/", "/", "/"}, + {"/", ".", "/"}, + {"/", "./a", "/a"}, + {"/", "..", "/"}, + {"/", "..", "/"}, + {"/", "../", "/"}, + {"/", "../.", "/"}, + {"/", "../a", "/a"}, + {"/", "../..", "/"}, + {"/", "../bar/a", "/bar/a"}, + {"/", "../baz/a", "/baz/a"}, + {"/", "...", "/..."}, + {"/", ".../a", "/.../a"}, + {"/", ".../..", "/"}, + {"/", "a", "/a"}, + {"/", "a/./b", "/a/b"}, + {"/", "a/../../b", "/b"}, + {"/", "a/../b", "/b"}, + {"/", "a/b", "/a/b"}, + {"/", "a/b/c/../../d", "/a/d"}, + {"/", "a/b/c/../../../d", "/d"}, + {"/", "a/b/c/../../../../d", "/d"}, + {"/", "a/b/c/d", "/a/b/c/d"}, + + {"/foo/bar", "", "/foo/bar"}, + {"/foo/bar", "/", "/foo/bar"}, + {"/foo/bar", ".", "/foo/bar"}, + {"/foo/bar", "./a", "/foo/bar/a"}, + {"/foo/bar", "..", "/foo/bar"}, + {"/foo/bar", "../", "/foo/bar"}, + {"/foo/bar", "../.", "/foo/bar"}, + {"/foo/bar", "../a", "/foo/bar/a"}, + {"/foo/bar", "../..", "/foo/bar"}, + {"/foo/bar", "../bar/a", "/foo/bar/bar/a"}, + {"/foo/bar", "../baz/a", "/foo/bar/baz/a"}, + {"/foo/bar", "...", "/foo/bar/..."}, + {"/foo/bar", ".../a", "/foo/bar/.../a"}, + {"/foo/bar", ".../..", "/foo/bar"}, + {"/foo/bar", "a", "/foo/bar/a"}, + {"/foo/bar", "a/./b", "/foo/bar/a/b"}, + {"/foo/bar", "a/../../b", "/foo/bar/b"}, + {"/foo/bar", "a/../b", "/foo/bar/b"}, + {"/foo/bar", "a/b", "/foo/bar/a/b"}, + {"/foo/bar", "a/b/c/../../d", "/foo/bar/a/d"}, + {"/foo/bar", "a/b/c/../../../d", "/foo/bar/d"}, + {"/foo/bar", 
"a/b/c/../../../../d", "/foo/bar/d"}, + {"/foo/bar", "a/b/c/d", "/foo/bar/a/b/c/d"}, + + {"/foo/bar/", "", "/foo/bar"}, + {"/foo/bar/", "/", "/foo/bar"}, + {"/foo/bar/", ".", "/foo/bar"}, + {"/foo/bar/", "./a", "/foo/bar/a"}, + {"/foo/bar/", "..", "/foo/bar"}, + + {"/foo//bar///", "", "/foo/bar"}, + {"/foo//bar///", "/", "/foo/bar"}, + {"/foo//bar///", ".", "/foo/bar"}, + {"/foo//bar///", "./a", "/foo/bar/a"}, + {"/foo//bar///", "..", "/foo/bar"}, + + {"/x/y/z", "ab/c\x00d/ef", ""}, + + {".", "", "."}, + {".", "/", "."}, + {".", ".", "."}, + {".", "./a", "a"}, + {".", "..", "."}, + {".", "..", "."}, + {".", "../", "."}, + {".", "../.", "."}, + {".", "../a", "a"}, + {".", "../..", "."}, + {".", "../bar/a", "bar/a"}, + {".", "../baz/a", "baz/a"}, + {".", "...", "..."}, + {".", ".../a", ".../a"}, + {".", ".../..", "."}, + {".", "a", "a"}, + {".", "a/./b", "a/b"}, + {".", "a/../../b", "b"}, + {".", "a/../b", "b"}, + {".", "a/b", "a/b"}, + {".", "a/b/c/../../d", "a/d"}, + {".", "a/b/c/../../../d", "d"}, + {".", "a/b/c/../../../../d", "d"}, + {".", "a/b/c/d", "a/b/c/d"}, + + {"", "", "."}, + {"", "/", "."}, + {"", ".", "."}, + {"", "./a", "a"}, + {"", "..", "."}, + } + + for _, tc := range testCases { + d := Dir(filepath.FromSlash(tc.dir)) + if got := filepath.ToSlash(d.resolve(tc.name)); got != tc.want { + t.Errorf("dir=%q, name=%q: got %q, want %q", tc.dir, tc.name, got, tc.want) + } + } +} + +func TestWalk(t *testing.T) { + type walkStep struct { + name, frag string + final bool + } + + testCases := []struct { + dir string + want []walkStep + }{ + {"", []walkStep{ + {"", "", true}, + }}, + {"/", []walkStep{ + {"", "", true}, + }}, + {"/a", []walkStep{ + {"", "a", true}, + }}, + {"/a/", []walkStep{ + {"", "a", true}, + }}, + {"/a/b", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/", []walkStep{ + {"", "a", false}, + {"a", "b", true}, + }}, + {"/a/b/c", []walkStep{ + {"", "a", false}, + {"a", "b", false}, + {"b", "c", true}, + }}, + // The following test case is the one mentioned explicitly + // in the method description. + {"/foo/bar/x", []walkStep{ + {"", "foo", false}, + {"foo", "bar", false}, + {"bar", "x", true}, + }}, + } + + ctx := context.Background() + + for _, tc := range testCases { + fs := NewMemFS().(*memFS) + + parts := strings.Split(tc.dir, "/") + for p := 2; p < len(parts); p++ { + d := strings.Join(parts[:p], "/") + if err := fs.Mkdir(ctx, d, 0666); err != nil { + t.Errorf("tc.dir=%q: mkdir: %q: %v", tc.dir, d, err) + } + } + + i, prevFrag := 0, "" + err := fs.walk("test", tc.dir, func(dir *memFSNode, frag string, final bool) error { + got := walkStep{ + name: prevFrag, + frag: frag, + final: final, + } + want := tc.want[i] + + if got != want { + return fmt.Errorf("got %+v, want %+v", got, want) + } + i, prevFrag = i+1, frag + return nil + }) + if err != nil { + t.Errorf("tc.dir=%q: %v", tc.dir, err) + } + } +} + +// find appends to ss the names of the named file and its children. It is +// analogous to the Unix find command. +// +// The returned strings are not guaranteed to be in any particular order. 
+func find(ctx context.Context, ss []string, fs FileSystem, name string) ([]string, error) { + stat, err := fs.Stat(ctx, name) + if err != nil { + return nil, err + } + ss = append(ss, name) + if stat.IsDir() { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + return nil, err + } + for _, c := range children { + ss, err = find(ctx, ss, fs, path.Join(name, c.Name())) + if err != nil { + return nil, err + } + } + } + return ss, nil +} + +func testFS(t *testing.T, fs FileSystem) { + errStr := func(err error) string { + switch { + case os.IsExist(err): + return "errExist" + case os.IsNotExist(err): + return "errNotExist" + case err != nil: + return "err" + } + return "ok" + } + + // The non-"find" non-"stat" test cases should change the file system state. The + // indentation of the "find"s and "stat"s helps distinguish such test cases. + testCases := []string{ + " stat / want dir", + " stat /a want errNotExist", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + "create /a A want ok", + " stat /a want 1", + "create /d/e EEE want errNotExist", + "mk-dir /a want errExist", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + " stat /d want dir", + "create /d/e EEE want ok", + " stat /d/e want 3", + " find / /a /d /d/e", + "create /d/f FFFF want ok", + "create /d/g GGGGGGG want ok", + "mk-dir /d/m want ok", + "mk-dir /d/m want errExist", + "create /d/m/p PPPPP want ok", + " stat /d/e want 3", + " stat /d/f want 4", + " stat /d/g want 7", + " stat /d/h want errNotExist", + " stat /d/m want dir", + " stat /d/m/p want 5", + " find / /a /d /d/e /d/f /d/g /d/m /d/m/p", + "rm-all /d want ok", + " stat /a want 1", + " stat /d want errNotExist", + " stat /d/e want errNotExist", + " stat /d/f want errNotExist", + " stat /d/g want errNotExist", + " stat /d/m want errNotExist", + " stat /d/m/p want errNotExist", + " find / /a", + "mk-dir /d/m want errNotExist", + "mk-dir /d want ok", + "create /d/f FFFF want ok", + "rm-all /d/f want ok", + "mk-dir /d/m want ok", + "rm-all /z want ok", + "rm-all / want err", + "create /b BB want ok", + " stat / want dir", + " stat /a want 1", + " stat /b want 2", + " stat /c want errNotExist", + " stat /d want dir", + " stat /d/m want dir", + " find / /a /b /d /d/m", + "move__ o=F /b /c want ok", + " stat /b want errNotExist", + " stat /c want 2", + " stat /d/m want dir", + " stat /d/n want errNotExist", + " find / /a /c /d /d/m", + "move__ o=F /d/m /d/n want ok", + "create /d/n/q QQQQ want ok", + " stat /d/m want errNotExist", + " stat /d/n want dir", + " stat /d/n/q want 4", + "move__ o=F /d /d/n/z want err", + "move__ o=T /c /d/n/q want ok", + " stat /c want errNotExist", + " stat /d/n/q want 2", + " find / /a /d /d/n /d/n/q", + "create /d/n/r RRRRR want ok", + "mk-dir /u want ok", + "mk-dir /u/v want ok", + "move__ o=F /d/n /u want errExist", + "create /t TTTTTT want ok", + "move__ o=F /d/n /t want errExist", + "rm-all /t want ok", + "move__ o=F /d/n /t want ok", + " stat /d want dir", + " stat /d/n want errNotExist", + " stat /d/n/r want errNotExist", + " stat /t want dir", + " stat /t/q want 2", + " stat /t/r want 5", + " find / /a /d /t /t/q /t/r /u /u/v", + "move__ o=F /t / want errExist", + "move__ o=T /t /u/v want ok", + " stat /u/v/r want 5", + "move__ o=F / /z want err", + " find / /a /d /u /u/v /u/v/q /u/v/r", + " stat /a want 1", + " stat /b want errNotExist", + " stat /c want errNotExist", + " stat /u/v/r want 5", + "copy__ o=F 
d=0 /a /b want ok", + "copy__ o=T d=0 /a /c want ok", + " stat /a want 1", + " stat /b want 1", + " stat /c want 1", + " stat /u/v/r want 5", + "copy__ o=F d=0 /u/v/r /b want errExist", + " stat /b want 1", + "copy__ o=T d=0 /u/v/r /b want ok", + " stat /a want 1", + " stat /b want 5", + " stat /u/v/r want 5", + "rm-all /a want ok", + "rm-all /b want ok", + "mk-dir /u/v/w want ok", + "create /u/v/w/s SSSSSSSS want ok", + " stat /d want dir", + " stat /d/x want errNotExist", + " stat /d/y want errNotExist", + " stat /u/v/r want 5", + " stat /u/v/w/s want 8", + " find / /c /d /u /u/v /u/v/q /u/v/r /u/v/w /u/v/w/s", + "copy__ o=T d=0 /u/v /d/x want ok", + "copy__ o=T d=∞ /u/v /d/y want ok", + "rm-all /u want ok", + " stat /d/x want dir", + " stat /d/x/q want errNotExist", + " stat /d/x/r want errNotExist", + " stat /d/x/w want errNotExist", + " stat /d/x/w/s want errNotExist", + " stat /d/y want dir", + " stat /d/y/q want 2", + " stat /d/y/r want 5", + " stat /d/y/w want dir", + " stat /d/y/w/s want 8", + " stat /u want errNotExist", + " find / /c /d /d/x /d/y /d/y/q /d/y/r /d/y/w /d/y/w/s", + "copy__ o=F d=∞ /d/y /d/x want errExist", + } + + ctx := context.Background() + + for i, tc := range testCases { + tc = strings.TrimSpace(tc) + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create": + parts := strings.Split(arg, " ") + if len(parts) != 4 || parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid write", i, tc) + } + f, opErr := fs.OpenFile(ctx, parts[0], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if got := errStr(opErr); got != parts[3] { + t.Fatalf("test case #%d %q: OpenFile: got %q (%v), want %q", i, tc, got, opErr, parts[3]) + } + if f != nil { + if _, err := f.Write([]byte(parts[1])); err != nil { + t.Fatalf("test case #%d %q: Write: %v", i, tc, err) + } + if err := f.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + } + + case "find": + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Fatalf("test case #%d %q: find: %v", i, tc, err) + } + sort.Strings(got) + want := strings.Split(arg, " ") + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %s\nwant %s", i, tc, got, want) + } + + case "copy__", "mk-dir", "move__", "rm-all", "stat": + nParts := 3 + switch op { + case "copy__": + nParts = 6 + case "move__": + nParts = 5 + } + parts := strings.Split(arg, " ") + if len(parts) != nParts { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + + got, opErr := "", error(nil) + switch op { + case "copy__": + depth := 0 + if parts[1] == "d=∞" { + depth = infiniteDepth + } + _, opErr = copyFiles(ctx, fs, parts[2], parts[3], parts[0] == "o=T", depth, 0) + case "mk-dir": + opErr = fs.Mkdir(ctx, parts[0], 0777) + case "move__": + _, opErr = moveFiles(ctx, fs, parts[1], parts[2], parts[0] == "o=T") + case "rm-all": + opErr = fs.RemoveAll(ctx, parts[0]) + case "stat": + var stat os.FileInfo + fileName := parts[0] + if stat, opErr = fs.Stat(ctx, fileName); opErr == nil { + if stat.IsDir() { + got = "dir" + } else { + got = strconv.Itoa(int(stat.Size())) + } + + if fileName == "/" { + // For a Dir FileSystem, the virtual file system root maps to a + // real file system name like "/tmp/webdav-test012345", which does + // not end with "/". We skip such cases. 
+ } else if statName := stat.Name(); path.Base(fileName) != statName { + t.Fatalf("test case #%d %q: file name %q inconsistent with stat name %q", + i, tc, fileName, statName) + } + } + } + if got == "" { + got = errStr(opErr) + } + + if parts[len(parts)-2] != "want" { + t.Fatalf("test case #%d %q: invalid %s", i, tc, op) + } + if want := parts[len(parts)-1]; got != want { + t.Fatalf("test case #%d %q: got %q (%v), want %q", i, tc, got, opErr, want) + } + } + } +} + +func TestDir(t *testing.T) { + switch runtime.GOOS { + case "nacl": + t.Skip("see golang.org/issue/12004") + case "plan9": + t.Skip("see golang.org/issue/11453") + } + + td, err := ioutil.TempDir("", "webdav-test") + if err != nil { + t.Fatal(err) + } + defer os.RemoveAll(td) + testFS(t, Dir(td)) +} + +func TestMemFS(t *testing.T) { + testFS(t, NewMemFS()) +} + +func TestMemFSRoot(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + for i := 0; i < 5; i++ { + stat, err := fs.Stat(ctx, "/") + if err != nil { + t.Fatalf("i=%d: Stat: %v", i, err) + } + if !stat.IsDir() { + t.Fatalf("i=%d: Stat.IsDir is false, want true", i) + } + + f, err := fs.OpenFile(ctx, "/", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("i=%d: OpenFile: %v", i, err) + } + defer f.Close() + children, err := f.Readdir(-1) + if err != nil { + t.Fatalf("i=%d: Readdir: %v", i, err) + } + if len(children) != i { + t.Fatalf("i=%d: got %d children, want %d", i, len(children), i) + } + + if _, err := f.Write(make([]byte, 1)); err == nil { + t.Fatalf("i=%d: Write: got nil error, want non-nil", i) + } + + if err := fs.Mkdir(ctx, fmt.Sprintf("/dir%d", i), 0777); err != nil { + t.Fatalf("i=%d: Mkdir: %v", i, err) + } + } +} + +func TestMemFileReaddir(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + if err := fs.Mkdir(ctx, "/foo", 0777); err != nil { + t.Fatalf("Mkdir: %v", err) + } + readdir := func(count int) ([]os.FileInfo, error) { + f, err := fs.OpenFile(ctx, "/foo", os.O_RDONLY, 0) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + return f.Readdir(count) + } + if got, err := readdir(-1); len(got) != 0 || err != nil { + t.Fatalf("readdir(-1): got %d fileInfos with err=%v, want 0, ", len(got), err) + } + if got, err := readdir(+1); len(got) != 0 || err != io.EOF { + t.Fatalf("readdir(+1): got %d fileInfos with err=%v, want 0, EOF", len(got), err) + } +} + +func TestMemFile(t *testing.T) { + testCases := []string{ + "wantData ", + "wantSize 0", + "write abc", + "wantData abc", + "write de", + "wantData abcde", + "wantSize 5", + "write 5*x", + "write 4*y+2*z", + "write 3*st", + "wantData abcdexxxxxyyyyzzststst", + "wantSize 22", + "seek set 4 want 4", + "write EFG", + "wantData abcdEFGxxxyyyyzzststst", + "wantSize 22", + "seek set 2 want 2", + "read cdEF", + "read Gx", + "seek cur 0 want 8", + "seek cur 2 want 10", + "seek cur -1 want 9", + "write J", + "wantData abcdEFGxxJyyyyzzststst", + "wantSize 22", + "seek cur -4 want 6", + "write ghijk", + "wantData abcdEFghijkyyyzzststst", + "wantSize 22", + "read yyyz", + "seek cur 0 want 15", + "write ", + "seek cur 0 want 15", + "read ", + "seek cur 0 want 15", + "seek end -3 want 19", + "write ZZ", + "wantData abcdEFghijkyyyzzstsZZt", + "wantSize 22", + "write 4*A", + "wantData abcdEFghijkyyyzzstsZZAAAA", + "wantSize 25", + "seek end 0 want 25", + "seek end -5 want 20", + "read Z+4*A", + "write 5*B", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB", + "wantSize 30", + "seek end 10 want 40", + "write C", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........C", + "wantSize 41", 
+ "write D", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD", + "wantSize 42", + "seek set 43 want 43", + "write E", + "wantData abcdEFghijkyyyzzstsZZAAAABBBBB..........CD.E", + "wantSize 44", + "seek set 0 want 0", + "write 5*123456789_", + "wantData 123456789_123456789_123456789_123456789_123456789_", + "wantSize 50", + "seek cur 0 want 50", + "seek cur -99 want err", + } + + ctx := context.Background() + + const filename = "/foo" + fs := NewMemFS() + f, err := fs.OpenFile(ctx, filename, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + + // Expand an arg like "3*a+2*b" to "aaabb". + parts := strings.Split(arg, "+") + for j, part := range parts { + if k := strings.IndexByte(part, '*'); k >= 0 { + repeatCount, repeatStr := part[:k], part[k+1:] + n, err := strconv.Atoi(repeatCount) + if err != nil { + t.Fatalf("test case #%d %q: invalid repeat count %q", i, tc, repeatCount) + } + parts[j] = strings.Repeat(repeatStr, n) + } + } + arg = strings.Join(parts, "") + + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "read": + buf := make([]byte, len(arg)) + if _, err := io.ReadFull(f, buf); err != nil { + t.Fatalf("test case #%d %q: ReadFull: %v", i, tc, err) + } + if got := string(buf); got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + + case "seek": + parts := strings.Split(arg, " ") + if len(parts) != 4 { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + + whence := 0 + switch parts[0] { + default: + t.Fatalf("test case #%d %q: invalid seek whence", i, tc) + case "set": + whence = io.SeekStart + case "cur": + whence = io.SeekCurrent + case "end": + whence = io.SeekEnd + } + offset, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid offset %q", i, tc, parts[1]) + } + + if parts[2] != "want" { + t.Fatalf("test case #%d %q: invalid seek", i, tc) + } + if parts[3] == "err" { + _, err := f.Seek(int64(offset), whence) + if err == nil { + t.Fatalf("test case #%d %q: Seek returned nil error, want non-nil", i, tc) + } + } else { + got, err := f.Seek(int64(offset), whence) + if err != nil { + t.Fatalf("test case #%d %q: Seek: %v", i, tc, err) + } + want, err := strconv.Atoi(parts[3]) + if err != nil { + t.Fatalf("test case #%d %q: invalid want %q", i, tc, parts[3]) + } + if got != int64(want) { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + + case "write": + n, err := f.Write([]byte(arg)) + if err != nil { + t.Fatalf("test case #%d %q: write: %v", i, tc, err) + } + if n != len(arg) { + t.Fatalf("test case #%d %q: write returned %d bytes, want %d", i, tc, n, len(arg)) + } + + case "wantData": + g, err := fs.OpenFile(ctx, filename, os.O_RDONLY, 0666) + if err != nil { + t.Fatalf("test case #%d %q: OpenFile: %v", i, tc, err) + } + gotBytes, err := ioutil.ReadAll(g) + if err != nil { + t.Fatalf("test case #%d %q: ReadAll: %v", i, tc, err) + } + for i, c := range gotBytes { + if c == '\x00' { + gotBytes[i] = '.' 
+ } + } + got := string(gotBytes) + if got != arg { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, arg) + } + if err := g.Close(); err != nil { + t.Fatalf("test case #%d %q: Close: %v", i, tc, err) + } + + case "wantSize": + n, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid size %q", i, tc, arg) + } + fi, err := fs.Stat(ctx, filename) + if err != nil { + t.Fatalf("test case #%d %q: Stat: %v", i, tc, err) + } + if got, want := fi.Size(), int64(n); got != want { + t.Fatalf("test case #%d %q: got %d, want %d", i, tc, got, want) + } + } + } +} + +// TestMemFileWriteAllocs tests that writing N consecutive 1KiB chunks to a +// memFile doesn't allocate a new buffer for each of those N times. Otherwise, +// calling io.Copy(aMemFile, src) is likely to have quadratic complexity. +func TestMemFileWriteAllocs(t *testing.T) { + if runtime.Compiler == "gccgo" { + t.Skip("gccgo allocates here") + } + ctx := context.Background() + fs := NewMemFS() + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + t.Fatalf("OpenFile: %v", err) + } + defer f.Close() + + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + a := testing.AllocsPerRun(100, func() { + f.Write(xxx) + }) + // AllocsPerRun returns an integral value, so we compare the rounded-down + // number to zero. + if a > 0 { + t.Fatalf("%v allocs per run, want 0", a) + } +} + +func BenchmarkMemFileWrite(b *testing.B) { + ctx := context.Background() + fs := NewMemFS() + xxx := make([]byte, 1024) + for i := range xxx { + xxx[i] = 'x' + } + + b.ResetTimer() + for i := 0; i < b.N; i++ { + f, err := fs.OpenFile(ctx, "/xxx", os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + b.Fatalf("OpenFile: %v", err) + } + for j := 0; j < 100; j++ { + f.Write(xxx) + } + if err := f.Close(); err != nil { + b.Fatalf("Close: %v", err) + } + if err := fs.RemoveAll(ctx, "/xxx"); err != nil { + b.Fatalf("RemoveAll: %v", err) + } + } +} + +func TestCopyMoveProps(t *testing.T) { + ctx := context.Background() + fs := NewMemFS() + create := func(name string) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return err + } + _, wErr := f.Write([]byte("contents")) + cErr := f.Close() + if wErr != nil { + return wErr + } + return cErr + } + patch := func(name string, patches ...Proppatch) error { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return err + } + _, pErr := f.(DeadPropsHolder).Patch(patches) + cErr := f.Close() + if pErr != nil { + return pErr + } + return cErr + } + props := func(name string) (map[xml.Name]Property, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0666) + if err != nil { + return nil, err + } + m, pErr := f.(DeadPropsHolder).DeadProps() + cErr := f.Close() + if pErr != nil { + return nil, pErr + } + if cErr != nil { + return nil, cErr + } + return m, nil + } + + p0 := Property{ + XMLName: xml.Name{Space: "x:", Local: "boat"}, + InnerXML: []byte("pea-green"), + } + p1 := Property{ + XMLName: xml.Name{Space: "x:", Local: "ring"}, + InnerXML: []byte("1 shilling"), + } + p2 := Property{ + XMLName: xml.Name{Space: "x:", Local: "spoon"}, + InnerXML: []byte("runcible"), + } + p3 := Property{ + XMLName: xml.Name{Space: "x:", Local: "moon"}, + InnerXML: []byte("light"), + } + + if err := create("/src"); err != nil { + t.Fatalf("create /src: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0, p1}}); err != nil { + t.Fatalf("patch /src +p0 +p1: %v", 
err) + } + if _, err := copyFiles(ctx, fs, "/src", "/tmp", true, infiniteDepth, 0); err != nil { + t.Fatalf("copyFiles /src /tmp: %v", err) + } + if _, err := moveFiles(ctx, fs, "/tmp", "/dst", true); err != nil { + t.Fatalf("moveFiles /tmp /dst: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p0}, Remove: true}); err != nil { + t.Fatalf("patch /src -p0: %v", err) + } + if err := patch("/src", Proppatch{Props: []Property{p2}}); err != nil { + t.Fatalf("patch /src +p2: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p1}, Remove: true}); err != nil { + t.Fatalf("patch /dst -p1: %v", err) + } + if err := patch("/dst", Proppatch{Props: []Property{p3}}); err != nil { + t.Fatalf("patch /dst +p3: %v", err) + } + + gotSrc, err := props("/src") + if err != nil { + t.Fatalf("props /src: %v", err) + } + wantSrc := map[xml.Name]Property{ + p1.XMLName: p1, + p2.XMLName: p2, + } + if !reflect.DeepEqual(gotSrc, wantSrc) { + t.Fatalf("props /src:\ngot %v\nwant %v", gotSrc, wantSrc) + } + + gotDst, err := props("/dst") + if err != nil { + t.Fatalf("props /dst: %v", err) + } + wantDst := map[xml.Name]Property{ + p0.XMLName: p0, + p3.XMLName: p3, + } + if !reflect.DeepEqual(gotDst, wantDst) { + t.Fatalf("props /dst:\ngot %v\nwant %v", gotDst, wantDst) + } +} + +func TestWalkFS(t *testing.T) { + testCases := []struct { + desc string + buildfs []string + startAt string + depth int + walkFn filepath.WalkFunc + want []string + }{{ + "just root", + []string{}, + "/", + infiniteDepth, + nil, + []string{ + "/", + }, + }, { + "infinite walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + infiniteDepth, + nil, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/d", + "/e", + "/f", + }, + }, { + "infinite walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/a", + infiniteDepth, + nil, + []string{ + "/a", + "/a/b", + "/a/b/c", + "/a/d", + }, + }, { + "depth 1 walk from root", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/d", + "mkdir /e", + "touch /f", + }, + "/", + 1, + nil, + []string{ + "/", + "/a", + "/e", + "/f", + }, + }, { + "depth 1 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 1, + nil, + []string{ + "/a/b", + "/a/b/c", + "/a/b/g", + }, + }, { + "depth 0 walk from subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk from file", + []string{ + "mkdir /a", + "touch /a/b", + "touch /a/c", + }, + "/a/b", + 0, + nil, + []string{ + "/a/b", + }, + }, { + "infinite walk with skipped subdir", + []string{ + "mkdir /a", + "mkdir /a/b", + "touch /a/b/c", + "mkdir /a/b/g", + "mkdir /a/b/g/h", + "touch /a/b/g/i", + "touch /a/b/g/h/j", + "touch /a/b/z", + }, + "/", + infiniteDepth, + func(path string, info os.FileInfo, err error) error { + if path == "/a/b/g" { + return filepath.SkipDir + } + return nil + }, + []string{ + "/", + "/a", + "/a/b", + "/a/b/c", + "/a/b/z", + }, + }} + ctx := context.Background() + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + var got []string + traceFn := 
func(path string, info os.FileInfo, err error) error { + if tc.walkFn != nil { + err = tc.walkFn(path, info, err) + if err != nil { + return err + } + } + got = append(got, path) + return nil + } + fi, err := fs.Stat(ctx, tc.startAt) + if err != nil { + t.Fatalf("%s: cannot stat: %v", tc.desc, err) + } + err = walkFS(ctx, fs, tc.depth, tc.startAt, fi, traceFn) + if err != nil { + t.Errorf("%s:\ngot error %v, want nil", tc.desc, err) + continue + } + sort.Strings(got) + sort.Strings(tc.want) + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %q\nwant %q", tc.desc, got, tc.want) + continue + } + } +} + +func buildTestFS(buildfs []string) (FileSystem, error) { + // TODO: Could this be merged with the build logic in TestFS? + + ctx := context.Background() + fs := NewMemFS() + for _, b := range buildfs { + op := strings.Split(b, " ") + switch op[0] { + case "mkdir": + err := fs.Mkdir(ctx, op[1], os.ModeDir|0777) + if err != nil { + return nil, err + } + case "touch": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE, 0666) + if err != nil { + return nil, err + } + f.Close() + case "write": + f, err := fs.OpenFile(ctx, op[1], os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return nil, err + } + _, err = f.Write([]byte(op[2])) + f.Close() + if err != nil { + return nil, err + } + default: + return nil, fmt.Errorf("unknown file operation %q", op[0]) + } + } + return fs, nil +} diff --git a/drives/davServer/if.go b/drives/davServer/if.go new file mode 100644 index 00000000..0697c540 --- /dev/null +++ b/drives/davServer/if.go @@ -0,0 +1,173 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +// The If header is covered by Section 10.4. +// http://www.webdav.org/specs/rfc4918.html#HEADER_If + +import ( + "strings" +) + +// ifHeader is a disjunction (OR) of ifLists. +type ifHeader struct { + lists []ifList +} + +// ifList is a conjunction (AND) of Conditions, and an optional resource tag. +type ifList struct { + resourceTag string + conditions []Condition +} + +// parseIfHeader parses the "If: foo bar" HTTP header. The httpHeader string +// should omit the "If:" prefix and have any "\r\n"s collapsed to a " ", as is +// returned by req.Header.Get("If") for an http.Request req. 
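A sketch of feeding a raw tagged-list If header (RFC 4918 section 10.4) to parseIfHeader, which is defined next. The header value mirrors the "section 7.5.2" test cases further down; the printing helper is illustrative only, and Condition is the package's existing lock-condition struct.

// Sketch only: parse a tagged If header of the form
// "<resource-url> (<lock-token>)" and walk the resulting lists.
package davServer

import "fmt"

func ifHeaderSketch() {
    raw := `<http://example.com/locked/> (<urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf>)`
    h, ok := parseIfHeader(raw)
    if !ok {
        fmt.Println("malformed If header")
        return
    }
    for _, l := range h.lists {
        fmt.Println("resource:", l.resourceTag)
        for _, c := range l.conditions {
            fmt.Println("  not:", c.Not, "token:", c.Token, "etag:", c.ETag)
        }
    }
}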
+func parseIfHeader(httpHeader string) (h ifHeader, ok bool) { + s := strings.TrimSpace(httpHeader) + switch tokenType, _, _ := lex(s); tokenType { + case '(': + return parseNoTagLists(s) + case angleTokenType: + return parseTaggedLists(s) + default: + return ifHeader{}, false + } +} + +func parseNoTagLists(s string) (h ifHeader, ok bool) { + for { + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + } +} + +func parseTaggedLists(s string) (h ifHeader, ok bool) { + resourceTag, n := "", 0 + for first := true; ; first = false { + tokenType, tokenStr, remaining := lex(s) + switch tokenType { + case angleTokenType: + if !first && n == 0 { + return ifHeader{}, false + } + resourceTag, n = tokenStr, 0 + s = remaining + case '(': + n++ + l, remaining, ok := parseList(s) + if !ok { + return ifHeader{}, false + } + l.resourceTag = resourceTag + h.lists = append(h.lists, l) + if remaining == "" { + return h, true + } + s = remaining + default: + return ifHeader{}, false + } + } +} + +func parseList(s string) (l ifList, remaining string, ok bool) { + tokenType, _, s := lex(s) + if tokenType != '(' { + return ifList{}, "", false + } + for { + tokenType, _, remaining = lex(s) + if tokenType == ')' { + if len(l.conditions) == 0 { + return ifList{}, "", false + } + return l, remaining, true + } + c, remaining, ok := parseCondition(s) + if !ok { + return ifList{}, "", false + } + l.conditions = append(l.conditions, c) + s = remaining + } +} + +func parseCondition(s string) (c Condition, remaining string, ok bool) { + tokenType, tokenStr, s := lex(s) + if tokenType == notTokenType { + c.Not = true + tokenType, tokenStr, s = lex(s) + } + switch tokenType { + case strTokenType, angleTokenType: + c.Token = tokenStr + case squareTokenType: + c.ETag = tokenStr + default: + return Condition{}, "", false + } + return c, s, true +} + +// Single-rune tokens like '(' or ')' have a token type equal to their rune. +// All other tokens have a negative token type. +const ( + errTokenType = rune(-1) + eofTokenType = rune(-2) + strTokenType = rune(-3) + notTokenType = rune(-4) + angleTokenType = rune(-5) + squareTokenType = rune(-6) +) + +func lex(s string) (tokenType rune, tokenStr string, remaining string) { + // The net/textproto Reader that parses the HTTP header will collapse + // Linear White Space that spans multiple "\r\n" lines to a single " ", + // so we don't need to look for '\r' or '\n'. + for len(s) > 0 && (s[0] == '\t' || s[0] == ' ') { + s = s[1:] + } + if len(s) == 0 { + return eofTokenType, "", "" + } + i := 0 +loop: + for ; i < len(s); i++ { + switch s[i] { + case '\t', ' ', '(', ')', '<', '>', '[', ']': + break loop + } + } + + if i != 0 { + tokenStr, remaining = s[:i], s[i:] + if tokenStr == "Not" { + return notTokenType, "", remaining + } + return strTokenType, tokenStr, remaining + } + + j := 0 + switch s[0] { + case '<': + j, tokenType = strings.IndexByte(s, '>'), angleTokenType + case '[': + j, tokenType = strings.IndexByte(s, ']'), squareTokenType + default: + return rune(s[0]), "", s[1:] + } + if j < 0 { + return errTokenType, "", "" + } + return tokenType, s[1:j], s[j+1:] +} diff --git a/drives/davServer/if_test.go b/drives/davServer/if_test.go new file mode 100644 index 00000000..b8cc8acf --- /dev/null +++ b/drives/davServer/if_test.go @@ -0,0 +1,322 @@ +// Copyright 2014 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "reflect" + "strings" + "testing" +) + +func TestParseIfHeader(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + want ifHeader + }{{ + "bad: empty", + ``, + ifHeader{}, + }, { + "bad: no parens", + `foobar`, + ifHeader{}, + }, { + "bad: empty list #1", + `()`, + ifHeader{}, + }, { + "bad: empty list #2", + `(a) (b c) () (d)`, + ifHeader{}, + }, { + "bad: no list after resource #1", + ``, + ifHeader{}, + }, { + "bad: no list after resource #2", + ` (a)`, + ifHeader{}, + }, { + "bad: no list after resource #3", + ` (a) (b) `, + ifHeader{}, + }, { + "bad: no-tag-list followed by tagged-list", + `(a) (b) (c)`, + ifHeader{}, + }, { + "bad: unfinished list", + `(a`, + ifHeader{}, + }, { + "bad: unfinished ETag", + `([b`, + ifHeader{}, + }, { + "bad: unfinished Notted list", + `(Not a`, + ifHeader{}, + }, { + "bad: double Not", + `(Not Not a)`, + ifHeader{}, + }, { + "good: one list with a Token", + `(a)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }}, + }, + }, { + "good: one list with an ETag", + `([a])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + ETag: `a`, + }}, + }}, + }, + }, { + "good: one list with three Nots", + `(Not a Not b Not [d])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }, { + Not: true, + Token: `b`, + }, { + Not: true, + ETag: `d`, + }}, + }}, + }, + }, { + "good: two lists", + `(a) (b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Token: `b`, + }}, + }}, + }, + }, { + "good: two Notted lists", + `(Not a) (Not b)`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `a`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `b`, + }}, + }}, + }, + }, { + "section 7.5.1", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/users/f/fielding/index.html`, + conditions: []Condition{{ + Token: `urn:uuid:f81d4fae-7dec-11d0-a765-00a0c91e6bf6`, + }}, + }}, + }, + }, { + "section 7.5.2 #1", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #2", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 7.5.2 #3", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://example.com/locked/member`, + conditions: []Condition{{ + Token: `urn:uuid:150852e2-3847-42d5-8cbe-0f4f296f26cf`, + }}, + }}, + }, + }, { + "section 9.9.6", + `() + ()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:fe184f2e-6eec-41d0-c765-01adc56e6bb4`, + }}, + }, { + conditions: []Condition{{ + Token: `urn:uuid:e454f3f3-acdc-452a-56c7-00a5c91e4b77`, + }}, + }}, + }, + }, { + "section 9.10.8", + `()`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:e71d4fae-5dec-22d6-fea5-00a0c91e6be4`, + }}, + }}, + }, + }, { + "section 10.4.6", + `( + ["I am an ETag"]) + (["I am another ETag"])`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: 
`urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `"I am an ETag"`, + }}, + }, { + conditions: []Condition{{ + ETag: `"I am another ETag"`, + }}, + }}, + }, + }, { + "section 10.4.7", + `(Not + )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Not: true, + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + Token: `urn:uuid:58f202ac-22cf-11d1-b12d-002035b29092`, + }}, + }}, + }, + }, { + "section 10.4.8", + `() + (Not )`, + ifHeader{ + lists: []ifList{{ + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }, { + conditions: []Condition{{ + Not: true, + Token: `DAV:no-lock`, + }}, + }}, + }, + }, { + "section 10.4.9", + ` + ( + [W/"A weak ETag"]) (["strong ETag"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/resource1`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }, { + ETag: `W/"A weak ETag"`, + }}, + }, { + resourceTag: `/resource1`, + conditions: []Condition{{ + ETag: `"strong ETag"`, + }}, + }}, + }, + }, { + "section 10.4.10", + ` + ()`, + ifHeader{ + lists: []ifList{{ + resourceTag: `http://www.example.com/specs/`, + conditions: []Condition{{ + Token: `urn:uuid:181d4fae-7d8c-11d0-a765-00a0c91e6bf2`, + }}, + }}, + }, + }, { + "section 10.4.11 #1", + ` (["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + ETag: `"4217"`, + }}, + }}, + }, + }, { + "section 10.4.11 #2", + ` (Not ["4217"])`, + ifHeader{ + lists: []ifList{{ + resourceTag: `/specs/rfc2518.doc`, + conditions: []Condition{{ + Not: true, + ETag: `"4217"`, + }}, + }}, + }, + }} + + for _, tc := range testCases { + got, ok := parseIfHeader(strings.Replace(tc.input, "\n", "", -1)) + if gotEmpty := reflect.DeepEqual(got, ifHeader{}); gotEmpty == ok { + t.Errorf("%s: should be different: empty header == %t, ok == %t", tc.desc, gotEmpty, ok) + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("%s:\ngot %v\nwant %v", tc.desc, got, tc.want) + continue + } + } +} diff --git a/drives/davServer/internal/xml/README b/drives/davServer/internal/xml/README new file mode 100644 index 00000000..89656f48 --- /dev/null +++ b/drives/davServer/internal/xml/README @@ -0,0 +1,11 @@ +This is a fork of the encoding/xml package at ca1d6c4, the last commit before +https://go.googlesource.com/go/+/c0d6d33 "encoding/xml: restore Go 1.4 name +space behavior" made late in the lead-up to the Go 1.5 release. + +The list of encoding/xml changes is at +https://go.googlesource.com/go/+log/master/src/encoding/xml + +This fork is temporary, and I (nigeltao) expect to revert it after Go 1.6 is +released. + +See http://golang.org/issue/11841 diff --git a/drives/davServer/internal/xml/atom_test.go b/drives/davServer/internal/xml/atom_test.go new file mode 100644 index 00000000..a7128431 --- /dev/null +++ b/drives/davServer/internal/xml/atom_test.go @@ -0,0 +1,56 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package xml + +import "time" + +var atomValue = &Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Example Feed", + Link: []Link{{Href: "http://example.org/"}}, + Updated: ParseTime("2003-12-13T18:30:02Z"), + Author: Person{Name: "John Doe"}, + Id: "urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6", + + Entry: []Entry{ + { + Title: "Atom-Powered Robots Run Amok", + Link: []Link{{Href: "http://example.org/2003/12/13/atom03"}}, + Id: "urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a", + Updated: ParseTime("2003-12-13T18:30:02Z"), + Summary: NewText("Some text."), + }, + }, +} + +var atomXml = `` + + `` + + `Example Feed` + + `urn:uuid:60a76c80-d399-11d9-b93C-0003939e0af6` + + `` + + `John Doe` + + `` + + `Atom-Powered Robots Run Amok` + + `urn:uuid:1225c695-cfb8-4ebb-aaaa-80da344efa6a` + + `` + + `2003-12-13T18:30:02Z` + + `` + + `Some text.` + + `` + + `` + +func ParseTime(str string) time.Time { + t, err := time.Parse(time.RFC3339, str) + if err != nil { + panic(err) + } + return t +} + +func NewText(text string) Text { + return Text{ + Body: text, + } +} diff --git a/drives/davServer/internal/xml/example_test.go b/drives/davServer/internal/xml/example_test.go new file mode 100644 index 00000000..21b48dea --- /dev/null +++ b/drives/davServer/internal/xml/example_test.go @@ -0,0 +1,151 @@ +// Copyright 2012 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml_test + +import ( + "encoding/xml" + "fmt" + "os" +) + +func ExampleMarshalIndent() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + output, err := xml.MarshalIndent(v, " ", " ") + if err != nil { + fmt.Printf("error: %v\n", err) + } + + os.Stdout.Write(output) + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +func ExampleEncoder() { + type Address struct { + City, State string + } + type Person struct { + XMLName xml.Name `xml:"person"` + Id int `xml:"id,attr"` + FirstName string `xml:"name>first"` + LastName string `xml:"name>last"` + Age int `xml:"age"` + Height float32 `xml:"height,omitempty"` + Married bool + Address + Comment string `xml:",comment"` + } + + v := &Person{Id: 13, FirstName: "John", LastName: "Doe", Age: 42} + v.Comment = " Need more details. " + v.Address = Address{"Hanga Roa", "Easter Island"} + + enc := xml.NewEncoder(os.Stdout) + enc.Indent(" ", " ") + if err := enc.Encode(v); err != nil { + fmt.Printf("error: %v\n", err) + } + + // Output: + // + // + // John + // Doe + // + // 42 + // false + // Hanga Roa + // Easter Island + // + // +} + +// This example demonstrates unmarshaling an XML excerpt into a value with +// some preset fields. Note that the Phone field isn't modified and that +// the XML element is ignored. Also, the Groups field is assigned +// considering the element path provided in its tag. 
+func ExampleUnmarshal() { + type Email struct { + Where string `xml:"where,attr"` + Addr string + } + type Address struct { + City, State string + } + type Result struct { + XMLName xml.Name `xml:"Person"` + Name string `xml:"FullName"` + Phone string + Email []Email + Groups []string `xml:"Group>Value"` + Address + } + v := Result{Name: "none", Phone: "none"} + + data := ` + + Grace R. Emlin + Example Inc. + + gre@example.com + + + gre@work.com + + + Friends + Squash + + Hanga Roa + Easter Island + + ` + err := xml.Unmarshal([]byte(data), &v) + if err != nil { + fmt.Printf("error: %v", err) + return + } + fmt.Printf("XMLName: %#v\n", v.XMLName) + fmt.Printf("Name: %q\n", v.Name) + fmt.Printf("Phone: %q\n", v.Phone) + fmt.Printf("Email: %v\n", v.Email) + fmt.Printf("Groups: %v\n", v.Groups) + fmt.Printf("Address: %v\n", v.Address) + // Output: + // XMLName: xml.Name{Space:"", Local:"Person"} + // Name: "Grace R. Emlin" + // Phone: "none" + // Email: [{home gre@example.com} {work gre@work.com}] + // Groups: [Friends Squash] + // Address: {Hanga Roa Easter Island} +} diff --git a/drives/davServer/internal/xml/marshal.go b/drives/davServer/internal/xml/marshal.go new file mode 100644 index 00000000..4dd0f417 --- /dev/null +++ b/drives/davServer/internal/xml/marshal.go @@ -0,0 +1,1223 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bufio" + "bytes" + "encoding" + "fmt" + "io" + "reflect" + "strconv" + "strings" +) + +const ( + // A generic XML header suitable for use with the output of Marshal. + // This is not automatically added to any output of this package, + // it is provided as a convenience. + Header = `` + "\n" +) + +// Marshal returns the XML encoding of v. +// +// Marshal handles an array or slice by marshalling each of the elements. +// Marshal handles a pointer by marshalling the value it points at or, if the +// pointer is nil, by writing nothing. Marshal handles an interface value by +// marshalling the value it contains or, if the interface value is nil, by +// writing nothing. Marshal handles all other data by writing one or more XML +// elements containing the data. +// +// The name for the XML elements is taken from, in order of preference: +// - the tag on the XMLName field, if the data is a struct +// - the value of the XMLName field of type xml.Name +// - the tag of the struct field used to obtain the data +// - the name of the struct field used to obtain the data +// - the name of the marshalled type +// +// The XML element for a struct contains marshalled elements for each of the +// exported fields of the struct, with these exceptions: +// - the XMLName field, described above, is omitted. +// - a field with tag "-" is omitted. +// - a field with tag "name,attr" becomes an attribute with +// the given name in the XML element. +// - a field with tag ",attr" becomes an attribute with the +// field name in the XML element. +// - a field with tag ",chardata" is written as character data, +// not as an XML element. +// - a field with tag ",innerxml" is written verbatim, not subject +// to the usual marshalling procedure. +// - a field with tag ",comment" is written as an XML comment, not +// subject to the usual marshalling procedure. It must not contain +// the "--" string within it. +// - a field with a tag including the "omitempty" option is omitted +// if the field value is empty. 
The empty values are false, 0, any +// nil pointer or interface value, and any array, slice, map, or +// string of length zero. +// - an anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// If a field uses a tag "a>b>c", then the element c will be nested inside +// parent elements a and b. Fields that appear next to each other that name +// the same parent will be enclosed in one XML element. +// +// See MarshalIndent for an example. +// +// Marshal will return an error if asked to marshal a channel, function, or map. +func Marshal(v interface{}) ([]byte, error) { + var b bytes.Buffer + if err := NewEncoder(&b).Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// Marshaler is the interface implemented by objects that can marshal +// themselves into valid XML elements. +// +// MarshalXML encodes the receiver as zero or more XML elements. +// By convention, arrays or slices are typically encoded as a sequence +// of elements, one per entry. +// Using start as the element tag is not required, but doing so +// will enable Unmarshal to match the XML elements to the correct +// struct field. +// One common implementation strategy is to construct a separate +// value with a layout corresponding to the desired XML and then +// to encode it using e.EncodeElement. +// Another common strategy is to use repeated calls to e.EncodeToken +// to generate the XML output one token at a time. +// The sequence of encoded tokens must make up zero or more valid +// XML elements. +type Marshaler interface { + MarshalXML(e *Encoder, start StartElement) error +} + +// MarshalerAttr is the interface implemented by objects that can marshal +// themselves into valid XML attributes. +// +// MarshalXMLAttr returns an XML attribute with the encoded value of the receiver. +// Using name as the attribute name is not required, but doing so +// will enable Unmarshal to match the attribute to the correct +// struct field. +// If MarshalXMLAttr returns the zero attribute Attr{}, no attribute +// will be generated in the output. +// MarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type MarshalerAttr interface { + MarshalXMLAttr(name Name) (Attr, error) +} + +// MarshalIndent works like Marshal, but each XML element begins on a new +// indented line that starts with prefix and is followed by one or more +// copies of indent according to the nesting depth. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + var b bytes.Buffer + enc := NewEncoder(&b) + enc.Indent(prefix, indent) + if err := enc.Encode(v); err != nil { + return nil, err + } + return b.Bytes(), nil +} + +// An Encoder writes XML data to an output stream. +type Encoder struct { + p printer +} + +// NewEncoder returns a new encoder that writes to w. +func NewEncoder(w io.Writer) *Encoder { + e := &Encoder{printer{Writer: bufio.NewWriter(w)}} + e.p.encoder = e + return e +} + +// Indent sets the encoder to generate XML in which each element +// begins on a new indented line that starts with prefix and is followed by +// one or more copies of indent according to the nesting depth. +func (enc *Encoder) Indent(prefix, indent string) { + enc.p.prefix = prefix + enc.p.indent = indent +} + +// Encode writes the XML encoding of v to the stream. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// Encode calls Flush before returning. 
+func (enc *Encoder) Encode(v interface{}) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, nil) + if err != nil { + return err + } + return enc.p.Flush() +} + +// EncodeElement writes the XML encoding of v to the stream, +// using start as the outermost tag in the encoding. +// +// See the documentation for Marshal for details about the conversion +// of Go values to XML. +// +// EncodeElement calls Flush before returning. +func (enc *Encoder) EncodeElement(v interface{}, start StartElement) error { + err := enc.p.marshalValue(reflect.ValueOf(v), nil, &start) + if err != nil { + return err + } + return enc.p.Flush() +} + +var ( + begComment = []byte("") + endProcInst = []byte("?>") + endDirective = []byte(">") +) + +// EncodeToken writes the given XML token to the stream. +// It returns an error if StartElement and EndElement tokens are not +// properly matched. +// +// EncodeToken does not call Flush, because usually it is part of a +// larger operation such as Encode or EncodeElement (or a custom +// Marshaler's MarshalXML invoked during those), and those will call +// Flush when finished. Callers that create an Encoder and then invoke +// EncodeToken directly, without using Encode or EncodeElement, need to +// call Flush when finished to ensure that the XML is written to the +// underlying writer. +// +// EncodeToken allows writing a ProcInst with Target set to "xml" only +// as the first token in the stream. +// +// When encoding a StartElement holding an XML namespace prefix +// declaration for a prefix that is not already declared, contained +// elements (including the StartElement itself) will use the declared +// prefix when encoding names with matching namespace URIs. +func (enc *Encoder) EncodeToken(t Token) error { + + p := &enc.p + switch t := t.(type) { + case StartElement: + if err := p.writeStart(&t); err != nil { + return err + } + case EndElement: + if err := p.writeEnd(t.Name); err != nil { + return err + } + case CharData: + escapeText(p, t, false) + case Comment: + if bytes.Contains(t, endComment) { + return fmt.Errorf("xml: EncodeToken of Comment containing --> marker") + } + p.WriteString("") + return p.cachedWriteError() + case ProcInst: + // First token to be encoded which is also a ProcInst with target of xml + // is the xml declaration. The only ProcInst where target of xml is allowed. + if t.Target == "xml" && p.Buffered() != 0 { + return fmt.Errorf("xml: EncodeToken of ProcInst xml target only valid for xml declaration, first token encoded") + } + if !isNameString(t.Target) { + return fmt.Errorf("xml: EncodeToken of ProcInst with invalid Target") + } + if bytes.Contains(t.Inst, endProcInst) { + return fmt.Errorf("xml: EncodeToken of ProcInst containing ?> marker") + } + p.WriteString(" 0 { + p.WriteByte(' ') + p.Write(t.Inst) + } + p.WriteString("?>") + case Directive: + if !isValidDirective(t) { + return fmt.Errorf("xml: EncodeToken of Directive containing wrong < or > markers") + } + p.WriteString("") + default: + return fmt.Errorf("xml: EncodeToken of invalid token type") + + } + return p.cachedWriteError() +} + +// isValidDirective reports whether dir is a valid directive text, +// meaning angle brackets are matched, ignoring comments and strings. 
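A minimal sketch of the token-level contract documented above, assuming the stock encoding/xml API (which this fork keeps): an xml-target ProcInst is only accepted as the very first token, end elements must match the start elements still open, and a caller driving EncodeToken directly must Flush before the output reaches the writer.

package main

import (
	"encoding/xml"
	"os"
)

func main() {
	enc := xml.NewEncoder(os.Stdout)

	start := xml.StartElement{Name: xml.Name{Local: "note"}}
	tokens := []xml.Token{
		// An xml-target ProcInst is only valid as the first token in the stream.
		xml.ProcInst{Target: "xml", Inst: []byte(`version="1.0"`)},
		start,
		xml.CharData("hello"),
		// The end element has to match the start element still open.
		xml.EndElement{Name: start.Name},
	}
	for _, t := range tokens {
		if err := enc.EncodeToken(t); err != nil {
			panic(err)
		}
	}
	// EncodeToken only buffers; nothing is written until Flush.
	if err := enc.Flush(); err != nil {
		panic(err)
	}
}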
+func isValidDirective(dir Directive) bool { + var ( + depth int + inquote uint8 + incomment bool + ) + for i, c := range dir { + switch { + case incomment: + if c == '>' { + if n := 1 + i - len(endComment); n >= 0 && bytes.Equal(dir[n:i+1], endComment) { + incomment = false + } + } + // Just ignore anything in comment + case inquote != 0: + if c == inquote { + inquote = 0 + } + // Just ignore anything within quotes + case c == '\'' || c == '"': + inquote = c + case c == '<': + if i+len(begComment) < len(dir) && bytes.Equal(dir[i:i+len(begComment)], begComment) { + incomment = true + } else { + depth++ + } + case c == '>': + if depth == 0 { + return false + } + depth-- + } + } + return depth == 0 && inquote == 0 && !incomment +} + +// Flush flushes any buffered XML to the underlying writer. +// See the EncodeToken documentation for details about when it is necessary. +func (enc *Encoder) Flush() error { + return enc.p.Flush() +} + +type printer struct { + *bufio.Writer + encoder *Encoder + seq int + indent string + prefix string + depth int + indentedIn bool + putNewline bool + defaultNS string + attrNS map[string]string // map prefix -> name space + attrPrefix map[string]string // map name space -> prefix + prefixes []printerPrefix + tags []Name +} + +// printerPrefix holds a namespace undo record. +// When an element is popped, the prefix record +// is set back to the recorded URL. The empty +// prefix records the URL for the default name space. +// +// The start of an element is recorded with an element +// that has mark=true. +type printerPrefix struct { + prefix string + url string + mark bool +} + +func (p *printer) prefixForNS(url string, isAttr bool) string { + // The "http://www.w3.org/XML/1998/namespace" name space is predefined as "xml" + // and must be referred to that way. + // (The "http://www.w3.org/2000/xmlns/" name space is also predefined as "xmlns", + // but users should not be trying to use that one directly - that's our job.) + if url == xmlURL { + return "xml" + } + if !isAttr && url == p.defaultNS { + // We can use the default name space. + return "" + } + return p.attrPrefix[url] +} + +// defineNS pushes any namespace definition found in the given attribute. +// If ignoreNonEmptyDefault is true, an xmlns="nonempty" +// attribute will be ignored. +func (p *printer) defineNS(attr Attr, ignoreNonEmptyDefault bool) error { + var prefix string + if attr.Name.Local == "xmlns" { + if attr.Name.Space != "" && attr.Name.Space != "xml" && attr.Name.Space != xmlURL { + return fmt.Errorf("xml: cannot redefine xmlns attribute prefix") + } + } else if attr.Name.Space == "xmlns" && attr.Name.Local != "" { + prefix = attr.Name.Local + if attr.Value == "" { + // Technically, an empty XML namespace is allowed for an attribute. + // From http://www.w3.org/TR/xml-names11/#scoping-defaulting: + // + // The attribute value in a namespace declaration for a prefix may be + // empty. This has the effect, within the scope of the declaration, of removing + // any association of the prefix with a namespace name. + // + // However our namespace prefixes here are used only as hints. There's + // no need to respect the removal of a namespace prefix, so we ignore it. + return nil + } + } else { + // Ignore: it's not a namespace definition + return nil + } + if prefix == "" { + if attr.Value == p.defaultNS { + // No need for redefinition. + return nil + } + if attr.Value != "" && ignoreNonEmptyDefault { + // We have an xmlns="..." 
value but + // it can't define a name space in this context, + // probably because the element has an empty + // name space. In this case, we just ignore + // the name space declaration. + return nil + } + } else if _, ok := p.attrPrefix[attr.Value]; ok { + // There's already a prefix for the given name space, + // so use that. This prevents us from + // having two prefixes for the same name space + // so attrNS and attrPrefix can remain bijective. + return nil + } + p.pushPrefix(prefix, attr.Value) + return nil +} + +// createNSPrefix creates a name space prefix attribute +// to use for the given name space, defining a new prefix +// if necessary. +// If isAttr is true, the prefix is to be created for an attribute +// prefix, which means that the default name space cannot +// be used. +func (p *printer) createNSPrefix(url string, isAttr bool) { + if _, ok := p.attrPrefix[url]; ok { + // We already have a prefix for the given URL. + return + } + switch { + case !isAttr && url == p.defaultNS: + // We can use the default name space. + return + case url == "": + // The only way we can encode names in the empty + // name space is by using the default name space, + // so we must use that. + if p.defaultNS != "" { + // The default namespace is non-empty, so we + // need to set it to empty. + p.pushPrefix("", "") + } + return + case url == xmlURL: + return + } + // TODO If the URL is an existing prefix, we could + // use it as is. That would enable the + // marshaling of elements that had been unmarshaled + // and with a name space prefix that was not found. + // although technically it would be incorrect. + + // Pick a name. We try to use the final element of the path + // but fall back to _. + prefix := strings.TrimRight(url, "/") + if i := strings.LastIndex(prefix, "/"); i >= 0 { + prefix = prefix[i+1:] + } + if prefix == "" || !isName([]byte(prefix)) || strings.Contains(prefix, ":") { + prefix = "_" + } + if strings.HasPrefix(prefix, "xml") { + // xmlanything is reserved. + prefix = "_" + prefix + } + if p.attrNS[prefix] != "" { + // Name is taken. Find a better one. + for p.seq++; ; p.seq++ { + if id := prefix + "_" + strconv.Itoa(p.seq); p.attrNS[id] == "" { + prefix = id + break + } + } + } + + p.pushPrefix(prefix, url) +} + +// writeNamespaces writes xmlns attributes for all the +// namespace prefixes that have been defined in +// the current element. +func (p *printer) writeNamespaces() { + for i := len(p.prefixes) - 1; i >= 0; i-- { + prefix := p.prefixes[i] + if prefix.mark { + return + } + p.WriteString(" ") + if prefix.prefix == "" { + // Default name space. + p.WriteString(`xmlns="`) + } else { + p.WriteString("xmlns:") + p.WriteString(prefix.prefix) + p.WriteString(`="`) + } + EscapeText(p, []byte(p.nsForPrefix(prefix.prefix))) + p.WriteString(`"`) + } +} + +// pushPrefix pushes a new prefix on the prefix stack +// without checking to see if it is already defined. +func (p *printer) pushPrefix(prefix, url string) { + p.prefixes = append(p.prefixes, printerPrefix{ + prefix: prefix, + url: p.nsForPrefix(prefix), + }) + p.setAttrPrefix(prefix, url) +} + +// nsForPrefix returns the name space for the given +// prefix. Note that this is not valid for the +// empty attribute prefix, which always has an empty +// name space. +func (p *printer) nsForPrefix(prefix string) string { + if prefix == "" { + return p.defaultNS + } + return p.attrNS[prefix] +} + +// markPrefix marks the start of an element on the prefix +// stack. 
+func (p *printer) markPrefix() { + p.prefixes = append(p.prefixes, printerPrefix{ + mark: true, + }) +} + +// popPrefix pops all defined prefixes for the current +// element. +func (p *printer) popPrefix() { + for len(p.prefixes) > 0 { + prefix := p.prefixes[len(p.prefixes)-1] + p.prefixes = p.prefixes[:len(p.prefixes)-1] + if prefix.mark { + break + } + p.setAttrPrefix(prefix.prefix, prefix.url) + } +} + +// setAttrPrefix sets an attribute name space prefix. +// If url is empty, the attribute is removed. +// If prefix is empty, the default name space is set. +func (p *printer) setAttrPrefix(prefix, url string) { + if prefix == "" { + p.defaultNS = url + return + } + if url == "" { + delete(p.attrPrefix, p.attrNS[prefix]) + delete(p.attrNS, prefix) + return + } + if p.attrPrefix == nil { + // Need to define a new name space. + p.attrPrefix = make(map[string]string) + p.attrNS = make(map[string]string) + } + // Remove any old prefix value. This is OK because we maintain a + // strict one-to-one mapping between prefix and URL (see + // defineNS) + delete(p.attrPrefix, p.attrNS[prefix]) + p.attrPrefix[url] = prefix + p.attrNS[prefix] = url +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() + marshalerAttrType = reflect.TypeOf((*MarshalerAttr)(nil)).Elem() + textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() +) + +// marshalValue writes one or more XML elements representing val. +// If val was obtained from a struct field, finfo must have its details. +func (p *printer) marshalValue(val reflect.Value, finfo *fieldInfo, startTemplate *StartElement) error { + if startTemplate != nil && startTemplate.Name.Local == "" { + return fmt.Errorf("xml: EncodeElement of StartElement with missing name") + } + + if !val.IsValid() { + return nil + } + if finfo != nil && finfo.flags&fOmitEmpty != 0 && isEmptyValue(val) { + return nil + } + + // Drill into interfaces and pointers. + // This can turn into an infinite loop given a cyclic chain, + // but it matches the Go 1 behavior. + for val.Kind() == reflect.Interface || val.Kind() == reflect.Ptr { + if val.IsNil() { + return nil + } + val = val.Elem() + } + + kind := val.Kind() + typ := val.Type() + + // Check for marshaler. + if val.CanInterface() && typ.Implements(marshalerType) { + return p.marshalInterface(val.Interface().(Marshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerType) { + return p.marshalInterface(pv.Interface().(Marshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Check for text marshaler. + if val.CanInterface() && typ.Implements(textMarshalerType) { + return p.marshalTextInterface(val.Interface().(encoding.TextMarshaler), p.defaultStart(typ, finfo, startTemplate)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + return p.marshalTextInterface(pv.Interface().(encoding.TextMarshaler), p.defaultStart(pv.Type(), finfo, startTemplate)) + } + } + + // Slices and arrays iterate over the elements. They do not have an enclosing tag. + if (kind == reflect.Slice || kind == reflect.Array) && typ.Elem().Kind() != reflect.Uint8 { + for i, n := 0, val.Len(); i < n; i++ { + if err := p.marshalValue(val.Index(i), finfo, startTemplate); err != nil { + return err + } + } + return nil + } + + tinfo, err := getTypeInfo(typ) + if err != nil { + return err + } + + // Create start element. 
+ // Precedence for the XML element name is: + // 0. startTemplate + // 1. XMLName field in underlying struct; + // 2. field name/tag in the struct field; and + // 3. type name + var start StartElement + + // explicitNS records whether the element's name space has been + // explicitly set (for example an XMLName field). + explicitNS := false + + if startTemplate != nil { + start.Name = startTemplate.Name + explicitNS = true + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if tinfo.xmlname != nil { + xmlname := tinfo.xmlname + if xmlname.name != "" { + start.Name.Space, start.Name.Local = xmlname.xmlns, xmlname.name + } else if v, ok := xmlname.value(val).Interface().(Name); ok && v.Local != "" { + start.Name = v + } + explicitNS = true + } + if start.Name.Local == "" && finfo != nil { + start.Name.Local = finfo.name + if finfo.xmlns != "" { + start.Name.Space = finfo.xmlns + explicitNS = true + } + } + if start.Name.Local == "" { + name := typ.Name() + if name == "" { + return &UnsupportedTypeError{typ} + } + start.Name.Local = name + } + + // defaultNS records the default name space as set by a xmlns="..." + // attribute. We don't set p.defaultNS because we want to let + // the attribute writing code (in p.defineNS) be solely responsible + // for maintaining that. + defaultNS := p.defaultNS + + // Attributes + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr == 0 { + continue + } + attr, err := p.fieldAttr(finfo, val) + if err != nil { + return err + } + if attr.Name.Local == "" { + continue + } + start.Attr = append(start.Attr, attr) + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + defaultNS = attr.Value + } + } + if !explicitNS { + // Historic behavior: elements use the default name space + // they are contained in by default. + start.Name.Space = defaultNS + } + // Historic behaviour: an element that's in a namespace sets + // the default namespace for all elements contained within it. + start.setDefaultNamespace() + + if err := p.writeStart(&start); err != nil { + return err + } + + if val.Kind() == reflect.Struct { + err = p.marshalStruct(tinfo, val) + } else { + s, b, err1 := p.marshalSimple(typ, val) + if err1 != nil { + err = err1 + } else if b != nil { + EscapeText(p, b) + } else { + p.EscapeString(s) + } + } + if err != nil { + return err + } + + if err := p.writeEnd(start.Name); err != nil { + return err + } + + return p.cachedWriteError() +} + +// fieldAttr returns the attribute of the given field. +// If the returned attribute has an empty Name.Local, +// it should not be used. +// The given value holds the value containing the field. 
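fieldAttr gives a MarshalXMLAttr implementation first claim on an attribute's value, ahead of TextMarshaler and the plain simple-value formatting. A rough standalone sketch with a hypothetical Level type, again written against the stock encoding/xml API rather than this vendored package:

package main

import (
	"encoding/xml"
	"fmt"
)

// Level is a hypothetical type that encodes itself as an attribute value.
// The sketch assumes values in the range 0..2.
type Level int

func (l Level) MarshalXMLAttr(name xml.Name) (xml.Attr, error) {
	text := []string{"low", "medium", "high"}[l]
	return xml.Attr{Name: name, Value: text}, nil
}

type Alert struct {
	Severity Level  `xml:"severity,attr"`
	Message  string `xml:",chardata"`
}

func main() {
	out, err := xml.Marshal(Alert{Severity: 2, Message: "disk almost full"})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <Alert severity="high">disk almost full</Alert>
}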
+func (p *printer) fieldAttr(finfo *fieldInfo, val reflect.Value) (Attr, error) { + fv := finfo.value(val) + name := Name{Space: finfo.xmlns, Local: finfo.name} + if finfo.flags&fOmitEmpty != 0 && isEmptyValue(fv) { + return Attr{}, nil + } + if fv.Kind() == reflect.Interface && fv.IsNil() { + return Attr{}, nil + } + if fv.CanInterface() && fv.Type().Implements(marshalerAttrType) { + attr, err := fv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(marshalerAttrType) { + attr, err := pv.Interface().(MarshalerAttr).MarshalXMLAttr(name) + return attr, err + } + } + if fv.CanInterface() && fv.Type().Implements(textMarshalerType) { + text, err := fv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + if fv.CanAddr() { + pv := fv.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + text, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return Attr{}, err + } + return Attr{name, string(text)}, nil + } + } + // Dereference or skip nil pointer, interface values. + switch fv.Kind() { + case reflect.Ptr, reflect.Interface: + if fv.IsNil() { + return Attr{}, nil + } + fv = fv.Elem() + } + s, b, err := p.marshalSimple(fv.Type(), fv) + if err != nil { + return Attr{}, err + } + if b != nil { + s = string(b) + } + return Attr{name, s}, nil +} + +// defaultStart returns the default start element to use, +// given the reflect type, field info, and start template. +func (p *printer) defaultStart(typ reflect.Type, finfo *fieldInfo, startTemplate *StartElement) StartElement { + var start StartElement + // Precedence for the XML element name is as above, + // except that we do not look inside structs for the first field. + if startTemplate != nil { + start.Name = startTemplate.Name + start.Attr = append(start.Attr, startTemplate.Attr...) + } else if finfo != nil && finfo.name != "" { + start.Name.Local = finfo.name + start.Name.Space = finfo.xmlns + } else if typ.Name() != "" { + start.Name.Local = typ.Name() + } else { + // Must be a pointer to a named type, + // since it has the Marshaler methods. + start.Name.Local = typ.Elem().Name() + } + // Historic behaviour: elements use the name space of + // the element they are contained in by default. + if start.Name.Space == "" { + start.Name.Space = p.defaultNS + } + start.setDefaultNamespace() + return start +} + +// marshalInterface marshals a Marshaler interface value. +func (p *printer) marshalInterface(val Marshaler, start StartElement) error { + // Push a marker onto the tag stack so that MarshalXML + // cannot close the XML tags that it did not open. + p.tags = append(p.tags, Name{}) + n := len(p.tags) + + err := val.MarshalXML(p.encoder, start) + if err != nil { + return err + } + + // Make sure MarshalXML closed all its tags. p.tags[n-1] is the mark. + if len(p.tags) > n { + return fmt.Errorf("xml: %s.MarshalXML wrote invalid XML: <%s> not closed", receiverType(val), p.tags[len(p.tags)-1].Local) + } + p.tags = p.tags[:n-1] + return nil +} + +// marshalTextInterface marshals a TextMarshaler interface value. 
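marshalTextInterface is the path taken when a value implements encoding.TextMarshaler: the marshalled text becomes the element's character data. A small standalone sketch using time.Time, which implements TextMarshaler (the Departure fixture in the test file later in this diff exercises the same machinery via a ",chardata" field); the type and tag names here are illustrative only:

package main

import (
	"encoding/xml"
	"fmt"
	"time"
)

type Departure struct {
	When time.Time `xml:"when"` // time.Time provides MarshalText
}

func main() {
	d := Departure{When: time.Date(2013, 1, 9, 0, 15, 0, 0, time.UTC)}
	out, err := xml.Marshal(d)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out)) // <Departure><when>2013-01-09T00:15:00Z</when></Departure>
}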
+func (p *printer) marshalTextInterface(val encoding.TextMarshaler, start StartElement) error { + if err := p.writeStart(&start); err != nil { + return err + } + text, err := val.MarshalText() + if err != nil { + return err + } + EscapeText(p, text) + return p.writeEnd(start.Name) +} + +// writeStart writes the given start element. +func (p *printer) writeStart(start *StartElement) error { + if start.Name.Local == "" { + return fmt.Errorf("xml: start tag with no name") + } + + p.tags = append(p.tags, start.Name) + p.markPrefix() + // Define any name spaces explicitly declared in the attributes. + // We do this as a separate pass so that explicitly declared prefixes + // will take precedence over implicitly declared prefixes + // regardless of the order of the attributes. + ignoreNonEmptyDefault := start.Name.Space == "" + for _, attr := range start.Attr { + if err := p.defineNS(attr, ignoreNonEmptyDefault); err != nil { + return err + } + } + // Define any new name spaces implied by the attributes. + for _, attr := range start.Attr { + name := attr.Name + // From http://www.w3.org/TR/xml-names11/#defaulting + // "Default namespace declarations do not apply directly + // to attribute names; the interpretation of unprefixed + // attributes is determined by the element on which they + // appear." + // This means we don't need to create a new namespace + // when an attribute name space is empty. + if name.Space != "" && !name.isNamespace() { + p.createNSPrefix(name.Space, true) + } + } + p.createNSPrefix(start.Name.Space, false) + + p.writeIndent(1) + p.WriteByte('<') + p.writeName(start.Name, false) + p.writeNamespaces() + for _, attr := range start.Attr { + name := attr.Name + if name.Local == "" || name.isNamespace() { + // Namespaces have already been written by writeNamespaces above. + continue + } + p.WriteByte(' ') + p.writeName(name, true) + p.WriteString(`="`) + p.EscapeString(attr.Value) + p.WriteByte('"') + } + p.WriteByte('>') + return nil +} + +// writeName writes the given name. It assumes +// that p.createNSPrefix(name) has already been called. 
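writeName and createNSPrefix decide how namespaced names are printed. Namespace handling is precisely where this fork differs from the shipped encoding/xml (see the README above), so the exact xmlns and prefix output may not match between the two; the standalone sketch below, with a hypothetical Prop type in the DAV: namespace, simply prints whatever the package produces rather than asserting a fixed form.

package main

import (
	"encoding/xml"
	"fmt"
)

type Prop struct {
	XMLName xml.Name `xml:"DAV: prop"` // element in the "DAV:" name space
	Name    string   `xml:"displayname"`
}

func main() {
	out, err := xml.Marshal(Prop{Name: "reports"})
	if err != nil {
		panic(err)
	}
	// Exact prefix/xmlns placement depends on the namespace rules in use.
	fmt.Println(string(out))
}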
+func (p *printer) writeName(name Name, isAttr bool) { + if prefix := p.prefixForNS(name.Space, isAttr); prefix != "" { + p.WriteString(prefix) + p.WriteByte(':') + } + p.WriteString(name.Local) +} + +func (p *printer) writeEnd(name Name) error { + if name.Local == "" { + return fmt.Errorf("xml: end tag with no name") + } + if len(p.tags) == 0 || p.tags[len(p.tags)-1].Local == "" { + return fmt.Errorf("xml: end tag without start tag", name.Local) + } + if top := p.tags[len(p.tags)-1]; top != name { + if top.Local != name.Local { + return fmt.Errorf("xml: end tag does not match start tag <%s>", name.Local, top.Local) + } + return fmt.Errorf("xml: end tag in namespace %s does not match start tag <%s> in namespace %s", name.Local, name.Space, top.Local, top.Space) + } + p.tags = p.tags[:len(p.tags)-1] + + p.writeIndent(-1) + p.WriteByte('<') + p.WriteByte('/') + p.writeName(name, false) + p.WriteByte('>') + p.popPrefix() + return nil +} + +func (p *printer) marshalSimple(typ reflect.Type, val reflect.Value) (string, []byte, error) { + switch val.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(val.Int(), 10), nil, nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(val.Uint(), 10), nil, nil + case reflect.Float32, reflect.Float64: + return strconv.FormatFloat(val.Float(), 'g', -1, val.Type().Bits()), nil, nil + case reflect.String: + return val.String(), nil, nil + case reflect.Bool: + return strconv.FormatBool(val.Bool()), nil, nil + case reflect.Array: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // [...]byte + var bytes []byte + if val.CanAddr() { + bytes = val.Slice(0, val.Len()).Bytes() + } else { + bytes = make([]byte, val.Len()) + reflect.Copy(reflect.ValueOf(bytes), val) + } + return "", bytes, nil + case reflect.Slice: + if typ.Elem().Kind() != reflect.Uint8 { + break + } + // []byte + return "", val.Bytes(), nil + } + return "", nil, &UnsupportedTypeError{typ} +} + +var ddBytes = []byte("--") + +func (p *printer) marshalStruct(tinfo *typeInfo, val reflect.Value) error { + s := parentStack{p: p} + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fAttr != 0 { + continue + } + vf := finfo.value(val) + + // Dereference or skip nil pointer, interface values. 
+ switch vf.Kind() { + case reflect.Ptr, reflect.Interface: + if !vf.IsNil() { + vf = vf.Elem() + } + } + + switch finfo.flags & fMode { + case fCharData: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + if vf.CanInterface() && vf.Type().Implements(textMarshalerType) { + data, err := vf.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + if vf.CanAddr() { + pv := vf.Addr() + if pv.CanInterface() && pv.Type().Implements(textMarshalerType) { + data, err := pv.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + Escape(p, data) + continue + } + } + var scratch [64]byte + switch vf.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + Escape(p, strconv.AppendInt(scratch[:0], vf.Int(), 10)) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + Escape(p, strconv.AppendUint(scratch[:0], vf.Uint(), 10)) + case reflect.Float32, reflect.Float64: + Escape(p, strconv.AppendFloat(scratch[:0], vf.Float(), 'g', -1, vf.Type().Bits())) + case reflect.Bool: + Escape(p, strconv.AppendBool(scratch[:0], vf.Bool())) + case reflect.String: + if err := EscapeText(p, []byte(vf.String())); err != nil { + return err + } + case reflect.Slice: + if elem, ok := vf.Interface().([]byte); ok { + if err := EscapeText(p, elem); err != nil { + return err + } + } + } + continue + + case fComment: + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + k := vf.Kind() + if !(k == reflect.String || k == reflect.Slice && vf.Type().Elem().Kind() == reflect.Uint8) { + return fmt.Errorf("xml: bad type for comment field of %s", val.Type()) + } + if vf.Len() == 0 { + continue + } + p.writeIndent(0) + p.WriteString("" is invalid grammar. Make it "- -->" + p.WriteByte(' ') + } + p.WriteString("-->") + continue + + case fInnerXml: + iface := vf.Interface() + switch raw := iface.(type) { + case []byte: + p.Write(raw) + continue + case string: + p.WriteString(raw) + continue + } + + case fElement, fElement | fAny: + if err := s.setParents(finfo, vf); err != nil { + return err + } + } + if err := p.marshalValue(vf, finfo, nil); err != nil { + return err + } + } + if err := s.setParents(&noField, reflect.Value{}); err != nil { + return err + } + return p.cachedWriteError() +} + +var noField fieldInfo + +// return the bufio Writer's cached write error +func (p *printer) cachedWriteError() error { + _, err := p.Write(nil) + return err +} + +func (p *printer) writeIndent(depthDelta int) { + if len(p.prefix) == 0 && len(p.indent) == 0 { + return + } + if depthDelta < 0 { + p.depth-- + if p.indentedIn { + p.indentedIn = false + return + } + p.indentedIn = false + } + if p.putNewline { + p.WriteByte('\n') + } else { + p.putNewline = true + } + if len(p.prefix) > 0 { + p.WriteString(p.prefix) + } + if len(p.indent) > 0 { + for i := 0; i < p.depth; i++ { + p.WriteString(p.indent) + } + } + if depthDelta > 0 { + p.depth++ + p.indentedIn = true + } +} + +type parentStack struct { + p *printer + xmlns string + parents []string +} + +// setParents sets the stack of current parents to those found in finfo. +// It only writes the start elements if vf holds a non-nil value. +// If finfo is &noField, it pops all elements. 
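setParents is what implements "a>b>c" field tags: consecutive fields naming the same parents share one wrapper element, and nil pointer fields skip their wrappers entirely. A rough standalone sketch with a hypothetical Person type, using the stock encoding/xml API:

package main

import (
	"encoding/xml"
	"fmt"
)

type Person struct {
	First string `xml:"name>first"`
	Last  string `xml:"name>last"`
	City  string `xml:"address>city"`
}

func main() {
	out, err := xml.Marshal(Person{First: "Grace", Last: "Emlin", City: "Hanga Roa"})
	if err != nil {
		panic(err)
	}
	// Both name>... fields land inside a single <name> wrapper:
	// <Person><name><first>Grace</first><last>Emlin</last></name>
	//         <address><city>Hanga Roa</city></address></Person>
	fmt.Println(string(out))
}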
+func (s *parentStack) setParents(finfo *fieldInfo, vf reflect.Value) error { + xmlns := s.p.defaultNS + if finfo.xmlns != "" { + xmlns = finfo.xmlns + } + commonParents := 0 + if xmlns == s.xmlns { + for ; commonParents < len(finfo.parents) && commonParents < len(s.parents); commonParents++ { + if finfo.parents[commonParents] != s.parents[commonParents] { + break + } + } + } + // Pop off any parents that aren't in common with the previous field. + for i := len(s.parents) - 1; i >= commonParents; i-- { + if err := s.p.writeEnd(Name{ + Space: s.xmlns, + Local: s.parents[i], + }); err != nil { + return err + } + } + s.parents = finfo.parents + s.xmlns = xmlns + if commonParents >= len(s.parents) { + // No new elements to push. + return nil + } + if (vf.Kind() == reflect.Ptr || vf.Kind() == reflect.Interface) && vf.IsNil() { + // The element is nil, so no need for the start elements. + s.parents = s.parents[:commonParents] + return nil + } + // Push any new parents required. + for _, name := range s.parents[commonParents:] { + start := &StartElement{ + Name: Name{ + Space: s.xmlns, + Local: name, + }, + } + // Set the default name space for parent elements + // to match what we do with other elements. + if s.xmlns != s.p.defaultNS { + start.setDefaultNamespace() + } + if err := s.p.writeStart(start); err != nil { + return err + } + } + return nil +} + +// A MarshalXMLError is returned when Marshal encounters a type +// that cannot be converted into XML. +type UnsupportedTypeError struct { + Type reflect.Type +} + +func (e *UnsupportedTypeError) Error() string { + return "xml: unsupported type: " + e.Type.String() +} + +func isEmptyValue(v reflect.Value) bool { + switch v.Kind() { + case reflect.Array, reflect.Map, reflect.Slice, reflect.String: + return v.Len() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Interface, reflect.Ptr: + return v.IsNil() + } + return false +} diff --git a/drives/davServer/internal/xml/marshal_test.go b/drives/davServer/internal/xml/marshal_test.go new file mode 100644 index 00000000..226cfd01 --- /dev/null +++ b/drives/davServer/internal/xml/marshal_test.go @@ -0,0 +1,1939 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
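Unless a case is flagged MarshalOnly or UnmarshalOnly, the table in this test file is exercised in both directions: the value is marshalled and compared against ExpectXML, and ExpectXML is unmarshalled and compared with reflect.DeepEqual. A standalone sketch of that pattern with a cut-down Port-like type (the real fixture also carries a comment field), written against the stock encoding/xml API:

package main

import (
	"encoding/xml"
	"fmt"
	"reflect"
)

type Port struct {
	XMLName struct{} `xml:"port"`
	Type    string   `xml:"type,attr,omitempty"`
	Number  string   `xml:",chardata"`
}

func main() {
	want := &Port{Type: "ssl", Number: "443"}
	const expectXML = `<port type="ssl">443</port>`

	// Marshal direction.
	data, err := xml.Marshal(want)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data) == expectXML) // true

	// Unmarshal direction.
	got := new(Port)
	if err := xml.Unmarshal([]byte(expectXML), got); err != nil {
		panic(err)
	}
	fmt.Println(reflect.DeepEqual(got, want)) // true
}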
+ +package xml + +import ( + "bytes" + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "testing" + "time" +) + +type DriveType int + +const ( + HyperDrive DriveType = iota + ImprobabilityDrive +) + +type Passenger struct { + Name []string `xml:"name"` + Weight float32 `xml:"weight"` +} + +type Ship struct { + XMLName struct{} `xml:"spaceship"` + + Name string `xml:"name,attr"` + Pilot string `xml:"pilot,attr"` + Drive DriveType `xml:"drive"` + Age uint `xml:"age"` + Passenger []*Passenger `xml:"passenger"` + secret string +} + +type NamedType string + +type Port struct { + XMLName struct{} `xml:"port"` + Type string `xml:"type,attr,omitempty"` + Comment string `xml:",comment"` + Number string `xml:",chardata"` +} + +type Domain struct { + XMLName struct{} `xml:"domain"` + Country string `xml:",attr,omitempty"` + Name []byte `xml:",chardata"` + Comment []byte `xml:",comment"` +} + +type Book struct { + XMLName struct{} `xml:"book"` + Title string `xml:",chardata"` +} + +type Event struct { + XMLName struct{} `xml:"event"` + Year int `xml:",chardata"` +} + +type Movie struct { + XMLName struct{} `xml:"movie"` + Length uint `xml:",chardata"` +} + +type Pi struct { + XMLName struct{} `xml:"pi"` + Approximation float32 `xml:",chardata"` +} + +type Universe struct { + XMLName struct{} `xml:"universe"` + Visible float64 `xml:",chardata"` +} + +type Particle struct { + XMLName struct{} `xml:"particle"` + HasMass bool `xml:",chardata"` +} + +type Departure struct { + XMLName struct{} `xml:"departure"` + When time.Time `xml:",chardata"` +} + +type SecretAgent struct { + XMLName struct{} `xml:"agent"` + Handle string `xml:"handle,attr"` + Identity string + Obfuscate string `xml:",innerxml"` +} + +type NestedItems struct { + XMLName struct{} `xml:"result"` + Items []string `xml:">item"` + Item1 []string `xml:"Items>item1"` +} + +type NestedOrder struct { + XMLName struct{} `xml:"result"` + Field1 string `xml:"parent>c"` + Field2 string `xml:"parent>b"` + Field3 string `xml:"parent>a"` +} + +type MixedNested struct { + XMLName struct{} `xml:"result"` + A string `xml:"parent1>a"` + B string `xml:"b"` + C string `xml:"parent1>parent2>c"` + D string `xml:"parent1>d"` +} + +type NilTest struct { + A interface{} `xml:"parent1>parent2>a"` + B interface{} `xml:"parent1>b"` + C interface{} `xml:"parent1>parent2>c"` +} + +type Service struct { + XMLName struct{} `xml:"service"` + Domain *Domain `xml:"host>domain"` + Port *Port `xml:"host>port"` + Extra1 interface{} + Extra2 interface{} `xml:"host>extra2"` +} + +var nilStruct *Ship + +type EmbedA struct { + EmbedC + EmbedB EmbedB + FieldA string +} + +type EmbedB struct { + FieldB string + *EmbedC +} + +type EmbedC struct { + FieldA1 string `xml:"FieldA>A1"` + FieldA2 string `xml:"FieldA>A2"` + FieldB string + FieldC string +} + +type NameCasing struct { + XMLName struct{} `xml:"casing"` + Xy string + XY string + XyA string `xml:"Xy,attr"` + XYA string `xml:"XY,attr"` +} + +type NamePrecedence struct { + XMLName Name `xml:"Parent"` + FromTag XMLNameWithoutTag `xml:"InTag"` + FromNameVal XMLNameWithoutTag + FromNameTag XMLNameWithTag + InFieldName string +} + +type XMLNameWithTag struct { + XMLName Name `xml:"InXMLNameTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithNSTag struct { + XMLName Name `xml:"ns InXMLNameWithNSTag"` + Value string `xml:",chardata"` +} + +type XMLNameWithoutTag struct { + XMLName Name + Value string `xml:",chardata"` +} + +type NameInField struct { + Foo Name `xml:"ns foo"` +} + +type AttrTest struct { + 
Int int `xml:",attr"` + Named int `xml:"int,attr"` + Float float64 `xml:",attr"` + Uint8 uint8 `xml:",attr"` + Bool bool `xml:",attr"` + Str string `xml:",attr"` + Bytes []byte `xml:",attr"` +} + +type OmitAttrTest struct { + Int int `xml:",attr,omitempty"` + Named int `xml:"int,attr,omitempty"` + Float float64 `xml:",attr,omitempty"` + Uint8 uint8 `xml:",attr,omitempty"` + Bool bool `xml:",attr,omitempty"` + Str string `xml:",attr,omitempty"` + Bytes []byte `xml:",attr,omitempty"` +} + +type OmitFieldTest struct { + Int int `xml:",omitempty"` + Named int `xml:"int,omitempty"` + Float float64 `xml:",omitempty"` + Uint8 uint8 `xml:",omitempty"` + Bool bool `xml:",omitempty"` + Str string `xml:",omitempty"` + Bytes []byte `xml:",omitempty"` + Ptr *PresenceTest `xml:",omitempty"` +} + +type AnyTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField AnyHolder `xml:",any"` +} + +type AnyOmitTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField *AnyHolder `xml:",any,omitempty"` +} + +type AnySliceTest struct { + XMLName struct{} `xml:"a"` + Nested string `xml:"nested>value"` + AnyField []AnyHolder `xml:",any"` +} + +type AnyHolder struct { + XMLName Name + XML string `xml:",innerxml"` +} + +type RecurseA struct { + A string + B *RecurseB +} + +type RecurseB struct { + A *RecurseA + B string +} + +type PresenceTest struct { + Exists *struct{} +} + +type IgnoreTest struct { + PublicSecret string `xml:"-"` +} + +type MyBytes []byte + +type Data struct { + Bytes []byte + Attr []byte `xml:",attr"` + Custom MyBytes +} + +type Plain struct { + V interface{} +} + +type MyInt int + +type EmbedInt struct { + MyInt +} + +type Strings struct { + X []string `xml:"A>B,omitempty"` +} + +type PointerFieldsTest struct { + XMLName Name `xml:"dummy"` + Name *string `xml:"name,attr"` + Age *uint `xml:"age,attr"` + Empty *string `xml:"empty,attr"` + Contents *string `xml:",chardata"` +} + +type ChardataEmptyTest struct { + XMLName Name `xml:"test"` + Contents *string `xml:",chardata"` +} + +type MyMarshalerTest struct { +} + +var _ Marshaler = (*MyMarshalerTest)(nil) + +func (m *MyMarshalerTest) MarshalXML(e *Encoder, start StartElement) error { + e.EncodeToken(start) + e.EncodeToken(CharData([]byte("hello world"))) + e.EncodeToken(EndElement{start.Name}) + return nil +} + +type MyMarshalerAttrTest struct{} + +var _ MarshalerAttr = (*MyMarshalerAttrTest)(nil) + +func (m *MyMarshalerAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MyMarshalerValueAttrTest struct{} + +var _ MarshalerAttr = MyMarshalerValueAttrTest{} + +func (m MyMarshalerValueAttrTest) MarshalXMLAttr(name Name) (Attr, error) { + return Attr{name, "hello world"}, nil +} + +type MarshalerStruct struct { + Foo MyMarshalerAttrTest `xml:",attr"` +} + +type MarshalerValueStruct struct { + Foo MyMarshalerValueAttrTest `xml:",attr"` +} + +type InnerStruct struct { + XMLName Name `xml:"testns outer"` +} + +type OuterStruct struct { + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterNamedStruct struct { + InnerStruct + XMLName Name `xml:"outerns test"` + IntAttr int `xml:"int,attr"` +} + +type OuterNamedOrderedStruct struct { + XMLName Name `xml:"outerns test"` + InnerStruct + IntAttr int `xml:"int,attr"` +} + +type OuterOuterStruct struct { + OuterStruct +} + +type NestedAndChardata struct { + AB []string `xml:"A>B"` + Chardata string `xml:",chardata"` +} + +type NestedAndComment struct { + AB []string `xml:"A>B"` + Comment 
string `xml:",comment"` +} + +type XMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body string +} + +type NamedXMLNSFieldStruct struct { + XMLName struct{} `xml:"testns test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type XMLNSFieldStructWithOmitEmpty struct { + Ns string `xml:"xmlns,attr,omitempty"` + Body string +} + +type NamedXMLNSFieldStructWithEmptyNamespace struct { + XMLName struct{} `xml:"test"` + Ns string `xml:"xmlns,attr"` + Body string +} + +type RecursiveXMLNSFieldStruct struct { + Ns string `xml:"xmlns,attr"` + Body *RecursiveXMLNSFieldStruct `xml:",omitempty"` + Text string `xml:",omitempty"` +} + +func ifaceptr(x interface{}) interface{} { + return &x +} + +var ( + nameAttr = "Sarah" + ageAttr = uint(12) + contentsAttr = "lorem ipsum" +) + +// Unless explicitly stated as such (or *Plain), all of the +// tests below are two-way tests. When introducing new tests, +// please try to make them two-way as well to ensure that +// marshalling and unmarshalling are as symmetrical as feasible. +var marshalTests = []struct { + Value interface{} + ExpectXML string + MarshalOnly bool + UnmarshalOnly bool +}{ + // Test nil marshals to nothing + {Value: nil, ExpectXML: ``, MarshalOnly: true}, + {Value: nilStruct, ExpectXML: ``, MarshalOnly: true}, + + // Test value types + {Value: &Plain{true}, ExpectXML: `true`}, + {Value: &Plain{false}, ExpectXML: `false`}, + {Value: &Plain{int(42)}, ExpectXML: `42`}, + {Value: &Plain{int8(42)}, ExpectXML: `42`}, + {Value: &Plain{int16(42)}, ExpectXML: `42`}, + {Value: &Plain{int32(42)}, ExpectXML: `42`}, + {Value: &Plain{uint(42)}, ExpectXML: `42`}, + {Value: &Plain{uint8(42)}, ExpectXML: `42`}, + {Value: &Plain{uint16(42)}, ExpectXML: `42`}, + {Value: &Plain{uint32(42)}, ExpectXML: `42`}, + {Value: &Plain{float32(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{float64(1.25)}, ExpectXML: `1.25`}, + {Value: &Plain{uintptr(0xFFDD)}, ExpectXML: `65501`}, + {Value: &Plain{"gopher"}, ExpectXML: `gopher`}, + {Value: &Plain{[]byte("gopher")}, ExpectXML: `gopher`}, + {Value: &Plain{""}, ExpectXML: `</>`}, + {Value: &Plain{[]byte("")}, ExpectXML: `</>`}, + {Value: &Plain{[3]byte{'<', '/', '>'}}, ExpectXML: `</>`}, + {Value: &Plain{NamedType("potato")}, ExpectXML: `potato`}, + {Value: &Plain{[]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: &Plain{[3]int{1, 2, 3}}, ExpectXML: `123`}, + {Value: ifaceptr(true), MarshalOnly: true, ExpectXML: `true`}, + + // Test time. + { + Value: &Plain{time.Unix(1e9, 123456789).UTC()}, + ExpectXML: `2001-09-09T01:46:40.123456789Z`, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A pointer to struct{} may be used to test for an element's presence. + { + Value: &PresenceTest{new(struct{})}, + ExpectXML: ``, + }, + { + Value: &PresenceTest{}, + ExpectXML: ``, + }, + + // A []byte field is only nil if the element was not found. + { + Value: &Data{}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + { + Value: &Data{Bytes: []byte{}, Custom: MyBytes{}, Attr: []byte{}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Check that []byte works, including named []byte types. 
+ { + Value: &Data{Bytes: []byte("ab"), Custom: MyBytes("cd"), Attr: []byte{'v'}}, + ExpectXML: `abcd`, + }, + + // Test innerxml + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + ExpectXML: `James Bond`, + MarshalOnly: true, + }, + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "James Bond", + }, + ExpectXML: `James Bond`, + UnmarshalOnly: true, + }, + + // Test structs + {Value: &Port{Type: "ssl", Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Number: "443"}, ExpectXML: `443`}, + {Value: &Port{Type: ""}, ExpectXML: ``}, + {Value: &Port{Number: "443", Comment: "https"}, ExpectXML: `443`}, + {Value: &Port{Number: "443", Comment: "add space-"}, ExpectXML: `443`, MarshalOnly: true}, + {Value: &Domain{Name: []byte("google.com&friends")}, ExpectXML: `google.com&friends`}, + {Value: &Domain{Name: []byte("google.com"), Comment: []byte(" &friends ")}, ExpectXML: `google.com`}, + {Value: &Book{Title: "Pride & Prejudice"}, ExpectXML: `Pride & Prejudice`}, + {Value: &Event{Year: -3114}, ExpectXML: `-3114`}, + {Value: &Movie{Length: 13440}, ExpectXML: `13440`}, + {Value: &Pi{Approximation: 3.14159265}, ExpectXML: `3.1415927`}, + {Value: &Universe{Visible: 9.3e13}, ExpectXML: `9.3e+13`}, + {Value: &Particle{HasMass: true}, ExpectXML: `true`}, + {Value: &Departure{When: ParseTime("2013-01-09T00:15:00-09:00")}, ExpectXML: `2013-01-09T00:15:00-09:00`}, + {Value: atomValue, ExpectXML: atomXml}, + { + Value: &Ship{ + Name: "Heart of Gold", + Pilot: "Computer", + Age: 1, + Drive: ImprobabilityDrive, + Passenger: []*Passenger{ + { + Name: []string{"Zaphod", "Beeblebrox"}, + Weight: 7.25, + }, + { + Name: []string{"Trisha", "McMillen"}, + Weight: 5.5, + }, + { + Name: []string{"Ford", "Prefect"}, + Weight: 7, + }, + { + Name: []string{"Arthur", "Dent"}, + Weight: 6.75, + }, + }, + }, + ExpectXML: `` + + `` + strconv.Itoa(int(ImprobabilityDrive)) + `` + + `1` + + `` + + `Zaphod` + + `Beeblebrox` + + `7.25` + + `` + + `` + + `Trisha` + + `McMillen` + + `5.5` + + `` + + `` + + `Ford` + + `Prefect` + + `7` + + `` + + `` + + `Arthur` + + `Dent` + + `6.75` + + `` + + ``, + }, + + // Test a>b + { + Value: &NestedItems{Items: nil, Item1: nil}, + ExpectXML: `` + + `` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{}, Item1: []string{}}, + ExpectXML: `` + + `` + + `` + + ``, + MarshalOnly: true, + }, + { + Value: &NestedItems{Items: nil, Item1: []string{"A"}}, + ExpectXML: `` + + `` + + `A` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: nil}, + ExpectXML: `` + + `` + + `A` + + `B` + + `` + + ``, + }, + { + Value: &NestedItems{Items: []string{"A", "B"}, Item1: []string{"C"}}, + ExpectXML: `` + + `` + + `A` + + `B` + + `C` + + `` + + ``, + }, + { + Value: &NestedOrder{Field1: "C", Field2: "B", Field3: "A"}, + ExpectXML: `` + + `` + + `C` + + `B` + + `A` + + `` + + ``, + }, + { + Value: &NilTest{A: "A", B: nil, C: "C"}, + ExpectXML: `` + + `` + + `A` + + `C` + + `` + + ``, + MarshalOnly: true, // Uses interface{} + }, + { + Value: &MixedNested{A: "A", B: "B", C: "C", D: "D"}, + ExpectXML: `` + + `A` + + `B` + + `` + + `C` + + `D` + + `` + + ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}}, + ExpectXML: `80`, + }, + { + Value: &Service{}, + ExpectXML: ``, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra1: "A", Extra2: "B"}, + ExpectXML: `` + + `80` + + `A` + + `B` + + ``, + MarshalOnly: true, + }, + { + Value: &Service{Port: &Port{Number: "80"}, Extra2: "example"}, + 
ExpectXML: `` + + `80` + + `example` + + ``, + MarshalOnly: true, + }, + { + Value: &struct { + XMLName struct{} `xml:"space top"` + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + E1 string `xml:"x>e"` + }{ + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + E1: "e1", + }, + ExpectXML: `` + + `abc` + + `` + + `c1` + + `d1` + + `` + + `` + + `e1` + + `` + + ``, + }, + { + Value: &struct { + XMLName Name + A string `xml:"x>a"` + B string `xml:"x>b"` + C string `xml:"space x>c"` + C1 string `xml:"space1 x>c"` + D1 string `xml:"space1 x>d"` + }{ + XMLName: Name{ + Space: "space0", + Local: "top", + }, + A: "a", + B: "b", + C: "c", + C1: "c1", + D1: "d1", + }, + ExpectXML: `` + + `ab` + + `c` + + `` + + `c1` + + `d1` + + `` + + ``, + }, + { + Value: &struct { + XMLName struct{} `xml:"top"` + B string `xml:"space x>b"` + B1 string `xml:"space1 x>b"` + }{ + B: "b", + B1: "b1", + }, + ExpectXML: `` + + `b` + + `b1` + + ``, + }, + + // Test struct embedding + { + Value: &EmbedA{ + EmbedC: EmbedC{ + FieldA1: "", // Shadowed by A.A + FieldA2: "", // Shadowed by A.A + FieldB: "A.C.B", + FieldC: "A.C.C", + }, + EmbedB: EmbedB{ + FieldB: "A.B.B", + EmbedC: &EmbedC{ + FieldA1: "A.B.C.A1", + FieldA2: "A.B.C.A2", + FieldB: "", // Shadowed by A.B.B + FieldC: "A.B.C.C", + }, + }, + FieldA: "A.A", + }, + ExpectXML: `` + + `A.C.B` + + `A.C.C` + + `` + + `A.B.B` + + `` + + `A.B.C.A1` + + `A.B.C.A2` + + `` + + `A.B.C.C` + + `` + + `A.A` + + ``, + }, + + // Test that name casing matters + { + Value: &NameCasing{Xy: "mixed", XY: "upper", XyA: "mixedA", XYA: "upperA"}, + ExpectXML: `mixedupper`, + }, + + // Test the order in which the XML element name is chosen + { + Value: &NamePrecedence{ + FromTag: XMLNameWithoutTag{Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "InXMLName"}, Value: "B"}, + FromNameTag: XMLNameWithTag{Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + MarshalOnly: true, + }, + { + Value: &NamePrecedence{ + XMLName: Name{Local: "Parent"}, + FromTag: XMLNameWithoutTag{XMLName: Name{Local: "InTag"}, Value: "A"}, + FromNameVal: XMLNameWithoutTag{XMLName: Name{Local: "FromNameVal"}, Value: "B"}, + FromNameTag: XMLNameWithTag{XMLName: Name{Local: "InXMLNameTag"}, Value: "C"}, + InFieldName: "D", + }, + ExpectXML: `` + + `A` + + `B` + + `C` + + `D` + + ``, + UnmarshalOnly: true, + }, + + // xml.Name works in a plain field as well. + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + }, + { + Value: &NameInField{Name{Space: "ns", Local: "foo"}}, + ExpectXML: ``, + UnmarshalOnly: true, + }, + + // Marshaling zero xml.Name uses the tag or field name. 
+ { + Value: &NameInField{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // Test attributes + { + Value: &AttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &AttrTest{Bytes: []byte{}}, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + }, + ExpectXML: ``, + }, + { + Value: &OmitAttrTest{}, + ExpectXML: ``, + }, + + // pointer fields + { + Value: &PointerFieldsTest{Name: &nameAttr, Age: &ageAttr, Contents: &contentsAttr}, + ExpectXML: `lorem ipsum`, + MarshalOnly: true, + }, + + // empty chardata pointer field + { + Value: &ChardataEmptyTest{}, + ExpectXML: ``, + MarshalOnly: true, + }, + + // omitempty on fields + { + Value: &OmitFieldTest{ + Int: 8, + Named: 9, + Float: 23.5, + Uint8: 255, + Bool: true, + Str: "str", + Bytes: []byte("byt"), + Ptr: &PresenceTest{}, + }, + ExpectXML: `` + + `8` + + `9` + + `23.5` + + `255` + + `true` + + `str` + + `byt` + + `` + + ``, + }, + { + Value: &OmitFieldTest{}, + ExpectXML: ``, + }, + + // Test ",any" + { + ExpectXML: `knownunknown`, + Value: &AnyTest{ + Nested: "known", + AnyField: AnyHolder{ + XMLName: Name{Local: "other"}, + XML: "unknown", + }, + }, + }, + { + Value: &AnyTest{Nested: "known", + AnyField: AnyHolder{ + XML: "", + XMLName: Name{Local: "AnyField"}, + }, + }, + ExpectXML: `known`, + }, + { + ExpectXML: `b`, + Value: &AnyOmitTest{ + Nested: "b", + }, + }, + { + ExpectXML: `bei`, + Value: &AnySliceTest{ + Nested: "b", + AnyField: []AnyHolder{ + { + XMLName: Name{Local: "c"}, + XML: "e", + }, + { + XMLName: Name{Space: "f", Local: "g"}, + XML: "i", + }, + }, + }, + }, + { + ExpectXML: `b`, + Value: &AnySliceTest{ + Nested: "b", + }, + }, + + // Test recursive types. + { + Value: &RecurseA{ + A: "a1", + B: &RecurseB{ + A: &RecurseA{"a2", nil}, + B: "b1", + }, + }, + ExpectXML: `a1a2b1`, + }, + + // Test ignoring fields via "-" tag + { + ExpectXML: ``, + Value: &IgnoreTest{}, + }, + { + ExpectXML: ``, + Value: &IgnoreTest{PublicSecret: "can't tell"}, + MarshalOnly: true, + }, + { + ExpectXML: `ignore me`, + Value: &IgnoreTest{}, + UnmarshalOnly: true, + }, + + // Test escaping. + { + ExpectXML: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + Value: &AnyTest{ + Nested: `dquote: "; squote: '; ampersand: &; less: <; greater: >;`, + AnyField: AnyHolder{XMLName: Name{Local: "empty"}}, + }, + }, + { + ExpectXML: `newline: ; cr: ; tab: ;`, + Value: &AnyTest{ + Nested: "newline: \n; cr: \r; tab: \t;", + AnyField: AnyHolder{XMLName: Name{Local: "AnyField"}}, + }, + }, + { + ExpectXML: "1\r2\r\n3\n\r4\n5", + Value: &AnyTest{ + Nested: "1\n2\n3\n\n4\n5", + }, + UnmarshalOnly: true, + }, + { + ExpectXML: `42`, + Value: &EmbedInt{ + MyInt: 42, + }, + }, + // Test omitempty with parent chain; see golang.org/issue/4168. + { + ExpectXML: ``, + Value: &Strings{}, + }, + // Custom marshalers. 
+ { + ExpectXML: `hello world`, + Value: &MyMarshalerTest{}, + }, + { + ExpectXML: ``, + Value: &MarshalerStruct{}, + }, + { + ExpectXML: ``, + Value: &MarshalerValueStruct{}, + }, + { + ExpectXML: ``, + Value: &OuterStruct{IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterNamedOrderedStruct{XMLName: Name{Space: "outerns", Local: "test"}, IntAttr: 10}, + }, + { + ExpectXML: ``, + Value: &OuterOuterStruct{OuterStruct{IntAttr: 10}}, + }, + { + ExpectXML: `test`, + Value: &NestedAndChardata{AB: make([]string, 2), Chardata: "test"}, + }, + { + ExpectXML: ``, + Value: &NestedAndComment{AB: make([]string, 2), Comment: "test"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "http://example.com/ns", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStruct{Ns: "", Body: "hello world"}, + }, + { + ExpectXML: `hello world`, + Value: &XMLNSFieldStructWithOmitEmpty{Body: "hello world"}, + }, + { + // The xmlns attribute must be ignored because the + // element is in the empty namespace, so it's not possible + // to set the default namespace to something non-empty. + ExpectXML: `hello world`, + Value: &NamedXMLNSFieldStructWithEmptyNamespace{Ns: "foo", Body: "hello world"}, + MarshalOnly: true, + }, + { + ExpectXML: `hello world`, + Value: &RecursiveXMLNSFieldStruct{ + Ns: "foo", + Body: &RecursiveXMLNSFieldStruct{ + Text: "hello world", + }, + }, + }, +} + +func TestMarshal(t *testing.T) { + for idx, test := range marshalTests { + if test.UnmarshalOnly { + continue + } + data, err := Marshal(test.Value) + if err != nil { + t.Errorf("#%d: marshal(%#v): %s", idx, test.Value, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + if strings.Contains(want, "\n") { + t.Errorf("#%d: marshal(%#v):\nHAVE:\n%s\nWANT:\n%s", idx, test.Value, got, want) + } else { + t.Errorf("#%d: marshal(%#v):\nhave %#q\nwant %#q", idx, test.Value, got, want) + } + } + } +} + +type AttrParent struct { + X string `xml:"X>Y,attr"` +} + +type BadAttr struct { + Name []string `xml:"name,attr"` +} + +var marshalErrorTests = []struct { + Value interface{} + Err string + Kind reflect.Kind +}{ + { + Value: make(chan bool), + Err: "xml: unsupported type: chan bool", + Kind: reflect.Chan, + }, + { + Value: map[string]string{ + "question": "What do you get when you multiply six by nine?", + "answer": "42", + }, + Err: "xml: unsupported type: map[string]string", + Kind: reflect.Map, + }, + { + Value: map[*Ship]bool{nil: false}, + Err: "xml: unsupported type: map[*xml.Ship]bool", + Kind: reflect.Map, + }, + { + Value: &Domain{Comment: []byte("f--bar")}, + Err: `xml: comments must not contain "--"`, + }, + // Reject parent chain with attr, never worked; see golang.org/issue/5033. 
+ { + Value: &AttrParent{}, + Err: `xml: X>Y chain not valid with attr flag`, + }, + { + Value: BadAttr{[]string{"X", "Y"}}, + Err: `xml: unsupported type: []string`, + }, +} + +var marshalIndentTests = []struct { + Value interface{} + Prefix string + Indent string + ExpectXML string +}{ + { + Value: &SecretAgent{ + Handle: "007", + Identity: "James Bond", + Obfuscate: "", + }, + Prefix: "", + Indent: "\t", + ExpectXML: fmt.Sprintf("\n\tJames Bond\n"), + }, +} + +func TestMarshalErrors(t *testing.T) { + for idx, test := range marshalErrorTests { + data, err := Marshal(test.Value) + if err == nil { + t.Errorf("#%d: marshal(%#v) = [success] %q, want error %v", idx, test.Value, data, test.Err) + continue + } + if err.Error() != test.Err { + t.Errorf("#%d: marshal(%#v) = [error] %v, want %v", idx, test.Value, err, test.Err) + } + if test.Kind != reflect.Invalid { + if kind := err.(*UnsupportedTypeError).Type.Kind(); kind != test.Kind { + t.Errorf("#%d: marshal(%#v) = [error kind] %s, want %s", idx, test.Value, kind, test.Kind) + } + } + } +} + +// Do invertibility testing on the various structures that we test +func TestUnmarshal(t *testing.T) { + for i, test := range marshalTests { + if test.MarshalOnly { + continue + } + if _, ok := test.Value.(*Plain); ok { + continue + } + vt := reflect.TypeOf(test.Value) + dest := reflect.New(vt.Elem()).Interface() + err := Unmarshal([]byte(test.ExpectXML), dest) + + switch fix := dest.(type) { + case *Feed: + fix.Author.InnerXML = "" + for i := range fix.Entry { + fix.Entry[i].Author.InnerXML = "" + } + } + + if err != nil { + t.Errorf("#%d: unexpected error: %#v", i, err) + } else if got, want := dest, test.Value; !reflect.DeepEqual(got, want) { + t.Errorf("#%d: unmarshal(%q):\nhave %#v\nwant %#v", i, test.ExpectXML, got, want) + } + } +} + +func TestMarshalIndent(t *testing.T) { + for i, test := range marshalIndentTests { + data, err := MarshalIndent(test.Value, test.Prefix, test.Indent) + if err != nil { + t.Errorf("#%d: Error: %s", i, err) + continue + } + if got, want := string(data), test.ExpectXML; got != want { + t.Errorf("#%d: MarshalIndent:\nGot:%s\nWant:\n%s", i, got, want) + } + } +} + +type limitedBytesWriter struct { + w io.Writer + remain int // until writes fail +} + +func (lw *limitedBytesWriter) Write(p []byte) (n int, err error) { + if lw.remain <= 0 { + println("error") + return 0, errors.New("write limit hit") + } + if len(p) > lw.remain { + p = p[:lw.remain] + n, _ = lw.w.Write(p) + lw.remain = 0 + return n, errors.New("write limit hit") + } + n, err = lw.w.Write(p) + lw.remain -= n + return n, err +} + +func TestMarshalWriteErrors(t *testing.T) { + var buf bytes.Buffer + const writeCap = 1024 + w := &limitedBytesWriter{&buf, writeCap} + enc := NewEncoder(w) + var err error + var i int + const n = 4000 + for i = 1; i <= n; i++ { + err = enc.Encode(&Passenger{ + Name: []string{"Alice", "Bob"}, + Weight: 5, + }) + if err != nil { + break + } + } + if err == nil { + t.Error("expected an error") + } + if i == n { + t.Errorf("expected to fail before the end") + } + if buf.Len() != writeCap { + t.Errorf("buf.Len() = %d; want %d", buf.Len(), writeCap) + } +} + +func TestMarshalWriteIOErrors(t *testing.T) { + enc := NewEncoder(errWriter{}) + + expectErr := "unwritable" + err := enc.Encode(&Passenger{}) + if err == nil || err.Error() != expectErr { + t.Errorf("EscapeTest = [error] %v, want %v", err, expectErr) + } +} + +func TestMarshalFlush(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := 
enc.EncodeToken(CharData("hello world")); err != nil { + t.Fatalf("enc.EncodeToken: %v", err) + } + if buf.Len() > 0 { + t.Fatalf("enc.EncodeToken caused actual write: %q", buf.Bytes()) + } + if err := enc.Flush(); err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if buf.String() != "hello world" { + t.Fatalf("after enc.Flush, buf.String() = %q, want %q", buf.String(), "hello world") + } +} + +var encodeElementTests = []struct { + desc string + value interface{} + start StartElement + expectXML string +}{{ + desc: "simple string", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + }, + expectXML: `hello`, +}, { + desc: "string with added attributes", + value: "hello", + start: StartElement{ + Name: Name{Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "x"}, + Value: "y", + }, { + Name: Name{Local: "foo"}, + Value: "bar", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element with default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "start element in name space with different default name space", + value: struct { + Foo XMLNameWithNSTag + }{ + Foo: XMLNameWithNSTag{ + Value: "hello", + }, + }, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello`, +}, { + desc: "XMLMarshaler with start element with default name space", + value: &MyMarshalerTest{}, + start: StartElement{ + Name: Name{Space: "ns2", Local: "a"}, + Attr: []Attr{{ + Name: Name{Local: "xmlns"}, + // "ns" is the name space defined in XMLNameWithNSTag + Value: "ns", + }}, + }, + expectXML: `hello world`, +}} + +func TestEncodeElement(t *testing.T) { + for idx, test := range encodeElementTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + err := enc.EncodeElement(test.value, test.start) + if err != nil { + t.Fatalf("enc.EncodeElement: %v", err) + } + err = enc.Flush() + if err != nil { + t.Fatalf("enc.Flush: %v", err) + } + if got, want := buf.String(), test.expectXML; got != want { + t.Errorf("#%d(%s): EncodeElement(%#v, %#v):\nhave %#q\nwant %#q", idx, test.desc, test.value, test.start, got, want) + } + } +} + +func BenchmarkMarshal(b *testing.B) { + b.ReportAllocs() + for i := 0; i < b.N; i++ { + Marshal(atomValue) + } +} + +func BenchmarkUnmarshal(b *testing.B) { + b.ReportAllocs() + xml := []byte(atomXml) + for i := 0; i < b.N; i++ { + Unmarshal(xml, &Feed{}) + } +} + +// golang.org/issue/6556 +func TestStructPointerMarshal(t *testing.T) { + type A struct { + XMLName string `xml:"a"` + B []interface{} + } + type C struct { + XMLName Name + Value string `xml:"value"` + } + + a := new(A) + a.B = append(a.B, &C{ + XMLName: Name{Local: "c"}, + Value: "x", + }) + + b, err := Marshal(a) + if err != nil { + t.Fatal(err) + } + if x := string(b); x != "x" { + t.Fatal(x) + } + var v A + err = Unmarshal(b, &v) + if err != nil { + t.Fatal(err) + } +} + +var encodeTokenTests = []struct { + desc string + toks []Token + want string + err string +}{{ + desc: "start element with name space", + toks: []Token{ + StartElement{Name{"space", "local"}, nil}, + }, + want: ``, +}, { + desc: "start element with no name", + toks: []Token{ + StartElement{Name{"space", ""}, 
nil}, + }, + err: "xml: start tag with no name", +}, { + desc: "end element with no name", + toks: []Token{ + EndElement{Name{"space", ""}}, + }, + err: "xml: end tag with no name", +}, { + desc: "char data", + toks: []Token{ + CharData("foo"), + }, + want: `foo`, +}, { + desc: "char data with escaped chars", + toks: []Token{ + CharData(" \t\n"), + }, + want: " \n", +}, { + desc: "comment", + toks: []Token{ + Comment("foo"), + }, + want: ``, +}, { + desc: "comment with invalid content", + toks: []Token{ + Comment("foo-->"), + }, + err: "xml: EncodeToken of Comment containing --> marker", +}, { + desc: "proc instruction", + toks: []Token{ + ProcInst{"Target", []byte("Instruction")}, + }, + want: ``, +}, { + desc: "proc instruction with empty target", + toks: []Token{ + ProcInst{"", []byte("Instruction")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "proc instruction with bad content", + toks: []Token{ + ProcInst{"", []byte("Instruction?>")}, + }, + err: "xml: EncodeToken of ProcInst with invalid Target", +}, { + desc: "directive", + toks: []Token{ + Directive("foo"), + }, + want: ``, +}, { + desc: "more complex directive", + toks: []Token{ + Directive("DOCTYPE doc [ '> ]"), + }, + want: `'> ]>`, +}, { + desc: "directive instruction with bad name", + toks: []Token{ + Directive("foo>"), + }, + err: "xml: EncodeToken of Directive containing wrong < or > markers", +}, { + desc: "end tag without start tag", + toks: []Token{ + EndElement{Name{"foo", "bar"}}, + }, + err: "xml: end tag without start tag", +}, { + desc: "mismatching end tag local name", + toks: []Token{ + StartElement{Name{"", "foo"}, nil}, + EndElement{Name{"", "bar"}}, + }, + err: "xml: end tag does not match start tag ", + want: ``, +}, { + desc: "mismatching end tag namespace", + toks: []Token{ + StartElement{Name{"space", "foo"}, nil}, + EndElement{Name{"another", "foo"}}, + }, + err: "xml: end tag in namespace another does not match start tag in namespace space", + want: ``, +}, { + desc: "start element with explicit namespace", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "start element with explicit namespace and colliding prefix", + toks: []Token{ + StartElement{Name{"space", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + {Name{"space", "foo"}, "value"}, + {Name{"x", "bar"}, "other"}, + }}, + }, + want: ``, +}, { + desc: "start element using previously defined namespace", + toks: []Token{ + StartElement{Name{"", "local"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"space", "x"}, "y"}, + }}, + }, + want: ``, +}, { + desc: "nested name space with same prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space1"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space2"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + EndElement{Name{"", "foo"}}, + EndElement{Name{"", "foo"}}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"space1", "a"}, "space1 value"}, + {Name{"space2", "b"}, "space2 value"}, + }}, + }, + want: ``, +}, { + desc: "start element defining several prefixes for the same name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "a"}, "space"}, + {Name{"xmlns", "b"}, "space"}, + {Name{"space", "x"}, "value"}, + }}, + 
}, + want: ``, +}, { + desc: "nested element redefines name space", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element creates alias for default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xmlns", "y"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element defines default name space with existing prefix", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "x"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"space", "a"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element uses empty attribute name space when default ns defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "redefine xmlns", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"foo", "xmlns"}, "space"}, + }}, + }, + err: `xml: cannot redefine xmlns attribute prefix`, +}, { + desc: "xmlns with explicit name space #1", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"xml", "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "xmlns with explicit name space #2", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{xmlURL, "xmlns"}, "space"}, + }}, + }, + want: ``, +}, { + desc: "empty name space declaration is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"xmlns", "foo"}, ""}, + }}, + }, + want: ``, +}, { + desc: "attribute with no name is ignored", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", ""}, "value"}, + }}, + }, + want: ``, +}, { + desc: "namespace URL with non-valid name", + toks: []Token{ + StartElement{Name{"/34", "foo"}, []Attr{ + {Name{"/34", "x"}, "value"}, + }}, + }, + want: `<_:foo xmlns:_="/34" _:x="value">`, +}, { + desc: "nested element resets default namespace to empty", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, ""}, + {Name{"", "x"}, "value"}, + {Name{"space", "x"}, "value"}, + }}, + }, + want: ``, +}, { + desc: "nested element requires empty default name space", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"", "foo"}, nil}, + }, + want: ``, +}, { + desc: "attribute uses name space from xmlns", + toks: []Token{ + StartElement{Name{"some/space", "foo"}, []Attr{ + {Name{"", "attr"}, "value"}, + {Name{"some/space", "other"}, "other value"}, + }}, + }, + want: ``, +}, { + desc: "default name space should not be used by attributes", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + {Name{"xmlns", "bar"}, "space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "default name space not used by attributes, not explicitly defined", + toks: []Token{ + StartElement{Name{"space", "foo"}, []Attr{ + {Name{"", "xmlns"}, 
"space"}, + {Name{"space", "baz"}, "foo"}, + }}, + StartElement{Name{"space", "baz"}, nil}, + EndElement{Name{"space", "baz"}}, + EndElement{Name{"space", "foo"}}, + }, + want: ``, +}, { + desc: "impossible xmlns declaration", + toks: []Token{ + StartElement{Name{"", "foo"}, []Attr{ + {Name{"", "xmlns"}, "space"}, + }}, + StartElement{Name{"space", "bar"}, []Attr{ + {Name{"space", "attr"}, "value"}, + }}, + }, + want: ``, +}} + +func TestEncodeToken(t *testing.T) { +loop: + for i, tt := range encodeTokenTests { + var buf bytes.Buffer + enc := NewEncoder(&buf) + var err error + for j, tok := range tt.toks { + err = enc.EncodeToken(tok) + if err != nil && j < len(tt.toks)-1 { + t.Errorf("#%d %s token #%d: %v", i, tt.desc, j, err) + continue loop + } + } + errorf := func(f string, a ...interface{}) { + t.Errorf("#%d %s token #%d:%s", i, tt.desc, len(tt.toks)-1, fmt.Sprintf(f, a...)) + } + switch { + case tt.err != "" && err == nil: + errorf(" expected error; got none") + continue + case tt.err == "" && err != nil: + errorf(" got error: %v", err) + continue + case tt.err != "" && err != nil && tt.err != err.Error(): + errorf(" error mismatch; got %v, want %v", err, tt.err) + continue + } + if err := enc.Flush(); err != nil { + errorf(" %v", err) + continue + } + if got := buf.String(); got != tt.want { + errorf("\ngot %v\nwant %v", got, tt.want) + continue + } + } +} + +func TestProcInstEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to encode xml target ProcInst as first token, %s", err) + } + + if err := enc.EncodeToken(ProcInst{"Target", []byte("Instruction")}); err != nil { + t.Fatalf("enc.EncodeToken: expected to be able to add non-xml target ProcInst") + } + + if err := enc.EncodeToken(ProcInst{"xml", []byte("Instruction")}); err == nil { + t.Fatalf("enc.EncodeToken: expected to not be allowed to encode xml target ProcInst when not first token") + } +} + +func TestDecodeEncode(t *testing.T) { + var in, out bytes.Buffer + in.WriteString(` + + + +`) + dec := NewDecoder(&in) + enc := NewEncoder(&out) + for tok, err := dec.Token(); err == nil; tok, err = dec.Token() { + err = enc.EncodeToken(tok) + if err != nil { + t.Fatalf("enc.EncodeToken: Unable to encode token (%#v), %v", tok, err) + } + } +} + +// Issue 9796. Used to fail with GORACE="halt_on_error=1" -race. +func TestRace9796(t *testing.T) { + type A struct{} + type B struct { + C []A `xml:"X>Y"` + } + var wg sync.WaitGroup + for i := 0; i < 2; i++ { + wg.Add(1) + go func() { + Marshal(B{[]A{{}}}) + wg.Done() + }() + } + wg.Wait() +} + +func TestIsValidDirective(t *testing.T) { + testOK := []string{ + "<>", + "< < > >", + "' '>' >", + " ]>", + " '<' ' doc ANY> ]>", + ">>> a < comment --> [ ] >", + } + testKO := []string{ + "<", + ">", + "", + "< > > < < >", + " -->", + "", + "'", + "", + } + for _, s := range testOK { + if !isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be valid", s) + } + } + for _, s := range testKO { + if isValidDirective(Directive(s)) { + t.Errorf("Directive %q is expected to be invalid", s) + } + } +} + +// Issue 11719. EncodeToken used to silently eat tokens with an invalid type. 
+func TestSimpleUseOfEncodeToken(t *testing.T) { + var buf bytes.Buffer + enc := NewEncoder(&buf) + if err := enc.EncodeToken(&StartElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(&EndElement{Name: Name{"", "object1"}}); err == nil { + t.Errorf("enc.EncodeToken: pointer type should be rejected") + } + if err := enc.EncodeToken(StartElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: StartElement %s", err) + } + if err := enc.EncodeToken(EndElement{Name: Name{"", "object2"}}); err != nil { + t.Errorf("enc.EncodeToken: EndElement %s", err) + } + if err := enc.EncodeToken(Universe{}); err == nil { + t.Errorf("enc.EncodeToken: invalid type not caught") + } + if err := enc.Flush(); err != nil { + t.Errorf("enc.Flush: %s", err) + } + if buf.Len() == 0 { + t.Errorf("enc.EncodeToken: empty buffer") + } + want := "" + if buf.String() != want { + t.Errorf("enc.EncodeToken: expected %q; got %q", want, buf.String()) + } +} diff --git a/drives/davServer/internal/xml/read.go b/drives/davServer/internal/xml/read.go new file mode 100644 index 00000000..bfaef6f1 --- /dev/null +++ b/drives/davServer/internal/xml/read.go @@ -0,0 +1,691 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" +) + +// BUG(rsc): Mapping between XML elements and data structures is inherently flawed: +// an XML element is an order-dependent collection of anonymous +// values, while a data structure is an order-independent collection +// of named values. +// See package json for a textual representation more suitable +// to data structures. + +// Unmarshal parses the XML-encoded data and stores the result in +// the value pointed to by v, which must be an arbitrary struct, +// slice, or string. Well-formed data that does not fit into v is +// discarded. +// +// Because Unmarshal uses the reflect package, it can only assign +// to exported (upper case) fields. Unmarshal uses a case-sensitive +// comparison to match XML element names to tag values and struct +// field names. +// +// Unmarshal maps an XML element to a struct using the following rules. +// In the rules, the tag of a field refers to the value associated with the +// key 'xml' in the struct field's tag (see the example above). +// +// - If the struct has a field of type []byte or string with tag +// ",innerxml", Unmarshal accumulates the raw XML nested inside the +// element in that field. The rest of the rules still apply. +// +// - If the struct has a field named XMLName of type xml.Name, +// Unmarshal records the element name in that field. +// +// - If the XMLName field has an associated tag of the form +// "name" or "namespace-URL name", the XML element must have +// the given name (and, optionally, name space) or else Unmarshal +// returns an error. +// +// - If the XML element has an attribute whose name matches a +// struct field name with an associated tag containing ",attr" or +// the explicit name in a struct field tag of the form "name,attr", +// Unmarshal records the attribute value in that field. +// +// - If the XML element contains character data, that data is +// accumulated in the first struct field that has tag ",chardata". +// The struct field may have type []byte or string. 
+// If there is no such field, the character data is discarded. +// +// - If the XML element contains comments, they are accumulated in +// the first struct field that has tag ",comment". The struct +// field may have type []byte or string. If there is no such +// field, the comments are discarded. +// +// - If the XML element contains a sub-element whose name matches +// the prefix of a tag formatted as "a" or "a>b>c", unmarshal +// will descend into the XML structure looking for elements with the +// given names, and will map the innermost elements to that struct +// field. A tag starting with ">" is equivalent to one starting +// with the field name followed by ">". +// +// - If the XML element contains a sub-element whose name matches +// a struct field's XMLName tag and the struct field has no +// explicit name tag as per the previous rule, unmarshal maps +// the sub-element to that struct field. +// +// - If the XML element contains a sub-element whose name matches a +// field without any mode flags (",attr", ",chardata", etc), Unmarshal +// maps the sub-element to that struct field. +// +// - If the XML element contains a sub-element that hasn't matched any +// of the above rules and the struct has a field with tag ",any", +// unmarshal maps the sub-element to that struct field. +// +// - An anonymous struct field is handled as if the fields of its +// value were part of the outer struct. +// +// - A struct field with tag "-" is never unmarshalled into. +// +// Unmarshal maps an XML element to a string or []byte by saving the +// concatenation of that element's character data in the string or +// []byte. The saved []byte is never nil. +// +// Unmarshal maps an attribute value to a string or []byte by saving +// the value in the string or slice. +// +// Unmarshal maps an XML element to a slice by extending the length of +// the slice and mapping the element to the newly created value. +// +// Unmarshal maps an XML element or attribute value to a bool by +// setting it to the boolean value represented by the string. +// +// Unmarshal maps an XML element or attribute value to an integer or +// floating-point field by setting the field to the result of +// interpreting the string value in decimal. There is no check for +// overflow. +// +// Unmarshal maps an XML element to an xml.Name by recording the +// element name. +// +// Unmarshal maps an XML element to a pointer by setting the pointer +// to a freshly allocated value and then mapping the element to that value. +func Unmarshal(data []byte, v interface{}) error { + return NewDecoder(bytes.NewReader(data)).Decode(v) +} + +// Decode works like xml.Unmarshal, except it reads the decoder +// stream to find the start element. +func (d *Decoder) Decode(v interface{}) error { + return d.DecodeElement(v, nil) +} + +// DecodeElement works like xml.Unmarshal except that it takes +// a pointer to the start XML element to decode into v. +// It is useful when a client reads some raw XML tokens itself +// but also wants to defer to Unmarshal for some elements. +func (d *Decoder) DecodeElement(v interface{}, start *StartElement) error { + val := reflect.ValueOf(v) + if val.Kind() != reflect.Ptr { + return errors.New("non-pointer passed to Unmarshal") + } + return d.unmarshal(val.Elem(), start) +} + +// An UnmarshalError represents an error in the unmarshalling process. 
+type UnmarshalError string + +func (e UnmarshalError) Error() string { return string(e) } + +// Unmarshaler is the interface implemented by objects that can unmarshal +// an XML element description of themselves. +// +// UnmarshalXML decodes a single XML element +// beginning with the given start element. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXML must consume exactly one XML element. +// One common implementation strategy is to unmarshal into +// a separate value with a layout matching the expected XML +// using d.DecodeElement, and then to copy the data from +// that value into the receiver. +// Another common strategy is to use d.Token to process the +// XML object one token at a time. +// UnmarshalXML may not use d.RawToken. +type Unmarshaler interface { + UnmarshalXML(d *Decoder, start StartElement) error +} + +// UnmarshalerAttr is the interface implemented by objects that can unmarshal +// an XML attribute description of themselves. +// +// UnmarshalXMLAttr decodes a single XML attribute. +// If it returns an error, the outer call to Unmarshal stops and +// returns that error. +// UnmarshalXMLAttr is used only for struct fields with the +// "attr" option in the field tag. +type UnmarshalerAttr interface { + UnmarshalXMLAttr(attr Attr) error +} + +// receiverType returns the receiver type to use in an expression like "%s.MethodName". +func receiverType(val interface{}) string { + t := reflect.TypeOf(val) + if t.Name() != "" { + return t.String() + } + return "(" + t.String() + ")" +} + +// unmarshalInterface unmarshals a single XML element into val. +// start is the opening tag of the element. +func (p *Decoder) unmarshalInterface(val Unmarshaler, start *StartElement) error { + // Record that decoder must stop at end tag corresponding to start. + p.pushEOF() + + p.unmarshalDepth++ + err := val.UnmarshalXML(p, *start) + p.unmarshalDepth-- + if err != nil { + p.popEOF() + return err + } + + if !p.popEOF() { + return fmt.Errorf("xml: %s.UnmarshalXML did not consume entire <%s> element", receiverType(val), start.Name.Local) + } + + return nil +} + +// unmarshalTextInterface unmarshals a single XML element into val. +// The chardata contained in the element (but not its children) +// is passed to the text unmarshaler. +func (p *Decoder) unmarshalTextInterface(val encoding.TextUnmarshaler, start *StartElement) error { + var buf []byte + depth := 1 + for depth > 0 { + t, err := p.Token() + if err != nil { + return err + } + switch t := t.(type) { + case CharData: + if depth == 1 { + buf = append(buf, t...) + } + case StartElement: + depth++ + case EndElement: + depth-- + } + } + return val.UnmarshalText(buf) +} + +// unmarshalAttr unmarshals a single XML attribute into val. +func (p *Decoder) unmarshalAttr(val reflect.Value, attr Attr) error { + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerAttrType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerAttrType) { + return pv.Interface().(UnmarshalerAttr).UnmarshalXMLAttr(attr) + } + } + + // Not an UnmarshalerAttr; try encoding.TextUnmarshaler. 
+ if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return val.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return pv.Interface().(encoding.TextUnmarshaler).UnmarshalText([]byte(attr.Value)) + } + } + + copyValue(val, []byte(attr.Value)) + return nil +} + +var ( + unmarshalerType = reflect.TypeOf((*Unmarshaler)(nil)).Elem() + unmarshalerAttrType = reflect.TypeOf((*UnmarshalerAttr)(nil)).Elem() + textUnmarshalerType = reflect.TypeOf((*encoding.TextUnmarshaler)(nil)).Elem() +) + +// Unmarshal a single XML element into val. +func (p *Decoder) unmarshal(val reflect.Value, start *StartElement) error { + // Find start element if we need it. + if start == nil { + for { + tok, err := p.Token() + if err != nil { + return err + } + if t, ok := tok.(StartElement); ok { + start = &t + break + } + } + } + + // Load value from interface, but only if the result will be + // usefully addressable. + if val.Kind() == reflect.Interface && !val.IsNil() { + e := val.Elem() + if e.Kind() == reflect.Ptr && !e.IsNil() { + val = e + } + } + + if val.Kind() == reflect.Ptr { + if val.IsNil() { + val.Set(reflect.New(val.Type().Elem())) + } + val = val.Elem() + } + + if val.CanInterface() && val.Type().Implements(unmarshalerType) { + // This is an unmarshaler with a non-pointer receiver, + // so it's likely to be incorrect, but we do what we're told. + return p.unmarshalInterface(val.Interface().(Unmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(unmarshalerType) { + return p.unmarshalInterface(pv.Interface().(Unmarshaler), start) + } + } + + if val.CanInterface() && val.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(val.Interface().(encoding.TextUnmarshaler), start) + } + + if val.CanAddr() { + pv := val.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + return p.unmarshalTextInterface(pv.Interface().(encoding.TextUnmarshaler), start) + } + } + + var ( + data []byte + saveData reflect.Value + comment []byte + saveComment reflect.Value + saveXML reflect.Value + saveXMLIndex int + saveXMLData []byte + saveAny reflect.Value + sv reflect.Value + tinfo *typeInfo + err error + ) + + switch v := val; v.Kind() { + default: + return errors.New("unknown type " + v.Type().String()) + + case reflect.Interface: + // TODO: For now, simply ignore the field. In the near + // future we may choose to unmarshal the start + // element on it, if not nil. + return p.Skip() + + case reflect.Slice: + typ := v.Type() + if typ.Elem().Kind() == reflect.Uint8 { + // []byte + saveData = v + break + } + + // Slice of element values. + // Grow slice. + n := v.Len() + if n >= v.Cap() { + ncap := 2 * n + if ncap < 4 { + ncap = 4 + } + new := reflect.MakeSlice(typ, n, ncap) + reflect.Copy(new, v) + v.Set(new) + } + v.SetLen(n + 1) + + // Recur to read element into slice. 
+ if err := p.unmarshal(v.Index(n), start); err != nil { + v.SetLen(n) + return err + } + return nil + + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.String: + saveData = v + + case reflect.Struct: + typ := v.Type() + if typ == nameType { + v.Set(reflect.ValueOf(start.Name)) + break + } + + sv = v + tinfo, err = getTypeInfo(typ) + if err != nil { + return err + } + + // Validate and assign element name. + if tinfo.xmlname != nil { + finfo := tinfo.xmlname + if finfo.name != "" && finfo.name != start.Name.Local { + return UnmarshalError("expected element type <" + finfo.name + "> but have <" + start.Name.Local + ">") + } + if finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + e := "expected element <" + finfo.name + "> in name space " + finfo.xmlns + " but have " + if start.Name.Space == "" { + e += "no name space" + } else { + e += start.Name.Space + } + return UnmarshalError(e) + } + fv := finfo.value(sv) + if _, ok := fv.Interface().(Name); ok { + fv.Set(reflect.ValueOf(start.Name)) + } + } + + // Assign attributes. + // Also, determine whether we need to save character data or comments. + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + switch finfo.flags & fMode { + case fAttr: + strv := finfo.value(sv) + // Look for attribute. + for _, a := range start.Attr { + if a.Name.Local == finfo.name && (finfo.xmlns == "" || finfo.xmlns == a.Name.Space) { + if err := p.unmarshalAttr(strv, a); err != nil { + return err + } + break + } + } + + case fCharData: + if !saveData.IsValid() { + saveData = finfo.value(sv) + } + + case fComment: + if !saveComment.IsValid() { + saveComment = finfo.value(sv) + } + + case fAny, fAny | fElement: + if !saveAny.IsValid() { + saveAny = finfo.value(sv) + } + + case fInnerXml: + if !saveXML.IsValid() { + saveXML = finfo.value(sv) + if p.saved == nil { + saveXMLIndex = 0 + p.saved = new(bytes.Buffer) + } else { + saveXMLIndex = p.savedOffset() + } + } + } + } + } + + // Find end element. + // Process sub-elements along the way. +Loop: + for { + var savedOffset int + if saveXML.IsValid() { + savedOffset = p.savedOffset() + } + tok, err := p.Token() + if err != nil { + return err + } + switch t := tok.(type) { + case StartElement: + consumed := false + if sv.IsValid() { + consumed, err = p.unmarshalPath(tinfo, sv, nil, &t) + if err != nil { + return err + } + if !consumed && saveAny.IsValid() { + consumed = true + if err := p.unmarshal(saveAny, &t); err != nil { + return err + } + } + } + if !consumed { + if err := p.Skip(); err != nil { + return err + } + } + + case EndElement: + if saveXML.IsValid() { + saveXMLData = p.saved.Bytes()[saveXMLIndex:savedOffset] + if saveXMLIndex == 0 { + p.saved = nil + } + } + break Loop + + case CharData: + if saveData.IsValid() { + data = append(data, t...) + } + + case Comment: + if saveComment.IsValid() { + comment = append(comment, t...) 
+ } + } + } + + if saveData.IsValid() && saveData.CanInterface() && saveData.Type().Implements(textUnmarshalerType) { + if err := saveData.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + + if saveData.IsValid() && saveData.CanAddr() { + pv := saveData.Addr() + if pv.CanInterface() && pv.Type().Implements(textUnmarshalerType) { + if err := pv.Interface().(encoding.TextUnmarshaler).UnmarshalText(data); err != nil { + return err + } + saveData = reflect.Value{} + } + } + + if err := copyValue(saveData, data); err != nil { + return err + } + + switch t := saveComment; t.Kind() { + case reflect.String: + t.SetString(string(comment)) + case reflect.Slice: + t.Set(reflect.ValueOf(comment)) + } + + switch t := saveXML; t.Kind() { + case reflect.String: + t.SetString(string(saveXMLData)) + case reflect.Slice: + t.Set(reflect.ValueOf(saveXMLData)) + } + + return nil +} + +func copyValue(dst reflect.Value, src []byte) (err error) { + dst0 := dst + + if dst.Kind() == reflect.Ptr { + if dst.IsNil() { + dst.Set(reflect.New(dst.Type().Elem())) + } + dst = dst.Elem() + } + + // Save accumulated data. + switch dst.Kind() { + case reflect.Invalid: + // Probably a comment. + default: + return errors.New("cannot unmarshal into " + dst0.Type().String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + itmp, err := strconv.ParseInt(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetInt(itmp) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + utmp, err := strconv.ParseUint(string(src), 10, dst.Type().Bits()) + if err != nil { + return err + } + dst.SetUint(utmp) + case reflect.Float32, reflect.Float64: + ftmp, err := strconv.ParseFloat(string(src), dst.Type().Bits()) + if err != nil { + return err + } + dst.SetFloat(ftmp) + case reflect.Bool: + value, err := strconv.ParseBool(strings.TrimSpace(string(src))) + if err != nil { + return err + } + dst.SetBool(value) + case reflect.String: + dst.SetString(string(src)) + case reflect.Slice: + if len(src) == 0 { + // non-nil to flag presence + src = []byte{} + } + dst.SetBytes(src) + } + return nil +} + +// unmarshalPath walks down an XML structure looking for wanted +// paths, and calls unmarshal on them. +// The consumed result tells whether XML elements have been consumed +// from the Decoder until start's matching end element, or if it's +// still untouched because start is uninteresting for sv's fields. +func (p *Decoder) unmarshalPath(tinfo *typeInfo, sv reflect.Value, parents []string, start *StartElement) (consumed bool, err error) { + recurse := false +Loop: + for i := range tinfo.fields { + finfo := &tinfo.fields[i] + if finfo.flags&fElement == 0 || len(finfo.parents) < len(parents) || finfo.xmlns != "" && finfo.xmlns != start.Name.Space { + continue + } + for j := range parents { + if parents[j] != finfo.parents[j] { + continue Loop + } + } + if len(finfo.parents) == len(parents) && finfo.name == start.Name.Local { + // It's a perfect match, unmarshal the field. + return true, p.unmarshal(finfo.value(sv), start) + } + if len(finfo.parents) > len(parents) && finfo.parents[len(parents)] == start.Name.Local { + // It's a prefix for the field. Break and recurse + // since it's not ok for one field path to be itself + // the prefix for another field path. + recurse = true + + // We can reuse the same slice as long as we + // don't try to append to it. 
+ parents = finfo.parents[:len(parents)+1] + break + } + } + if !recurse { + // We have no business with this element. + return false, nil + } + // The element is not a perfect match for any field, but one + // or more fields have the path to this element as a parent + // prefix. Recurse and attempt to match these. + for { + var tok Token + tok, err = p.Token() + if err != nil { + return true, err + } + switch t := tok.(type) { + case StartElement: + consumed2, err := p.unmarshalPath(tinfo, sv, parents, &t) + if err != nil { + return true, err + } + if !consumed2 { + if err := p.Skip(); err != nil { + return true, err + } + } + case EndElement: + return true, nil + } + } +} + +// Skip reads tokens until it has consumed the end element +// matching the most recent start element already consumed. +// It recurs if it encounters a start element, so it can be used to +// skip nested structures. +// It returns nil if it finds an end element matching the start +// element; otherwise it returns an error describing the problem. +func (d *Decoder) Skip() error { + for { + tok, err := d.Token() + if err != nil { + return err + } + switch tok.(type) { + case StartElement: + if err := d.Skip(); err != nil { + return err + } + case EndElement: + return nil + } + } +} diff --git a/drives/davServer/internal/xml/read_test.go b/drives/davServer/internal/xml/read_test.go new file mode 100644 index 00000000..02f1e10c --- /dev/null +++ b/drives/davServer/internal/xml/read_test.go @@ -0,0 +1,744 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "bytes" + "fmt" + "io" + "reflect" + "strings" + "testing" + "time" +) + +// Stripped down Atom feed data structures. + +func TestUnmarshalFeed(t *testing.T) { + var f Feed + if err := Unmarshal([]byte(atomFeedString), &f); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(f, atomFeed) { + t.Fatalf("have %#v\nwant %#v", f, atomFeed) + } +} + +// hget http://codereview.appspot.com/rss/mine/rsc +const atomFeedString = ` + +Code Review - My issueshttp://codereview.appspot.com/rietveld<>rietveld: an attempt at pubsubhubbub +2009-10-04T01:35:58+00:00email-address-removedurn:md5:134d9179c41f806be79b3a5f7877d19a + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a &lt;link rel=&quot;hub&quot; href=&quot;hub-server&quot;&gt; tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can&#39;t quite get the server to work, but I think the bug +is not in my code. I think that the server expects to be +able to grab the feed and see the feed&#39;s actual URL in +the link rel=&quot;self&quot;, but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) 
+ +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +rietveld: correct tab handling +2009-10-03T23:02:17+00:00email-address-removedurn:md5:0a2a4f19bb815101f0ba2904aed7c35a + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn&#39;t know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + + ` + +type Feed struct { + XMLName Name `xml:"http://www.w3.org/2005/Atom feed"` + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated,attr"` + Author Person `xml:"author"` + Entry []Entry `xml:"entry"` +} + +type Entry struct { + Title string `xml:"title"` + Id string `xml:"id"` + Link []Link `xml:"link"` + Updated time.Time `xml:"updated"` + Author Person `xml:"author"` + Summary Text `xml:"summary"` +} + +type Link struct { + Rel string `xml:"rel,attr,omitempty"` + Href string `xml:"href,attr"` +} + +type Person struct { + Name string `xml:"name"` + URI string `xml:"uri"` + Email string `xml:"email"` + InnerXML string `xml:",innerxml"` +} + +type Text struct { + Type string `xml:"type,attr,omitempty"` + Body string `xml:",chardata"` +} + +var atomFeed = Feed{ + XMLName: Name{"http://www.w3.org/2005/Atom", "feed"}, + Title: "Code Review - My issues", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/"}, + {Rel: "self", Href: "http://codereview.appspot.com/rss/mine/rsc"}, + }, + Id: "http://codereview.appspot.com/", + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "rietveld<>", + InnerXML: "rietveld<>", + }, + Entry: []Entry{ + { + Title: "rietveld: an attempt at pubsubhubbub\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/126085"}, + }, + Updated: ParseTime("2009-10-04T01:35:58+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:134d9179c41f806be79b3a5f7877d19a", + Summary: Text{ + Type: "html", + Body: ` + An attempt at adding pubsubhubbub support to Rietveld. +http://code.google.com/p/pubsubhubbub +http://code.google.com/p/rietveld/issues/detail?id=155 + +The server side of the protocol is trivial: + 1. add a <link rel="hub" href="hub-server"> tag to all + feeds that will be pubsubhubbubbed. + 2. every time one of those feeds changes, tell the hub + with a simple POST request. + +I have tested this by adding debug prints to a local hub +server and checking that the server got the right publish +requests. + +I can't quite get the server to work, but I think the bug +is not in my code. 
I think that the server expects to be +able to grab the feed and see the feed's actual URL in +the link rel="self", but the default value for that drops +the :port from the URL, and I cannot for the life of me +figure out how to get the Atom generator deep inside +django not to do that, or even where it is doing that, +or even what code is running to generate the Atom feed. +(I thought I knew but I added some assert False statements +and it kept running!) + +Ignoring that particular problem, I would appreciate +feedback on the right way to get the two values at +the top of feeds.py marked NOTE(rsc). + + +`, + }, + }, + { + Title: "rietveld: correct tab handling\n", + Link: []Link{ + {Rel: "alternate", Href: "http://codereview.appspot.com/124106"}, + }, + Updated: ParseTime("2009-10-03T23:02:17+00:00"), + Author: Person{ + Name: "email-address-removed", + InnerXML: "email-address-removed", + }, + Id: "urn:md5:0a2a4f19bb815101f0ba2904aed7c35a", + Summary: Text{ + Type: "html", + Body: ` + This fixes the buggy tab rendering that can be seen at +http://codereview.appspot.com/116075/diff/1/2 + +The fundamental problem was that the tab code was +not being told what column the text began in, so it +didn't know where to put the tab stops. Another problem +was that some of the code assumed that string byte +offsets were the same as column offsets, which is only +true if there are no tabs. + +In the process of fixing this, I cleaned up the arguments +to Fold and ExpandTabs and renamed them Break and +_ExpandTabs so that I could be sure that I found all the +call sites. I also wanted to verify that ExpandTabs was +not being used from outside intra_region_diff.py. + + +`, + }, + }, + }, +} + +const pathTestString = ` + + 1 + + + A + + + B + + + C + D + + <_> + E + + + 2 + +` + +type PathTestItem struct { + Value string +} + +type PathTestA struct { + Items []PathTestItem `xml:">Item1"` + Before, After string +} + +type PathTestB struct { + Other []PathTestItem `xml:"Items>Item1"` + Before, After string +} + +type PathTestC struct { + Values1 []string `xml:"Items>Item1>Value"` + Values2 []string `xml:"Items>Item2>Value"` + Before, After string +} + +type PathTestSet struct { + Item1 []PathTestItem +} + +type PathTestD struct { + Other PathTestSet `xml:"Items"` + Before, After string +} + +type PathTestE struct { + Underline string `xml:"Items>_>Value"` + Before, After string +} + +var pathTests = []interface{}{ + &PathTestA{Items: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestB{Other: []PathTestItem{{"A"}, {"D"}}, Before: "1", After: "2"}, + &PathTestC{Values1: []string{"A", "C", "D"}, Values2: []string{"B"}, Before: "1", After: "2"}, + &PathTestD{Other: PathTestSet{Item1: []PathTestItem{{"A"}, {"D"}}}, Before: "1", After: "2"}, + &PathTestE{Underline: "E", Before: "1", After: "2"}, +} + +func TestUnmarshalPaths(t *testing.T) { + for _, pt := range pathTests { + v := reflect.New(reflect.TypeOf(pt).Elem()).Interface() + if err := Unmarshal([]byte(pathTestString), v); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if !reflect.DeepEqual(v, pt) { + t.Fatalf("have %#v\nwant %#v", v, pt) + } + } +} + +type BadPathTestA struct { + First string `xml:"items>item1"` + Other string `xml:"items>item2"` + Second string `xml:"items"` +} + +type BadPathTestB struct { + Other string `xml:"items>item2>value"` + First string `xml:"items>item1"` + Second string `xml:"items>item1>value"` +} + +type BadPathTestC struct { + First string + Second string `xml:"First"` +} + +type BadPathTestD struct { 
+ BadPathEmbeddedA + BadPathEmbeddedB +} + +type BadPathEmbeddedA struct { + First string +} + +type BadPathEmbeddedB struct { + Second string `xml:"First"` +} + +var badPathTests = []struct { + v, e interface{} +}{ + {&BadPathTestA{}, &TagPathError{reflect.TypeOf(BadPathTestA{}), "First", "items>item1", "Second", "items"}}, + {&BadPathTestB{}, &TagPathError{reflect.TypeOf(BadPathTestB{}), "First", "items>item1", "Second", "items>item1>value"}}, + {&BadPathTestC{}, &TagPathError{reflect.TypeOf(BadPathTestC{}), "First", "", "Second", "First"}}, + {&BadPathTestD{}, &TagPathError{reflect.TypeOf(BadPathTestD{}), "First", "", "Second", "First"}}, +} + +func TestUnmarshalBadPaths(t *testing.T) { + for _, tt := range badPathTests { + err := Unmarshal([]byte(pathTestString), tt.v) + if !reflect.DeepEqual(err, tt.e) { + t.Fatalf("Unmarshal with %#v didn't fail properly:\nhave %#v,\nwant %#v", tt.v, err, tt.e) + } + } +} + +const OK = "OK" +const withoutNameTypeData = ` + +` + +type TestThree struct { + XMLName Name `xml:"Test3"` + Attr string `xml:",attr"` +} + +func TestUnmarshalWithoutNameType(t *testing.T) { + var x TestThree + if err := Unmarshal([]byte(withoutNameTypeData), &x); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if x.Attr != OK { + t.Fatalf("have %v\nwant %v", x.Attr, OK) + } +} + +func TestUnmarshalAttr(t *testing.T) { + type ParamVal struct { + Int int `xml:"int,attr"` + } + + type ParamPtr struct { + Int *int `xml:"int,attr"` + } + + type ParamStringPtr struct { + Int *string `xml:"int,attr"` + } + + x := []byte(``) + + p1 := &ParamPtr{} + if err := Unmarshal(x, p1); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p1.Int == nil { + t.Fatalf("Unmarshal failed in to *int field") + } else if *p1.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p1.Int, 1) + } + + p2 := &ParamVal{} + if err := Unmarshal(x, p2); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p2.Int != 1 { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p2.Int, 1) + } + + p3 := &ParamStringPtr{} + if err := Unmarshal(x, p3); err != nil { + t.Fatalf("Unmarshal: %s", err) + } + if p3.Int == nil { + t.Fatalf("Unmarshal failed in to *string field") + } else if *p3.Int != "1" { + t.Fatalf("Unmarshal with %s failed:\nhave %#v,\n want %#v", x, p3.Int, 1) + } +} + +type Tables struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table"` + FTable string `xml:"http://www.w3schools.com/furniture table"` +} + +var tables = []struct { + xml string + tab Tables + ns string +}{ + { + xml: `` + + `hello
` + + `world
` + + `
`, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world
` + + `hello
` + + `
`, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `world` + + `hello` + + ``, + tab: Tables{"hello", "world"}, + }, + { + xml: `` + + `bogus
` + + `
`, + tab: Tables{}, + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{HTable: "only"}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{FTable: "only"}, + ns: "http://www.w3schools.com/furniture", + }, + { + xml: `` + + `only
` + + `
`, + tab: Tables{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNS(t *testing.T) { + for i, tt := range tables { + var dst Tables + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestRoundTrip(t *testing.T) { + // From issue 7535 + const s = `` + in := bytes.NewBufferString(s) + for i := 0; i < 10; i++ { + out := &bytes.Buffer{} + d := NewDecoder(in) + e := NewEncoder(out) + + for { + t, err := d.Token() + if err == io.EOF { + break + } + if err != nil { + fmt.Println("failed:", err) + return + } + e.EncodeToken(t) + } + e.Flush() + in = out + } + if got := in.String(); got != s { + t.Errorf("have: %q\nwant: %q\n", got, s) + } +} + +func TestMarshalNS(t *testing.T) { + dst := Tables{"hello", "world"} + data, err := Marshal(&dst) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `hello
world
` + str := string(data) + if str != want { + t.Errorf("have: %q\nwant: %q\n", str, want) + } +} + +type TableAttrs struct { + TAttr TAttr +} + +type TAttr struct { + HTable string `xml:"http://www.w3.org/TR/html4/ table,attr"` + FTable string `xml:"http://www.w3schools.com/furniture table,attr"` + Lang string `xml:"http://www.w3.org/XML/1998/namespace lang,attr,omitempty"` + Other1 string `xml:"http://golang.org/xml/ other,attr,omitempty"` + Other2 string `xml:"http://golang.org/xmlfoo/ other,attr,omitempty"` + Other3 string `xml:"http://golang.org/json/ other,attr,omitempty"` + Other4 string `xml:"http://golang.org/2/json/ other,attr,omitempty"` +} + +var tableAttrs = []struct { + xml string + tab TableAttrs + ns string +}{ + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: "world"}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + }, + { + xml: ``, + tab: TableAttrs{}, + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "hello", FTable: ""}}, + ns: "http://www.w3schools.com/furniture", + }, + { + // Default space does not apply to attribute names. + xml: ``, + tab: TableAttrs{TAttr{HTable: "", FTable: "world"}}, + ns: "http://www.w3.org/TR/html4/", + }, + { + xml: ``, + tab: TableAttrs{}, + ns: "something else entirely", + }, +} + +func TestUnmarshalNSAttr(t *testing.T) { + for i, tt := range tableAttrs { + var dst TableAttrs + var err error + if tt.ns != "" { + d := NewDecoder(strings.NewReader(tt.xml)) + d.DefaultSpace = tt.ns + err = d.Decode(&dst) + } else { + err = Unmarshal([]byte(tt.xml), &dst) + } + if err != nil { + t.Errorf("#%d: Unmarshal: %v", i, err) + continue + } + want := tt.tab + if dst != want { + t.Errorf("#%d: dst=%+v, want %+v", i, dst, want) + } + } +} + +func TestMarshalNSAttr(t *testing.T) { + src := TableAttrs{TAttr{"hello", "world", "en_US", "other1", "other2", "other3", "other4"}} + data, err := Marshal(&src) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + want := `` + str := string(data) + if str != want { + t.Errorf("Marshal:\nhave: %#q\nwant: %#q\n", str, want) + } + + var dst TableAttrs + if err := Unmarshal(data, &dst); err != nil { + t.Errorf("Unmarshal: %v", err) + } + + if dst != src { + t.Errorf("Unmarshal = %q, want %q", dst, src) + } +} + +type MyCharData struct { + body string +} + +func (m *MyCharData) UnmarshalXML(d *Decoder, start StartElement) error { + for { + t, err := d.Token() + if err == io.EOF { // found end of element + break + } + if err != nil { + return err + } + if char, ok := t.(CharData); ok { + m.body += string(char) + } + } + return nil +} + +var _ Unmarshaler = (*MyCharData)(nil) + +func (m *MyCharData) UnmarshalXMLAttr(attr Attr) error { + panic("must not call") +} + +type MyAttr struct { + attr string +} + +func (m *MyAttr) UnmarshalXMLAttr(attr Attr) error { + m.attr = attr.Value + return nil +} + +var _ UnmarshalerAttr = (*MyAttr)(nil) + +type MyStruct struct { + Data *MyCharData + Attr *MyAttr `xml:",attr"` + + Data2 MyCharData + Attr2 MyAttr `xml:",attr"` +} + +func TestUnmarshaler(t *testing.T) { + xml := ` + + hello world + howdy world + + ` + + var m MyStruct + if err := 
Unmarshal([]byte(xml), &m); err != nil { + t.Fatal(err) + } + + if m.Data == nil || m.Attr == nil || m.Data.body != "hello world" || m.Attr.attr != "attr1" || m.Data2.body != "howdy world" || m.Attr2.attr != "attr2" { + t.Errorf("m=%#+v\n", m) + } +} + +type Pea struct { + Cotelydon string +} + +type Pod struct { + Pea interface{} `xml:"Pea"` +} + +// https://golang.org/issue/6836 +func TestUnmarshalIntoInterface(t *testing.T) { + pod := new(Pod) + pod.Pea = new(Pea) + xml := `Green stuff` + err := Unmarshal([]byte(xml), pod) + if err != nil { + t.Fatalf("failed to unmarshal %q: %v", xml, err) + } + pea, ok := pod.Pea.(*Pea) + if !ok { + t.Fatalf("unmarshalled into wrong type: have %T want *Pea", pod.Pea) + } + have, want := pea.Cotelydon, "Green stuff" + if have != want { + t.Errorf("failed to unmarshal into interface, have %q want %q", have, want) + } +} diff --git a/drives/davServer/internal/xml/typeinfo.go b/drives/davServer/internal/xml/typeinfo.go new file mode 100644 index 00000000..fdde288b --- /dev/null +++ b/drives/davServer/internal/xml/typeinfo.go @@ -0,0 +1,371 @@ +// Copyright 2011 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package xml + +import ( + "fmt" + "reflect" + "strings" + "sync" +) + +// typeInfo holds details for the xml representation of a type. +type typeInfo struct { + xmlname *fieldInfo + fields []fieldInfo +} + +// fieldInfo holds details for the xml representation of a single field. +type fieldInfo struct { + idx []int + name string + xmlns string + flags fieldFlags + parents []string +} + +type fieldFlags int + +const ( + fElement fieldFlags = 1 << iota + fAttr + fCharData + fInnerXml + fComment + fAny + + fOmitEmpty + + fMode = fElement | fAttr | fCharData | fInnerXml | fComment | fAny +) + +var tinfoMap = make(map[reflect.Type]*typeInfo) +var tinfoLock sync.RWMutex + +var nameType = reflect.TypeOf(Name{}) + +// getTypeInfo returns the typeInfo structure with details necessary +// for marshalling and unmarshalling typ. +func getTypeInfo(typ reflect.Type) (*typeInfo, error) { + tinfoLock.RLock() + tinfo, ok := tinfoMap[typ] + tinfoLock.RUnlock() + if ok { + return tinfo, nil + } + tinfo = &typeInfo{} + if typ.Kind() == reflect.Struct && typ != nameType { + n := typ.NumField() + for i := 0; i < n; i++ { + f := typ.Field(i) + if f.PkgPath != "" || f.Tag.Get("xml") == "-" { + continue // Private field + } + + // For embedded structs, embed its fields. + if f.Anonymous { + t := f.Type + if t.Kind() == reflect.Ptr { + t = t.Elem() + } + if t.Kind() == reflect.Struct { + inner, err := getTypeInfo(t) + if err != nil { + return nil, err + } + if tinfo.xmlname == nil { + tinfo.xmlname = inner.xmlname + } + for _, finfo := range inner.fields { + finfo.idx = append([]int{i}, finfo.idx...) + if err := addFieldInfo(typ, tinfo, &finfo); err != nil { + return nil, err + } + } + continue + } + } + + finfo, err := structFieldInfo(typ, &f) + if err != nil { + return nil, err + } + + if f.Name == "XMLName" { + tinfo.xmlname = finfo + continue + } + + // Add the field if it doesn't conflict with other fields. + if err := addFieldInfo(typ, tinfo, finfo); err != nil { + return nil, err + } + } + } + tinfoLock.Lock() + tinfoMap[typ] = tinfo + tinfoLock.Unlock() + return tinfo, nil +} + +// structFieldInfo builds and returns a fieldInfo for f. 
+func structFieldInfo(typ reflect.Type, f *reflect.StructField) (*fieldInfo, error) { + finfo := &fieldInfo{idx: f.Index} + + // Split the tag from the xml namespace if necessary. + tag := f.Tag.Get("xml") + if i := strings.Index(tag, " "); i >= 0 { + finfo.xmlns, tag = tag[:i], tag[i+1:] + } + + // Parse flags. + tokens := strings.Split(tag, ",") + if len(tokens) == 1 { + finfo.flags = fElement + } else { + tag = tokens[0] + for _, flag := range tokens[1:] { + switch flag { + case "attr": + finfo.flags |= fAttr + case "chardata": + finfo.flags |= fCharData + case "innerxml": + finfo.flags |= fInnerXml + case "comment": + finfo.flags |= fComment + case "any": + finfo.flags |= fAny + case "omitempty": + finfo.flags |= fOmitEmpty + } + } + + // Validate the flags used. + valid := true + switch mode := finfo.flags & fMode; mode { + case 0: + finfo.flags |= fElement + case fAttr, fCharData, fInnerXml, fComment, fAny: + if f.Name == "XMLName" || tag != "" && mode != fAttr { + valid = false + } + default: + // This will also catch multiple modes in a single field. + valid = false + } + if finfo.flags&fMode == fAny { + finfo.flags |= fElement + } + if finfo.flags&fOmitEmpty != 0 && finfo.flags&(fElement|fAttr) == 0 { + valid = false + } + if !valid { + return nil, fmt.Errorf("xml: invalid tag in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + } + + // Use of xmlns without a name is not allowed. + if finfo.xmlns != "" && tag == "" { + return nil, fmt.Errorf("xml: namespace without name in field %s of type %s: %q", + f.Name, typ, f.Tag.Get("xml")) + } + + if f.Name == "XMLName" { + // The XMLName field records the XML element name. Don't + // process it as usual because its name should default to + // empty rather than to the field name. + finfo.name = tag + return finfo, nil + } + + if tag == "" { + // If the name part of the tag is completely empty, get + // default from XMLName of underlying struct if feasible, + // or field name otherwise. + if xmlname := lookupXMLName(f.Type); xmlname != nil { + finfo.xmlns, finfo.name = xmlname.xmlns, xmlname.name + } else { + finfo.name = f.Name + } + return finfo, nil + } + + if finfo.xmlns == "" && finfo.flags&fAttr == 0 { + // If it's an element no namespace specified, get the default + // from the XMLName of enclosing struct if possible. + if xmlname := lookupXMLName(typ); xmlname != nil { + finfo.xmlns = xmlname.xmlns + } + } + + // Prepare field name and parents. + parents := strings.Split(tag, ">") + if parents[0] == "" { + parents[0] = f.Name + } + if parents[len(parents)-1] == "" { + return nil, fmt.Errorf("xml: trailing '>' in field %s of type %s", f.Name, typ) + } + finfo.name = parents[len(parents)-1] + if len(parents) > 1 { + if (finfo.flags & fElement) == 0 { + return nil, fmt.Errorf("xml: %s chain not valid with %s flag", tag, strings.Join(tokens[1:], ",")) + } + finfo.parents = parents[:len(parents)-1] + } + + // If the field type has an XMLName field, the names must match + // so that the behavior of both marshalling and unmarshalling + // is straightforward and unambiguous. + if finfo.flags&fElement != 0 { + ftyp := f.Type + xmlname := lookupXMLName(ftyp) + if xmlname != nil && xmlname.name != finfo.name { + return nil, fmt.Errorf("xml: name %q in tag of %s.%s conflicts with name %q in %s.XMLName", + finfo.name, typ, f.Name, xmlname.name, ftyp) + } + } + return finfo, nil +} + +// lookupXMLName returns the fieldInfo for typ's XMLName field +// in case it exists and has a valid xml field tag, otherwise +// it returns nil. 
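+// For example, for a struct declared (purely for illustration) as
+//
+//	type response struct {
+//		XMLName Name `xml:"DAV: response"`
+//	}
+//
+// lookupXMLName reports xmlns "DAV:" and name "response".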
+func lookupXMLName(typ reflect.Type) (xmlname *fieldInfo) { + for typ.Kind() == reflect.Ptr { + typ = typ.Elem() + } + if typ.Kind() != reflect.Struct { + return nil + } + for i, n := 0, typ.NumField(); i < n; i++ { + f := typ.Field(i) + if f.Name != "XMLName" { + continue + } + finfo, err := structFieldInfo(typ, &f) + if finfo.name != "" && err == nil { + return finfo + } + // Also consider errors as a non-existent field tag + // and let getTypeInfo itself report the error. + break + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +// addFieldInfo adds finfo to tinfo.fields if there are no +// conflicts, or if conflicts arise from previous fields that were +// obtained from deeper embedded structures than finfo. In the latter +// case, the conflicting entries are dropped. +// A conflict occurs when the path (parent + name) to a field is +// itself a prefix of another path, or when two paths match exactly. +// It is okay for field paths to share a common, shorter prefix. +func addFieldInfo(typ reflect.Type, tinfo *typeInfo, newf *fieldInfo) error { + var conflicts []int +Loop: + // First, figure all conflicts. Most working code will have none. + for i := range tinfo.fields { + oldf := &tinfo.fields[i] + if oldf.flags&fMode != newf.flags&fMode { + continue + } + if oldf.xmlns != "" && newf.xmlns != "" && oldf.xmlns != newf.xmlns { + continue + } + minl := min(len(newf.parents), len(oldf.parents)) + for p := 0; p < minl; p++ { + if oldf.parents[p] != newf.parents[p] { + continue Loop + } + } + if len(oldf.parents) > len(newf.parents) { + if oldf.parents[len(newf.parents)] == newf.name { + conflicts = append(conflicts, i) + } + } else if len(oldf.parents) < len(newf.parents) { + if newf.parents[len(oldf.parents)] == oldf.name { + conflicts = append(conflicts, i) + } + } else { + if newf.name == oldf.name { + conflicts = append(conflicts, i) + } + } + } + // Without conflicts, add the new field and return. + if conflicts == nil { + tinfo.fields = append(tinfo.fields, *newf) + return nil + } + + // If any conflict is shallower, ignore the new field. + // This matches the Go field resolution on embedding. + for _, i := range conflicts { + if len(tinfo.fields[i].idx) < len(newf.idx) { + return nil + } + } + + // Otherwise, if any of them is at the same depth level, it's an error. + for _, i := range conflicts { + oldf := &tinfo.fields[i] + if len(oldf.idx) == len(newf.idx) { + f1 := typ.FieldByIndex(oldf.idx) + f2 := typ.FieldByIndex(newf.idx) + return &TagPathError{typ, f1.Name, f1.Tag.Get("xml"), f2.Name, f2.Tag.Get("xml")} + } + } + + // Otherwise, the new field is shallower, and thus takes precedence, + // so drop the conflicting fields from tinfo and append the new one. + for c := len(conflicts) - 1; c >= 0; c-- { + i := conflicts[c] + copy(tinfo.fields[i:], tinfo.fields[i+1:]) + tinfo.fields = tinfo.fields[:len(tinfo.fields)-1] + } + tinfo.fields = append(tinfo.fields, *newf) + return nil +} + +// A TagPathError represents an error in the unmarshalling process +// caused by the use of field tags with conflicting paths. +type TagPathError struct { + Struct reflect.Type + Field1, Tag1 string + Field2, Tag2 string +} + +func (e *TagPathError) Error() string { + return fmt.Sprintf("%s field %q with tag %q conflicts with field %q with tag %q", e.Struct, e.Field1, e.Tag1, e.Field2, e.Tag2) +} + +// value returns v's field value corresponding to finfo. 
+// It's equivalent to v.FieldByIndex(finfo.idx), but initializes +// and dereferences pointers as necessary. +func (finfo *fieldInfo) value(v reflect.Value) reflect.Value { + for i, x := range finfo.idx { + if i > 0 { + t := v.Type() + if t.Kind() == reflect.Ptr && t.Elem().Kind() == reflect.Struct { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + } + } + v = v.Field(x) + } + return v +} diff --git a/drives/davServer/internal/xml/xml.go b/drives/davServer/internal/xml/xml.go new file mode 100644 index 00000000..7d88dac7 --- /dev/null +++ b/drives/davServer/internal/xml/xml.go @@ -0,0 +1,1998 @@ +// Copyright 2009 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package xml implements a simple XML 1.0 parser that +// understands XML name spaces. +package xml + +// References: +// Annotated XML spec: http://www.xml.com/axml/testaxml.htm +// XML name spaces: http://www.w3.org/TR/REC-xml-names/ + +// TODO(rsc): +// Test error handling. + +import ( + "bufio" + "bytes" + "errors" + "fmt" + "io" + "strconv" + "strings" + "unicode" + "unicode/utf8" +) + +// A SyntaxError represents a syntax error in the XML input stream. +type SyntaxError struct { + Msg string + Line int +} + +func (e *SyntaxError) Error() string { + return "XML syntax error on line " + strconv.Itoa(e.Line) + ": " + e.Msg +} + +// A Name represents an XML name (Local) annotated with a name space +// identifier (Space). In tokens returned by Decoder.Token, the Space +// identifier is given as a canonical URL, not the short prefix used in +// the document being parsed. +// +// As a special case, XML namespace declarations will use the literal +// string "xmlns" for the Space field instead of the fully resolved URL. +// See Encoder.EncodeToken for more information on namespace encoding +// behaviour. +type Name struct { + Space, Local string +} + +// isNamespace reports whether the name is a namespace-defining name. +func (name Name) isNamespace() bool { + return name.Local == "xmlns" || name.Space == "xmlns" +} + +// An Attr represents an attribute in an XML element (Name=Value). +type Attr struct { + Name Name + Value string +} + +// A Token is an interface holding one of the token types: +// StartElement, EndElement, CharData, Comment, ProcInst, or Directive. +type Token interface{} + +// A StartElement represents an XML start element. +type StartElement struct { + Name Name + Attr []Attr +} + +func (e StartElement) Copy() StartElement { + attrs := make([]Attr, len(e.Attr)) + copy(attrs, e.Attr) + e.Attr = attrs + return e +} + +// End returns the corresponding XML end element. +func (e StartElement) End() EndElement { + return EndElement{e.Name} +} + +// setDefaultNamespace sets the namespace of the element +// as the default for all elements contained within it. +func (e *StartElement) setDefaultNamespace() { + if e.Name.Space == "" { + // If there's no namespace on the element, don't + // set the default. Strictly speaking this might be wrong, as + // we can't tell if the element had no namespace set + // or was just using the default namespace. + return + } + // Don't add a default name space if there's already one set. + for _, attr := range e.Attr { + if attr.Name.Space == "" && attr.Name.Local == "xmlns" { + return + } + } + e.Attr = append(e.Attr, Attr{ + Name: Name{ + Local: "xmlns", + }, + Value: e.Name.Space, + }) +} + +// An EndElement represents an XML end element. 
+type EndElement struct { + Name Name +} + +// A CharData represents XML character data (raw text), +// in which XML escape sequences have been replaced by +// the characters they represent. +type CharData []byte + +func makeCopy(b []byte) []byte { + b1 := make([]byte, len(b)) + copy(b1, b) + return b1 +} + +func (c CharData) Copy() CharData { return CharData(makeCopy(c)) } + +// A Comment represents an XML comment of the form . +// The bytes do not include the comment markers. +type Comment []byte + +func (c Comment) Copy() Comment { return Comment(makeCopy(c)) } + +// A ProcInst represents an XML processing instruction of the form +type ProcInst struct { + Target string + Inst []byte +} + +func (p ProcInst) Copy() ProcInst { + p.Inst = makeCopy(p.Inst) + return p +} + +// A Directive represents an XML directive of the form . +// The bytes do not include the markers. +type Directive []byte + +func (d Directive) Copy() Directive { return Directive(makeCopy(d)) } + +// CopyToken returns a copy of a Token. +func CopyToken(t Token) Token { + switch v := t.(type) { + case CharData: + return v.Copy() + case Comment: + return v.Copy() + case Directive: + return v.Copy() + case ProcInst: + return v.Copy() + case StartElement: + return v.Copy() + } + return t +} + +// A Decoder represents an XML parser reading a particular input stream. +// The parser assumes that its input is encoded in UTF-8. +type Decoder struct { + // Strict defaults to true, enforcing the requirements + // of the XML specification. + // If set to false, the parser allows input containing common + // mistakes: + // * If an element is missing an end tag, the parser invents + // end tags as necessary to keep the return values from Token + // properly balanced. + // * In attribute values and character data, unknown or malformed + // character entities (sequences beginning with &) are left alone. + // + // Setting: + // + // d.Strict = false; + // d.AutoClose = HTMLAutoClose; + // d.Entity = HTMLEntity + // + // creates a parser that can handle typical HTML. + // + // Strict mode does not enforce the requirements of the XML name spaces TR. + // In particular it does not reject name space tags using undefined prefixes. + // Such tags are recorded with the unknown prefix as the name space URL. + Strict bool + + // When Strict == false, AutoClose indicates a set of elements to + // consider closed immediately after they are opened, regardless + // of whether an end element is present. + AutoClose []string + + // Entity can be used to map non-standard entity names to string replacements. + // The parser behaves as if these standard mappings are present in the map, + // regardless of the actual map content: + // + // "lt": "<", + // "gt": ">", + // "amp": "&", + // "apos": "'", + // "quot": `"`, + Entity map[string]string + + // CharsetReader, if non-nil, defines a function to generate + // charset-conversion readers, converting from the provided + // non-UTF-8 charset into UTF-8. If CharsetReader is nil or + // returns an error, parsing stops with an error. One of the + // the CharsetReader's result values must be non-nil. + CharsetReader func(charset string, input io.Reader) (io.Reader, error) + + // DefaultSpace sets the default name space used for unadorned tags, + // as if the entire XML stream were wrapped in an element containing + // the attribute xmlns="DefaultSpace". 
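+	// For example, with d.DefaultSpace = "DAV:" (an illustrative value),
+	// an unprefixed <href> element with no default xmlns in scope is
+	// reported with Name.Space "DAV:".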
+ DefaultSpace string + + r io.ByteReader + buf bytes.Buffer + saved *bytes.Buffer + stk *stack + free *stack + needClose bool + toClose Name + nextToken Token + nextByte int + ns map[string]string + err error + line int + offset int64 + unmarshalDepth int +} + +// NewDecoder creates a new XML parser reading from r. +// If r does not implement io.ByteReader, NewDecoder will +// do its own buffering. +func NewDecoder(r io.Reader) *Decoder { + d := &Decoder{ + ns: make(map[string]string), + nextByte: -1, + line: 1, + Strict: true, + } + d.switchToReader(r) + return d +} + +// Token returns the next XML token in the input stream. +// At the end of the input stream, Token returns nil, io.EOF. +// +// Slices of bytes in the returned token data refer to the +// parser's internal buffer and remain valid only until the next +// call to Token. To acquire a copy of the bytes, call CopyToken +// or the token's Copy method. +// +// Token expands self-closing elements such as
+// into separate start and end elements returned by successive calls. +// +// Token guarantees that the StartElement and EndElement +// tokens it returns are properly nested and matched: +// if Token encounters an unexpected end element, +// it will return an error. +// +// Token implements XML name spaces as described by +// http://www.w3.org/TR/REC-xml-names/. Each of the +// Name structures contained in the Token has the Space +// set to the URL identifying its name space when known. +// If Token encounters an unrecognized name space prefix, +// it uses the prefix as the Space rather than report an error. +func (d *Decoder) Token() (t Token, err error) { + if d.stk != nil && d.stk.kind == stkEOF { + err = io.EOF + return + } + if d.nextToken != nil { + t = d.nextToken + d.nextToken = nil + } else if t, err = d.rawToken(); err != nil { + return + } + + if !d.Strict { + if t1, ok := d.autoClose(t); ok { + d.nextToken = t + t = t1 + } + } + switch t1 := t.(type) { + case StartElement: + // In XML name spaces, the translations listed in the + // attributes apply to the element name and + // to the other attribute names, so process + // the translations first. + for _, a := range t1.Attr { + if a.Name.Space == "xmlns" { + v, ok := d.ns[a.Name.Local] + d.pushNs(a.Name.Local, v, ok) + d.ns[a.Name.Local] = a.Value + } + if a.Name.Space == "" && a.Name.Local == "xmlns" { + // Default space for untagged names + v, ok := d.ns[""] + d.pushNs("", v, ok) + d.ns[""] = a.Value + } + } + + d.translate(&t1.Name, true) + for i := range t1.Attr { + d.translate(&t1.Attr[i].Name, false) + } + d.pushElement(t1.Name) + t = t1 + + case EndElement: + d.translate(&t1.Name, true) + if !d.popElement(&t1) { + return nil, d.err + } + t = t1 + } + return +} + +const xmlURL = "http://www.w3.org/XML/1998/namespace" + +// Apply name space translation to name n. +// The default name space (for Space=="") +// applies only to element names, not to attribute names. +func (d *Decoder) translate(n *Name, isElementName bool) { + switch { + case n.Space == "xmlns": + return + case n.Space == "" && !isElementName: + return + case n.Space == "xml": + n.Space = xmlURL + case n.Space == "" && n.Local == "xmlns": + return + } + if v, ok := d.ns[n.Space]; ok { + n.Space = v + } else if n.Space == "" { + n.Space = d.DefaultSpace + } +} + +func (d *Decoder) switchToReader(r io.Reader) { + // Get efficient byte at a time reader. + // Assume that if reader has its own + // ReadByte, it's efficient enough. + // Otherwise, use bufio. + if rb, ok := r.(io.ByteReader); ok { + d.r = rb + } else { + d.r = bufio.NewReader(r) + } +} + +// Parsing state - stack holds old name space translations +// and the current set of open elements. The translations to pop when +// ending a given tag are *below* it on the stack, which is +// more work but forced on us by XML. +type stack struct { + next *stack + kind int + name Name + ok bool +} + +const ( + stkStart = iota + stkNs + stkEOF +) + +func (d *Decoder) push(kind int) *stack { + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.next = d.stk + s.kind = kind + d.stk = s + return s +} + +func (d *Decoder) pop() *stack { + s := d.stk + if s != nil { + d.stk = s.next + s.next = d.free + d.free = s + } + return s +} + +// Record that after the current element is finished +// (that element is already pushed on the stack) +// Token should return EOF until popEOF is called. +func (d *Decoder) pushEOF() { + // Walk down stack to find Start. 
+ // It might not be the top, because there might be stkNs + // entries above it. + start := d.stk + for start.kind != stkStart { + start = start.next + } + // The stkNs entries below a start are associated with that + // element too; skip over them. + for start.next != nil && start.next.kind == stkNs { + start = start.next + } + s := d.free + if s != nil { + d.free = s.next + } else { + s = new(stack) + } + s.kind = stkEOF + s.next = start.next + start.next = s +} + +// Undo a pushEOF. +// The element must have been finished, so the EOF should be at the top of the stack. +func (d *Decoder) popEOF() bool { + if d.stk == nil || d.stk.kind != stkEOF { + return false + } + d.pop() + return true +} + +// Record that we are starting an element with the given name. +func (d *Decoder) pushElement(name Name) { + s := d.push(stkStart) + s.name = name +} + +// Record that we are changing the value of ns[local]. +// The old value is url, ok. +func (d *Decoder) pushNs(local string, url string, ok bool) { + s := d.push(stkNs) + s.name.Local = local + s.name.Space = url + s.ok = ok +} + +// Creates a SyntaxError with the current line number. +func (d *Decoder) syntaxError(msg string) error { + return &SyntaxError{Msg: msg, Line: d.line} +} + +// Record that we are ending an element with the given name. +// The name must match the record at the top of the stack, +// which must be a pushElement record. +// After popping the element, apply any undo records from +// the stack to restore the name translations that existed +// before we saw this element. +func (d *Decoder) popElement(t *EndElement) bool { + s := d.pop() + name := t.Name + switch { + case s == nil || s.kind != stkStart: + d.err = d.syntaxError("unexpected end element ") + return false + case s.name.Local != name.Local: + if !d.Strict { + d.needClose = true + d.toClose = t.Name + t.Name = s.name + return true + } + d.err = d.syntaxError("element <" + s.name.Local + "> closed by ") + return false + case s.name.Space != name.Space: + d.err = d.syntaxError("element <" + s.name.Local + "> in space " + s.name.Space + + "closed by in space " + name.Space) + return false + } + + // Pop stack until a Start or EOF is on the top, undoing the + // translations that were associated with the element we just closed. + for d.stk != nil && d.stk.kind != stkStart && d.stk.kind != stkEOF { + s := d.pop() + if s.ok { + d.ns[s.name.Local] = s.name.Space + } else { + delete(d.ns, s.name.Local) + } + } + + return true +} + +// If the top element on the stack is autoclosing and +// t is not the end tag, invent the end tag. +func (d *Decoder) autoClose(t Token) (Token, bool) { + if d.stk == nil || d.stk.kind != stkStart { + return nil, false + } + name := strings.ToLower(d.stk.name.Local) + for _, s := range d.AutoClose { + if strings.ToLower(s) == name { + // This one should be auto closed if t doesn't close it. + et, ok := t.(EndElement) + if !ok || et.Name.Local != name { + return EndElement{d.stk.name}, true + } + break + } + } + return nil, false +} + +var errRawToken = errors.New("xml: cannot use RawToken from UnmarshalXML method") + +// RawToken is like Token but does not verify that +// start and end elements match and does not translate +// name space prefixes to their corresponding URLs. 
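+// For example, given the input <d:foo xmlns:d="DAV:"/> (an illustrative
+// document), RawToken reports the element name as {Space: "d", Local: "foo"},
+// whereas Token resolves it to {Space: "DAV:", Local: "foo"}.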
+func (d *Decoder) RawToken() (Token, error) { + if d.unmarshalDepth > 0 { + return nil, errRawToken + } + return d.rawToken() +} + +func (d *Decoder) rawToken() (Token, error) { + if d.err != nil { + return nil, d.err + } + if d.needClose { + // The last element we read was self-closing and + // we returned just the StartElement half. + // Return the EndElement half now. + d.needClose = false + return EndElement{d.toClose}, nil + } + + b, ok := d.getc() + if !ok { + return nil, d.err + } + + if b != '<' { + // Text section. + d.ungetc(b) + data := d.text(-1, false) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + switch b { + case '/': + // ' { + d.err = d.syntaxError("invalid characters between ") + return nil, d.err + } + return EndElement{name}, nil + + case '?': + // ' { + break + } + b0 = b + } + data := d.buf.Bytes() + data = data[0 : len(data)-2] // chop ?> + + if target == "xml" { + content := string(data) + ver := procInst("version", content) + if ver != "" && ver != "1.0" { + d.err = fmt.Errorf("xml: unsupported version %q; only version 1.0 is supported", ver) + return nil, d.err + } + enc := procInst("encoding", content) + if enc != "" && enc != "utf-8" && enc != "UTF-8" { + if d.CharsetReader == nil { + d.err = fmt.Errorf("xml: encoding %q declared but Decoder.CharsetReader is nil", enc) + return nil, d.err + } + newr, err := d.CharsetReader(enc, d.r.(io.Reader)) + if err != nil { + d.err = fmt.Errorf("xml: opening charset %q: %v", enc, err) + return nil, d.err + } + if newr == nil { + panic("CharsetReader returned a nil Reader for charset " + enc) + } + d.switchToReader(newr) + } + } + return ProcInst{target, data}, nil + + case '!': + // ' { + break + } + b0, b1 = b1, b + } + data := d.buf.Bytes() + data = data[0 : len(data)-3] // chop --> + return Comment(data), nil + + case '[': // . + data := d.text(-1, true) + if data == nil { + return nil, d.err + } + return CharData(data), nil + } + + // Probably a directive: , , etc. + // We don't care, but accumulate for caller. Quoted angle + // brackets do not count for nesting. 
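+	// For example, in a directive such as (shown only for illustration)
+	//
+	//	<!DOCTYPE note [<!ENTITY title "a > b">]>
+	//
+	// the '>' inside the quoted entity value does not terminate the directive.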
+ d.buf.Reset() + d.buf.WriteByte(b) + inquote := uint8(0) + depth := 0 + for { + if b, ok = d.mustgetc(); !ok { + return nil, d.err + } + if inquote == 0 && b == '>' && depth == 0 { + break + } + HandleB: + d.buf.WriteByte(b) + switch { + case b == inquote: + inquote = 0 + + case inquote != 0: + // in quotes, no special action + + case b == '\'' || b == '"': + inquote = b + + case b == '>' && inquote == 0: + depth-- + + case b == '<' && inquote == 0: + // Look for ` + +var testEntity = map[string]string{"何": "What", "is-it": "is it?"} + +var rawTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"", "hello"}}, + CharData("\n "), + StartElement{Name{"", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"", "query"}}, + CharData("\n "), + StartElement{Name{"", "goodbye"}, []Attr{}}, + EndElement{Name{"", "goodbye"}}, + CharData("\n "), + StartElement{Name{"", "outer"}, []Attr{{Name{"foo", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"", "inner"}, []Attr{}}, + EndElement{Name{"", "inner"}}, + CharData("\n "), + EndElement{Name{"", "outer"}}, + CharData("\n "), + StartElement{Name{"tag", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"tag", "name"}}, + CharData("\n"), + EndElement{Name{"", "body"}}, + Comment(" missing final newline "), +} + +var cookedTokens = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="UTF-8"`)}, + CharData("\n"), + Directive(`DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" + "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd"`), + CharData("\n"), + StartElement{Name{"ns2", "body"}, []Attr{{Name{"xmlns", "foo"}, "ns1"}, {Name{"", "xmlns"}, "ns2"}, {Name{"xmlns", "tag"}, "ns3"}}}, + CharData("\n "), + StartElement{Name{"ns2", "hello"}, []Attr{{Name{"", "lang"}, "en"}}}, + CharData("World <>'\" 白鵬翔"), + EndElement{Name{"ns2", "hello"}}, + CharData("\n "), + StartElement{Name{"ns2", "query"}, []Attr{}}, + CharData("What is it?"), + EndElement{Name{"ns2", "query"}}, + CharData("\n "), + StartElement{Name{"ns2", "goodbye"}, []Attr{}}, + EndElement{Name{"ns2", "goodbye"}}, + CharData("\n "), + StartElement{Name{"ns2", "outer"}, []Attr{{Name{"ns1", "attr"}, "value"}, {Name{"xmlns", "tag"}, "ns4"}}}, + CharData("\n "), + StartElement{Name{"ns2", "inner"}, []Attr{}}, + EndElement{Name{"ns2", "inner"}}, + CharData("\n "), + EndElement{Name{"ns2", "outer"}}, + CharData("\n "), + StartElement{Name{"ns3", "name"}, []Attr{}}, + CharData("\n "), + CharData("Some text here."), + CharData("\n "), + EndElement{Name{"ns3", "name"}}, + CharData("\n"), + EndElement{Name{"ns2", "body"}}, + Comment(" missing final newline "), +} + +const testInputAltEncoding = ` + +VALUE` + +var rawTokensAltEncoding = []Token{ + CharData("\n"), + ProcInst{"xml", []byte(`version="1.0" encoding="x-testing-uppercase"`)}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("value"), + EndElement{Name{"", "tag"}}, +} + +var xmlInput = []string{ + // 
unexpected EOF cases + "<", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "", + "", + " c;", + "", + "", + "", + // "", // let the Token() caller handle + "", + "", + "cdata]]>", +} + +func TestRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + testRawToken(t, d, testInput, rawTokens) +} + +const nonStrictInput = ` +non&entity +&unknown;entity +{ +&#zzz; +&なまえ3; +<-gt; +&; +&0a; +` + +var nonStringEntity = map[string]string{"": "oops!", "0a": "oops!"} + +var nonStrictTokens = []Token{ + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("non&entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&unknown;entity"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("{"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&#zzz;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&なまえ3;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("<-gt;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), + StartElement{Name{"", "tag"}, []Attr{}}, + CharData("&0a;"), + EndElement{Name{"", "tag"}}, + CharData("\n"), +} + +func TestNonStrictRawToken(t *testing.T) { + d := NewDecoder(strings.NewReader(nonStrictInput)) + d.Strict = false + testRawToken(t, d, nonStrictInput, nonStrictTokens) +} + +type downCaser struct { + t *testing.T + r io.ByteReader +} + +func (d *downCaser) ReadByte() (c byte, err error) { + c, err = d.r.ReadByte() + if c >= 'A' && c <= 'Z' { + c += 'a' - 'A' + } + return +} + +func (d *downCaser) Read(p []byte) (int, error) { + d.t.Fatalf("unexpected Read call on downCaser reader") + panic("unreachable") +} + +func TestRawTokenAltEncoding(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + d.CharsetReader = func(charset string, input io.Reader) (io.Reader, error) { + if charset != "x-testing-uppercase" { + t.Fatalf("unexpected charset %q", charset) + } + return &downCaser{t, input.(io.ByteReader)}, nil + } + testRawToken(t, d, testInputAltEncoding, rawTokensAltEncoding) +} + +func TestRawTokenAltEncodingNoConverter(t *testing.T) { + d := NewDecoder(strings.NewReader(testInputAltEncoding)) + token, err := d.RawToken() + if token == nil { + t.Fatalf("expected a token on first RawToken call") + } + if err != nil { + t.Fatal(err) + } + token, err = d.RawToken() + if token != nil { + t.Errorf("expected a nil token; got %#v", token) + } + if err == nil { + t.Fatalf("expected an error on second RawToken call") + } + const encoding = "x-testing-uppercase" + if !strings.Contains(err.Error(), encoding) { + t.Errorf("expected error to contain %q; got error: %v", + encoding, err) + } +} + +func testRawToken(t *testing.T, d *Decoder, raw string, rawTokens []Token) { + lastEnd := int64(0) + for i, want := range rawTokens { + start := d.InputOffset() + have, err := d.RawToken() + end := d.InputOffset() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + var shave, swant string + if _, ok := have.(CharData); ok { + shave = fmt.Sprintf("CharData(%q)", have) + } else { + shave = fmt.Sprintf("%#v", have) + } + if _, ok := want.(CharData); ok { + 
swant = fmt.Sprintf("CharData(%q)", want) + } else { + swant = fmt.Sprintf("%#v", want) + } + t.Errorf("token %d = %s, want %s", i, shave, swant) + } + + // Check that InputOffset returned actual token. + switch { + case start < lastEnd: + t.Errorf("token %d: position [%d,%d) for %T is before previous token", i, start, end, have) + case start >= end: + // Special case: EndElement can be synthesized. + if start == end && end == lastEnd { + break + } + t.Errorf("token %d: position [%d,%d) for %T is empty", i, start, end, have) + case end > int64(len(raw)): + t.Errorf("token %d: position [%d,%d) for %T extends beyond input", i, start, end, have) + default: + text := raw[start:end] + if strings.ContainsAny(text, "<>") && (!strings.HasPrefix(text, "<") || !strings.HasSuffix(text, ">")) { + t.Errorf("token %d: misaligned raw token %#q for %T", i, text, have) + } + } + lastEnd = end + } +} + +// Ensure that directives (specifically !DOCTYPE) include the complete +// text of any nested directives, noting that < and > do not change +// nesting depth if they are in single or double quotes. + +var nestedDirectivesInput = ` +]> +">]> +]> +'>]> +]> +'>]> +]> +` + +var nestedDirectivesTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE [">]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE ['>]`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestNestedDirectives(t *testing.T) { + d := NewDecoder(strings.NewReader(nestedDirectivesInput)) + + for i, want := range nestedDirectivesTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestToken(t *testing.T) { + d := NewDecoder(strings.NewReader(testInput)) + d.Entity = testEntity + + for i, want := range cookedTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +func TestSyntax(t *testing.T) { + for i := range xmlInput { + d := NewDecoder(strings.NewReader(xmlInput[i])) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if _, ok := err.(*SyntaxError); !ok { + t.Fatalf(`xmlInput "%s": expected SyntaxError not received`, xmlInput[i]) + } + } +} + +type allScalars struct { + True1 bool + True2 bool + False1 bool + False2 bool + Int int + Int8 int8 + Int16 int16 + Int32 int32 + Int64 int64 + Uint int + Uint8 uint8 + Uint16 uint16 + Uint32 uint32 + Uint64 uint64 + Uintptr uintptr + Float32 float32 + Float64 float64 + String string + PtrString *string +} + +var all = allScalars{ + True1: true, + True2: true, + False1: false, + False2: false, + Int: 1, + Int8: -2, + Int16: 3, + Int32: -4, + Int64: 5, + Uint: 6, + Uint8: 7, + Uint16: 8, + Uint32: 9, + Uint64: 10, + Uintptr: 11, + Float32: 13.0, + Float64: 14.0, + String: "15", + PtrString: &sixteen, +} + +var sixteen = "16" + +const testScalarsInput = ` + true + 1 + false + 0 + 1 + -2 + 3 + -4 + 5 + 6 + 7 + 8 + 9 + 10 + 11 + 12.0 + 13.0 + 14.0 + 15 + 16 +` + +func TestAllScalars(t *testing.T) { + var a allScalars + err := Unmarshal([]byte(testScalarsInput), &a) + + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(a, all) { + t.Errorf("have %+v want 
%+v", a, all) + } +} + +type item struct { + Field_a string +} + +func TestIssue569(t *testing.T) { + data := `abcd` + var i item + err := Unmarshal([]byte(data), &i) + + if err != nil || i.Field_a != "abcd" { + t.Fatal("Expecting abcd") + } +} + +func TestUnquotedAttrs(t *testing.T) { + data := "" + d := NewDecoder(strings.NewReader(data)) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != "tag" { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != "azAZ09:-_" { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != "attr" { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } +} + +func TestValuelessAttrs(t *testing.T) { + tests := [][3]string{ + {"
<p nowrap>
", "p", "nowrap"}, + {"
<p nowrap >
", "p", "nowrap"}, + {"", "input", "checked"}, + {"", "input", "checked"}, + } + for _, test := range tests { + d := NewDecoder(strings.NewReader(test[0])) + d.Strict = false + token, err := d.Token() + if _, ok := err.(*SyntaxError); ok { + t.Errorf("Unexpected error: %v", err) + } + if token.(StartElement).Name.Local != test[1] { + t.Errorf("Unexpected tag name: %v", token.(StartElement).Name.Local) + } + attr := token.(StartElement).Attr[0] + if attr.Value != test[2] { + t.Errorf("Unexpected attribute value: %v", attr.Value) + } + if attr.Name.Local != test[2] { + t.Errorf("Unexpected attribute name: %v", attr.Name.Local) + } + } +} + +func TestCopyTokenCharData(t *testing.T) { + data := []byte("same data") + var tok1 Token = CharData(data) + tok2 := CopyToken(tok1) + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) != CharData") + } + data[1] = 'o' + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestCopyTokenStartElement(t *testing.T) { + elt := StartElement{Name{"", "hello"}, []Attr{{Name{"", "lang"}, "en"}}} + var tok1 Token = elt + tok2 := CopyToken(tok1) + if tok1.(StartElement).Attr[0].Value != "en" { + t.Error("CopyToken overwrote Attr[0]") + } + if !reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(StartElement) != StartElement") + } + tok1.(StartElement).Attr[0] = Attr{Name{"", "lang"}, "de"} + if reflect.DeepEqual(tok1, tok2) { + t.Error("CopyToken(CharData) uses same buffer.") + } +} + +func TestSyntaxErrorLineNum(t *testing.T) { + testInput := "
<P>Foo<P>\n\n<P>
Bar\n" + d := NewDecoder(strings.NewReader(testInput)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Error("Expected SyntaxError.") + } + if synerr.Line != 3 { + t.Error("SyntaxError didn't have correct line number.") + } +} + +func TestTrailingRawToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.RawToken(); err == nil; _, err = d.RawToken() { + } + if err != io.EOF { + t.Fatalf("d.RawToken() = _, %v, want _, io.EOF", err) + } +} + +func TestTrailingToken(t *testing.T) { + input := ` ` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +func TestEntityInsideCDATA(t *testing.T) { + input := `` + d := NewDecoder(strings.NewReader(input)) + var err error + for _, err = d.Token(); err == nil; _, err = d.Token() { + } + if err != io.EOF { + t.Fatalf("d.Token() = _, %v, want _, io.EOF", err) + } +} + +var characterTests = []struct { + in string + err string +}{ + {"\x12", "illegal character code U+0012"}, + {"\x0b", "illegal character code U+000B"}, + {"\xef\xbf\xbe", "illegal character code U+FFFE"}, + {"\r\n\x07", "illegal character code U+0007"}, + {"what's up", "expected attribute name in element"}, + {"&abc\x01;", "invalid character entity &abc (no semicolon)"}, + {"&\x01;", "invalid character entity & (no semicolon)"}, + {"&\xef\xbf\xbe;", "invalid character entity &\uFFFE;"}, + {"&hello;", "invalid character entity &hello;"}, +} + +func TestDisallowedCharacters(t *testing.T) { + + for i, tt := range characterTests { + d := NewDecoder(strings.NewReader(tt.in)) + var err error + + for err == nil { + _, err = d.Token() + } + synerr, ok := err.(*SyntaxError) + if !ok { + t.Fatalf("input %d d.Token() = _, %v, want _, *SyntaxError", i, err) + } + if synerr.Msg != tt.err { + t.Fatalf("input %d synerr.Msg wrong: want %q, got %q", i, tt.err, synerr.Msg) + } + } +} + +type procInstEncodingTest struct { + expect, got string +} + +var procInstTests = []struct { + input string + expect [2]string +}{ + {`version="1.0" encoding="utf-8"`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8'`, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding='utf-8' `, [2]string{"1.0", "utf-8"}}, + {`version="1.0" encoding=utf-8`, [2]string{"1.0", ""}}, + {`encoding="FOO" `, [2]string{"", "FOO"}}, +} + +func TestProcInstEncoding(t *testing.T) { + for _, test := range procInstTests { + if got := procInst("version", test.input); got != test.expect[0] { + t.Errorf("procInst(version, %q) = %q; want %q", test.input, got, test.expect[0]) + } + if got := procInst("encoding", test.input); got != test.expect[1] { + t.Errorf("procInst(encoding, %q) = %q; want %q", test.input, got, test.expect[1]) + } + } +} + +// Ensure that directives with comments include the complete +// text of any nested directives. 
+ +var directivesWithCommentsInput = ` +]> +]> + --> --> []> +` + +var directivesWithCommentsTokens = []Token{ + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), + Directive(`DOCTYPE []`), + CharData("\n"), +} + +func TestDirectivesWithComments(t *testing.T) { + d := NewDecoder(strings.NewReader(directivesWithCommentsInput)) + + for i, want := range directivesWithCommentsTokens { + have, err := d.Token() + if err != nil { + t.Fatalf("token %d: unexpected error: %s", i, err) + } + if !reflect.DeepEqual(have, want) { + t.Errorf("token %d = %#v want %#v", i, have, want) + } + } +} + +// Writer whose Write method always returns an error. +type errWriter struct{} + +func (errWriter) Write(p []byte) (n int, err error) { return 0, fmt.Errorf("unwritable") } + +func TestEscapeTextIOErrors(t *testing.T) { + expectErr := "unwritable" + err := EscapeText(errWriter{}, []byte{'A'}) + + if err == nil || err.Error() != expectErr { + t.Errorf("have %v, want %v", err, expectErr) + } +} + +func TestEscapeTextInvalidChar(t *testing.T) { + input := []byte("A \x00 terminated string.") + expected := "A \uFFFD terminated string." + + buff := new(bytes.Buffer) + if err := EscapeText(buff, input); err != nil { + t.Fatalf("have %v, want nil", err) + } + text := buff.String() + + if text != expected { + t.Errorf("have %v, want %v", text, expected) + } +} + +func TestIssue5880(t *testing.T) { + type T []byte + data, err := Marshal(T{192, 168, 0, 1}) + if err != nil { + t.Errorf("Marshal error: %v", err) + } + if !utf8.Valid(data) { + t.Errorf("Marshal generated invalid UTF-8: %x", data) + } +} diff --git a/drives/davServer/litmus_test_server.go b/drives/davServer/litmus_test_server.go new file mode 100644 index 00000000..91cbc5de --- /dev/null +++ b/drives/davServer/litmus_test_server.go @@ -0,0 +1,94 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build ignore +// +build ignore + +/* +This program is a server for the WebDAV 'litmus' compliance test at +http://www.webdav.org/neon/litmus/ +To run the test: + +go run litmus_test_server.go + +and separately, from the downloaded litmus-xxx directory: + +make URL=http://localhost:9999/ check +*/ +package main + +import ( + "flag" + "fmt" + "github.com/openziti/zrok/endpoints/drive/driveServer" + "log" + "net/http" + "net/url" +) + +var port = flag.Int("port", 9999, "server port") + +func main() { + flag.Parse() + log.SetFlags(0) + h := &driveServer.Handler{ + FileSystem: driveServer.NewMemFS(), + LockSystem: driveServer.NewMemLS(), + Logger: func(r *http.Request, err error) { + litmus := r.Header.Get("X-Litmus") + if len(litmus) > 19 { + litmus = litmus[:16] + "..." + } + + switch r.Method { + case "COPY", "MOVE": + dst := "" + if u, err := url.Parse(r.Header.Get("Destination")); err == nil { + dst = u.Path + } + o := r.Header.Get("Overwrite") + log.Printf("%-20s%-10s%-30s%-30so=%-2s%v", litmus, r.Method, r.URL.Path, dst, o, err) + default: + log.Printf("%-20s%-10s%-30s%v", litmus, r.Method, r.URL.Path, err) + } + }, + } + + // The next line would normally be: + // http.Handle("/", h) + // but we wrap that HTTP handler h to cater for a special case. + // + // The propfind_invalid2 litmus test case expects an empty namespace prefix + // declaration to be an error. The FAQ in the webdav litmus test says: + // + // "What does the "propfind_invalid2" test check for?... 
+ // + // If a request was sent with an XML body which included an empty namespace + // prefix declaration (xmlns:ns1=""), then the server must reject that with + // a "400 Bad Request" response, as it is invalid according to the XML + // Namespace specification." + // + // On the other hand, the Go standard library's encoding/xml package + // accepts an empty xmlns namespace, as per the discussion at + // https://github.com/golang/go/issues/8068 + // + // Empty namespaces seem disallowed in the second (2006) edition of the XML + // standard, but allowed in a later edition. The grammar differs between + // http://www.w3.org/TR/2006/REC-xml-names-20060816/#ns-decl and + // http://www.w3.org/TR/REC-xml-names/#dt-prefix + // + // Thus, we assume that the propfind_invalid2 test is obsolete, and + // hard-code the 400 Bad Request response that the test expects. + http.Handle("/", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + if r.Header.Get("X-Litmus") == "props: 3 (propfind_invalid2)" { + http.Error(w, "400 Bad Request", http.StatusBadRequest) + return + } + h.ServeHTTP(w, r) + })) + + addr := fmt.Sprintf(":%d", *port) + log.Printf("Serving %v", addr) + log.Fatal(http.ListenAndServe(addr, nil)) +} diff --git a/drives/davServer/lock.go b/drives/davServer/lock.go new file mode 100644 index 00000000..871c7647 --- /dev/null +++ b/drives/davServer/lock.go @@ -0,0 +1,445 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "container/heap" + "errors" + "strconv" + "strings" + "sync" + "time" +) + +var ( + // ErrConfirmationFailed is returned by a LockSystem's Confirm method. + ErrConfirmationFailed = errors.New("webdav: confirmation failed") + // ErrForbidden is returned by a LockSystem's Unlock method. + ErrForbidden = errors.New("webdav: forbidden") + // ErrLocked is returned by a LockSystem's Create, Refresh and Unlock methods. + ErrLocked = errors.New("webdav: locked") + // ErrNoSuchLock is returned by a LockSystem's Refresh and Unlock methods. + ErrNoSuchLock = errors.New("webdav: no such lock") +) + +// Condition can match a WebDAV resource, based on a token or ETag. +// Exactly one of Token and ETag should be non-empty. +type Condition struct { + Not bool + Token string + ETag string +} + +// LockSystem manages access to a collection of named resources. The elements +// in a lock name are separated by slash ('/', U+002F) characters, regardless +// of host operating system convention. +type LockSystem interface { + // Confirm confirms that the caller can claim all of the locks specified by + // the given conditions, and that holding the union of all of those locks + // gives exclusive access to all of the named resources. Up to two resources + // can be named. Empty names are ignored. + // + // Exactly one of release and err will be non-nil. If release is non-nil, + // all of the requested locks are held until release is called. Calling + // release does not unlock the lock, in the WebDAV UNLOCK sense, but once + // Confirm has confirmed that a lock claim is valid, that lock cannot be + // Confirmed again until it has been released. + // + // If Confirm returns ErrConfirmationFailed then the Handler will continue + // to try any other set of locks presented (a WebDAV HTTP request can + // present more than one set of locks). 
If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + Confirm(now time.Time, name0, name1 string, conditions ...Condition) (release func(), err error) + + // Create creates a lock with the given depth, duration, owner and root + // (name). The depth will either be negative (meaning infinite) or zero. + // + // If Create returns ErrLocked then the Handler will write a "423 Locked" + // HTTP status. If it returns any other non-nil error, the Handler will + // write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + // + // The token returned identifies the created lock. It should be an absolute + // URI as defined by RFC 3986, Section 4.3. In particular, it should not + // contain whitespace. + Create(now time.Time, details LockDetails) (token string, err error) + + // Refresh refreshes the lock with the given token. + // + // If Refresh returns ErrLocked then the Handler will write a "423 Locked" + // HTTP Status. If Refresh returns ErrNoSuchLock then the Handler will write + // a "412 Precondition Failed" HTTP Status. If it returns any other non-nil + // error, the Handler will write a "500 Internal Server Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.10.6 for + // when to use each error. + Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) + + // Unlock unlocks the lock with the given token. + // + // If Unlock returns ErrForbidden then the Handler will write a "403 + // Forbidden" HTTP Status. If Unlock returns ErrLocked then the Handler + // will write a "423 Locked" HTTP status. If Unlock returns ErrNoSuchLock + // then the Handler will write a "409 Conflict" HTTP Status. If it returns + // any other non-nil error, the Handler will write a "500 Internal Server + // Error" HTTP status. + // + // See http://www.webdav.org/specs/rfc4918.html#rfc.section.9.11.1 for + // when to use each error. + Unlock(now time.Time, token string) error +} + +// LockDetails are a lock's metadata. +type LockDetails struct { + // Root is the root resource name being locked. For a zero-depth lock, the + // root is the only resource being locked. + Root string + // Duration is the lock timeout. A negative duration means infinite. + Duration time.Duration + // OwnerXML is the verbatim XML given in a LOCK HTTP request. + // + // TODO: does the "verbatim" nature play well with XML namespaces? + // Does the OwnerXML field need to have more structure? See + // https://codereview.appspot.com/175140043/#msg2 + OwnerXML string + // ZeroDepth is whether the lock has zero depth. If it does not have zero + // depth, it has infinite depth. + ZeroDepth bool +} + +// NewMemLS returns a new in-memory LockSystem. +func NewMemLS() LockSystem { + return &memLS{ + byName: make(map[string]*memLSNode), + byToken: make(map[string]*memLSNode), + gen: uint64(time.Now().Unix()), + } +} + +type memLS struct { + mu sync.Mutex + byName map[string]*memLSNode + byToken map[string]*memLSNode + gen uint64 + // byExpiry only contains those nodes whose LockDetails have a finite + // Duration and are yet to expire. 
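+	// It is maintained as a min-heap ordered by expiry time, so
+	// collectExpiredNodes can pop expired locks from the front.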
+ byExpiry byExpiry +} + +func (m *memLS) nextToken() string { + m.gen++ + return strconv.FormatUint(m.gen, 10) +} + +func (m *memLS) collectExpiredNodes(now time.Time) { + for len(m.byExpiry) > 0 { + if now.Before(m.byExpiry[0].expiry) { + break + } + m.remove(m.byExpiry[0]) + } +} + +func (m *memLS) Confirm(now time.Time, name0, name1 string, conditions ...Condition) (func(), error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + var n0, n1 *memLSNode + if name0 != "" { + if n0 = m.lookup(slashClean(name0), conditions...); n0 == nil { + return nil, ErrConfirmationFailed + } + } + if name1 != "" { + if n1 = m.lookup(slashClean(name1), conditions...); n1 == nil { + return nil, ErrConfirmationFailed + } + } + + // Don't hold the same node twice. + if n1 == n0 { + n1 = nil + } + + if n0 != nil { + m.hold(n0) + } + if n1 != nil { + m.hold(n1) + } + return func() { + m.mu.Lock() + defer m.mu.Unlock() + if n1 != nil { + m.unhold(n1) + } + if n0 != nil { + m.unhold(n0) + } + }, nil +} + +// lookup returns the node n that locks the named resource, provided that n +// matches at least one of the given conditions and that lock isn't held by +// another party. Otherwise, it returns nil. +// +// n may be a parent of the named resource, if n is an infinite depth lock. +func (m *memLS) lookup(name string, conditions ...Condition) (n *memLSNode) { + // TODO: support Condition.Not and Condition.ETag. + for _, c := range conditions { + n = m.byToken[c.Token] + if n == nil || n.held { + continue + } + if name == n.details.Root { + return n + } + if n.details.ZeroDepth { + continue + } + if n.details.Root == "/" || strings.HasPrefix(name, n.details.Root+"/") { + return n + } + } + return nil +} + +func (m *memLS) hold(n *memLSNode) { + if n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = true + if n.details.Duration >= 0 && n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func (m *memLS) unhold(n *memLSNode) { + if !n.held { + panic("webdav: memLS inconsistent held state") + } + n.held = false + if n.details.Duration >= 0 { + heap.Push(&m.byExpiry, n) + } +} + +func (m *memLS) Create(now time.Time, details LockDetails) (string, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + details.Root = slashClean(details.Root) + + if !m.canCreate(details.Root, details.ZeroDepth) { + return "", ErrLocked + } + n := m.create(details.Root) + n.token = m.nextToken() + m.byToken[n.token] = n + n.details = details + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.token, nil +} + +func (m *memLS) Refresh(now time.Time, token string, duration time.Duration) (LockDetails, error) { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return LockDetails{}, ErrNoSuchLock + } + if n.held { + return LockDetails{}, ErrLocked + } + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } + n.details.Duration = duration + if n.details.Duration >= 0 { + n.expiry = now.Add(n.details.Duration) + heap.Push(&m.byExpiry, n) + } + return n.details, nil +} + +func (m *memLS) Unlock(now time.Time, token string) error { + m.mu.Lock() + defer m.mu.Unlock() + m.collectExpiredNodes(now) + + n := m.byToken[token] + if n == nil { + return ErrNoSuchLock + } + if n.held { + return ErrLocked + } + m.remove(n) + return nil +} + +func (m *memLS) canCreate(name string, zeroDepth bool) bool { + return walkToRoot(name, 
func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + return true + } + if first { + if n.token != "" { + // The target node is already locked. + return false + } + if !zeroDepth { + // The requested lock depth is infinite, and the fact that n exists + // (n != nil) means that a descendent of the target node is locked. + return false + } + } else if n.token != "" && !n.details.ZeroDepth { + // An ancestor of the target node is locked with infinite depth. + return false + } + return true + }) +} + +func (m *memLS) create(name string) (ret *memLSNode) { + walkToRoot(name, func(name0 string, first bool) bool { + n := m.byName[name0] + if n == nil { + n = &memLSNode{ + details: LockDetails{ + Root: name0, + }, + byExpiryIndex: -1, + } + m.byName[name0] = n + } + n.refCount++ + if first { + ret = n + } + return true + }) + return ret +} + +func (m *memLS) remove(n *memLSNode) { + delete(m.byToken, n.token) + n.token = "" + walkToRoot(n.details.Root, func(name0 string, first bool) bool { + x := m.byName[name0] + x.refCount-- + if x.refCount == 0 { + delete(m.byName, name0) + } + return true + }) + if n.byExpiryIndex >= 0 { + heap.Remove(&m.byExpiry, n.byExpiryIndex) + } +} + +func walkToRoot(name string, f func(name0 string, first bool) bool) bool { + for first := true; ; first = false { + if !f(name, first) { + return false + } + if name == "/" { + break + } + name = name[:strings.LastIndex(name, "/")] + if name == "" { + name = "/" + } + } + return true +} + +type memLSNode struct { + // details are the lock metadata. Even if this node's name is not explicitly locked, + // details.Root will still equal the node's name. + details LockDetails + // token is the unique identifier for this node's lock. An empty token means that + // this node is not explicitly locked. + token string + // refCount is the number of self-or-descendent nodes that are explicitly locked. + refCount int + // expiry is when this node's lock expires. + expiry time.Time + // byExpiryIndex is the index of this node in memLS.byExpiry. It is -1 + // if this node does not expire, or has expired. + byExpiryIndex int + // held is whether this node's lock is actively held by a Confirm call. + held bool +} + +type byExpiry []*memLSNode + +func (b *byExpiry) Len() int { + return len(*b) +} + +func (b *byExpiry) Less(i, j int) bool { + return (*b)[i].expiry.Before((*b)[j].expiry) +} + +func (b *byExpiry) Swap(i, j int) { + (*b)[i], (*b)[j] = (*b)[j], (*b)[i] + (*b)[i].byExpiryIndex = i + (*b)[j].byExpiryIndex = j +} + +func (b *byExpiry) Push(x interface{}) { + n := x.(*memLSNode) + n.byExpiryIndex = len(*b) + *b = append(*b, n) +} + +func (b *byExpiry) Pop() interface{} { + i := len(*b) - 1 + n := (*b)[i] + (*b)[i] = nil + n.byExpiryIndex = -1 + *b = (*b)[:i] + return n +} + +const infiniteTimeout = -1 + +// parseTimeout parses the Timeout HTTP header, as per section 10.7. If s is +// empty, an infiniteTimeout is returned. 
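+// For example, "Second-3600" parses to one hour, "Infinite" parses to
+// infiniteTimeout, and only the first value of a comma-separated list such
+// as "Infinite, Second-4100000000" is considered.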
+func parseTimeout(s string) (time.Duration, error) { + if s == "" { + return infiniteTimeout, nil + } + if i := strings.IndexByte(s, ','); i >= 0 { + s = s[:i] + } + s = strings.TrimSpace(s) + if s == "Infinite" { + return infiniteTimeout, nil + } + const pre = "Second-" + if !strings.HasPrefix(s, pre) { + return 0, errInvalidTimeout + } + s = s[len(pre):] + if s == "" || s[0] < '0' || '9' < s[0] { + return 0, errInvalidTimeout + } + n, err := strconv.ParseInt(s, 10, 64) + if err != nil || 1<<32-1 < n { + return 0, errInvalidTimeout + } + return time.Duration(n) * time.Second, nil +} diff --git a/drives/davServer/lock_test.go b/drives/davServer/lock_test.go new file mode 100644 index 00000000..1791c4e0 --- /dev/null +++ b/drives/davServer/lock_test.go @@ -0,0 +1,735 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "fmt" + "math/rand" + "path" + "reflect" + "sort" + "strconv" + "strings" + "testing" + "time" +) + +func TestWalkToRoot(t *testing.T) { + testCases := []struct { + name string + want []string + }{{ + "/a/b/c/d", + []string{ + "/a/b/c/d", + "/a/b/c", + "/a/b", + "/a", + "/", + }, + }, { + "/a", + []string{ + "/a", + "/", + }, + }, { + "/", + []string{ + "/", + }, + }} + + for _, tc := range testCases { + var got []string + if !walkToRoot(tc.name, func(name0 string, first bool) bool { + if first != (len(got) == 0) { + t.Errorf("name=%q: first=%t but len(got)==%d", tc.name, first, len(got)) + return false + } + got = append(got, name0) + return true + }) { + continue + } + if !reflect.DeepEqual(got, tc.want) { + t.Errorf("name=%q:\ngot %q\nwant %q", tc.name, got, tc.want) + } + } +} + +var lockTestDurations = []time.Duration{ + infiniteTimeout, // infiniteTimeout means to never expire. + 0, // A zero duration means to expire immediately. + 100 * time.Hour, // A very large duration will not expire in these tests. +} + +// lockTestNames are the names of a set of mutually compatible locks. For each +// name fragment: +// - _ means no explicit lock. +// - i means an infinite-depth lock, +// - z means a zero-depth lock, +var lockTestNames = []string{ + "/_/_/_/_/z", + "/_/_/i", + "/_/z", + "/_/z/i", + "/_/z/z", + "/_/z/_/i", + "/_/z/_/z", + "/i", + "/z", + "/z/_/i", + "/z/_/z", +} + +func lockTestZeroDepth(name string) bool { + switch name[len(name)-1] { + case 'i': + return false + case 'z': + return true + } + panic(fmt.Sprintf("lock name %q did not end with 'i' or 'z'", name)) +} + +func TestMemLSCanCreate(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + for _, name := range lockTestNames { + _, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + } + + wantCanCreate := func(name string, zeroDepth bool) bool { + for _, n := range lockTestNames { + switch { + case n == name: + // An existing lock has the same name as the proposed lock. + return false + case strings.HasPrefix(n, name): + // An existing lock would be a child of the proposed lock, + // which conflicts if the proposed lock has infinite depth. + if !zeroDepth { + return false + } + case strings.HasPrefix(name, n): + // An existing lock would be an ancestor of the proposed lock, + // which conflicts if the ancestor has infinite depth. 
+ if n[len(n)-1] == 'i' { + return false + } + } + } + return true + } + + var check func(int, string) + check = func(recursion int, name string) { + for _, zeroDepth := range []bool{false, true} { + got := m.canCreate(name, zeroDepth) + want := wantCanCreate(name, zeroDepth) + if got != want { + t.Errorf("canCreate name=%q zeroDepth=%t: got %t, want %t", name, zeroDepth, got, want) + } + } + if recursion == 6 { + return + } + if name != "/" { + name += "/" + } + for _, c := range "_iz" { + check(recursion+1, name+string(c)) + } + } + check(0, "/") +} + +func TestMemLSLookup(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + + badToken := m.nextToken() + t.Logf("badToken=%q", badToken) + + for _, name := range lockTestNames { + token, err := m.Create(now, LockDetails{ + Root: name, + Duration: infiniteTimeout, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("creating lock for %q: %v", name, err) + } + t.Logf("%-15q -> node=%p token=%q", name, m.byName[name], token) + } + + baseNames := append([]string{"/a", "/b/c"}, lockTestNames...) + for _, baseName := range baseNames { + for _, suffix := range []string{"", "/0", "/1/2/3"} { + name := baseName + suffix + + goodToken := "" + base := m.byName[baseName] + if base != nil && (suffix == "" || !lockTestZeroDepth(baseName)) { + goodToken = base.token + } + + for _, token := range []string{badToken, goodToken} { + if token == "" { + continue + } + + got := m.lookup(name, Condition{Token: token}) + want := base + if token == badToken { + want = nil + } + if got != want { + t.Errorf("name=%-20qtoken=%q (bad=%t): got %p, want %p", + name, token, token == badToken, got, want) + } + } + } + } +} + +func TestMemLSConfirm(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + alice, err := m.Create(now, LockDetails{ + Root: "/alice", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + + tweedle, err := m.Create(now, LockDetails{ + Root: "/tweedle", + Duration: infiniteTimeout, + ZeroDepth: false, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + + // Test a mismatch between name and condition. + _, err = m.Confirm(now, "/tweedle/dee", "", Condition{Token: alice}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (mismatch): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (mismatch): inconsistent state: %v", err) + } + + // Test two names (that fall under the same lock) in the one Confirm call. + release, err := m.Confirm(now, "/tweedle/dee", "/tweedle/dum", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (twins): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (twins): inconsistent state: %v", err) + } + release() + if err := m.consistent(); err != nil { + t.Fatalf("release (twins): inconsistent state: %v", err) + } + + // Test the same two names in overlapping Confirm / release calls. 
+ releaseDee, err := m.Confirm(now, "/tweedle/dee", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #0): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #0): inconsistent state: %v", err) + } + + _, err = m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != ErrConfirmationFailed { + t.Fatalf("Confirm (sequence #1): got %v, want ErrConfirmationFailed", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #1): inconsistent state: %v", err) + } + + releaseDee() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #2): inconsistent state: %v", err) + } + + releaseDum, err := m.Confirm(now, "/tweedle/dum", "", Condition{Token: tweedle}) + if err != nil { + t.Fatalf("Confirm (sequence #3): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Confirm (sequence #3): inconsistent state: %v", err) + } + + // Test that you can't unlock a held lock. + err = m.Unlock(now, tweedle) + if err != ErrLocked { + t.Fatalf("Unlock (sequence #4): got %v, want ErrLocked", err) + } + + releaseDum() + if err := m.consistent(); err != nil { + t.Fatalf("release (sequence #5): inconsistent state: %v", err) + } + + err = m.Unlock(now, tweedle) + if err != nil { + t.Fatalf("Unlock (sequence #6): %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock (sequence #6): inconsistent state: %v", err) + } +} + +func TestMemLSNonCanonicalRoot(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + token, err := m.Create(now, LockDetails{ + Root: "/foo/./bar//", + Duration: 1 * time.Second, + }) + if err != nil { + t.Fatalf("Create: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Create: inconsistent state: %v", err) + } + if err := m.Unlock(now, token); err != nil { + t.Fatalf("Unlock: %v", err) + } + if err := m.consistent(); err != nil { + t.Fatalf("Unlock: inconsistent state: %v", err) + } +} + +func TestMemLSExpiry(t *testing.T) { + m := NewMemLS().(*memLS) + testCases := []string{ + "setNow 0", + "create /a.5", + "want /a.5", + "create /c.6", + "want /a.5 /c.6", + "create /a/b.7", + "want /a.5 /a/b.7 /c.6", + "setNow 4", + "want /a.5 /a/b.7 /c.6", + "setNow 5", + "want /a/b.7 /c.6", + "setNow 6", + "want /a/b.7", + "setNow 7", + "want ", + "setNow 8", + "want ", + "create /a.12", + "create /b.13", + "create /c.15", + "create /a/d.16", + "want /a.12 /a/d.16 /b.13 /c.15", + "refresh /a.14", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 12", + "want /a.14 /a/d.16 /b.13 /c.15", + "setNow 13", + "want /a.14 /a/d.16 /c.15", + "setNow 14", + "want /a/d.16 /c.15", + "refresh /a/d.20", + "refresh /c.20", + "want /a/d.20 /c.20", + "setNow 20", + "want ", + } + + tokens := map[string]string{} + zTime := time.Unix(0, 0) + now := zTime + for i, tc := range testCases { + j := strings.IndexByte(tc, ' ') + if j < 0 { + t.Fatalf("test case #%d %q: invalid command", i, tc) + } + op, arg := tc[:j], tc[j+1:] + switch op { + default: + t.Fatalf("test case #%d %q: invalid operation %q", i, tc, op) + + case "create", "refresh": + parts := strings.Split(arg, ".") + if len(parts) != 2 { + t.Fatalf("test case #%d %q: invalid create", i, tc) + } + root := parts[0] + d, err := strconv.Atoi(parts[1]) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + dur := time.Unix(0, 0).Add(time.Duration(d) * time.Second).Sub(now) + + switch op { + case "create": + token, err := m.Create(now, LockDetails{ + Root: root, + Duration: dur, + 
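
The sequence exercised here is the core LockSystem lifecycle: Create returns a token, Confirm marks the lock held and hands back a release callback, and Unlock is refused with ErrLocked until that callback runs. A compressed sketch of the same flow, assuming it lives inside the davServer package; the path is illustrative.

func exampleLockLifecycle() error {
	now := time.Now()
	ls := NewMemLS()
	token, err := ls.Create(now, LockDetails{
		Root:      "/shared/report.txt", // illustrative path
		Duration:  infiniteTimeout,
		ZeroDepth: true,
	})
	if err != nil {
		return err
	}
	release, err := ls.Confirm(now, "/shared/report.txt", "", Condition{Token: token})
	if err != nil {
		return err
	}
	// While the confirmation is held, the lock cannot be removed.
	if err := ls.Unlock(now, token); err != ErrLocked {
		return fmt.Errorf("expected ErrLocked, got %v", err)
	}
	release()
	// Once released, Unlock succeeds.
	return ls.Unlock(now, token)
}
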
ZeroDepth: true, + }) + if err != nil { + t.Fatalf("test case #%d %q: Create: %v", i, tc, err) + } + tokens[root] = token + + case "refresh": + token := tokens[root] + if token == "" { + t.Fatalf("test case #%d %q: no token for %q", i, tc, root) + } + got, err := m.Refresh(now, token, dur) + if err != nil { + t.Fatalf("test case #%d %q: Refresh: %v", i, tc, err) + } + want := LockDetails{ + Root: root, + Duration: dur, + ZeroDepth: true, + } + if got != want { + t.Fatalf("test case #%d %q:\ngot %v\nwant %v", i, tc, got, want) + } + } + + case "setNow": + d, err := strconv.Atoi(arg) + if err != nil { + t.Fatalf("test case #%d %q: invalid duration", i, tc) + } + now = time.Unix(0, 0).Add(time.Duration(d) * time.Second) + + case "want": + m.mu.Lock() + m.collectExpiredNodes(now) + got := make([]string, 0, len(m.byToken)) + for _, n := range m.byToken { + got = append(got, fmt.Sprintf("%s.%d", + n.details.Root, n.expiry.Sub(zTime)/time.Second)) + } + m.mu.Unlock() + sort.Strings(got) + want := []string{} + if arg != "" { + want = strings.Split(arg, " ") + } + if !reflect.DeepEqual(got, want) { + t.Fatalf("test case #%d %q:\ngot %q\nwant %q", i, tc, got, want) + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("test case #%d %q: inconsistent state: %v", i, tc, err) + } + } +} + +func TestMemLS(t *testing.T) { + now := time.Unix(0, 0) + m := NewMemLS().(*memLS) + rng := rand.New(rand.NewSource(0)) + tokens := map[string]string{} + nConfirm, nCreate, nRefresh, nUnlock := 0, 0, 0, 0 + const N = 2000 + + for i := 0; i < N; i++ { + name := lockTestNames[rng.Intn(len(lockTestNames))] + duration := lockTestDurations[rng.Intn(len(lockTestDurations))] + confirmed, unlocked := false, false + + // If the name was already locked, we randomly confirm/release, refresh + // or unlock it. Otherwise, we create a lock. + token := tokens[name] + if token != "" { + switch rng.Intn(3) { + case 0: + confirmed = true + nConfirm++ + release, err := m.Confirm(now, name, "", Condition{Token: token}) + if err != nil { + t.Fatalf("iteration #%d: Confirm %q: %v", i, name, err) + } + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + release() + + case 1: + nRefresh++ + if _, err := m.Refresh(now, token, duration); err != nil { + t.Fatalf("iteration #%d: Refresh %q: %v", i, name, err) + } + + case 2: + unlocked = true + nUnlock++ + if err := m.Unlock(now, token); err != nil { + t.Fatalf("iteration #%d: Unlock %q: %v", i, name, err) + } + } + + } else { + nCreate++ + var err error + token, err = m.Create(now, LockDetails{ + Root: name, + Duration: duration, + ZeroDepth: lockTestZeroDepth(name), + }) + if err != nil { + t.Fatalf("iteration #%d: Create %q: %v", i, name, err) + } + } + + if !confirmed { + if duration == 0 || unlocked { + // A zero-duration lock should expire immediately and is + // effectively equivalent to being unlocked. 
+ tokens[name] = "" + } else { + tokens[name] = token + } + } + + if err := m.consistent(); err != nil { + t.Fatalf("iteration #%d: inconsistent state: %v", i, err) + } + } + + if nConfirm < N/10 { + t.Fatalf("too few Confirm calls: got %d, want >= %d", nConfirm, N/10) + } + if nCreate < N/10 { + t.Fatalf("too few Create calls: got %d, want >= %d", nCreate, N/10) + } + if nRefresh < N/10 { + t.Fatalf("too few Refresh calls: got %d, want >= %d", nRefresh, N/10) + } + if nUnlock < N/10 { + t.Fatalf("too few Unlock calls: got %d, want >= %d", nUnlock, N/10) + } +} + +func (m *memLS) consistent() error { + m.mu.Lock() + defer m.mu.Unlock() + + // If m.byName is non-empty, then it must contain an entry for the root "/", + // and its refCount should equal the number of locked nodes. + if len(m.byName) > 0 { + n := m.byName["/"] + if n == nil { + return fmt.Errorf(`non-empty m.byName does not contain the root "/"`) + } + if n.refCount != len(m.byToken) { + return fmt.Errorf("root node refCount=%d, differs from len(m.byToken)=%d", n.refCount, len(m.byToken)) + } + } + + for name, n := range m.byName { + // The map keys should be consistent with the node's copy of the key. + if n.details.Root != name { + return fmt.Errorf("node name %q != byName map key %q", n.details.Root, name) + } + + // A name must be clean, and start with a "/". + if len(name) == 0 || name[0] != '/' { + return fmt.Errorf(`node name %q does not start with "/"`, name) + } + if name != path.Clean(name) { + return fmt.Errorf(`node name %q is not clean`, name) + } + + // A node's refCount should be positive. + if n.refCount <= 0 { + return fmt.Errorf("non-positive refCount for node at name %q", name) + } + + // A node's refCount should be the number of self-or-descendents that + // are locked (i.e. have a non-empty token). + var list []string + for name0, n0 := range m.byName { + // All of lockTestNames' name fragments are one byte long: '_', 'i' or 'z', + // so strings.HasPrefix is equivalent to self-or-descendent name match. + // We don't have to worry about "/foo/bar" being a false positive match + // for "/foo/b". + if strings.HasPrefix(name0, name) && n0.token != "" { + list = append(list, name0) + } + } + if n.refCount != len(list) { + sort.Strings(list) + return fmt.Errorf("node at name %q has refCount %d but locked self-or-descendents are %q (len=%d)", + name, n.refCount, list, len(list)) + } + + // A node n is in m.byToken if it has a non-empty token. + if n.token != "" { + if _, ok := m.byToken[n.token]; !ok { + return fmt.Errorf("node at name %q has token %q but not in m.byToken", name, n.token) + } + } + + // A node n is in m.byExpiry if it has a non-negative byExpiryIndex. + if n.byExpiryIndex >= 0 { + if n.byExpiryIndex >= len(m.byExpiry) { + return fmt.Errorf("node at name %q has byExpiryIndex %d but m.byExpiry has length %d", name, n.byExpiryIndex, len(m.byExpiry)) + } + if n != m.byExpiry[n.byExpiryIndex] { + return fmt.Errorf("node at name %q has byExpiryIndex %d but that indexes a different node", name, n.byExpiryIndex) + } + } + } + + for token, n := range m.byToken { + // The map keys should be consistent with the node's copy of the key. + if n.token != token { + return fmt.Errorf("node token %q != byToken map key %q", n.token, token) + } + + // Every node in m.byToken is in m.byName. 
+ if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byToken but not in m.byName", n.details.Root) + } + } + + for i, n := range m.byExpiry { + // The slice indices should be consistent with the node's copy of the index. + if n.byExpiryIndex != i { + return fmt.Errorf("node byExpiryIndex %d != byExpiry slice index %d", n.byExpiryIndex, i) + } + + // Every node in m.byExpiry is in m.byName. + if _, ok := m.byName[n.details.Root]; !ok { + return fmt.Errorf("node at name %q in m.byExpiry but not in m.byName", n.details.Root) + } + + // No node in m.byExpiry should be held. + if n.held { + return fmt.Errorf("node at name %q in m.byExpiry is held", n.details.Root) + } + } + return nil +} + +func TestParseTimeout(t *testing.T) { + testCases := []struct { + s string + want time.Duration + wantErr error + }{{ + "", + infiniteTimeout, + nil, + }, { + "Infinite", + infiniteTimeout, + nil, + }, { + "Infinitesimal", + 0, + errInvalidTimeout, + }, { + "infinite", + 0, + errInvalidTimeout, + }, { + "Second-0", + 0 * time.Second, + nil, + }, { + "Second-123", + 123 * time.Second, + nil, + }, { + " Second-456 ", + 456 * time.Second, + nil, + }, { + "Second-4100000000", + 4100000000 * time.Second, + nil, + }, { + "junk", + 0, + errInvalidTimeout, + }, { + "Second-", + 0, + errInvalidTimeout, + }, { + "Second--1", + 0, + errInvalidTimeout, + }, { + "Second--123", + 0, + errInvalidTimeout, + }, { + "Second-+123", + 0, + errInvalidTimeout, + }, { + "Second-0x123", + 0, + errInvalidTimeout, + }, { + "second-123", + 0, + errInvalidTimeout, + }, { + "Second-4294967295", + 4294967295 * time.Second, + nil, + }, { + // Section 10.7 says that "The timeout value for TimeType "Second" + // must not be greater than 2^32-1." + "Second-4294967296", + 0, + errInvalidTimeout, + }, { + // This test case comes from section 9.10.9 of the spec. It says, + // + // "In this request, the client has specified that it desires an + // infinite-length lock, if available, otherwise a timeout of 4.1 + // billion seconds, if available." + // + // The Go WebDAV package always supports infinite length locks, + // and ignores the fallback after the comma. + "Infinite, Second-4100000000", + infiniteTimeout, + nil, + }} + + for _, tc := range testCases { + got, gotErr := parseTimeout(tc.s) + if got != tc.want || gotErr != tc.wantErr { + t.Errorf("parsing %q:\ngot %v, %v\nwant %v, %v", tc.s, got, gotErr, tc.want, tc.wantErr) + } + } +} diff --git a/drives/davServer/prop.go b/drives/davServer/prop.go new file mode 100644 index 00000000..df812c7d --- /dev/null +++ b/drives/davServer/prop.go @@ -0,0 +1,469 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "bytes" + "context" + "encoding/xml" + "errors" + "fmt" + "io" + "mime" + "net/http" + "os" + "path/filepath" + "strconv" +) + +// Proppatch describes a property update instruction as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPPATCH +type Proppatch struct { + // Remove specifies whether this patch removes properties. If it does not + // remove them, it sets them. + Remove bool + // Props contains the properties to be set or removed. + Props []Property +} + +// Propstat describes a XML propstat element as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +type Propstat struct { + // Props contains the properties for which Status applies. 
+ Props []Property + + // Status defines the HTTP status code of the properties in Prop. + // Allowed values include, but are not limited to the WebDAV status + // code extensions for HTTP/1.1. + // http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 + Status int + + // XMLError contains the XML representation of the optional error element. + // XML content within this field must not rely on any predefined + // namespace declarations or prefixes. If empty, the XML error element + // is omitted. + XMLError string + + // ResponseDescription contains the contents of the optional + // responsedescription field. If empty, the XML element is omitted. + ResponseDescription string +} + +// makePropstats returns a slice containing those of x and y whose Props slice +// is non-empty. If both are empty, it returns a slice containing an otherwise +// zero Propstat whose HTTP status code is 200 OK. +func makePropstats(x, y Propstat) []Propstat { + pstats := make([]Propstat, 0, 2) + if len(x.Props) != 0 { + pstats = append(pstats, x) + } + if len(y.Props) != 0 { + pstats = append(pstats, y) + } + if len(pstats) == 0 { + pstats = append(pstats, Propstat{ + Status: http.StatusOK, + }) + } + return pstats +} + +// DeadPropsHolder holds the dead properties of a resource. +// +// Dead properties are those properties that are explicitly defined. In +// comparison, live properties, such as DAV:getcontentlength, are implicitly +// defined by the underlying resource, and cannot be explicitly overridden or +// removed. See the Terminology section of +// http://www.webdav.org/specs/rfc4918.html#rfc.section.3 +// +// There is a whitelist of the names of live properties. This package handles +// all live properties, and will only pass non-whitelisted names to the Patch +// method of DeadPropsHolder implementations. +type DeadPropsHolder interface { + // DeadProps returns a copy of the dead properties held. + DeadProps() (map[xml.Name]Property, error) + + // Patch patches the dead properties held. + // + // Patching is atomic; either all or no patches succeed. It returns (nil, + // non-nil) if an internal server error occurred, otherwise the Propstats + // collectively contain one Property for each proposed patch Property. If + // all patches succeed, Patch returns a slice of length one and a Propstat + // element with a 200 OK HTTP status code. If none succeed, for reasons + // other than an internal server error, no Propstat has status 200 OK. + // + // For more details on when various HTTP status codes apply, see + // http://www.webdav.org/specs/rfc4918.html#PROPPATCH-status + Patch([]Proppatch) ([]Propstat, error) +} + +// liveProps contains all supported, protected DAV: properties. +var liveProps = map[xml.Name]struct { + // findFn implements the propfind function of this property. If nil, + // it indicates a hidden property. + findFn func(context.Context, FileSystem, LockSystem, string, os.FileInfo) (string, error) + // dir is true if the property applies to directories. 
+ dir bool +}{ + {Space: "DAV:", Local: "resourcetype"}: { + findFn: findResourceType, + dir: true, + }, + {Space: "DAV:", Local: "displayname"}: { + findFn: findDisplayName, + dir: true, + }, + {Space: "DAV:", Local: "getcontentlength"}: { + findFn: findContentLength, + dir: false, + }, + {Space: "DAV:", Local: "getlastmodified"}: { + findFn: findLastModified, + // http://webdav.org/specs/rfc4918.html#PROPERTY_getlastmodified + // suggests that getlastmodified should only apply to GETable + // resources, and this package does not support GET on directories. + // + // Nonetheless, some WebDAV clients expect child directories to be + // sortable by getlastmodified date, so this value is true, not false. + // See golang.org/issue/15334. + dir: true, + }, + {Space: "DAV:", Local: "creationdate"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontentlanguage"}: { + findFn: nil, + dir: false, + }, + {Space: "DAV:", Local: "getcontenttype"}: { + findFn: findContentType, + dir: false, + }, + {Space: "DAV:", Local: "getetag"}: { + findFn: findETag, + // findETag implements ETag as the concatenated hex values of a file's + // modification time and size. This is not a reliable synchronization + // mechanism for directories, so we do not advertise getetag for DAV + // collections. + dir: false, + }, + + // TODO: The lockdiscovery property requires LockSystem to list the + // active locks on a resource. + {Space: "DAV:", Local: "lockdiscovery"}: {}, + {Space: "DAV:", Local: "supportedlock"}: { + findFn: findSupportedLock, + dir: true, + }, +} + +// TODO(nigeltao) merge props and allprop? + +// props returns the status of the properties named pnames for resource name. +// +// Each Propstat has a unique status and each property name will only be part +// of one Propstat element. +func props(ctx context.Context, fs FileSystem, ls LockSystem, name string, pnames []xml.Name) ([]Propstat, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pstatOK := Propstat{Status: http.StatusOK} + pstatNotFound := Propstat{Status: http.StatusNotFound} + for _, pn := range pnames { + // If this file has dead properties, check if they contain pn. + if dp, ok := deadProps[pn]; ok { + pstatOK.Props = append(pstatOK.Props, dp) + continue + } + // Otherwise, it must either be a live property or we don't know it. + if prop := liveProps[pn]; prop.findFn != nil && (prop.dir || !isDir) { + innerXML, err := prop.findFn(ctx, fs, ls, name, fi) + if err != nil { + return nil, err + } + pstatOK.Props = append(pstatOK.Props, Property{ + XMLName: pn, + InnerXML: []byte(innerXML), + }) + } else { + pstatNotFound.Props = append(pstatNotFound.Props, Property{ + XMLName: pn, + }) + } + } + return makePropstats(pstatOK, pstatNotFound), nil +} + +// propnames returns the property names defined for resource name. 
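
A small sketch of driving props directly, as a PROPFIND for named properties ultimately does; it assumes it runs inside the davServer package against whatever FileSystem/LockSystem pair the caller already has (fs, ls, and the "/file" path are placeholders).

func examplePropfind(ctx context.Context, fs FileSystem, ls LockSystem) error {
	pnames := []xml.Name{
		{Space: "DAV:", Local: "getcontentlength"},
		{Space: "DAV:", Local: "getlastmodified"},
	}
	pstats, err := props(ctx, fs, ls, "/file", pnames)
	if err != nil {
		return err
	}
	for _, ps := range pstats {
		for _, p := range ps.Props {
			fmt.Printf("%d %s %s: %s\n", ps.Status, p.XMLName.Space, p.XMLName.Local, p.InnerXML)
		}
	}
	return nil
}
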
+func propnames(ctx context.Context, fs FileSystem, ls LockSystem, name string) ([]xml.Name, error) { + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return nil, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return nil, err + } + isDir := fi.IsDir() + + var deadProps map[xml.Name]Property + if dph, ok := f.(DeadPropsHolder); ok { + deadProps, err = dph.DeadProps() + if err != nil { + return nil, err + } + } + + pnames := make([]xml.Name, 0, len(liveProps)+len(deadProps)) + for pn, prop := range liveProps { + if prop.findFn != nil && (prop.dir || !isDir) { + pnames = append(pnames, pn) + } + } + for pn := range deadProps { + pnames = append(pnames, pn) + } + return pnames, nil +} + +// allprop returns the properties defined for resource name and the properties +// named in include. +// +// Note that RFC 4918 defines 'allprop' to return the DAV: properties defined +// within the RFC plus dead properties. Other live properties should only be +// returned if they are named in 'include'. +// +// See http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND +func allprop(ctx context.Context, fs FileSystem, ls LockSystem, name string, include []xml.Name) ([]Propstat, error) { + pnames, err := propnames(ctx, fs, ls, name) + if err != nil { + return nil, err + } + // Add names from include if they are not already covered in pnames. + nameset := make(map[xml.Name]bool) + for _, pn := range pnames { + nameset[pn] = true + } + for _, pn := range include { + if !nameset[pn] { + pnames = append(pnames, pn) + } + } + return props(ctx, fs, ls, name, pnames) +} + +// patch patches the properties of resource name. The return values are +// constrained in the same manner as DeadPropsHolder.Patch. +func patch(ctx context.Context, fs FileSystem, ls LockSystem, name string, patches []Proppatch) ([]Propstat, error) { + conflict := false +loop: + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + conflict = true + break loop + } + } + } + if conflict { + pstatForbidden := Propstat{ + Status: http.StatusForbidden, + XMLError: ``, + } + pstatFailedDep := Propstat{ + Status: StatusFailedDependency, + } + for _, patch := range patches { + for _, p := range patch.Props { + if _, ok := liveProps[p.XMLName]; ok { + pstatForbidden.Props = append(pstatForbidden.Props, Property{XMLName: p.XMLName}) + } else { + pstatFailedDep.Props = append(pstatFailedDep.Props, Property{XMLName: p.XMLName}) + } + } + } + return makePropstats(pstatForbidden, pstatFailedDep), nil + } + + f, err := fs.OpenFile(ctx, name, os.O_RDWR, 0) + if err != nil { + return nil, err + } + defer f.Close() + if dph, ok := f.(DeadPropsHolder); ok { + ret, err := dph.Patch(patches) + if err != nil { + return nil, err + } + // http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat says that + // "The contents of the prop XML element must only list the names of + // properties to which the result in the status element applies." + for _, pstat := range ret { + for i, p := range pstat.Props { + pstat.Props[i] = Property{XMLName: p.XMLName} + } + } + return ret, nil + } + // The file doesn't implement the optional DeadPropsHolder interface, so + // all patches are forbidden. 
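
patch only delegates dead-property updates when the opened File also implements DeadPropsHolder; otherwise every proposed patch is refused, as the fallback just below shows. For a sense of what an implementation could look like, here is a rough in-memory sketch; the propFile type and its field names are hypothetical, only the DeadPropsHolder, Proppatch, Propstat and Property definitions above are real.

// propFile is a hypothetical File wrapper that keeps dead properties in a map.
type propFile struct {
	File
	mu    sync.Mutex
	props map[xml.Name]Property
}

func (f *propFile) DeadProps() (map[xml.Name]Property, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	out := make(map[xml.Name]Property, len(f.props))
	for k, v := range f.props {
		out[k] = v
	}
	return out, nil
}

// Patch applies every instruction and reports a single 200 OK Propstat,
// matching the "all patches succeed" case documented on DeadPropsHolder.
func (f *propFile) Patch(patches []Proppatch) ([]Propstat, error) {
	f.mu.Lock()
	defer f.mu.Unlock()
	if f.props == nil {
		f.props = make(map[xml.Name]Property)
	}
	pstat := Propstat{Status: http.StatusOK}
	for _, pp := range patches {
		for _, p := range pp.Props {
			if pp.Remove {
				delete(f.props, p.XMLName)
			} else {
				f.props[p.XMLName] = p
			}
			pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName})
		}
	}
	return []Propstat{pstat}, nil
}
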
+ pstat := Propstat{Status: http.StatusForbidden} + for _, patch := range patches { + for _, p := range patch.Props { + pstat.Props = append(pstat.Props, Property{XMLName: p.XMLName}) + } + } + return []Propstat{pstat}, nil +} + +func escapeXML(s string) string { + for i := 0; i < len(s); i++ { + // As an optimization, if s contains only ASCII letters, digits or a + // few special characters, the escaped value is s itself and we don't + // need to allocate a buffer and convert between string and []byte. + switch c := s[i]; { + case c == ' ' || c == '_' || + ('+' <= c && c <= '9') || // Digits as well as + , - . and / + ('A' <= c && c <= 'Z') || + ('a' <= c && c <= 'z'): + continue + } + // Otherwise, go through the full escaping process. + var buf bytes.Buffer + xml.EscapeText(&buf, []byte(s)) + return buf.String() + } + return s +} + +func findResourceType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if fi.IsDir() { + return ``, nil + } + return "", nil +} + +func findDisplayName(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if slashClean(name) == "/" { + // Hide the real name of a possibly prefixed root directory. + return "", nil + } + return escapeXML(fi.Name()), nil +} + +func findContentLength(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return strconv.FormatInt(fi.Size(), 10), nil +} + +func findLastModified(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return fi.ModTime().UTC().Format(http.TimeFormat), nil +} + +// ErrNotImplemented should be returned by optional interfaces if they +// want the original implementation to be used. +var ErrNotImplemented = errors.New("not implemented") + +// ContentTyper is an optional interface for the os.FileInfo +// objects returned by the FileSystem. +// +// If this interface is defined then it will be used to read the +// content type from the object. +// +// If this interface is not defined the file will be opened and the +// content type will be guessed from the initial contents of the file. +type ContentTyper interface { + // ContentType returns the content type for the file. + // + // If this returns error ErrNotImplemented then the error will + // be ignored and the base implementation will be used + // instead. + ContentType(ctx context.Context) (string, error) +} + +func findContentType(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if do, ok := fi.(ContentTyper); ok { + ctype, err := do.ContentType(ctx) + if err != ErrNotImplemented { + return ctype, err + } + } + f, err := fs.OpenFile(ctx, name, os.O_RDONLY, 0) + if err != nil { + return "", err + } + defer f.Close() + // This implementation is based on serveContent's code in the standard net/http package. + ctype := mime.TypeByExtension(filepath.Ext(name)) + if ctype != "" { + return ctype, nil + } + // Read a chunk to decide between utf-8 text and binary. + var buf [512]byte + n, err := io.ReadFull(f, buf[:]) + if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF { + return "", err + } + ctype = http.DetectContentType(buf[:n]) + // Rewind file. + _, err = f.Seek(0, io.SeekStart) + return ctype, err +} + +// ETager is an optional interface for the os.FileInfo objects +// returned by the FileSystem. +// +// If this interface is defined then it will be used to read the ETag +// for the object. 
+// +// If this interface is not defined an ETag will be computed using the +// ModTime() and the Size() methods of the os.FileInfo object. +type ETager interface { + // ETag returns an ETag for the file. This should be of the + // form "value" or W/"value" + // + // If this returns error ErrNotImplemented then the error will + // be ignored and the base implementation will be used + // instead. + ETag(ctx context.Context) (string, error) +} + +func findETag(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + if do, ok := fi.(ETager); ok { + etag, err := do.ETag(ctx) + if err != ErrNotImplemented { + return etag, err + } + } + // The Apache http 2.4 web server by default concatenates the + // modification time and size of a file. We replicate the heuristic + // with nanosecond granularity. + return fmt.Sprintf(`"%x%x"`, fi.ModTime().UnixNano(), fi.Size()), nil +} + +func findSupportedLock(ctx context.Context, fs FileSystem, ls LockSystem, name string, fi os.FileInfo) (string, error) { + return `` + + `` + + `` + + `` + + ``, nil +} diff --git a/drives/davServer/prop_test.go b/drives/davServer/prop_test.go new file mode 100644 index 00000000..e1ca3022 --- /dev/null +++ b/drives/davServer/prop_test.go @@ -0,0 +1,716 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "context" + "encoding/xml" + "fmt" + "net/http" + "os" + "reflect" + "regexp" + "sort" + "testing" +) + +func TestMemPS(t *testing.T) { + ctx := context.Background() + // calcProps calculates the getlastmodified and getetag DAV: property + // values in pstats for resource name in file-system fs. 
+ calcProps := func(name string, fs FileSystem, ls LockSystem, pstats []Propstat) error { + fi, err := fs.Stat(ctx, name) + if err != nil { + return err + } + for _, pst := range pstats { + for i, p := range pst.Props { + switch p.XMLName { + case xml.Name{Space: "DAV:", Local: "getlastmodified"}: + p.InnerXML = []byte(fi.ModTime().UTC().Format(http.TimeFormat)) + pst.Props[i] = p + case xml.Name{Space: "DAV:", Local: "getetag"}: + if fi.IsDir() { + continue + } + etag, err := findETag(ctx, fs, ls, name, fi) + if err != nil { + return err + } + p.InnerXML = []byte(etag) + pst.Props[i] = p + } + } + } + return nil + } + + const ( + lockEntry = `` + + `` + + `` + + `` + + `` + statForbiddenError = `` + ) + + type propOp struct { + op string + name string + pnames []xml.Name + patches []Proppatch + wantPnames []xml.Name + wantPropstats []Propstat + } + + testCases := []struct { + desc string + noDeadProps bool + buildfs []string + propOp []propOp + }{{ + desc: "propname", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propname", + name: "/dir", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "DAV:", Local: "getlastmodified"}, + }, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + }, + }}, + }, { + desc: "allprop dir and file", + buildfs: []string{"mkdir /dir", "write /file foobarbaz"}, + propOp: []propOp{{ + op: "allprop", + name: "/dir", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("dir"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}, + }}, + }, { + op: "allprop", + name: "/file", + pnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "foo", Local: "bar"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("file"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlength"}, + InnerXML: []byte("9"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getlastmodified"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getcontenttype"}, + InnerXML: []byte("text/plain; charset=utf-8"), + }, { + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. + }, { + XMLName: xml.Name{Space: "DAV:", Local: "supportedlock"}, + InnerXML: []byte(lockEntry), + }}}, { + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}}, + }, + }}, + }, { + desc: "propfind DAV:resourcetype", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(``), + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{Space: "DAV:", Local: "resourcetype"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "resourcetype"}, + InnerXML: []byte(""), + }}, + }}, + }}, + }, { + desc: "propfind unsupported DAV properties", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "DAV:", Local: "getcontentlanguage"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getcontentlanguage"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "DAV:", Local: "creationdate"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "creationdate"}, + }}, + }}, + }}, + }, { + desc: "propfind getetag for files but not for directories", + buildfs: []string{"mkdir /dir", "touch /file"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }, { + op: "propfind", + name: "/file", + pnames: []xml.Name{{Space: "DAV:", Local: "getetag"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + InnerXML: nil, // Calculated during test. 
+ }}, + }}, + }}, + }, { + desc: "proppatch property on no-dead-properties file system", + buildfs: []string{"mkdir /dir"}, + noDeadProps: true, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "getetag"}, + }}, + }}, + }}, + }, { + desc: "proppatch dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + }}, + }, { + desc: "proppatch dead property with failed dependency", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }, { + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + InnerXML: []byte("xxx"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusForbidden, + XMLError: statForbiddenError, + Props: []Property{{ + XMLName: xml.Name{Space: "DAV:", Local: "displayname"}, + }}, + }, { + Status: StatusFailedDependency, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "proppatch remove dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }, { + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }, { + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: 
[]Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propfind", + name: "/dir", + pnames: []xml.Name{ + {Space: "foo", Local: "bar"}, + {Space: "spam", Local: "ham"}, + }, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }, { + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "spam", Local: "ham"}, + InnerXML: []byte("eggs"), + }}, + }}, + }}, + }, { + desc: "propname with dead property", + buildfs: []string{"touch /file"}, + propOp: []propOp{{ + op: "proppatch", + name: "/file", + patches: []Proppatch{{ + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + InnerXML: []byte("baz"), + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }, { + op: "propname", + name: "/file", + wantPnames: []xml.Name{ + {Space: "DAV:", Local: "resourcetype"}, + {Space: "DAV:", Local: "displayname"}, + {Space: "DAV:", Local: "getcontentlength"}, + {Space: "DAV:", Local: "getlastmodified"}, + {Space: "DAV:", Local: "getcontenttype"}, + {Space: "DAV:", Local: "getetag"}, + {Space: "DAV:", Local: "supportedlock"}, + {Space: "foo", Local: "bar"}, + }, + }}, + }, { + desc: "proppatch remove unknown dead property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "proppatch", + name: "/dir", + patches: []Proppatch{{ + Remove: true, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + wantPropstats: []Propstat{{ + Status: http.StatusOK, + Props: []Property{{ + XMLName: xml.Name{Space: "foo", Local: "bar"}, + }}, + }}, + }}, + }, { + desc: "bad: propfind unknown property", + buildfs: []string{"mkdir /dir"}, + propOp: []propOp{{ + op: "propfind", + name: "/dir", + pnames: []xml.Name{{Space: "foo:", Local: "bar"}}, + wantPropstats: []Propstat{{ + Status: http.StatusNotFound, + Props: []Property{{ + XMLName: xml.Name{Space: "foo:", Local: "bar"}, + }}, + }}, + }}, + }} + + for _, tc := range testCases { + fs, err := buildTestFS(tc.buildfs) + if err != nil { + t.Fatalf("%s: cannot create test filesystem: %v", tc.desc, err) + } + if tc.noDeadProps { + fs = noDeadPropsFS{fs} + } + ls := NewMemLS() + for _, op := range tc.propOp { + desc := fmt.Sprintf("%s: %s %s", tc.desc, op.op, op.name) + if err = calcProps(op.name, fs, ls, op.wantPropstats); err != nil { + t.Fatalf("%s: calcProps: %v", desc, err) + } + + // Call property system. + var propstats []Propstat + switch op.op { + case "propname": + pnames, err := propnames(ctx, fs, ls, op.name) + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + sort.Sort(byXMLName(pnames)) + sort.Sort(byXMLName(op.wantPnames)) + if !reflect.DeepEqual(pnames, op.wantPnames) { + t.Errorf("%s: pnames\ngot %q\nwant %q", desc, pnames, op.wantPnames) + } + continue + case "allprop": + propstats, err = allprop(ctx, fs, ls, op.name, op.pnames) + case "propfind": + propstats, err = props(ctx, fs, ls, op.name, op.pnames) + case "proppatch": + propstats, err = patch(ctx, fs, ls, op.name, op.patches) + default: + t.Fatalf("%s: %s not implemented", desc, op.op) + } + if err != nil { + t.Errorf("%s: got error %v, want nil", desc, err) + continue + } + // Compare return values from allprop, propfind or proppatch. 
+ for _, pst := range propstats { + sort.Sort(byPropname(pst.Props)) + } + for _, pst := range op.wantPropstats { + sort.Sort(byPropname(pst.Props)) + } + sort.Sort(byStatus(propstats)) + sort.Sort(byStatus(op.wantPropstats)) + if !reflect.DeepEqual(propstats, op.wantPropstats) { + t.Errorf("%s: propstat\ngot %q\nwant %q", desc, propstats, op.wantPropstats) + } + } + } +} + +func cmpXMLName(a, b xml.Name) bool { + if a.Space != b.Space { + return a.Space < b.Space + } + return a.Local < b.Local +} + +type byXMLName []xml.Name + +func (b byXMLName) Len() int { return len(b) } +func (b byXMLName) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byXMLName) Less(i, j int) bool { return cmpXMLName(b[i], b[j]) } + +type byPropname []Property + +func (b byPropname) Len() int { return len(b) } +func (b byPropname) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byPropname) Less(i, j int) bool { return cmpXMLName(b[i].XMLName, b[j].XMLName) } + +type byStatus []Propstat + +func (b byStatus) Len() int { return len(b) } +func (b byStatus) Swap(i, j int) { b[i], b[j] = b[j], b[i] } +func (b byStatus) Less(i, j int) bool { return b[i].Status < b[j].Status } + +type noDeadPropsFS struct { + FileSystem +} + +func (fs noDeadPropsFS) OpenFile(ctx context.Context, name string, flag int, perm os.FileMode) (File, error) { + f, err := fs.FileSystem.OpenFile(ctx, name, flag, perm) + if err != nil { + return nil, err + } + return noDeadPropsFile{f}, nil +} + +// noDeadPropsFile wraps a File but strips any optional DeadPropsHolder methods +// provided by the underlying File implementation. +type noDeadPropsFile struct { + f File +} + +func (f noDeadPropsFile) Close() error { return f.f.Close() } +func (f noDeadPropsFile) Read(p []byte) (int, error) { return f.f.Read(p) } +func (f noDeadPropsFile) Readdir(count int) ([]os.FileInfo, error) { return f.f.Readdir(count) } +func (f noDeadPropsFile) Seek(off int64, whence int) (int64, error) { return f.f.Seek(off, whence) } +func (f noDeadPropsFile) Stat() (os.FileInfo, error) { return f.f.Stat() } +func (f noDeadPropsFile) Write(p []byte) (int, error) { return f.f.Write(p) } + +type overrideContentType struct { + os.FileInfo + contentType string + err error +} + +func (o *overrideContentType) ContentType(ctx context.Context) (string, error) { + return o.contentType, o.err +} + +func TestFindContentTypeOverride(t *testing.T) { + fs, err := buildTestFS([]string{"touch /file"}) + if err != nil { + t.Fatalf("cannot create test filesystem: %v", err) + } + ctx := context.Background() + fi, err := fs.Stat(ctx, "/file") + if err != nil { + t.Fatalf("cannot Stat /file: %v", err) + } + + // Check non overridden case + originalContentType, err := findContentType(ctx, fs, nil, "/file", fi) + if err != nil { + t.Fatalf("findContentType /file failed: %v", err) + } + if originalContentType != "text/plain; charset=utf-8" { + t.Fatalf("ContentType wrong want %q got %q", "text/plain; charset=utf-8", originalContentType) + } + + // Now try overriding the ContentType + o := &overrideContentType{fi, "OverriddenContentType", nil} + ContentType, err := findContentType(ctx, fs, nil, "/file", o) + if err != nil { + t.Fatalf("findContentType /file failed: %v", err) + } + if ContentType != o.contentType { + t.Fatalf("ContentType wrong want %q got %q", o.contentType, ContentType) + } + + // Now return ErrNotImplemented and check we get the original content type + o = &overrideContentType{fi, "OverriddenContentType", ErrNotImplemented} + ContentType, err = findContentType(ctx, fs, nil, 
"/file", o) + if err != nil { + t.Fatalf("findContentType /file failed: %v", err) + } + if ContentType != originalContentType { + t.Fatalf("ContentType wrong want %q got %q", originalContentType, ContentType) + } +} + +type overrideETag struct { + os.FileInfo + eTag string + err error +} + +func (o *overrideETag) ETag(ctx context.Context) (string, error) { + return o.eTag, o.err +} + +func TestFindETagOverride(t *testing.T) { + fs, err := buildTestFS([]string{"touch /file"}) + if err != nil { + t.Fatalf("cannot create test filesystem: %v", err) + } + ctx := context.Background() + fi, err := fs.Stat(ctx, "/file") + if err != nil { + t.Fatalf("cannot Stat /file: %v", err) + } + + // Check non overridden case + originalETag, err := findETag(ctx, fs, nil, "/file", fi) + if err != nil { + t.Fatalf("findETag /file failed: %v", err) + } + matchETag := regexp.MustCompile(`^"-?[0-9a-f]{6,}"$`) + if !matchETag.MatchString(originalETag) { + t.Fatalf("ETag wrong, wanted something matching %v got %q", matchETag, originalETag) + } + + // Now try overriding the ETag + o := &overrideETag{fi, `"OverriddenETag"`, nil} + ETag, err := findETag(ctx, fs, nil, "/file", o) + if err != nil { + t.Fatalf("findETag /file failed: %v", err) + } + if ETag != o.eTag { + t.Fatalf("ETag wrong want %q got %q", o.eTag, ETag) + } + + // Now return ErrNotImplemented and check we get the original Etag + o = &overrideETag{fi, `"OverriddenETag"`, ErrNotImplemented} + ETag, err = findETag(ctx, fs, nil, "/file", o) + if err != nil { + t.Fatalf("findETag /file failed: %v", err) + } + if ETag != originalETag { + t.Fatalf("ETag wrong want %q got %q", originalETag, ETag) + } +} diff --git a/drives/davServer/webdav.go b/drives/davServer/webdav.go new file mode 100644 index 00000000..6d516831 --- /dev/null +++ b/drives/davServer/webdav.go @@ -0,0 +1,754 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package davServer provides a WebDAV server implementation. +package davServer + +import ( + "errors" + "fmt" + "github.com/sirupsen/logrus" + "io" + "net/http" + "net/url" + "os" + "path" + "path/filepath" + "strconv" + "strings" + "time" +) + +type Handler struct { + // Prefix is the URL path prefix to strip from WebDAV resource paths. + Prefix string + // FileSystem is the virtual file system. + FileSystem FileSystem + // LockSystem is the lock management system. + LockSystem LockSystem + // Logger is an optional error logger. If non-nil, it will be called + // for all HTTP requests. 
+ Logger func(*http.Request, error) +} + +func (h *Handler) stripPrefix(p string) (string, int, error) { + if h.Prefix == "" { + return p, http.StatusOK, nil + } + if r := strings.TrimPrefix(p, h.Prefix); len(r) < len(p) { + return r, http.StatusOK, nil + } + return p, http.StatusNotFound, errPrefixMismatch +} + +func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) { + status, err := http.StatusBadRequest, errUnsupportedMethod + if h.FileSystem == nil { + status, err = http.StatusInternalServerError, errNoFileSystem + } else if h.LockSystem == nil { + status, err = http.StatusInternalServerError, errNoLockSystem + } else { + switch r.Method { + case "OPTIONS": + status, err = h.handleOptions(w, r) + case "GET", "HEAD", "POST": + status, err = h.handleGetHeadPost(w, r) + case "DELETE": + status, err = h.handleDelete(w, r) + case "PUT": + status, err = h.handlePut(w, r) + case "MKCOL": + status, err = h.handleMkcol(w, r) + case "COPY", "MOVE": + status, err = h.handleCopyMove(w, r) + case "LOCK": + status, err = h.handleLock(w, r) + case "UNLOCK": + status, err = h.handleUnlock(w, r) + case "PROPFIND": + status, err = h.handlePropfind(w, r) + case "PROPPATCH": + status, err = h.handleProppatch(w, r) + } + } + + if status != 0 { + w.WriteHeader(status) + if status != http.StatusNoContent { + w.Write([]byte(StatusText(status))) + } + } + if h.Logger != nil { + h.Logger(r, err) + } +} + +func (h *Handler) lock(now time.Time, root string) (token string, status int, err error) { + token, err = h.LockSystem.Create(now, LockDetails{ + Root: root, + Duration: infiniteTimeout, + ZeroDepth: true, + }) + if err != nil { + if err == ErrLocked { + return "", StatusLocked, err + } + return "", http.StatusInternalServerError, err + } + return token, 0, nil +} + +func (h *Handler) confirmLocks(r *http.Request, src, dst string) (release func(), status int, err error) { + hdr := r.Header.Get("If") + if hdr == "" { + // An empty If header means that the client hasn't previously created locks. + // Even if this client doesn't care about locks, we still need to check that + // the resources aren't locked by another client, so we create temporary + // locks that would conflict with another client's locks. These temporary + // locks are unlocked at the end of the HTTP request. + now, srcToken, dstToken := time.Now(), "", "" + if src != "" { + srcToken, status, err = h.lock(now, src) + if err != nil { + return nil, status, err + } + } + if dst != "" { + dstToken, status, err = h.lock(now, dst) + if err != nil { + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + return nil, status, err + } + } + + return func() { + if dstToken != "" { + h.LockSystem.Unlock(now, dstToken) + } + if srcToken != "" { + h.LockSystem.Unlock(now, srcToken) + } + }, 0, nil + } + + ih, ok := parseIfHeader(hdr) + if !ok { + return nil, http.StatusBadRequest, errInvalidIfHeader + } + // ih is a disjunction (OR) of ifLists, so any ifList will do. + for _, l := range ih.lists { + lsrc := l.resourceTag + if lsrc == "" { + lsrc = src + } else { + u, err := url.Parse(lsrc) + if err != nil { + continue + } + if u.Host != r.Host { + continue + } + lsrc, status, err = h.stripPrefix(u.Path) + if err != nil { + return nil, status, err + } + } + release, err = h.LockSystem.Confirm(time.Now(), lsrc, dst, l.conditions...) 
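
Wiring the handler into a server follows the usual net/http pattern: provide a FileSystem, a LockSystem (NewMemLS above is the in-memory one), and optionally a Prefix and Logger. A minimal sketch, assuming it sits inside the davServer package and that fs is whatever FileSystem implementation the caller supplies; the mount path and port are placeholders.

func serveDAV(fs FileSystem) error {
	h := &Handler{
		Prefix:     "/dav",
		FileSystem: fs,
		LockSystem: NewMemLS(),
		Logger: func(r *http.Request, err error) {
			if err != nil {
				logrus.Errorf("%s %s: %v", r.Method, r.URL.Path, err)
			}
		},
	}
	mux := http.NewServeMux()
	mux.Handle("/dav/", h)
	return http.ListenAndServe(":8080", mux)
}
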
+ if err == ErrConfirmationFailed { + continue + } + if err != nil { + return nil, http.StatusInternalServerError, err + } + return release, 0, nil + } + // Section 10.4.1 says that "If this header is evaluated and all state lists + // fail, then the request must fail with a 412 (Precondition Failed) status." + // We follow the spec even though the cond_put_corrupt_token test case from + // the litmus test warns on seeing a 412 instead of a 423 (Locked). + return nil, http.StatusPreconditionFailed, ErrLocked +} + +func (h *Handler) handleOptions(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := r.Context() + allow := "OPTIONS, LOCK, PUT, MKCOL" + if fi, err := h.FileSystem.Stat(ctx, reqPath); err == nil { + if fi.IsDir() { + allow = "OPTIONS, LOCK, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND" + } else { + allow = "OPTIONS, LOCK, GET, HEAD, POST, DELETE, PROPPATCH, COPY, MOVE, UNLOCK, PROPFIND, PUT" + } + } + w.Header().Set("Allow", allow) + // http://www.webdav.org/specs/rfc4918.html#dav.compliance.classes + w.Header().Set("DAV", "1, 2") + // http://msdn.microsoft.com/en-au/library/cc250217.aspx + w.Header().Set("MS-Author-Via", "DAV") + return 0, nil +} + +func (h *Handler) handleGetHeadPost(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + // TODO: check locks for read-only access?? + ctx := r.Context() + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDONLY, 0) + if err != nil { + return http.StatusNotFound, err + } + defer f.Close() + fi, err := f.Stat() + if err != nil { + return http.StatusNotFound, err + } + if fi.IsDir() { + return http.StatusMethodNotAllowed, nil + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + // Let ServeContent determine the Content-Type header. + http.ServeContent(w, r, reqPath, fi.ModTime(), f) + return 0, nil +} + +func (h *Handler) handleDelete(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := r.Context() + + // TODO: return MultiStatus where appropriate. + + // "godoc os RemoveAll" says that "If the path does not exist, RemoveAll + // returns nil (no error)." WebDAV semantics are that it should return a + // "404 Not Found". We therefore have to Stat before we RemoveAll. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + if err := h.FileSystem.RemoveAll(ctx, reqPath); err != nil { + return http.StatusMethodNotAllowed, err + } + return http.StatusNoContent, nil +} + +func (h *Handler) handlePut(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + // TODO(rost): Support the If-Match, If-None-Match headers? See bradfitz' + // comments in http.checkEtag. 
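
From the client side, state-changing requests on a locked resource have to present the lock token in an If header (a Coded-URL in angle brackets, as produced by handleLock later in this file) so that confirmLocks can match it against the LockSystem. A rough net/http sketch; the URL is a placeholder and lockToken would normally come from a prior LOCK response's Lock-Token header.

func deleteWithLockToken(url, lockToken string) error {
	req, err := http.NewRequest("DELETE", url, nil)
	if err != nil {
		return err
	}
	// No-tag-list form of the If header: the condition applies to the request URL.
	req.Header.Set("If", "(<"+lockToken+">)")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusNoContent {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}
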
+ ctx := r.Context() + + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + return http.StatusNotFound, err + } + _, copyErr := io.Copy(f, r.Body) + fi, statErr := f.Stat() + + modTimes := r.Header["Zrok-Modtime"] + if len(modTimes) > 0 { + if modTimeV, err := strconv.ParseInt(modTimes[0], 10, 64); err == nil { + if v, ok := f.(*webdavFile); ok { + if err := v.updateModtime(reqPath, time.Unix(modTimeV, 0)); err != nil { + logrus.Warn(err) + } + } else { + logrus.Error("!ok") + } + } else { + logrus.Error(err) + } + } + + closeErr := f.Close() + // TODO(rost): Returning 405 Method Not Allowed might not be appropriate. + if copyErr != nil { + return http.StatusMethodNotAllowed, copyErr + } + if statErr != nil { + return http.StatusMethodNotAllowed, statErr + } + if closeErr != nil { + return http.StatusMethodNotAllowed, closeErr + } + etag, err := findETag(ctx, h.FileSystem, h.LockSystem, reqPath, fi) + if err != nil { + return http.StatusInternalServerError, err + } + w.Header().Set("ETag", etag) + return http.StatusCreated, nil +} + +func (h *Handler) handleMkcol(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := r.Context() + + if r.ContentLength > 0 { + return http.StatusUnsupportedMediaType, nil + } + if err := h.FileSystem.Mkdir(ctx, reqPath, 0777); err != nil { + if os.IsNotExist(err) { + return http.StatusConflict, err + } + return http.StatusMethodNotAllowed, err + } + return http.StatusCreated, nil +} + +func (h *Handler) handleCopyMove(w http.ResponseWriter, r *http.Request) (status int, err error) { + hdr := r.Header.Get("Destination") + if hdr == "" { + return http.StatusBadRequest, errInvalidDestination + } + u, err := url.Parse(hdr) + if err != nil { + return http.StatusBadRequest, errInvalidDestination + } + if u.Host != "" && u.Host != r.Host { + return http.StatusBadGateway, errInvalidDestination + } + + src, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + + dst, status, err := h.stripPrefix(u.Path) + if err != nil { + return status, err + } + + if dst == "" { + return http.StatusBadGateway, errInvalidDestination + } + if dst == src { + return http.StatusForbidden, errDestinationEqualsSource + } + + ctx := r.Context() + + if r.Method == "COPY" { + // Section 7.5.1 says that a COPY only needs to lock the destination, + // not both destination and source. Strictly speaking, this is racy, + // even though a COPY doesn't modify the source, if a concurrent + // operation modifies the source. However, the litmus test explicitly + // checks that COPYing a locked-by-another source is OK. + release, status, err := h.confirmLocks(r, "", dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.8.3 says that "The COPY method on a collection without a Depth + // header must act as if a Depth header with value "infinity" was included". + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.8.3 says that "A client may submit a Depth header on a + // COPY on a collection with a value of "0" or "infinity"." 
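+ // (So, for example, "Depth: 1" on a COPY of a collection is rejected
+ // here with 400 Bad Request.)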
+ return http.StatusBadRequest, errInvalidDepth + } + } + return copyFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") != "F", depth, 0) + } + + release, status, err := h.confirmLocks(r, src, dst) + if err != nil { + return status, err + } + defer release() + + // Section 9.9.2 says that "The MOVE method on a collection must act as if + // a "Depth: infinity" header was used on it. A client must not submit a + // Depth header on a MOVE on a collection with any value but "infinity"." + if hdr := r.Header.Get("Depth"); hdr != "" { + if parseDepth(hdr) != infiniteDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + return moveFiles(ctx, h.FileSystem, src, dst, r.Header.Get("Overwrite") == "T") +} + +func (h *Handler) handleLock(w http.ResponseWriter, r *http.Request) (retStatus int, retErr error) { + duration, err := parseTimeout(r.Header.Get("Timeout")) + if err != nil { + return http.StatusBadRequest, err + } + li, status, err := readLockInfo(r.Body) + if err != nil { + return status, err + } + + ctx := r.Context() + token, ld, now, created := "", LockDetails{}, time.Now(), false + if li == (lockInfo{}) { + // An empty lockInfo means to refresh the lock. + ih, ok := parseIfHeader(r.Header.Get("If")) + if !ok { + return http.StatusBadRequest, errInvalidIfHeader + } + if len(ih.lists) == 1 && len(ih.lists[0].conditions) == 1 { + token = ih.lists[0].conditions[0].Token + } + if token == "" { + return http.StatusBadRequest, errInvalidLockToken + } + ld, err = h.LockSystem.Refresh(now, token, duration) + if err != nil { + if err == ErrNoSuchLock { + return http.StatusPreconditionFailed, err + } + return http.StatusInternalServerError, err + } + + } else { + // Section 9.10.3 says that "If no Depth header is submitted on a LOCK request, + // then the request MUST act as if a "Depth:infinity" had been submitted." + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth != 0 && depth != infiniteDepth { + // Section 9.10.3 says that "Values other than 0 or infinity must not be + // used with the Depth header on a LOCK method". + return http.StatusBadRequest, errInvalidDepth + } + } + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ld = LockDetails{ + Root: reqPath, + Duration: duration, + OwnerXML: li.Owner.InnerXML, + ZeroDepth: depth == 0, + } + token, err = h.LockSystem.Create(now, ld) + if err != nil { + if err == ErrLocked { + return StatusLocked, err + } + return http.StatusInternalServerError, err + } + defer func() { + if retErr != nil { + h.LockSystem.Unlock(now, token) + } + }() + + // Create the resource if it didn't previously exist. + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + f, err := h.FileSystem.OpenFile(ctx, reqPath, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0666) + if err != nil { + // TODO: detect missing intermediate dirs and return http.StatusConflict? + return http.StatusInternalServerError, err + } + f.Close() + created = true + } + + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We add angle brackets. + w.Header().Set("Lock-Token", "<"+token+">") + } + + w.Header().Set("Content-Type", "application/xml; charset=utf-8") + if created { + // This is "w.WriteHeader(http.StatusCreated)" and not "return + // http.StatusCreated, nil" because we write our own (XML) response to w + // and Handler.ServeHTTP would otherwise write "Created". 
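+ // (ServeHTTP only writes a status-text body for a non-zero returned
+ // status, so returning 0 below keeps the XML body written here intact.)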
+ w.WriteHeader(http.StatusCreated) + } + writeLockInfo(w, token, ld) + return 0, nil +} + +func (h *Handler) handleUnlock(w http.ResponseWriter, r *http.Request) (status int, err error) { + // http://www.webdav.org/specs/rfc4918.html#HEADER_Lock-Token says that the + // Lock-Token value is a Coded-URL. We strip its angle brackets. + t := r.Header.Get("Lock-Token") + if len(t) < 2 || t[0] != '<' || t[len(t)-1] != '>' { + return http.StatusBadRequest, errInvalidLockToken + } + t = t[1 : len(t)-1] + + switch err = h.LockSystem.Unlock(time.Now(), t); err { + case nil: + return http.StatusNoContent, err + case ErrForbidden: + return http.StatusForbidden, err + case ErrLocked: + return StatusLocked, err + case ErrNoSuchLock: + return http.StatusConflict, err + default: + return http.StatusInternalServerError, err + } +} + +func (h *Handler) handlePropfind(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + ctx := r.Context() + fi, err := h.FileSystem.Stat(ctx, reqPath) + if err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + depth := infiniteDepth + if hdr := r.Header.Get("Depth"); hdr != "" { + depth = parseDepth(hdr) + if depth == invalidDepth { + return http.StatusBadRequest, errInvalidDepth + } + } + pf, status, err := readPropfind(r.Body) + if err != nil { + return status, err + } + + mw := multistatusWriter{w: w} + + walkFn := func(reqPath string, info os.FileInfo, err error) error { + if err != nil { + return handlePropfindError(err, info) + } + + var pstats []Propstat + if pf.Propname != nil { + pnames, err := propnames(ctx, h.FileSystem, h.LockSystem, reqPath) + if err != nil { + return handlePropfindError(err, info) + } + pstat := Propstat{Status: http.StatusOK} + for _, xmlname := range pnames { + pstat.Props = append(pstat.Props, Property{XMLName: xmlname}) + } + pstats = append(pstats, pstat) + } else if pf.Allprop != nil { + pstats, err = allprop(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } else { + pstats, err = props(ctx, h.FileSystem, h.LockSystem, reqPath, pf.Prop) + } + if err != nil { + return handlePropfindError(err, info) + } + href := path.Join(h.Prefix, reqPath) + if href != "/" && info.IsDir() { + href += "/" + } + return mw.write(makePropstatResponse(href, pstats)) + } + + walkErr := walkFS(ctx, h.FileSystem, depth, reqPath, fi, walkFn) + closeErr := mw.close() + if walkErr != nil { + return http.StatusInternalServerError, walkErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func (h *Handler) handleProppatch(w http.ResponseWriter, r *http.Request) (status int, err error) { + reqPath, status, err := h.stripPrefix(r.URL.Path) + if err != nil { + return status, err + } + release, status, err := h.confirmLocks(r, reqPath, "") + if err != nil { + return status, err + } + defer release() + + ctx := r.Context() + + if _, err := h.FileSystem.Stat(ctx, reqPath); err != nil { + if os.IsNotExist(err) { + return http.StatusNotFound, err + } + return http.StatusMethodNotAllowed, err + } + patches, status, err := readProppatch(r.Body) + if err != nil { + return status, err + } + pstats, err := patch(ctx, h.FileSystem, h.LockSystem, reqPath, patches) + if err != nil { + return http.StatusInternalServerError, err + } + mw := multistatusWriter{w: w} + writeErr := mw.write(makePropstatResponse(r.URL.Path, pstats)) + closeErr := mw.close() 
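+ // mw.close is called even if the write failed; the write error, if any,
+ // takes precedence in the checks below.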
+ if writeErr != nil { + return http.StatusInternalServerError, writeErr + } + if closeErr != nil { + return http.StatusInternalServerError, closeErr + } + return 0, nil +} + +func makePropstatResponse(href string, pstats []Propstat) *response { + resp := response{ + Href: []string{(&url.URL{Path: href}).EscapedPath()}, + Propstat: make([]propstat, 0, len(pstats)), + } + for _, p := range pstats { + var xmlErr *xmlError + if p.XMLError != "" { + xmlErr = &xmlError{InnerXML: []byte(p.XMLError)} + } + resp.Propstat = append(resp.Propstat, propstat{ + Status: fmt.Sprintf("HTTP/1.1 %d %s", p.Status, StatusText(p.Status)), + Prop: p.Props, + ResponseDescription: p.ResponseDescription, + Error: xmlErr, + }) + } + return &resp +} + +func handlePropfindError(err error, info os.FileInfo) error { + var skipResp error = nil + if info != nil && info.IsDir() { + skipResp = filepath.SkipDir + } + + if errors.Is(err, os.ErrPermission) { + // If the server cannot recurse into a directory because it is not allowed, + // then there is nothing more to say about it. Just skip sending anything. + return skipResp + } + + if _, ok := err.(*os.PathError); ok { + // If the file is just bad, it couldn't be a proper WebDAV resource. Skip it. + return skipResp + } + + // We need to be careful with other errors: there is no way to abort the xml stream + // part way through while returning a valid PROPFIND response. Returning only half + // the data would be misleading, but so would be returning results tainted by errors. + // The current behaviour by returning an error here leads to the stream being aborted, + // and the parent http server complaining about writing a spurious header. We should + // consider further enhancing this error handling to more gracefully fail, or perhaps + // buffer the entire response until we've walked the tree. + return err +} + +const ( + infiniteDepth = -1 + invalidDepth = -2 +) + +// parseDepth maps the strings "0", "1" and "infinity" to 0, 1 and +// infiniteDepth. Parsing any other string returns invalidDepth. +// +// Different WebDAV methods have further constraints on valid depths: +// - PROPFIND has no further restrictions, as per section 9.1. +// - COPY accepts only "0" or "infinity", as per section 9.8.3. +// - MOVE accepts only "infinity", as per section 9.9.2. +// - LOCK accepts only "0" or "infinity", as per section 9.10.3. +// +// These constraints are enforced by the handleXxx methods. 
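+//
+// For example (illustrative): parseDepth("0") returns 0, parseDepth("infinity")
+// returns infiniteDepth, and parseDepth("2") returns invalidDepth.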
+func parseDepth(s string) int { + switch s { + case "0": + return 0 + case "1": + return 1 + case "infinity": + return infiniteDepth + } + return invalidDepth +} + +// http://www.webdav.org/specs/rfc4918.html#status.code.extensions.to.http11 +const ( + StatusMulti = 207 + StatusUnprocessableEntity = 422 + StatusLocked = 423 + StatusFailedDependency = 424 + StatusInsufficientStorage = 507 +) + +func StatusText(code int) string { + switch code { + case StatusMulti: + return "Multi-Status" + case StatusUnprocessableEntity: + return "Unprocessable Entity" + case StatusLocked: + return "Locked" + case StatusFailedDependency: + return "Failed Dependency" + case StatusInsufficientStorage: + return "Insufficient Storage" + } + return http.StatusText(code) +} + +var ( + errDestinationEqualsSource = errors.New("webdav: destination equals source") + errDirectoryNotEmpty = errors.New("webdav: directory not empty") + errInvalidDepth = errors.New("webdav: invalid depth") + errInvalidDestination = errors.New("webdav: invalid destination") + errInvalidIfHeader = errors.New("webdav: invalid If header") + errInvalidLockInfo = errors.New("webdav: invalid lock info") + errInvalidLockToken = errors.New("webdav: invalid lock token") + errInvalidPropfind = errors.New("webdav: invalid propfind") + errInvalidProppatch = errors.New("webdav: invalid proppatch") + errInvalidResponse = errors.New("webdav: invalid response") + errInvalidTimeout = errors.New("webdav: invalid timeout") + errNoFileSystem = errors.New("webdav: no file system") + errNoLockSystem = errors.New("webdav: no lock system") + errNotADirectory = errors.New("webdav: not a directory") + errPrefixMismatch = errors.New("webdav: prefix mismatch") + errRecursionTooDeep = errors.New("webdav: recursion too deep") + errUnsupportedLockInfo = errors.New("webdav: unsupported lock info") + errUnsupportedMethod = errors.New("webdav: unsupported method") +) diff --git a/drives/davServer/webdav_test.go b/drives/davServer/webdav_test.go new file mode 100644 index 00000000..996eb797 --- /dev/null +++ b/drives/davServer/webdav_test.go @@ -0,0 +1,349 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "context" + "errors" + "fmt" + "io" + "io/ioutil" + "net/http" + "net/http/httptest" + "net/url" + "os" + "reflect" + "regexp" + "sort" + "strings" + "testing" +) + +// TODO: add tests to check XML responses with the expected prefix path +func TestPrefix(t *testing.T) { + const dst, blah = "Destination", "blah blah blah" + + // createLockBody comes from the example in Section 9.10.7. 
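+ // (It requests an exclusive write lock whose <D:owner> is the
+ // http://example.org/~ejw/contact.html href.)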
+ const createLockBody = ` + + + + + http://example.org/~ejw/contact.html + + + ` + + do := func(method, urlStr string, body string, wantStatusCode int, headers ...string) (http.Header, error) { + var bodyReader io.Reader + if body != "" { + bodyReader = strings.NewReader(body) + } + req, err := http.NewRequest(method, urlStr, bodyReader) + if err != nil { + return nil, err + } + for len(headers) >= 2 { + req.Header.Add(headers[0], headers[1]) + headers = headers[2:] + } + res, err := http.DefaultTransport.RoundTrip(req) + if err != nil { + return nil, err + } + defer res.Body.Close() + if res.StatusCode != wantStatusCode { + return nil, fmt.Errorf("got status code %d, want %d", res.StatusCode, wantStatusCode) + } + return res.Header, nil + } + + prefixes := []string{ + "/", + "/a/", + "/a/b/", + "/a/b/c/", + } + ctx := context.Background() + for _, prefix := range prefixes { + fs := NewMemFS() + h := &Handler{ + FileSystem: fs, + LockSystem: NewMemLS(), + } + mux := http.NewServeMux() + if prefix != "/" { + h.Prefix = prefix + } + mux.Handle(prefix, h) + srv := httptest.NewServer(mux) + defer srv.Close() + + // The script is: + // MKCOL /a + // MKCOL /a/b + // PUT /a/b/c + // COPY /a/b/c /a/b/d + // MKCOL /a/b/e + // MOVE /a/b/d /a/b/e/f + // LOCK /a/b/e/g + // PUT /a/b/e/g + // which should yield the (possibly stripped) filenames /a/b/c, + // /a/b/e/f and /a/b/e/g, plus their parent directories. + + wantA := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusMovedPermanently, + "/a/b/": http.StatusNotFound, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a", "", wantA); err != nil { + t.Errorf("prefix=%-9q MKCOL /a: %v", prefix, err) + continue + } + + wantB := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusMovedPermanently, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b", "", wantB); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b: %v", prefix, err) + continue + } + + wantC := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/c", blah, wantC); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/c: %v", prefix, err) + continue + } + + wantD := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusMovedPermanently, + }[prefix] + if _, err := do("COPY", srv.URL+"/a/b/c", "", wantD, dst, srv.URL+"/a/b/d"); err != nil { + t.Errorf("prefix=%-9q COPY /a/b/c /a/b/d: %v", prefix, err) + continue + } + + wantE := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MKCOL", srv.URL+"/a/b/e", "", wantE); err != nil { + t.Errorf("prefix=%-9q MKCOL /a/b/e: %v", prefix, err) + continue + } + + wantF := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("MOVE", srv.URL+"/a/b/d", "", wantF, dst, srv.URL+"/a/b/e/f"); err != nil { + t.Errorf("prefix=%-9q MOVE /a/b/d /a/b/e/f: %v", prefix, err) + continue + } + + var lockToken string + wantG := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if h, err := do("LOCK", srv.URL+"/a/b/e/g", 
createLockBody, wantG); err != nil { + t.Errorf("prefix=%-9q LOCK /a/b/e/g: %v", prefix, err) + continue + } else { + lockToken = h.Get("Lock-Token") + } + + ifHeader := fmt.Sprintf("<%s/a/b/e/g> (%s)", srv.URL, lockToken) + wantH := map[string]int{ + "/": http.StatusCreated, + "/a/": http.StatusCreated, + "/a/b/": http.StatusCreated, + "/a/b/c/": http.StatusNotFound, + }[prefix] + if _, err := do("PUT", srv.URL+"/a/b/e/g", blah, wantH, "If", ifHeader); err != nil { + t.Errorf("prefix=%-9q PUT /a/b/e/g: %v", prefix, err) + continue + } + + got, err := find(ctx, nil, fs, "/") + if err != nil { + t.Errorf("prefix=%-9q find: %v", prefix, err) + continue + } + sort.Strings(got) + want := map[string][]string{ + "/": {"/", "/a", "/a/b", "/a/b/c", "/a/b/e", "/a/b/e/f", "/a/b/e/g"}, + "/a/": {"/", "/b", "/b/c", "/b/e", "/b/e/f", "/b/e/g"}, + "/a/b/": {"/", "/c", "/e", "/e/f", "/e/g"}, + "/a/b/c/": {"/"}, + }[prefix] + if !reflect.DeepEqual(got, want) { + t.Errorf("prefix=%-9q find:\ngot %v\nwant %v", prefix, got, want) + continue + } + } +} + +func TestEscapeXML(t *testing.T) { + // These test cases aren't exhaustive, and there is more than one way to + // escape e.g. a quot (as """ or """) or an apos. We presume that + // the encoding/xml package tests xml.EscapeText more thoroughly. This test + // here is just a sanity check for this package's escapeXML function, and + // its attempt to provide a fast path (and avoid a bytes.Buffer allocation) + // when escaping filenames is obviously a no-op. + testCases := map[string]string{ + "": "", + " ": " ", + "&": "&", + "*": "*", + "+": "+", + ",": ",", + "-": "-", + ".": ".", + "/": "/", + "0": "0", + "9": "9", + ":": ":", + "<": "<", + ">": ">", + "A": "A", + "_": "_", + "a": "a", + "~": "~", + "\u0201": "\u0201", + "&": "&amp;", + "foo&baz": "foo&<b/ar>baz", + } + + for in, want := range testCases { + if got := escapeXML(in); got != want { + t.Errorf("in=%q: got %q, want %q", in, got, want) + } + } +} + +func TestFilenameEscape(t *testing.T) { + hrefRe := regexp.MustCompile(`([^<]*)`) + displayNameRe := regexp.MustCompile(`([^<]*)`) + do := func(method, urlStr string) (string, string, error) { + req, err := http.NewRequest(method, urlStr, nil) + if err != nil { + return "", "", err + } + res, err := http.DefaultClient.Do(req) + if err != nil { + return "", "", err + } + defer res.Body.Close() + + b, err := ioutil.ReadAll(res.Body) + if err != nil { + return "", "", err + } + hrefMatch := hrefRe.FindStringSubmatch(string(b)) + if len(hrefMatch) != 2 { + return "", "", errors.New("D:href not found") + } + displayNameMatch := displayNameRe.FindStringSubmatch(string(b)) + if len(displayNameMatch) != 2 { + return "", "", errors.New("D:displayname not found") + } + + return hrefMatch[1], displayNameMatch[1], nil + } + + testCases := []struct { + name, wantHref, wantDisplayName string + }{{ + name: `/foo%bar`, + wantHref: `/foo%25bar`, + wantDisplayName: `foo%bar`, + }, { + name: `/こんにちわ世界`, + wantHref: `/%E3%81%93%E3%82%93%E3%81%AB%E3%81%A1%E3%82%8F%E4%B8%96%E7%95%8C`, + wantDisplayName: `こんにちわ世界`, + }, { + name: `/Program Files/`, + wantHref: `/Program%20Files/`, + wantDisplayName: `Program Files`, + }, { + name: `/go+lang`, + wantHref: `/go+lang`, + wantDisplayName: `go+lang`, + }, { + name: `/go&lang`, + wantHref: `/go&lang`, + wantDisplayName: `go&lang`, + }, { + name: `/goexclusive"` + Shared *struct{} `xml:"lockscope>shared"` + Write *struct{} `xml:"locktype>write"` + Owner owner `xml:"owner"` +} + +// 
http://www.webdav.org/specs/rfc4918.html#ELEMENT_owner +type owner struct { + InnerXML string `xml:",innerxml"` +} + +func readLockInfo(r io.Reader) (li lockInfo, status int, err error) { + c := &countingReader{r: r} + if err = ixml.NewDecoder(c).Decode(&li); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to refresh the lock. + // http://www.webdav.org/specs/rfc4918.html#refreshing-locks + return lockInfo{}, 0, nil + } + err = errInvalidLockInfo + } + return lockInfo{}, http.StatusBadRequest, err + } + // We only support exclusive (non-shared) write locks. In practice, these are + // the only types of locks that seem to matter. + if li.Exclusive == nil || li.Shared != nil || li.Write == nil { + return lockInfo{}, http.StatusNotImplemented, errUnsupportedLockInfo + } + return li, 0, nil +} + +type countingReader struct { + n int + r io.Reader +} + +func (c *countingReader) Read(p []byte) (int, error) { + n, err := c.r.Read(p) + c.n += n + return n, err +} + +func writeLockInfo(w io.Writer, token string, ld LockDetails) (int, error) { + depth := "infinity" + if ld.ZeroDepth { + depth = "0" + } + timeout := ld.Duration / time.Second + return fmt.Fprintf(w, "\n"+ + "\n"+ + " \n"+ + " \n"+ + " %s\n"+ + " %s\n"+ + " Second-%d\n"+ + " %s\n"+ + " %s\n"+ + "", + depth, ld.OwnerXML, timeout, escape(token), escape(ld.Root), + ) +} + +func escape(s string) string { + for i := 0; i < len(s); i++ { + switch s[i] { + case '"', '&', '\'', '<', '>': + b := bytes.NewBuffer(nil) + ixml.EscapeText(b, []byte(s)) + return b.String() + } + } + return s +} + +// next returns the next token, if any, in the XML stream of d. +// RFC 4918 requires to ignore comments, processing instructions +// and directives. +// http://www.webdav.org/specs/rfc4918.html#property_values +// http://www.webdav.org/specs/rfc4918.html#xml-extensibility +func next(d *ixml.Decoder) (ixml.Token, error) { + for { + t, err := d.Token() + if err != nil { + return t, err + } + switch t.(type) { + case ixml.Comment, ixml.Directive, ixml.ProcInst: + continue + default: + return t, nil + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for propfind) +type propfindProps []xml.Name + +// UnmarshalXML appends the property names enclosed within start to pn. +// +// It returns an error if start does not contain any properties or if +// properties contain values. Character data between properties is ignored. +func (pn *propfindProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + for { + t, err := next(d) + if err != nil { + return err + } + switch t.(type) { + case ixml.EndElement: + if len(*pn) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + name := t.(ixml.StartElement).Name + t, err = next(d) + if err != nil { + return err + } + if _, ok := t.(ixml.EndElement); !ok { + return fmt.Errorf("unexpected token %T", t) + } + *pn = append(*pn, xml.Name(name)) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propfind +type propfind struct { + XMLName ixml.Name `xml:"DAV: propfind"` + Allprop *struct{} `xml:"DAV: allprop"` + Propname *struct{} `xml:"DAV: propname"` + Prop propfindProps `xml:"DAV: prop"` + Include propfindProps `xml:"DAV: include"` +} + +func readPropfind(r io.Reader) (pf propfind, status int, err error) { + c := countingReader{r: r} + if err = ixml.NewDecoder(&c).Decode(&pf); err != nil { + if err == io.EOF { + if c.n == 0 { + // An empty body means to propfind allprop. 
+ // http://www.webdav.org/specs/rfc4918.html#METHOD_PROPFIND + return propfind{Allprop: new(struct{})}, 0, nil + } + err = errInvalidPropfind + } + return propfind{}, http.StatusBadRequest, err + } + + if pf.Allprop == nil && pf.Include != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Allprop != nil && (pf.Prop != nil || pf.Propname != nil) { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Prop != nil && pf.Propname != nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + if pf.Propname == nil && pf.Allprop == nil && pf.Prop == nil { + return propfind{}, http.StatusBadRequest, errInvalidPropfind + } + return pf, 0, nil +} + +// Property represents a single DAV resource property as defined in RFC 4918. +// See http://www.webdav.org/specs/rfc4918.html#data.model.for.resource.properties +type Property struct { + // XMLName is the fully qualified name that identifies this property. + XMLName xml.Name + + // Lang is an optional xml:lang attribute. + Lang string `xml:"xml:lang,attr,omitempty"` + + // InnerXML contains the XML representation of the property value. + // See http://www.webdav.org/specs/rfc4918.html#property_values + // + // Property values of complex type or mixed-content must have fully + // expanded XML namespaces or be self-contained with according + // XML namespace declarations. They must not rely on any XML + // namespace declarations within the scope of the XML document, + // even including the DAV: namespace. + InnerXML []byte `xml:",innerxml"` +} + +// ixmlProperty is the same as the Property type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlProperty struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_error +// See multistatusWriter for the "D:" namespace prefix. +type xmlError struct { + XMLName ixml.Name `xml:"D:error"` + InnerXML []byte `xml:",innerxml"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propstat +// See multistatusWriter for the "D:" namespace prefix. +type propstat struct { + Prop []Property `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// ixmlPropstat is the same as the propstat type except it holds an ixml.Name +// instead of an xml.Name. +type ixmlPropstat struct { + Prop []ixmlProperty `xml:"D:prop>_ignored_"` + Status string `xml:"D:status"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MarshalXML prepends the "D:" namespace prefix on properties in the DAV: namespace +// before encoding. See multistatusWriter. +func (ps propstat) MarshalXML(e *ixml.Encoder, start ixml.StartElement) error { + // Convert from a propstat to an ixmlPropstat. + ixmlPs := ixmlPropstat{ + Prop: make([]ixmlProperty, len(ps.Prop)), + Status: ps.Status, + Error: ps.Error, + ResponseDescription: ps.ResponseDescription, + } + for k, prop := range ps.Prop { + ixmlPs.Prop[k] = ixmlProperty{ + XMLName: ixml.Name(prop.XMLName), + Lang: prop.Lang, + InnerXML: prop.InnerXML, + } + } + + for k, prop := range ixmlPs.Prop { + if prop.XMLName.Space == "DAV:" { + prop.XMLName = ixml.Name{Space: "", Local: "D:" + prop.XMLName.Local} + ixmlPs.Prop[k] = prop + } + } + // Distinct type to avoid infinite recursion of MarshalXML. 
+ type newpropstat ixmlPropstat + return e.EncodeElement(newpropstat(ixmlPs), start) +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_response +// See multistatusWriter for the "D:" namespace prefix. +type response struct { + XMLName ixml.Name `xml:"D:response"` + Href []string `xml:"D:href"` + Propstat []propstat `xml:"D:propstat"` + Status string `xml:"D:status,omitempty"` + Error *xmlError `xml:"D:error"` + ResponseDescription string `xml:"D:responsedescription,omitempty"` +} + +// MultistatusWriter marshals one or more Responses into a XML +// multistatus response. +// See http://www.webdav.org/specs/rfc4918.html#ELEMENT_multistatus +// TODO(rsto, mpl): As a workaround, the "D:" namespace prefix, defined as +// "DAV:" on this element, is prepended on the nested response, as well as on all +// its nested elements. All property names in the DAV: namespace are prefixed as +// well. This is because some versions of Mini-Redirector (on windows 7) ignore +// elements with a default namespace (no prefixed namespace). A less intrusive fix +// should be possible after golang.org/cl/11074. See https://golang.org/issue/11177 +type multistatusWriter struct { + // ResponseDescription contains the optional responsedescription + // of the multistatus XML element. Only the latest content before + // close will be emitted. Empty response descriptions are not + // written. + responseDescription string + + w http.ResponseWriter + enc *ixml.Encoder +} + +// Write validates and emits a DAV response as part of a multistatus response +// element. +// +// It sets the HTTP status code of its underlying http.ResponseWriter to 207 +// (Multi-Status) and populates the Content-Type header. If r is the +// first, valid response to be written, Write prepends the XML representation +// of r with a multistatus tag. Callers must call close after the last response +// has been written. +func (w *multistatusWriter) write(r *response) error { + switch len(r.Href) { + case 0: + return errInvalidResponse + case 1: + if len(r.Propstat) > 0 != (r.Status == "") { + return errInvalidResponse + } + default: + if len(r.Propstat) > 0 || r.Status == "" { + return errInvalidResponse + } + } + err := w.writeHeader() + if err != nil { + return err + } + return w.enc.Encode(r) +} + +// writeHeader writes a XML multistatus start element on w's underlying +// http.ResponseWriter and returns the result of the write operation. +// After the first write attempt, writeHeader becomes a no-op. +func (w *multistatusWriter) writeHeader() error { + if w.enc != nil { + return nil + } + w.w.Header().Add("Content-Type", "text/xml; charset=utf-8") + w.w.WriteHeader(StatusMulti) + _, err := fmt.Fprintf(w.w, ``) + if err != nil { + return err + } + w.enc = ixml.NewEncoder(w.w) + return w.enc.EncodeToken(ixml.StartElement{ + Name: ixml.Name{ + Space: "DAV:", + Local: "multistatus", + }, + Attr: []ixml.Attr{{ + Name: ixml.Name{Space: "xmlns", Local: "D"}, + Value: "DAV:", + }}, + }) +} + +// Close completes the marshalling of the multistatus response. It returns +// an error if the multistatus response could not be completed. If both the +// return value and field enc of w are nil, then no multistatus response has +// been written. 
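+//
+// Typical use in this package (illustrative): build a multistatusWriter{w: rw},
+// call write once per resource, then call close once to emit the closing
+// multistatus end element and flush the encoder.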
+func (w *multistatusWriter) close() error { + if w.enc == nil { + return nil + } + var end []ixml.Token + if w.responseDescription != "" { + name := ixml.Name{Space: "DAV:", Local: "responsedescription"} + end = append(end, + ixml.StartElement{Name: name}, + ixml.CharData(w.responseDescription), + ixml.EndElement{Name: name}, + ) + } + end = append(end, ixml.EndElement{ + Name: ixml.Name{Space: "DAV:", Local: "multistatus"}, + }) + for _, t := range end { + err := w.enc.EncodeToken(t) + if err != nil { + return err + } + } + return w.enc.Flush() +} + +var xmlLangName = ixml.Name{Space: "http://www.w3.org/XML/1998/namespace", Local: "lang"} + +func xmlLang(s ixml.StartElement, d string) string { + for _, attr := range s.Attr { + if attr.Name == xmlLangName { + return attr.Value + } + } + return d +} + +type xmlValue []byte + +func (v *xmlValue) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + // The XML value of a property can be arbitrary, mixed-content XML. + // To make sure that the unmarshalled value contains all required + // namespaces, we encode all the property value XML tokens into a + // buffer. This forces the encoder to redeclare any used namespaces. + var b bytes.Buffer + e := ixml.NewEncoder(&b) + for { + t, err := next(d) + if err != nil { + return err + } + if e, ok := t.(ixml.EndElement); ok && e.Name == start.Name { + break + } + if err = e.EncodeToken(t); err != nil { + return err + } + } + err := e.Flush() + if err != nil { + return err + } + *v = b.Bytes() + return nil +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_prop (for proppatch) +type proppatchProps []Property + +// UnmarshalXML appends the property names and values enclosed within start +// to ps. +// +// An xml:lang attribute that is defined either on the DAV:prop or property +// name XML element is propagated to the property's Lang field. +// +// UnmarshalXML returns an error if start does not contain any properties or if +// property values contain syntactically incorrect XML. +func (ps *proppatchProps) UnmarshalXML(d *ixml.Decoder, start ixml.StartElement) error { + lang := xmlLang(start, "") + for { + t, err := next(d) + if err != nil { + return err + } + switch elem := t.(type) { + case ixml.EndElement: + if len(*ps) == 0 { + return fmt.Errorf("%s must not be empty", start.Name.Local) + } + return nil + case ixml.StartElement: + p := Property{ + XMLName: xml.Name(t.(ixml.StartElement).Name), + Lang: xmlLang(t.(ixml.StartElement), lang), + } + err = d.DecodeElement(((*xmlValue)(&p.InnerXML)), &elem) + if err != nil { + return err + } + *ps = append(*ps, p) + } + } +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_set +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_remove +type setRemove struct { + XMLName ixml.Name + Lang string `xml:"xml:lang,attr,omitempty"` + Prop proppatchProps `xml:"DAV: prop"` +} + +// http://www.webdav.org/specs/rfc4918.html#ELEMENT_propertyupdate +type propertyupdate struct { + XMLName ixml.Name `xml:"DAV: propertyupdate"` + Lang string `xml:"xml:lang,attr,omitempty"` + SetRemove []setRemove `xml:",any"` +} + +func readProppatch(r io.Reader) (patches []Proppatch, status int, err error) { + var pu propertyupdate + if err = ixml.NewDecoder(r).Decode(&pu); err != nil { + return nil, http.StatusBadRequest, err + } + for _, op := range pu.SetRemove { + remove := false + switch op.XMLName { + case ixml.Name{Space: "DAV:", Local: "set"}: + // No-op. 
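+ // A <D:set> needs no extra handling: its properties are appended below
+ // with Remove left false.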
+ case ixml.Name{Space: "DAV:", Local: "remove"}: + for _, p := range op.Prop { + if len(p.InnerXML) > 0 { + return nil, http.StatusBadRequest, errInvalidProppatch + } + } + remove = true + default: + return nil, http.StatusBadRequest, errInvalidProppatch + } + patches = append(patches, Proppatch{Remove: remove, Props: op.Prop}) + } + return patches, 0, nil +} diff --git a/drives/davServer/xml_test.go b/drives/davServer/xml_test.go new file mode 100644 index 00000000..09b7e317 --- /dev/null +++ b/drives/davServer/xml_test.go @@ -0,0 +1,905 @@ +// Copyright 2014 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package davServer + +import ( + "bytes" + "encoding/xml" + "fmt" + "io" + "net/http" + "net/http/httptest" + "reflect" + "sort" + "strings" + "testing" + + ixml "github.com/openziti/zrok/drives/davServer/internal/xml" +) + +func TestReadLockInfo(t *testing.T) { + // The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + input string + wantLI lockInfo + wantStatus int + }{{ + "bad: junk", + "xxx", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid owner XML", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " no end tag \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: invalid UTF-8", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \xff \n" + + " \n" + + "", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #1", + "" + + "\n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "bad: unfinished XML #2", + "" + + "\n" + + " \n" + + " \n" + + " \n", + lockInfo{}, + http.StatusBadRequest, + }, { + "good: empty", + "", + lockInfo{}, + 0, + }, { + "good: plain-text owner", + "" + + "\n" + + " \n" + + " \n" + + " gopher\n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "gopher", + }, + }, + 0, + }, { + "section 9.10.7", + "" + + "\n" + + " \n" + + " \n" + + " \n" + + " http://example.org/~ejw/contact.html\n" + + " \n" + + "", + lockInfo{ + XMLName: ixml.Name{Space: "DAV:", Local: "lockinfo"}, + Exclusive: new(struct{}), + Write: new(struct{}), + Owner: owner{ + InnerXML: "\n http://example.org/~ejw/contact.html\n ", + }, + }, + 0, + }} + + for _, tc := range testCases { + li, status, err := readLockInfo(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(li, tc.wantLI) || status != tc.wantStatus { + t.Errorf("%s:\ngot lockInfo=%v, status=%v\nwant lockInfo=%v, status=%v", + tc.desc, li, status, tc.wantLI, tc.wantStatus) + continue + } + } +} + +func TestReadPropfind(t *testing.T) { + testCases := []struct { + desc string + input string + wantPF propfind + wantStatus int + }{{ + desc: "propfind: propname", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: empty body means allprop", + input: "", + wantPF: propfind{ + Allprop: new(struct{}), + }, + }, { + desc: "propfind: allprop", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: 
new(struct{}), + }, + }, { + desc: "propfind: allprop followed by include", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: include followed by allprop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Allprop: new(struct{}), + Include: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: prop with ignored comments", + input: "" + + "\n" + + " \n" + + " \n" + + " \n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored whitespace", + input: "" + + "\n" + + " \n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propfind with ignored mixed-content", + input: "" + + "\n" + + " foobar\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Prop: propfindProps{xml.Name{Space: "DAV:", Local: "displayname"}}, + }, + }, { + desc: "propfind: propname with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + " *boss*\n" + + "", + wantPF: propfind{ + XMLName: ixml.Name{Space: "DAV:", Local: "propfind"}, + Propname: new(struct{}), + }, + }, { + desc: "propfind: bad: junk", + input: "xxx", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and allprop (section A.3)", + input: "" + + "\n" + + " " + + " " + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: propname and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: allprop and prop", + input: "" + + "\n" + + " \n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty propfind with ignored element (section A.4)", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: empty prop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: prop with just chardata", + input: "" + + "\n" + + " foo\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: interrupted prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: malformed end element prop", + input: "" + + "\n" + + " \n", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with chardata value", + input: "" + + "\n" + + " bar\n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: property with whitespace value", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }, { + desc: "propfind: bad: include without allprop", + input: "" + + "\n" + + " \n" + + "", + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pf, status, err := readPropfind(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil 
{ + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if !reflect.DeepEqual(pf, tc.wantPF) || status != tc.wantStatus { + t.Errorf("%s:\ngot propfind=%v, status=%v\nwant propfind=%v, status=%v", + tc.desc, pf, status, tc.wantPF, tc.wantStatus) + continue + } + } +} + +func TestMultistatusWriter(t *testing.T) { + ///The "section x.y.z" test cases come from section x.y.z of the spec at + // http://www.webdav.org/specs/rfc4918.html + testCases := []struct { + desc string + responses []response + respdesc string + writeHeader bool + wantXML string + wantCode int + wantErr error + }{{ + desc: "section 9.2.2 (failed dependency)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Authors", + }, + }}, + Status: "HTTP/1.1 424 Failed Dependency", + }, { + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://ns.example.com/", + Local: "Copyright-Owner", + }, + }}, + Status: "HTTP/1.1 409 Conflict", + }}, + ResponseDescription: "Copyright Owner cannot be deleted or altered.", + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 424 Failed Dependency` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 409 Conflict` + + ` ` + + ` Copyright Owner cannot be deleted or altered.` + + `` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.6.2 (lock-token-submitted)", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Status: "HTTP/1.1 423 Locked", + Error: &xmlError{ + InnerXML: []byte(``), + }, + }}, + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` HTTP/1.1 423 Locked` + + ` ` + + ` ` + + ``, + wantCode: StatusMulti, + }, { + desc: "section 9.1.3", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "bigbox"}, + InnerXML: []byte(`` + + `` + + `Box type A` + + ``), + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "author"}, + InnerXML: []byte(`` + + `` + + `J.J. Johnson` + + ``), + }}, + Status: "HTTP/1.1 200 OK", + }, { + Prop: []Property{{ + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "DingALing"}, + }, { + XMLName: xml.Name{Space: "http://ns.example.com/boxschema/", Local: "Random"}, + }}, + Status: "HTTP/1.1 403 Forbidden", + ResponseDescription: "The user does not have access to the DingALing property.", + }}, + }}, + respdesc: "There has been an access violation error.", + wantXML: `` + + `` + + `` + + ` ` + + ` http://example.com/foo` + + ` ` + + ` ` + + ` Box type A` + + ` J.J. 
Johnson` + + ` ` + + ` HTTP/1.1 200 OK` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ` HTTP/1.1 403 Forbidden` + + ` The user does not have access to the DingALing property.` + + ` ` + + ` ` + + ` There has been an access violation error.` + + ``, + wantCode: StatusMulti, + }, { + desc: "no response written", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "no response written (with description)", + respdesc: "too bad", + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "empty multistatus with header", + writeHeader: true, + wantXML: ``, + wantCode: StatusMulti, + }, { + desc: "bad: no href", + responses: []response{{ + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and no status", + responses: []response{{ + Href: []string{"http://example.com/foo", "http://example.com/bar"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: one href and no propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: status with one href and propstat", + responses: []response{{ + Href: []string{"http://example.com/foo"}, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + Status: "HTTP/1.1 200 OK", + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }, { + desc: "bad: multiple hrefs and propstat", + responses: []response{{ + Href: []string{ + "http://example.com/foo", + "http://example.com/bar", + }, + Propstat: []propstat{{ + Prop: []Property{{ + XMLName: xml.Name{ + Space: "http://example.com/", + Local: "foo", + }, + }}, + Status: "HTTP/1.1 200 OK", + }}, + }}, + wantErr: errInvalidResponse, + // default of http.responseWriter + wantCode: http.StatusOK, + }} + + n := xmlNormalizer{omitWhitespace: true} +loop: + for _, tc := range testCases { + rec := httptest.NewRecorder() + w := multistatusWriter{w: rec, responseDescription: tc.respdesc} + if tc.writeHeader { + if err := w.writeHeader(); err != nil { + t.Errorf("%s: got writeHeader error %v, want nil", tc.desc, err) + continue + } + } + for _, r := range tc.responses { + if err := w.write(&r); err != nil { + if err != tc.wantErr { + t.Errorf("%s: got write error %v, want %v", + tc.desc, err, tc.wantErr) + } + continue loop + } + } + if err := w.close(); err != tc.wantErr { + t.Errorf("%s: got close error %v, want %v", + tc.desc, err, tc.wantErr) + continue + } + if rec.Code != tc.wantCode { + t.Errorf("%s: got HTTP status code %d, want %d\n", + tc.desc, rec.Code, tc.wantCode) + continue + } + gotXML := rec.Body.String() + eq, err := n.equalXML(strings.NewReader(gotXML), strings.NewReader(tc.wantXML)) + if err != nil { + t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s: XML body\ngot %s\nwant %s", tc.desc, gotXML, tc.wantXML) + } + } +} + +func TestReadProppatch(t *testing.T) { + ppStr := func(pps []Proppatch) string { + var outer []string + for _, pp := range pps { + var inner []string + for _, p := range pp.Props { + inner = append(inner, 
fmt.Sprintf("{XMLName: %q, Lang: %q, InnerXML: %q}", + p.XMLName, p.Lang, p.InnerXML)) + } + outer = append(outer, fmt.Sprintf("{Remove: %t, Props: [%s]}", + pp.Remove, strings.Join(inner, ", "))) + } + return "[" + strings.Join(outer, ", ") + "]" + } + + testCases := []struct { + desc string + input string + wantPP []Proppatch + wantStatus int + }{{ + desc: "proppatch: section 9.2 (with simple property value)", + input: `` + + `` + + `` + + ` ` + + ` somevalue` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Authors"}, + "", + []byte(`somevalue`), + }}, + }, { + Remove: true, + Props: []Property{{ + xml.Name{Space: "http://ns.example.com/z/", Local: "Copyright-Owner"}, + "", + nil, + }}, + }}, + }, { + desc: "proppatch: lang attribute on prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` ` + + ` ` + + ``, + wantPP: []Proppatch{{ + Props: []Property{{ + xml.Name{Space: "http://example.com/ns", Local: "foo"}, + "en", + nil, + }}, + }}, + }, { + desc: "bad: remove with value", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ` Jim Whitehead` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty propertyupdate", + input: `` + + `` + + ``, + wantStatus: http.StatusBadRequest, + }, { + desc: "bad: empty prop", + input: `` + + `` + + `` + + ` ` + + ` ` + + ` ` + + ``, + wantStatus: http.StatusBadRequest, + }} + + for _, tc := range testCases { + pp, status, err := readProppatch(strings.NewReader(tc.input)) + if tc.wantStatus != 0 { + if err == nil { + t.Errorf("%s: got nil error, want non-nil", tc.desc) + continue + } + } else if err != nil { + t.Errorf("%s: %v", tc.desc, err) + continue + } + if status != tc.wantStatus { + t.Errorf("%s: got status %d, want %d", tc.desc, status, tc.wantStatus) + continue + } + if !reflect.DeepEqual(pp, tc.wantPP) || status != tc.wantStatus { + t.Errorf("%s: proppatch\ngot %v\nwant %v", tc.desc, ppStr(pp), ppStr(tc.wantPP)) + } + } +} + +func TestUnmarshalXMLValue(t *testing.T) { + testCases := []struct { + desc string + input string + wantVal string + }{{ + desc: "simple char data", + input: "foo", + wantVal: "foo", + }, { + desc: "empty element", + input: "", + wantVal: "", + }, { + desc: "preserve namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve root element namespace", + input: ``, + wantVal: ``, + }, { + desc: "preserve whitespace", + input: " \t ", + wantVal: " \t ", + }, { + desc: "preserve mixed content", + input: ` a `, + wantVal: ` a `, + }, { + desc: "section 9.2", + input: `` + + `` + + ` Jim Whitehead` + + ` Roy Fielding` + + ``, + wantVal: `` + + ` Jim Whitehead` + + ` Roy Fielding`, + }, { + desc: "section 4.3.1 (mixed content)", + input: `` + + `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of ]]>.` + + ` ` + + ``, + wantVal: `` + + ` Jane Doe` + + ` ` + + ` mailto:jane.doe@example.com` + + ` http://www.example.com` + + ` ` + + ` Jane has been working way too long on the` + + ` long-awaited revision of <RFC2518>.` + + ` `, + }} + + var n xmlNormalizer + for _, tc := range testCases { + d := ixml.NewDecoder(strings.NewReader(tc.input)) + var v xmlValue + if err := d.Decode(&v); err != nil { + t.Errorf("%s: got error %v, want nil", tc.desc, err) + continue + } + eq, err := n.equalXML(bytes.NewReader(v), strings.NewReader(tc.wantVal)) + if err != nil { + 
t.Errorf("%s: equalXML: %v", tc.desc, err) + continue + } + if !eq { + t.Errorf("%s:\ngot %s\nwant %s", tc.desc, string(v), tc.wantVal) + } + } +} + +// xmlNormalizer normalizes XML. +type xmlNormalizer struct { + // omitWhitespace instructs to ignore whitespace between element tags. + omitWhitespace bool + // omitComments instructs to ignore XML comments. + omitComments bool +} + +// normalize writes the normalized XML content of r to w. It applies the +// following rules +// +// - Rename namespace prefixes according to an internal heuristic. +// - Remove unnecessary namespace declarations. +// - Sort attributes in XML start elements in lexical order of their +// fully qualified name. +// - Remove XML directives and processing instructions. +// - Remove CDATA between XML tags that only contains whitespace, if +// instructed to do so. +// - Remove comments, if instructed to do so. +func (n *xmlNormalizer) normalize(w io.Writer, r io.Reader) error { + d := ixml.NewDecoder(r) + e := ixml.NewEncoder(w) + for { + t, err := d.Token() + if err != nil { + if t == nil && err == io.EOF { + break + } + return err + } + switch val := t.(type) { + case ixml.Directive, ixml.ProcInst: + continue + case ixml.Comment: + if n.omitComments { + continue + } + case ixml.CharData: + if n.omitWhitespace && len(bytes.TrimSpace(val)) == 0 { + continue + } + case ixml.StartElement: + start, _ := ixml.CopyToken(val).(ixml.StartElement) + attr := start.Attr[:0] + for _, a := range start.Attr { + if a.Name.Space == "xmlns" || a.Name.Local == "xmlns" { + continue + } + attr = append(attr, a) + } + sort.Sort(byName(attr)) + start.Attr = attr + t = start + } + err = e.EncodeToken(t) + if err != nil { + return err + } + } + return e.Flush() +} + +// equalXML tests for equality of the normalized XML contents of a and b. 
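+//
+// For example (illustrative), two documents that differ only in attribute
+// order, or only in whitespace between tags when omitWhitespace is set,
+// compare as equal.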
+func (n *xmlNormalizer) equalXML(a, b io.Reader) (bool, error) { + var buf bytes.Buffer + if err := n.normalize(&buf, a); err != nil { + return false, err + } + normA := buf.String() + buf.Reset() + if err := n.normalize(&buf, b); err != nil { + return false, err + } + normB := buf.String() + return normA == normB, nil +} + +type byName []ixml.Attr + +func (a byName) Len() int { return len(a) } +func (a byName) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byName) Less(i, j int) bool { + if a[i].Name.Space != a[j].Name.Space { + return a[i].Name.Space < a[j].Name.Space + } + return a[i].Name.Local < a[j].Name.Local +} diff --git a/drives/sync/filesystem.go b/drives/sync/filesystem.go new file mode 100644 index 00000000..153f929c --- /dev/null +++ b/drives/sync/filesystem.go @@ -0,0 +1,152 @@ +package sync + +import ( + "context" + "fmt" + "github.com/openziti/zrok/drives/davServer" + "io" + "io/fs" + "os" + "path/filepath" + "time" +) + +type FilesystemTargetConfig struct { + Root string +} + +type FilesystemTarget struct { + cfg *FilesystemTargetConfig + root fs.FS + tree []*Object +} + +func NewFilesystemTarget(cfg *FilesystemTargetConfig) *FilesystemTarget { + root := os.DirFS(cfg.Root) + return &FilesystemTarget{cfg: cfg, root: root} +} + +func (t *FilesystemTarget) Inventory() ([]*Object, error) { + fi, err := os.Stat(t.cfg.Root) + if os.IsNotExist(err) { + return nil, nil + } + if err != nil { + return nil, err + } + + if !fi.IsDir() { + t.cfg.Root = filepath.Dir(t.cfg.Root) + return []*Object{{ + Path: "/" + fi.Name(), + IsDir: false, + Size: fi.Size(), + Modified: fi.ModTime(), + }}, nil + } + + t.tree = nil + if err := fs.WalkDir(t.root, ".", t.recurse); err != nil { + return nil, err + } + return t.tree, nil +} + +func (t *FilesystemTarget) Dir(path string) ([]*Object, error) { + des, err := os.ReadDir(t.cfg.Root) + if err != nil { + return nil, err + } + var objects []*Object + for _, de := range des { + fi, err := de.Info() + if err != nil { + return nil, err + } + objects = append(objects, &Object{ + Path: de.Name(), + IsDir: de.IsDir(), + Size: fi.Size(), + Modified: fi.ModTime(), + }) + } + return objects, nil +} + +func (t *FilesystemTarget) Mkdir(path string) error { + return os.MkdirAll(filepath.Join(t.cfg.Root, path), os.ModePerm) +} + +func (t *FilesystemTarget) recurse(path string, d fs.DirEntry, err error) error { + if err != nil { + return err + } + fi, err := d.Info() + if err != nil { + return err + } + etag := "" + if v, ok := fi.(davServer.ETager); ok { + etag, err = v.ETag(context.Background()) + if err != nil { + return err + } + } else { + etag = fmt.Sprintf(`"%x%x"`, fi.ModTime().UTC().UnixNano(), fi.Size()) + } + if path != "." 
{ + outPath := "/" + path + if fi.IsDir() { + outPath = outPath + "/" + } + t.tree = append(t.tree, &Object{ + Path: outPath, + IsDir: fi.IsDir(), + Size: fi.Size(), + Modified: fi.ModTime(), + ETag: etag, + }) + } + return nil +} + +func (t *FilesystemTarget) ReadStream(path string) (io.ReadCloser, error) { + return os.Open(filepath.Join(t.cfg.Root, path)) +} + +func (t *FilesystemTarget) WriteStream(path string, stream io.Reader, mode os.FileMode) error { + targetPath := filepath.Join(t.cfg.Root, path) + + if err := os.MkdirAll(filepath.Dir(targetPath), mode); err != nil { + return err + } + f, err := os.Create(targetPath) + if err != nil { + return err + } + _, err = io.Copy(f, stream) + if err != nil { + return err + } + return nil +} + +func (t *FilesystemTarget) WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error { + return t.WriteStream(path, stream, mode) +} + +func (t *FilesystemTarget) Move(src, dest string) error { + return os.Rename(filepath.Join(t.cfg.Root, src), filepath.Join(filepath.Dir(t.cfg.Root), dest)) +} + +func (t *FilesystemTarget) Rm(path string) error { + return os.RemoveAll(filepath.Join(t.cfg.Root, path)) +} + +func (t *FilesystemTarget) SetModificationTime(path string, mtime time.Time) error { + targetPath := filepath.Join(t.cfg.Root, path) + if err := os.Chtimes(targetPath, time.Now(), mtime); err != nil { + return err + } + return nil +} diff --git a/drives/sync/model.go b/drives/sync/model.go new file mode 100644 index 00000000..41a5c61f --- /dev/null +++ b/drives/sync/model.go @@ -0,0 +1,27 @@ +package sync + +import ( + "io" + "os" + "time" +) + +type Object struct { + Path string + IsDir bool + Size int64 + Modified time.Time + ETag string +} + +type Target interface { + Inventory() ([]*Object, error) + Dir(path string) ([]*Object, error) + Mkdir(path string) error + ReadStream(path string) (io.ReadCloser, error) + WriteStream(path string, stream io.Reader, mode os.FileMode) error + WriteStreamWithModTime(path string, stream io.Reader, mode os.FileMode, modTime time.Time) error + Move(src, dest string) error + Rm(path string) error + SetModificationTime(path string, mtime time.Time) error +} diff --git a/drives/sync/synchronizer.go b/drives/sync/synchronizer.go new file mode 100644 index 00000000..d6fe93d7 --- /dev/null +++ b/drives/sync/synchronizer.go @@ -0,0 +1,59 @@ +package sync + +import ( + "github.com/pkg/errors" + "github.com/sirupsen/logrus" + "os" +) + +func OneWay(src, dst Target, sync bool) error { + srcTree, err := src.Inventory() + if err != nil { + return errors.Wrap(err, "error creating source inventory") + } + + var dstTree []*Object + if sync { + dstTree, err = dst.Inventory() + if err != nil { + return errors.Wrap(err, "error creating destination inventory") + } + } + + dstIndex := make(map[string]*Object) + for _, f := range dstTree { + dstIndex[f.Path] = f + } + + var copyList []*Object + for _, srcF := range srcTree { + if dstF, found := dstIndex[srcF.Path]; found { + if !srcF.IsDir && (dstF.Size != srcF.Size || dstF.Modified.Unix() != srcF.Modified.Unix()) { + logrus.Debugf("%v <- dstF.Size = '%d', srcF.Size = '%d', dstF.Modified.UTC = '%d', srcF.Modified.UTC = '%d'", srcF.Path, dstF.Size, srcF.Size, dstF.Modified.Unix(), srcF.Modified.Unix()) + copyList = append(copyList, srcF) + } + } else { + logrus.Debugf("%v <- !found", srcF.Path) + copyList = append(copyList, srcF) + } + } + + for _, copyPath := range copyList { + if copyPath.IsDir { + if err := dst.Mkdir(copyPath.Path); err != nil 
{ + return err + } + } else { + ss, err := src.ReadStream(copyPath.Path) + if err != nil { + return err + } + if err := dst.WriteStreamWithModTime(copyPath.Path, ss, os.ModePerm, copyPath.Modified); err != nil { + return err + } + } + logrus.Infof("=> %v", copyPath.Path) + } + + return nil +} diff --git a/drives/sync/target.go b/drives/sync/target.go new file mode 100644 index 00000000..5e32635c --- /dev/null +++ b/drives/sync/target.go @@ -0,0 +1,34 @@ +package sync + +import ( + "github.com/openziti/zrok/environment/env_core" + "github.com/pkg/errors" + "net/url" + "strings" +) + +func TargetForURL(url *url.URL, root env_core.Root, basicAuth string) (Target, error) { + switch url.Scheme { + case "file": + return NewFilesystemTarget(&FilesystemTargetConfig{Root: url.Path}), nil + + case "zrok": + return NewZrokTarget(&ZrokTargetConfig{URL: url, Root: root}) + + case "http", "https": + var username string + var password string + if basicAuth != "" { + authTokens := strings.Split(basicAuth, ":") + if len(authTokens) != 2 { + return nil, errors.Errorf("invalid basic authentication (expect 'username:password')") + } + username = authTokens[0] + password = authTokens[1] + } + return NewWebDAVTarget(&WebDAVTargetConfig{URL: url, Username: username, Password: password}) + + default: + return nil, errors.Errorf("unknown URL scheme '%v'", url.Scheme) + } +} diff --git a/drives/sync/webdav.go b/drives/sync/webdav.go new file mode 100644 index 00000000..26045e75 --- /dev/null +++ b/drives/sync/webdav.go @@ -0,0 +1,144 @@ +package sync + +import ( + "context" + "github.com/openziti/zrok/drives/davClient" + "github.com/pkg/errors" + "io" + "net/http" + "net/url" + "os" + "path/filepath" + "time" +) + +type WebDAVTargetConfig struct { + URL *url.URL + Username string + Password string +} + +type WebDAVTarget struct { + cfg *WebDAVTargetConfig + dc *davClient.Client +} + +func NewWebDAVTarget(cfg *WebDAVTargetConfig) (*WebDAVTarget, error) { + var httpClient davClient.HTTPClient + httpClient = http.DefaultClient + if cfg.Username != "" || cfg.Password != "" { + httpClient = davClient.HTTPClientWithBasicAuth(httpClient, cfg.Username, cfg.Password) + } + dc, err := davClient.NewClient(httpClient, cfg.URL.String()) + if err != nil { + return nil, err + } + return &WebDAVTarget{cfg: cfg, dc: dc}, nil +} + +func (t *WebDAVTarget) Inventory() ([]*Object, error) { + rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path) + if err != nil { + return nil, err + } + + if !rootFi.IsDir { + base := filepath.Base(t.cfg.URL.Path) + t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path) + return []*Object{{ + Path: "/" + base, + IsDir: false, + Size: rootFi.Size, + Modified: rootFi.ModTime, + }}, nil + } + + fis, err := t.dc.Readdir(context.Background(), "", true) + if err != nil { + return nil, err + } + var objects []*Object + for _, fi := range fis { + if fi.Path != "/" { + objects = append(objects, &Object{ + Path: fi.Path, + IsDir: fi.IsDir, + Size: fi.Size, + Modified: fi.ModTime, + }) + } + } + return objects, nil +} + +func (t *WebDAVTarget) Dir(path string) ([]*Object, error) { + fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false) + if err != nil { + return nil, err + } + var objects []*Object + for _, fi := range fis { + if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" { + objects = append(objects, &Object{ + Path: filepath.Base(fi.Path), + IsDir: fi.IsDir, + Size: fi.Size, + Modified: fi.ModTime, + }) + } + } + return objects, nil +} + +func (t *WebDAVTarget) Mkdir(path string) error { + 
fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path)) + if err == nil { + if fi.IsDir { + return nil + } + return errors.Errorf("'%v' already exists; not directory", path) + } + return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *WebDAVTarget) ReadStream(path string) (io.ReadCloser, error) { + return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *WebDAVTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error { + ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path)) + if err != nil { + return err + } + defer func() { _ = ws.Close() }() + _, err = io.Copy(ws, rs) + if err != nil { + return err + } + return nil +} + +func (t *WebDAVTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error { + ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime) + if err != nil { + return err + } + defer func() { _ = ws.Close() }() + _, err = io.Copy(ws, rs) + if err != nil { + return err + } + return nil +} + +func (t *WebDAVTarget) Move(src, dest string) error { + return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true) +} + +func (t *WebDAVTarget) Rm(path string) error { + return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *WebDAVTarget) SetModificationTime(path string, mtime time.Time) error { + return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime) +} diff --git a/drives/sync/zrok.go b/drives/sync/zrok.go new file mode 100644 index 00000000..0971b25e --- /dev/null +++ b/drives/sync/zrok.go @@ -0,0 +1,156 @@ +package sync + +import ( + "context" + "github.com/openziti/zrok/drives/davClient" + "github.com/openziti/zrok/environment/env_core" + "github.com/openziti/zrok/sdk/golang/sdk" + "github.com/pkg/errors" + "io" + "net" + "net/http" + "net/url" + "os" + "path/filepath" + "strings" + "time" +) + +type ZrokTargetConfig struct { + URL *url.URL + Root env_core.Root +} + +type ZrokTarget struct { + cfg *ZrokTargetConfig + dc *davClient.Client +} + +type zrokDialContext struct { + root env_core.Root +} + +func (zdc *zrokDialContext) Dial(_ context.Context, _, addr string) (net.Conn, error) { + share := strings.Split(addr, ":")[0] + return sdk.NewDialer(share, zdc.root) +} + +func NewZrokTarget(cfg *ZrokTargetConfig) (*ZrokTarget, error) { + transport := http.DefaultTransport.(*http.Transport).Clone() + transport.DialContext = (&zrokDialContext{cfg.Root}).Dial + transport.TLSClientConfig.InsecureSkipVerify = true + httpUrl := strings.Replace(cfg.URL.String(), "zrok:", "http:", 1) + dc, err := davClient.NewClient(&http.Client{Transport: transport}, httpUrl) + if err != nil { + return nil, err + } + return &ZrokTarget{cfg: cfg, dc: dc}, nil +} + +func (t *ZrokTarget) Inventory() ([]*Object, error) { + rootFi, err := t.dc.Stat(context.Background(), t.cfg.URL.Path) + if err != nil { + return nil, err + } + + if !rootFi.IsDir { + base := filepath.Base(t.cfg.URL.Path) + t.cfg.URL.Path = filepath.Dir(t.cfg.URL.Path) + return []*Object{{ + Path: "/" + base, + IsDir: false, + Size: rootFi.Size, + Modified: rootFi.ModTime, + }}, nil + } + + fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, true) + if err != nil { + return nil, err + } + var objects []*Object + for _, fi := range fis { + if fi.Path != "/" { + objects = append(objects, &Object{ + Path: fi.Path, + IsDir: 
fi.IsDir, + Size: fi.Size, + Modified: fi.ModTime, + ETag: fi.ETag, + }) + } + } + return objects, nil +} + +func (t *ZrokTarget) Dir(path string) ([]*Object, error) { + fis, err := t.dc.Readdir(context.Background(), t.cfg.URL.Path, false) + if err != nil { + return nil, err + } + var objects []*Object + for _, fi := range fis { + if fi.Path != "/" && fi.Path != t.cfg.URL.Path+"/" { + objects = append(objects, &Object{ + Path: filepath.Base(fi.Path), + IsDir: fi.IsDir, + Size: fi.Size, + Modified: fi.ModTime, + }) + } + } + return objects, nil +} + +func (t *ZrokTarget) Mkdir(path string) error { + fi, err := t.dc.Stat(context.Background(), filepath.Join(t.cfg.URL.Path, path)) + if err == nil { + if fi.IsDir { + return nil + } + return errors.Errorf("'%v' already exists; not directory", path) + } + return t.dc.Mkdir(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *ZrokTarget) ReadStream(path string) (io.ReadCloser, error) { + return t.dc.Open(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *ZrokTarget) WriteStream(path string, rs io.Reader, _ os.FileMode) error { + ws, err := t.dc.Create(context.Background(), filepath.Join(t.cfg.URL.Path, path)) + if err != nil { + return err + } + defer func() { _ = ws.Close() }() + _, err = io.Copy(ws, rs) + if err != nil { + return err + } + return nil +} + +func (t *ZrokTarget) WriteStreamWithModTime(path string, rs io.Reader, _ os.FileMode, modTime time.Time) error { + ws, err := t.dc.CreateWithModTime(context.Background(), filepath.Join(t.cfg.URL.Path, path), modTime) + if err != nil { + return err + } + defer func() { _ = ws.Close() }() + _, err = io.Copy(ws, rs) + if err != nil { + return err + } + return nil +} + +func (t *ZrokTarget) Move(src, dest string) error { + return t.dc.MoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, src), dest, true) +} + +func (t *ZrokTarget) Rm(path string) error { + return t.dc.RemoveAll(context.Background(), filepath.Join(t.cfg.URL.Path, path)) +} + +func (t *ZrokTarget) SetModificationTime(path string, mtime time.Time) error { + return t.dc.Touch(context.Background(), filepath.Join(t.cfg.URL.Path, path), mtime) +} diff --git a/endpoints/config.go b/endpoints/config.go new file mode 100644 index 00000000..aa9c7b44 --- /dev/null +++ b/endpoints/config.go @@ -0,0 +1,6 @@ +package endpoints + +type TlsConfig struct { + CertPath string + KeyPath string +} diff --git a/endpoints/drive/backend.go b/endpoints/drive/backend.go index f9147e8c..810e173d 100644 --- a/endpoints/drive/backend.go +++ b/endpoints/drive/backend.go @@ -4,9 +4,9 @@ import ( "fmt" "github.com/openziti/sdk-golang/ziti" "github.com/openziti/sdk-golang/ziti/edge" + "github.com/openziti/zrok/drives/davServer" "github.com/openziti/zrok/endpoints" "github.com/pkg/errors" - "golang.org/x/net/webdav" "net/http" "time" ) @@ -26,8 +26,8 @@ type Backend struct { func NewBackend(cfg *BackendConfig) (*Backend, error) { options := ziti.ListenOptions{ - ConnectTimeout: 5 * time.Minute, - MaxConnections: 64, + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, } zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath) if err != nil { @@ -42,9 +42,9 @@ func NewBackend(cfg *BackendConfig) (*Backend, error) { return nil, err } - handler := &webdav.Handler{ - FileSystem: webdav.Dir(cfg.DriveRoot), - LockSystem: webdav.NewMemLS(), + handler := &davServer.Handler{ + FileSystem: davServer.Dir(cfg.DriveRoot), + LockSystem: davServer.NewMemLS(), Logger: func(r *http.Request, err error) { if 
cfg.Requests != nil { cfg.Requests <- &endpoints.Request{ diff --git a/endpoints/proxy/backend.go b/endpoints/proxy/backend.go index 06858ada..1aef09e9 100644 --- a/endpoints/proxy/backend.go +++ b/endpoints/proxy/backend.go @@ -31,8 +31,8 @@ type Backend struct { func NewBackend(cfg *BackendConfig) (*Backend, error) { options := ziti.ListenOptions{ - ConnectTimeout: 5 * time.Minute, - MaxConnections: 64, + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, } zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath) if err != nil { diff --git a/endpoints/proxy/browse.html b/endpoints/proxy/browse.html index bd04ff76..9f93217b 100755 --- a/endpoints/proxy/browse.html +++ b/endpoints/proxy/browse.html @@ -1,288 +1,296 @@ {{- define "icon"}} {{- if .IsDir}} - - - - + {{- if .IsSymlink}} + + + + + + {{- else}} + + + + + {{- end}} {{- else if or (eq .Name "LICENSE") (eq .Name "README")}} - - - - + + + + {{- else if .HasExt ".jpg" ".jpeg" ".png" ".gif" ".webp" ".tiff" ".bmp" ".heif" ".heic" ".svg"}} {{- if eq .Tpl.Layout "grid"}} {{- else}} - - - - - + + + + + {{- end}} - {{- else if .HasExt ".mp4" ".mov" ".mpeg" ".mpg" ".avi" ".ogg" ".webm" ".mkv" ".vob" ".gifv" ".3gp"}} + {{- else if .HasExt ".mp4" ".mov" ".m4v" ".mpeg" ".mpg" ".avi" ".ogg" ".webm" ".mkv" ".vob" ".gifv" ".3gp"}} - - - - - - - - - + + + + + + + + + {{- else if .HasExt ".mp3" ".m4a" ".aac" ".ogg" ".flac" ".wav" ".wma" ".midi" ".cda"}} - - - - - + + + + + {{- else if .HasExt ".pdf"}} - - - - - - - + + + + + + + {{- else if .HasExt ".csv" ".tsv"}} - - - - - - + + + + + + {{- else if .HasExt ".txt" ".doc" ".docx" ".odt" ".fodt" ".rtf"}} - - - - - - + + + + + + {{- else if .HasExt ".xls" ".xlsx" ".ods" ".fods"}} - - - - - - + + + + + + {{- else if .HasExt ".ppt" ".pptx" ".odp" ".fodp"}} - - - - - - - - + + + + + + + + {{- else if .HasExt ".zip" ".gz" ".xz" ".tar" ".7z" ".rar" ".xz" ".zst"}} - - - - - - - - - + + + + + + + + + {{- else if .HasExt ".deb" ".dpkg"}} - - - + + + {{- else if .HasExt ".rpm" ".exe" ".flatpak" ".appimage" ".jar" ".msi" ".apk"}} - - - - - - + + + + + + {{- else if .HasExt ".ps1"}} - - - - + + + + {{- else if .HasExt ".py" ".pyc" ".pyo"}} - - - - - - + + + + + + {{- else if .HasExt ".bash" ".sh" ".com" ".bat" ".dll" ".so"}} - - + + {{- else if .HasExt ".dmg"}} - - - - - - + + + + + + {{- else if .HasExt ".iso" ".img"}} - - - - - + + + + + {{- else if .HasExt ".md" ".mdown" ".markdown"}} - - - - + + + + {{- else if .HasExt ".ttf" ".otf" ".woff" ".woff2" ".eof"}} - - - - - - + + + + + + {{- else if .HasExt ".go"}} - - - - - - + + + + + + {{- else if .HasExt ".html" ".htm"}} - - - - - - - - - - + + + + + + + + + + {{- else if .HasExt ".js"}} - - - - - + + + + + {{- else if .HasExt ".css"}} - - - - - - + + + + + + {{- else if .HasExt ".json" ".json5" ".jsonc"}} - - - - - + + + + + {{- else if .HasExt ".ts"}} - - - - - - - + + + + + + + {{- else if .HasExt ".sql"}} - - - - - - - - + + + + + + + + {{- else if .HasExt ".db" ".sqlite" ".bak" ".mdb"}} - - - - + + + + {{- else if .HasExt ".eml" ".email" ".mailbox" ".mbox" ".msg"}} - - - + + + {{- else if .HasExt ".crt" ".pem" ".x509" ".cer" ".ca-bundle"}} - - - - - - - + + + + + + + {{- else if .HasExt ".key" ".keystore" ".jks" ".p12" ".pfx" ".pub"}} - - - + + + {{- else}} {{- if .IsSymlink}} - - - - - + + + + + {{- else}} - - - + + + {{- end}} {{- end}} @@ -291,6 +299,7 @@ {{html .Name}} + @@ -789,19 +798,19 @@ footer { - - - + + + List - - - - - + + + + + Grid @@ -826,22 +835,22 @@ footer { {{- if and (eq .Sort "namedirfirst") (ne .Order "desc")}} - - + + 
{{- else if and (eq .Sort "namedirfirst") (ne .Order "asc")}} - - + + {{- else}} - - + + {{- end}} @@ -850,16 +859,16 @@ footer { Name - - + + {{- else if and (eq .Sort "name") (ne .Order "asc")}} Name - - + + {{- else}} @@ -870,11 +879,11 @@ footer {

- - - + + + - +
@@ -882,16 +891,16 @@ footer { Size - - + + {{- else if and (eq .Sort "size") (ne .Order "asc")}} Size - - + + {{- else}} @@ -905,16 +914,16 @@ footer { Modified - - + + {{- else if and (eq .Sort "time") (ne .Order "asc")}} Modified - - + + {{- else}} @@ -933,8 +942,8 @@ footer { - - + + Up @@ -1160,7 +1169,10 @@ footer { }); document.querySelectorAll('.size').forEach(el => { const size = Number(el.dataset.size); - el.querySelector('.sizebar-bar').style.width = `${size/largest * 100}%`; + const sizebar = el.querySelector('.sizebar-bar'); + if (sizebar) { + sizebar.style.width = `${size/largest * 100}%`; + } }); } diff --git a/endpoints/proxy/frontend.go b/endpoints/proxy/frontend.go index 09ea200a..bd31b36b 100644 --- a/endpoints/proxy/frontend.go +++ b/endpoints/proxy/frontend.go @@ -22,6 +22,7 @@ type FrontendConfig struct { IdentityName string ShrToken string Address string + Tls *endpoints.TlsConfig RequestsChan chan *endpoints.Request } @@ -76,6 +77,9 @@ func NewFrontend(cfg *FrontendConfig) (*Frontend, error) { } func (h *Frontend) Run() error { + if h.cfg.Tls != nil { + return http.ListenAndServeTLS(h.cfg.Address, h.cfg.Tls.CertPath, h.cfg.Tls.KeyPath, h.handler) + } return http.ListenAndServe(h.cfg.Address, h.handler) } diff --git a/endpoints/publicProxy/config.go b/endpoints/publicProxy/config.go index 67206db6..54710c0b 100644 --- a/endpoints/publicProxy/config.go +++ b/endpoints/publicProxy/config.go @@ -3,6 +3,7 @@ package publicProxy import ( "context" "github.com/michaelquigley/cf" + "github.com/openziti/zrok/endpoints" "github.com/pkg/errors" "github.com/sirupsen/logrus" zhttp "github.com/zitadel/oidc/v2/pkg/http" @@ -16,6 +17,7 @@ type Config struct { Address string HostMatch string Oauth *OauthConfig + Tls *endpoints.TlsConfig } type OauthConfig struct { diff --git a/endpoints/publicProxy/github.go b/endpoints/publicProxy/github.go index 9e9a0713..e8c4ef25 100644 --- a/endpoints/publicProxy/github.go +++ b/endpoints/publicProxy/github.go @@ -80,7 +80,7 @@ func configureGithubOauth(cfg *OauthConfig, tls bool) error { return func(w http.ResponseWriter, r *http.Request) { host, err := url.QueryUnescape(r.URL.Query().Get("targethost")) if err != nil { - logrus.Errorf("Unable to unescape target host: %v", err) + logrus.Errorf("unable to unescape target host: %v", err) } rp.AuthURLHandler(func() string { id := uuid.New().String() @@ -99,7 +99,7 @@ func configureGithubOauth(cfg *OauthConfig, tls bool) error { }) s, err := t.SignedString(key) if err != nil { - logrus.Errorf("Unable to sign intermediate JWT: %v", err) + logrus.Errorf("unable to sign intermediate JWT: %v", err) } return s }, party, rp.WithURLParam("access_type", "offline"))(w, r) @@ -121,7 +121,7 @@ func configureGithubOauth(cfg *OauthConfig, tls bool) error { req.Header.Add("Authorization", fmt.Sprintf("Bearer %s", tokens.AccessToken)) resp, err := http.DefaultClient.Do(req) if err != nil { - logrus.Error("Error getting user info from github: " + err.Error() + "\n") + logrus.Errorf("error getting user info from github: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -130,14 +130,14 @@ func configureGithubOauth(cfg *OauthConfig, tls bool) error { }() response, err := io.ReadAll(resp.Body) if err != nil { - logrus.Errorf("Error reading response body: %v", err) + logrus.Errorf("error reading response body: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } var rDat []githubUserResp err = json.Unmarshal(response, &rDat) if err != nil { - logrus.Errorf("Error 
unmarshalling google oauth response: %v", err) + logrus.Errorf("error unmarshalling google oauth response: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -165,8 +165,7 @@ func configureGithubOauth(cfg *OauthConfig, tls bool) error { } else { authCheckInterval = i } - - SetZrokCookie(w, cfg.CookieDomain, primaryEmail, tokens.AccessToken, "github", authCheckInterval, key) + SetZrokCookie(w, cfg.CookieDomain, primaryEmail, tokens.AccessToken, "github", authCheckInterval, key, token.Claims.(*IntermediateJWT).Host) http.Redirect(w, r, fmt.Sprintf("%s://%s", scheme, token.Claims.(*IntermediateJWT).Host), http.StatusFound) } diff --git a/endpoints/publicProxy/google.go b/endpoints/publicProxy/google.go index 81e66cf2..5b62257c 100644 --- a/endpoints/publicProxy/google.go +++ b/endpoints/publicProxy/google.go @@ -78,7 +78,7 @@ func configureGoogleOauth(cfg *OauthConfig, tls bool) error { return func(w http.ResponseWriter, r *http.Request) { host, err := url.QueryUnescape(r.URL.Query().Get("targethost")) if err != nil { - logrus.Errorf("Unable to unescape target host: %v", err) + logrus.Errorf("unable to unescape target host: %v", err) } rp.AuthURLHandler(func() string { id := uuid.New().String() @@ -97,7 +97,7 @@ func configureGoogleOauth(cfg *OauthConfig, tls bool) error { }) s, err := t.SignedString(key) if err != nil { - logrus.Errorf("Unable to sign intermediate JWT: %v", err) + logrus.Errorf("unable to sign intermediate JWT: %v", err) } return s }, party, rp.WithURLParam("access_type", "offline"))(w, r) @@ -108,7 +108,7 @@ func configureGoogleOauth(cfg *OauthConfig, tls bool) error { getEmail := func(w http.ResponseWriter, r *http.Request, tokens *oidc.Tokens[*oidc.IDTokenClaims], state string, rp rp.RelyingParty) { resp, err := http.Get("https://www.googleapis.com/oauth2/v2/userinfo?access_token=" + url.QueryEscape(tokens.AccessToken)) if err != nil { - logrus.Error("Error getting user info from google: " + err.Error() + "\n") + logrus.Errorf("error getting user info from google: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -117,15 +117,15 @@ func configureGoogleOauth(cfg *OauthConfig, tls bool) error { }() response, err := io.ReadAll(resp.Body) if err != nil { - logrus.Errorf("Error reading response body: %v", err) + logrus.Errorf("error reading response body: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } - logrus.Infof("Response from google userinfo endpoint: %s", string(response)) + logrus.Infof("response from google userinfo endpoint: %s", string(response)) rDat := googleOauthEmailResp{} err = json.Unmarshal(response, &rDat) if err != nil { - logrus.Errorf("Error unmarshalling google oauth response: %v", err) + logrus.Errorf("error unmarshalling google oauth response: %v", err) http.Error(w, err.Error(), http.StatusInternalServerError) return } @@ -145,8 +145,7 @@ func configureGoogleOauth(cfg *OauthConfig, tls bool) error { } else { authCheckInterval = i } - - SetZrokCookie(w, cfg.CookieDomain, rDat.Email, tokens.AccessToken, "google", authCheckInterval, key) + SetZrokCookie(w, cfg.CookieDomain, rDat.Email, tokens.AccessToken, "google", authCheckInterval, key, token.Claims.(*IntermediateJWT).Host) http.Redirect(w, r, fmt.Sprintf("%s://%s", scheme, token.Claims.(*IntermediateJWT).Host), http.StatusFound) } diff --git a/endpoints/publicProxy/http.go b/endpoints/publicProxy/http.go index 505d1c39..60fc4438 100644 --- a/endpoints/publicProxy/http.go +++ b/endpoints/publicProxy/http.go @@ 
-4,6 +4,7 @@ import ( "context" "crypto/md5" "fmt" + "github.com/gobwas/glob" "github.com/golang-jwt/jwt/v5" "github.com/openziti/sdk-golang/ziti" "github.com/openziti/zrok/endpoints" @@ -69,7 +70,7 @@ func NewHTTP(cfg *Config) (*HttpFrontend, error) { return nil, err } proxy.Transport = zTransport - if err := configureOauthHandlers(context.Background(), cfg, false); err != nil { + if err := configureOauthHandlers(context.Background(), cfg, cfg.Tls != nil); err != nil { return nil, err } handler := authHandler(util.NewProxyHandler(proxy), cfg, key, zCtx) @@ -81,6 +82,9 @@ func NewHTTP(cfg *Config) (*HttpFrontend, error) { } func (f *HttpFrontend) Run() error { + if f.cfg.Tls != nil { + return http.ListenAndServeTLS(f.cfg.Address, f.cfg.Tls.CertPath, f.cfg.Tls.KeyPath, f.handler) + } return http.ListenAndServe(f.cfg.Address, f.handler) } @@ -157,6 +161,8 @@ func authHandler(handler http.Handler, pcfg *Config, key []byte, ctx ziti.Contex switch scheme { case string(sdk.None): logrus.Debugf("auth scheme none '%v'", shrToken) + // ensure cookies from other shares are not sent to this share, in case it's malicious + deleteZrokCookies(w, r) handler.ServeHTTP(w, r) return @@ -202,6 +208,8 @@ func authHandler(handler http.Handler, pcfg *Config, key []byte, ctx ziti.Contex return } + // ensure cookies from other shares are not sent to this share, in case it's malicious + deleteZrokCookies(w, r) handler.ServeHTTP(w, r) case string(sdk.Oauth): @@ -211,7 +219,7 @@ func authHandler(handler http.Handler, pcfg *Config, key []byte, ctx ziti.Contex if provider, found := oauthCfg.(map[string]interface{})["provider"]; found { var authCheckInterval time.Duration if checkInterval, found := oauthCfg.(map[string]interface{})["authorization_check_interval"]; !found { - logrus.Errorf("Missing authorization check interval in share config. Defaulting to 3 hours") + logrus.Errorf("missing authorization check interval in share config. 
Defaulting to 3 hours") authCheckInterval = 3 * time.Hour } else { i, err := time.ParseDuration(checkInterval.(string)) @@ -253,21 +261,39 @@ func authHandler(handler http.Handler, pcfg *Config, key []byte, ctx ziti.Contex oauthLoginRequired(w, r, pcfg.Oauth, provider.(string), target, authCheckInterval) return } - if validDomains, found := oauthCfg.(map[string]interface{})["email_domains"]; found { - if castedDomains, ok := validDomains.([]interface{}); !ok { - logrus.Error("invalid email domain format") + if claims.Audience != r.Host { + logrus.Errorf("audience claim '%s' does not match requested host '%s'; restarting auth flow", claims.Audience, r.Host) + oauthLoginRequired(w, r, pcfg.Oauth, provider.(string), target, authCheckInterval) + return + } + + if validEmailAddressPatterns, found := oauthCfg.(map[string]interface{})["email_domains"]; found { + if castedPatterns, ok := validEmailAddressPatterns.([]interface{}); !ok { + logrus.Error("invalid email pattern array format") return } else { - if len(castedDomains) > 0 { + if len(castedPatterns) > 0 { found := false - for _, domain := range castedDomains { - if strings.HasSuffix(claims.Email, domain.(string)) { - found = true - break + for _, pattern := range castedPatterns { + if castedPattern, ok := pattern.(string); ok { + match, err := glob.Compile(castedPattern) + if err != nil { + logrus.Errorf("invalid email address pattern glob '%v': %v", pattern.(string), err) + unauthorizedUi.WriteUnauthorized(w) + return + } + if match.Match(claims.Email) { + found = true + break + } + } else { + logrus.Errorf("invalid email address pattern '%v'", pattern) + unauthorizedUi.WriteUnauthorized(w) + return } } if !found { - logrus.Warnf("invalid email domain") + logrus.Warnf("unauthorized email '%v' for '%v'", claims.Email, shrToken) unauthorizedUi.WriteUnauthorized(w) return } @@ -313,15 +339,26 @@ type ZrokClaims struct { Email string `json:"email"` AccessToken string `json:"accessToken"` Provider string `json:"provider"` + Audience string `json:"aud"` AuthorizationCheckInterval time.Duration `json:"authorizationCheckInterval"` jwt.RegisteredClaims } -func SetZrokCookie(w http.ResponseWriter, domain, email, accessToken, provider string, checkInterval time.Duration, key []byte) { +func SetZrokCookie(w http.ResponseWriter, cookieDomain, email, accessToken, provider string, checkInterval time.Duration, key []byte, targetHost string) { + targetHost = strings.TrimSpace(targetHost) + if targetHost == "" { + logrus.Error("host claim must not be empty") + http.Error(w, "host claim must not be empty", http.StatusBadRequest) + return + } + targetHost = strings.Split(targetHost, "/")[0] + logrus.Debugf("setting zrok-access cookie JWT audience '%s'", targetHost) + tkn := jwt.NewWithClaims(jwt.SigningMethodHS256, ZrokClaims{ Email: email, AccessToken: accessToken, Provider: provider, + Audience: targetHost, AuthorizationCheckInterval: checkInterval, }) sTkn, err := tkn.SignedString(key) @@ -334,13 +371,32 @@ func SetZrokCookie(w http.ResponseWriter, domain, email, accessToken, provider s Name: "zrok-access", Value: sTkn, MaxAge: int(checkInterval.Seconds()), - Domain: domain, + Domain: cookieDomain, Path: "/", Expires: time.Now().Add(checkInterval), - //Secure: true, //When tls gets added have this be configured on if tls + // Secure: true, // pending server tls feature https://github.com/openziti/zrok/issues/24 + HttpOnly: true, // enabled because zrok frontend is the only intended consumer of this cookie, not client-side scripts + SameSite: 
http.SameSiteLaxMode, // explicitly set to the default Lax mode which allows the zrok share to be navigated to from another site and receive the cookie }) } +func deleteZrokCookies(w http.ResponseWriter, r *http.Request) { + // Get all cookies from the request + cookies := r.Cookies() + // Clear the Cookie header + r.Header.Del("Cookie") + // Save cookies not in the list of cookies to delete, the pkce cookie might be okay to pass along to the HTTP + // backend, but zrok-access is not because it can contain the accessToken from any other OAuth enabled shares, so we + // delete it here when the current share is not OAuth-enabled. OAuth-enabled shares check the audience claim in the + // JWT to ensure it matches the requested share and will send the client back to the OAuth provider if it does not + // match. + for _, cookie := range cookies { + if cookie.Name != "zrok-access" && cookie.Name != "pkce" { + r.AddCookie(cookie) + } + } +} + func basicAuthRequired(w http.ResponseWriter, realm string) { w.Header().Set("WWW-Authenticate", `Basic realm="`+realm+`"`) w.WriteHeader(401) diff --git a/endpoints/socks/backend.go b/endpoints/socks/backend.go new file mode 100644 index 00000000..44229763 --- /dev/null +++ b/endpoints/socks/backend.go @@ -0,0 +1,53 @@ +package socks + +import ( + "github.com/openziti/sdk-golang/ziti" + "github.com/openziti/sdk-golang/ziti/edge" + "github.com/openziti/zrok/endpoints" + "github.com/pkg/errors" + "time" +) + +type BackendConfig struct { + IdentityPath string + ShrToken string + Requests chan *endpoints.Request +} + +type Backend struct { + cfg *BackendConfig + listener edge.Listener + server *Server +} + +func NewBackend(cfg *BackendConfig) (*Backend, error) { + options := ziti.ListenOptions{ + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, + } + zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath) + if err != nil { + return nil, errors.Wrap(err, "error loading ziti identity") + } + zctx, err := ziti.NewContext(zcfg) + if err != nil { + return nil, errors.Wrap(err, "error loading ziti context") + } + listener, err := zctx.ListenWithOptions(cfg.ShrToken, &options) + if err != nil { + return nil, err + } + + return &Backend{ + cfg: cfg, + listener: listener, + server: &Server{Requests: cfg.Requests}, + }, nil +} + +func (b *Backend) Run() error { + if err := b.server.Serve(b.listener); err != nil { + return err + } + return nil +} diff --git a/endpoints/socks/socks5.go b/endpoints/socks/socks5.go new file mode 100755 index 00000000..28a465d2 --- /dev/null +++ b/endpoints/socks/socks5.go @@ -0,0 +1,415 @@ +// Copyright (c) Tailscale Inc & AUTHORS +// SPDX-License-Identifier: BSD-3-Clause + +// Package socks5 is a SOCKS5 server implementation. +// +// This is used for userspace networking in Tailscale. Specifically, +// this is used for dialing out of the machine to other nodes, without +// the host kernel's involvement, so it doesn't proper routing tables, +// TUN, IPv6, etc. This package is meant to only handle the SOCKS5 protocol +// details and not any integration with Tailscale internals itself. +// +// The glue between this package and Tailscale is in net/socks5/tssocks. +package socks + +import ( + "context" + "encoding/binary" + "fmt" + "github.com/openziti/zrok/endpoints" + "github.com/sirupsen/logrus" + "io" + "net" + "strconv" + "time" +) + +// Authentication METHODs described in RFC 1928, section 3. 
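For reference, when passwordAuth is the negotiated method the client follows up with the RFC 1929 username/password sub-negotiation, which parseClientAuth below reads field by field:

	+----+------+----------+------+----------+
	|VER | ULEN |  UNAME   | PLEN |  PASSWD  |
	+----+------+----------+------+----------+
	| 1  |  1   | 1 to 255 |  1   | 1 to 255 |
	+----+------+----------+------+----------+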
+const ( + noAuthRequired byte = 0 + passwordAuth byte = 2 + noAcceptableAuth byte = 255 +) + +// passwordAuthVersion is the auth version byte described in RFC 1929. +const passwordAuthVersion = 1 + +// socks5Version is the byte that represents the SOCKS version +// in requests. +const socks5Version byte = 5 + +// commandType are the bytes sent in SOCKS5 packets +// that represent the kind of connection the client needs. +type commandType byte + +// The set of valid SOCKS5 commands as described in RFC 1928. +const ( + connect commandType = 1 + bind commandType = 2 + udpAssociate commandType = 3 +) + +// addrType are the bytes sent in SOCKS5 packets +// that represent particular address types. +type addrType byte + +// The set of valid SOCKS5 address types as defined in RFC 1928. +const ( + ipv4 addrType = 1 + domainName addrType = 3 + ipv6 addrType = 4 +) + +// replyCode are the bytes sent in SOCKS5 packets +// that represent replies from the server to a client +// request. +type replyCode byte + +// The set of valid SOCKS5 reply types as per the RFC 1928. +const ( + success replyCode = 0 + generalFailure replyCode = 1 + connectionNotAllowed replyCode = 2 + networkUnreachable replyCode = 3 + hostUnreachable replyCode = 4 + connectionRefused replyCode = 5 + ttlExpired replyCode = 6 + commandNotSupported replyCode = 7 + addrTypeNotSupported replyCode = 8 +) + +// Server is a SOCKS5 proxy server. +type Server struct { + // Dialer optionally specifies the dialer to use for outgoing connections. + // If nil, the net package's standard dialer is used. + Dialer func(ctx context.Context, network, addr string) (net.Conn, error) + + // Username and Password, if set, are the credential clients must provide. + Username string + Password string + + // For notifying user-facing components about activity + Requests chan *endpoints.Request +} + +func (s *Server) dial(ctx context.Context, network, addr string) (net.Conn, error) { + dial := s.Dialer + if dial == nil { + dialer := &net.Dialer{} + dial = dialer.DialContext + } + return dial(ctx, network, addr) +} + +// Serve accepts and handles incoming connections on the given listener. +func (s *Server) Serve(l net.Listener) error { + defer l.Close() + for { + c, err := l.Accept() + if err != nil { + return err + } + go func() { + defer c.Close() + conn := &Conn{clientConn: c, srv: s} + err := conn.Run() + if err != nil { + logrus.Infof("client connection failed: %v", err) + } + }() + } +} + +// Conn is a SOCKS5 connection for client to reach +// server. +type Conn struct { + // The struct is filled by each of the internal + // methods in turn as the transaction progresses. + + srv *Server + clientConn net.Conn + request *request +} + +// Run starts the new connection. 
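A hedged client-side sketch for exercising Run end to end; it assumes golang.org/x/net/proxy (not a dependency of this package) and a Server already listening on 127.0.0.1:1080 with matching credentials:

	auth := &proxy.Auth{User: "user", Password: "secret"} // pass nil when the server has no credentials
	dialer, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", auth, proxy.Direct)
	if err != nil {
		log.Fatal(err)
	}
	conn, err := dialer.Dial("tcp", "example.org:443") // exercises the CONNECT path in handleRequest
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()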
+func (c *Conn) Run() error { + needAuth := c.srv.Username != "" || c.srv.Password != "" + authMethod := noAuthRequired + if needAuth { + authMethod = passwordAuth + } + + err := parseClientGreeting(c.clientConn, authMethod) + if err != nil { + c.clientConn.Write([]byte{socks5Version, noAcceptableAuth}) + return err + } + c.clientConn.Write([]byte{socks5Version, authMethod}) + if !needAuth { + return c.handleRequest() + } + + user, pwd, err := parseClientAuth(c.clientConn) + if err != nil || user != c.srv.Username || pwd != c.srv.Password { + c.clientConn.Write([]byte{1, 1}) // auth error + return err + } + c.clientConn.Write([]byte{1, 0}) // auth success + + return c.handleRequest() +} + +func (c *Conn) handleRequest() error { + req, err := parseClientRequest(c.clientConn) + if err != nil { + res := &response{reply: generalFailure} + buf, _ := res.marshal() + c.clientConn.Write(buf) + return err + } + if req.command != connect { + res := &response{reply: commandNotSupported} + buf, _ := res.marshal() + c.clientConn.Write(buf) + return fmt.Errorf("unsupported command %v", req.command) + } + c.request = req + + if c.srv.Requests != nil { + c.srv.Requests <- &endpoints.Request{ + Stamp: time.Now(), + Method: "CONNECT", + Path: fmt.Sprintf("%v:%d", c.request.destination, c.request.port), + } + } + + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + srv, err := c.srv.dial( + ctx, + "tcp", + net.JoinHostPort(c.request.destination, strconv.Itoa(int(c.request.port))), + ) + if err != nil { + res := &response{reply: generalFailure} + buf, _ := res.marshal() + c.clientConn.Write(buf) + return err + } + defer srv.Close() + serverAddr, serverPortStr, err := net.SplitHostPort(srv.LocalAddr().String()) + if err != nil { + return err + } + serverPort, _ := strconv.Atoi(serverPortStr) + + var bindAddrType addrType + if ip := net.ParseIP(serverAddr); ip != nil { + if ip.To4() != nil { + bindAddrType = ipv4 + } else { + bindAddrType = ipv6 + } + } else { + bindAddrType = domainName + } + res := &response{ + reply: success, + bindAddrType: bindAddrType, + bindAddr: serverAddr, + bindPort: uint16(serverPort), + } + buf, err := res.marshal() + if err != nil { + res = &response{reply: generalFailure} + buf, _ = res.marshal() + } + c.clientConn.Write(buf) + + errc := make(chan error, 2) + go func() { + _, err := io.Copy(c.clientConn, srv) + if err != nil { + err = fmt.Errorf("from backend to client: %w", err) + } + errc <- err + }() + go func() { + _, err := io.Copy(srv, c.clientConn) + if err != nil { + err = fmt.Errorf("from client to backend: %w", err) + } + errc <- err + }() + return <-errc +} + +// parseClientGreeting parses a request initiation packet. 
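The greeting read here has the RFC 1928, section 3 layout below; the function succeeds only if authMethod appears among the offered METHODS bytes:

	+----+----------+----------+
	|VER | NMETHODS | METHODS  |
	+----+----------+----------+
	| 1  |    1     | 1 to 255 |
	+----+----------+----------+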
+func parseClientGreeting(r io.Reader, authMethod byte) error { + var hdr [2]byte + _, err := io.ReadFull(r, hdr[:]) + if err != nil { + return fmt.Errorf("could not read packet header") + } + if hdr[0] != socks5Version { + return fmt.Errorf("incompatible SOCKS version") + } + count := int(hdr[1]) + methods := make([]byte, count) + _, err = io.ReadFull(r, methods) + if err != nil { + return fmt.Errorf("could not read methods") + } + for _, m := range methods { + if m == authMethod { + return nil + } + } + return fmt.Errorf("no acceptable auth methods") +} + +func parseClientAuth(r io.Reader) (usr, pwd string, err error) { + var hdr [2]byte + if _, err := io.ReadFull(r, hdr[:]); err != nil { + return "", "", fmt.Errorf("could not read auth packet header") + } + if hdr[0] != passwordAuthVersion { + return "", "", fmt.Errorf("bad SOCKS auth version") + } + usrLen := int(hdr[1]) + usrBytes := make([]byte, usrLen) + if _, err := io.ReadFull(r, usrBytes); err != nil { + return "", "", fmt.Errorf("could not read auth packet username") + } + var hdrPwd [1]byte + if _, err := io.ReadFull(r, hdrPwd[:]); err != nil { + return "", "", fmt.Errorf("could not read auth packet password length") + } + pwdLen := int(hdrPwd[0]) + pwdBytes := make([]byte, pwdLen) + if _, err := io.ReadFull(r, pwdBytes); err != nil { + return "", "", fmt.Errorf("could not read auth packet password") + } + return string(usrBytes), string(pwdBytes), nil +} + +// request represents data contained within a SOCKS5 +// connection request packet. +type request struct { + command commandType + destination string + port uint16 + destAddrType addrType +} + +// parseClientRequest converts raw packet bytes into a +// SOCKS5Request struct. +func parseClientRequest(r io.Reader) (*request, error) { + var hdr [4]byte + _, err := io.ReadFull(r, hdr[:]) + if err != nil { + return nil, fmt.Errorf("could not read packet header") + } + cmd := hdr[1] + destAddrType := addrType(hdr[3]) + + var destination string + var port uint16 + + if destAddrType == ipv4 { + var ip [4]byte + _, err = io.ReadFull(r, ip[:]) + if err != nil { + return nil, fmt.Errorf("could not read IPv4 address") + } + destination = net.IP(ip[:]).String() + } else if destAddrType == domainName { + var dstSizeByte [1]byte + _, err = io.ReadFull(r, dstSizeByte[:]) + if err != nil { + return nil, fmt.Errorf("could not read domain name size") + } + dstSize := int(dstSizeByte[0]) + domainName := make([]byte, dstSize) + _, err = io.ReadFull(r, domainName) + if err != nil { + return nil, fmt.Errorf("could not read domain name") + } + destination = string(domainName) + } else if destAddrType == ipv6 { + var ip [16]byte + _, err = io.ReadFull(r, ip[:]) + if err != nil { + return nil, fmt.Errorf("could not read IPv6 address") + } + destination = net.IP(ip[:]).String() + } else { + return nil, fmt.Errorf("unsupported address type") + } + var portBytes [2]byte + _, err = io.ReadFull(r, portBytes[:]) + if err != nil { + return nil, fmt.Errorf("could not read port") + } + port = binary.BigEndian.Uint16(portBytes[:]) + + return &request{ + command: commandType(cmd), + destination: destination, + port: port, + destAddrType: destAddrType, + }, nil +} + +// response contains the contents of +// a response packet sent from the proxy +// to the client. +type response struct { + reply replyCode + bindAddrType addrType + bindAddr string + bindPort uint16 +} + +// marshal converts a SOCKS5Response struct into +// a packet. 
If res.reply == Success, it may throw an error on +// receiving an invalid bind address. Otherwise, it will not throw. +func (res *response) marshal() ([]byte, error) { + pkt := make([]byte, 4) + pkt[0] = socks5Version + pkt[1] = byte(res.reply) + pkt[2] = 0 // null reserved byte + pkt[3] = byte(res.bindAddrType) + + if res.reply != success { + return pkt, nil + } + + var addr []byte + switch res.bindAddrType { + case ipv4: + addr = net.ParseIP(res.bindAddr).To4() + if addr == nil { + return nil, fmt.Errorf("invalid IPv4 address for binding") + } + case domainName: + if len(res.bindAddr) > 255 { + return nil, fmt.Errorf("invalid domain name for binding") + } + addr = make([]byte, 0, len(res.bindAddr)+1) + addr = append(addr, byte(len(res.bindAddr))) + addr = append(addr, []byte(res.bindAddr)...) + case ipv6: + addr = net.ParseIP(res.bindAddr).To16() + if addr == nil { + return nil, fmt.Errorf("invalid IPv6 address for binding") + } + default: + return nil, fmt.Errorf("unsupported address type") + } + + pkt = append(pkt, addr...) + pkt = binary.BigEndian.AppendUint16(pkt, uint16(res.bindPort)) + + return pkt, nil +} diff --git a/endpoints/tcpTunnel/backend.go b/endpoints/tcpTunnel/backend.go index 2dcfb074..b4274cbd 100644 --- a/endpoints/tcpTunnel/backend.go +++ b/endpoints/tcpTunnel/backend.go @@ -24,8 +24,8 @@ type Backend struct { func NewBackend(cfg *BackendConfig) (*Backend, error) { options := ziti.ListenOptions{ - ConnectTimeout: 5 * time.Minute, - MaxConnections: 64, + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, } zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath) if err != nil { diff --git a/endpoints/udpTunnel/backend.go b/endpoints/udpTunnel/backend.go index e38a7769..281cd26c 100644 --- a/endpoints/udpTunnel/backend.go +++ b/endpoints/udpTunnel/backend.go @@ -24,8 +24,8 @@ type Backend struct { func NewBackend(cfg *BackendConfig) (*Backend, error) { options := ziti.ListenOptions{ - ConnectTimeout: 5 * time.Minute, - MaxConnections: 64, + ConnectTimeout: 5 * time.Minute, + WaitForNEstablishedListeners: 1, } zcfg, err := ziti.NewConfigFromFile(cfg.IdentityPath) if err != nil { diff --git a/endpoints/util.go b/endpoints/util.go index 1b009f7e..a1e7238f 100644 --- a/endpoints/util.go +++ b/endpoints/util.go @@ -8,14 +8,19 @@ import ( "strings" ) -func GetRefreshedService(name string, ctx ziti.Context) (*rest_model.ServiceDetail, bool) { - svc, found := ctx.GetService(name) +func GetRefreshedService(svcName string, ctx ziti.Context) (*rest_model.ServiceDetail, bool) { + svc, found := ctx.GetService(svcName) if !found { - if err := ctx.RefreshServices(); err != nil { - logrus.Errorf("error refreshing services: %v", err) + svc, err := ctx.RefreshService(svcName) + if err != nil { + logrus.Errorf("error refreshing service '%v': %v", svcName, err) return nil, false } - return ctx.GetService(name) + if svc == nil { + logrus.Errorf("service '%v' not found", svcName) + return nil, false + } + return svc, true } return svc, found } diff --git a/etc/caddy/multiple_upstream.Caddyfile b/etc/caddy/multiple_upstream.Caddyfile index 6aedb30d..99f58b68 100644 --- a/etc/caddy/multiple_upstream.Caddyfile +++ b/etc/caddy/multiple_upstream.Caddyfile @@ -1,5 +1,13 @@ # Multiple Backends Example # + +# global config must be first +{ + # no listen on 2019/tcp with admin API + admin off +} + +# zrok site block http:// { # Bind to the zrok share bind {{ .ZrokBindAddress }} diff --git a/etc/caddy/simple_reverse_proxy.Caddyfile b/etc/caddy/simple_reverse_proxy.Caddyfile index 
d6bbc751..c6aebaed 100644 --- a/etc/caddy/simple_reverse_proxy.Caddyfile +++ b/etc/caddy/simple_reverse_proxy.Caddyfile @@ -1,3 +1,10 @@ +# global config must be first +{ + # no listen on 2019/tcp with admin API + admin off +} + +# zrok site block http:// { bind {{ .ZrokBindAddress }} reverse_proxy 127.0.0.1:3000 diff --git a/etc/ctrl.yml b/etc/ctrl.yml index b3207ad8..e2c06c5d 100644 --- a/etc/ctrl.yml +++ b/etc/ctrl.yml @@ -181,6 +181,12 @@ store: path: zrok.db type: sqlite3 +# The `tls` section sets the cert and key to use and enables serving over HTTPS +# +#tls: +# cert_path: "/Path/To/Cert/zrok.crt" +# key_path: "/Path/To/Cert/zrok.key" + # Ziti configuration. # ziti: diff --git a/etc/frontend.yml b/etc/frontend.yml index 13c78bad..ad90a9ff 100644 --- a/etc/frontend.yml +++ b/etc/frontend.yml @@ -41,4 +41,10 @@ v: 3 # client_secret: # - name: github # client_id: -# client_secret: \ No newline at end of file +# client_secret: +# +# The `tls` section sets the cert and key to use and enables serving over HTTPS +# +#tls: +# cert_path: "/Path/To/Cert/zrok.crt" +# key_path: "/Path/To/Cert/zrok.key" \ No newline at end of file diff --git a/go.mod b/go.mod index ab9fdf36..e0729ece 100644 --- a/go.mod +++ b/go.mod @@ -1,59 +1,60 @@ module github.com/openziti/zrok -go 1.20 +go 1.21 require ( - github.com/caddyserver/caddy/v2 v2.7.5-0.20230829153420-ed8bb13c5df7 + github.com/TwiN/go-away v1.6.12 + github.com/caddyserver/caddy/v2 v2.7.6 github.com/charmbracelet/bubbles v0.14.0 github.com/charmbracelet/bubbletea v0.23.1 github.com/charmbracelet/lipgloss v0.6.0 - github.com/go-openapi/errors v0.20.4 - github.com/go-openapi/loads v0.21.2 - github.com/go-openapi/runtime v0.26.0 - github.com/go-openapi/spec v0.20.9 - github.com/go-openapi/strfmt v0.21.7 - github.com/go-openapi/swag v0.22.4 - github.com/go-openapi/validate v0.22.1 - github.com/golang-jwt/jwt/v5 v5.0.0 - github.com/google/uuid v1.3.0 - github.com/gorilla/websocket v1.5.0 + github.com/go-openapi/errors v0.21.0 + github.com/go-openapi/loads v0.21.5 + github.com/go-openapi/runtime v0.27.1 + github.com/go-openapi/spec v0.20.14 + github.com/go-openapi/strfmt v0.22.0 + github.com/go-openapi/swag v0.22.9 + github.com/go-openapi/validate v0.22.6 + github.com/golang-jwt/jwt/v5 v5.2.0 + github.com/google/uuid v1.6.0 + github.com/gorilla/websocket v1.5.1 github.com/iancoleman/strcase v0.2.0 github.com/influxdata/influxdb-client-go/v2 v2.11.0 github.com/jaevor/go-nanoid v1.3.0 github.com/jedib0t/go-pretty/v6 v6.4.3 github.com/jessevdk/go-flags v1.5.0 github.com/jmoiron/sqlx v1.3.5 - github.com/lib/pq v1.10.2 - github.com/mattn/go-sqlite3 v1.14.15 + github.com/lib/pq v1.10.9 + github.com/mattn/go-sqlite3 v1.14.16 github.com/michaelquigley/cf v0.0.13 github.com/michaelquigley/pfxlog v0.6.10 github.com/muesli/reflow v0.3.0 github.com/nxadm/tail v1.4.8 - github.com/openziti/channel/v2 v2.0.91 - github.com/openziti/edge-api v0.25.33 + github.com/openziti/channel/v2 v2.0.119 + github.com/openziti/edge-api v0.26.10 github.com/openziti/fabric v0.23.26 - github.com/openziti/identity v1.0.60 - github.com/openziti/sdk-golang v0.20.92 - github.com/openziti/transport/v2 v2.0.99 + github.com/openziti/identity v1.0.70 + github.com/openziti/sdk-golang v0.22.28 + github.com/openziti/transport/v2 v2.0.122 github.com/pkg/errors v0.9.1 github.com/rabbitmq/amqp091-go v1.8.1 - github.com/rubenv/sql-migrate v1.1.2 - github.com/shirou/gopsutil/v3 v3.23.7 + github.com/rubenv/sql-migrate v1.6.0 + github.com/shirou/gopsutil/v3 v3.24.1 github.com/sirupsen/logrus v1.9.3 - 
github.com/spf13/cobra v1.7.0 + github.com/spf13/cobra v1.8.0 github.com/stretchr/testify v1.8.4 github.com/wneessen/go-mail v0.2.7 - github.com/zitadel/oidc/v2 v2.7.0 + github.com/zitadel/oidc/v2 v2.12.0 go.uber.org/zap v1.25.0 - golang.org/x/crypto v0.12.0 - golang.org/x/net v0.14.0 - golang.org/x/oauth2 v0.10.0 + golang.org/x/crypto v0.18.0 + golang.org/x/net v0.20.0 + golang.org/x/oauth2 v0.16.0 golang.org/x/time v0.3.0 - nhooyr.io/websocket v1.8.7 + nhooyr.io/websocket v1.8.10 ) require ( - cloud.google.com/go/compute v1.20.1 // indirect + cloud.google.com/go/compute v1.23.0 // indirect cloud.google.com/go/compute/metadata v0.2.3 // indirect filippo.io/edwards25519 v1.0.0 // indirect github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 // indirect @@ -63,7 +64,7 @@ require ( github.com/Masterminds/semver/v3 v3.2.0 // indirect github.com/Masterminds/sprig/v3 v3.2.3 // indirect github.com/Microsoft/go-winio v0.6.0 // indirect - github.com/alecthomas/chroma/v2 v2.7.0 // indirect + github.com/alecthomas/chroma/v2 v2.9.1 // indirect github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211106181442-e4c1a74c66bd // indirect github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df // indirect github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b // indirect @@ -72,53 +73,54 @@ require ( github.com/aymanbagabas/go-osc52 v1.0.3 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/biogo/store v0.0.0-20200525035639-8c94ae1e7c9c // indirect - github.com/caddyserver/certmagic v0.19.2 // indirect + github.com/caddyserver/certmagic v0.20.0 // indirect github.com/cenkalti/backoff/v4 v4.2.1 // indirect github.com/cespare/xxhash v1.1.0 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect github.com/chzyer/readline v1.5.1 // indirect github.com/containerd/console v1.0.3 // indirect - github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect + github.com/cpuguy83/go-md2man/v2 v2.0.3 // indirect github.com/davecgh/go-spew v1.1.1 // indirect github.com/deepmap/oapi-codegen v1.8.2 // indirect github.com/dgraph-io/badger v1.6.2 // indirect github.com/dgraph-io/badger/v2 v2.2007.4 // indirect github.com/dgraph-io/ristretto v0.1.0 // indirect github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 // indirect - github.com/dlclark/regexp2 v1.7.0 // indirect + github.com/dlclark/regexp2 v1.10.0 // indirect github.com/docker/go-units v0.5.0 // indirect github.com/dustin/go-humanize v1.0.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect - github.com/fsnotify/fsnotify v1.6.0 // indirect + github.com/fsnotify/fsnotify v1.7.0 // indirect github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa // indirect - github.com/fxamacker/cbor/v2 v2.4.0 // indirect - github.com/go-chi/chi v4.1.2+incompatible // indirect - github.com/go-gorp/gorp/v3 v3.0.2 // indirect + github.com/fxamacker/cbor/v2 v2.5.0 // indirect + github.com/go-chi/chi/v5 v5.0.10 // indirect + github.com/go-gorp/gorp/v3 v3.1.0 // indirect github.com/go-kit/kit v0.10.0 // indirect - github.com/go-logfmt/logfmt v0.5.1 // indirect - github.com/go-logr/logr v1.2.4 // indirect + github.com/go-logfmt/logfmt v0.6.0 // indirect + github.com/go-logr/logr v1.4.1 // indirect github.com/go-logr/stdr v1.2.2 // indirect github.com/go-ole/go-ole v1.2.6 // indirect - github.com/go-openapi/analysis v0.21.4 // indirect - github.com/go-openapi/jsonpointer v0.20.0 // indirect - github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-sql-driver/mysql v1.7.0 // indirect + 
github.com/go-openapi/analysis v0.22.2 // indirect + github.com/go-openapi/jsonpointer v0.20.2 // indirect + github.com/go-openapi/jsonreference v0.20.4 // indirect + github.com/go-resty/resty/v2 v2.11.0 // indirect + github.com/go-sql-driver/mysql v1.7.1 // indirect github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 // indirect - github.com/golang/glog v1.1.0 // indirect - github.com/golang/mock v1.6.0 // indirect + github.com/gobwas/glob v0.2.3 // indirect + github.com/golang/glog v1.1.2 // indirect github.com/golang/protobuf v1.5.3 // indirect github.com/golang/snappy v0.0.4 // indirect github.com/google/cel-go v0.15.1 // indirect - github.com/google/certificate-transparency-go v1.1.4 // indirect - github.com/google/go-tpm v0.3.3 // indirect + github.com/google/certificate-transparency-go v1.1.6 // indirect + github.com/google/go-tpm v0.9.0 // indirect github.com/google/go-tspi v0.3.0 // indirect github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 // indirect - github.com/gorilla/mux v1.8.0 // indirect + github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/schema v1.2.0 // indirect github.com/gorilla/securecookie v1.1.1 // indirect - github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect - github.com/huandu/xstrings v1.3.3 // indirect - github.com/imdario/mergo v0.3.12 // indirect + github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 // indirect + github.com/huandu/xstrings v1.4.0 // indirect + github.com/imdario/mergo v0.3.13 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/influxdata/line-protocol v0.0.0-20200327222509-2487e7298839 // indirect github.com/jackc/chunkreader/v2 v2.0.1 // indirect @@ -131,7 +133,7 @@ require ( github.com/jackc/pgx/v4 v4.18.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/kataras/go-events v0.0.3 // indirect - github.com/klauspost/compress v1.16.7 // indirect + github.com/klauspost/compress v1.17.0 // indirect github.com/klauspost/cpuid/v2 v2.2.5 // indirect github.com/libdns/libdns v0.2.1 // indirect github.com/lucasb-eyer/go-colorful v1.2.0 // indirect @@ -140,10 +142,10 @@ require ( github.com/manifoldco/promptui v0.9.0 // indirect github.com/mastercactapus/proxyprotocol v0.0.4 // indirect github.com/mattn/go-colorable v0.1.13 // indirect - github.com/mattn/go-isatty v0.0.18 // indirect + github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.14 // indirect - github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect + github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/mholt/acmez v1.2.0 // indirect github.com/micromdm/scep/v2 v2.1.0 // indirect @@ -160,84 +162,82 @@ require ( github.com/oklog/ulid v1.3.1 // indirect github.com/onsi/ginkgo/v2 v2.9.5 // indirect github.com/opentracing/opentracing-go v1.2.0 // indirect - github.com/openziti/foundation/v2 v2.0.29 // indirect - github.com/openziti/metrics v1.2.31 // indirect - github.com/openziti/secretstream v0.1.10 // indirect + github.com/openziti/foundation/v2 v2.0.37 // indirect + github.com/openziti/metrics v1.2.45 // indirect + github.com/openziti/secretstream v0.1.16 // indirect github.com/openziti/storage v0.2.6 // indirect github.com/orcaman/concurrent-map/v2 v2.0.1 // indirect github.com/parallaxsecond/parsec-client-go v0.0.0-20221025095442-f0a77d263cf9 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect 
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b // indirect - github.com/prometheus/client_golang v1.14.0 // indirect - github.com/prometheus/client_model v0.3.0 // indirect - github.com/prometheus/common v0.37.0 // indirect - github.com/prometheus/procfs v0.8.0 // indirect + github.com/prometheus/client_golang v1.15.1 // indirect + github.com/prometheus/client_model v0.4.0 // indirect + github.com/prometheus/common v0.42.0 // indirect + github.com/prometheus/procfs v0.9.0 // indirect github.com/quic-go/qpack v0.4.0 // indirect - github.com/quic-go/qtls-go1-20 v0.3.2 // indirect - github.com/quic-go/quic-go v0.38.0 // indirect + github.com/quic-go/qtls-go1-20 v0.4.1 // indirect + github.com/quic-go/quic-go v0.40.0 // indirect github.com/rcrowley/go-metrics v0.0.0-20201227073835-cf1acfcdf475 // indirect github.com/rivo/uniseg v0.4.4 // indirect github.com/rs/xid v1.5.0 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect github.com/shoenig/go-m1cpu v0.1.6 // indirect - github.com/shopspring/decimal v1.2.0 // indirect + github.com/shopspring/decimal v1.3.1 // indirect github.com/shurcooL/sanitized_anchor_name v1.0.0 // indirect github.com/slackhq/nebula v1.6.1 // indirect - github.com/smallstep/certificates v0.24.3-rc.5 // indirect - github.com/smallstep/go-attestation v0.4.4-0.20230509120429-e17291421738 // indirect + github.com/smallstep/certificates v0.25.0 // indirect + github.com/smallstep/go-attestation v0.4.4-0.20230627102604-cf579e53cbd2 // indirect github.com/smallstep/nosql v0.6.0 // indirect github.com/smallstep/truststore v0.12.1 // indirect github.com/speps/go-hashids v2.0.0+incompatible // indirect - github.com/spf13/cast v1.4.1 // indirect + github.com/spf13/cast v1.5.0 // indirect github.com/spf13/pflag v1.0.5 // indirect github.com/stoewer/go-strcase v1.2.0 // indirect - github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad // indirect - github.com/tklauser/go-sysconf v0.3.11 // indirect - github.com/tklauser/numcpus v0.6.0 // indirect + github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046 // indirect + github.com/tklauser/go-sysconf v0.3.12 // indirect + github.com/tklauser/numcpus v0.6.1 // indirect github.com/urfave/cli v1.22.14 // indirect github.com/x448/float16 v0.8.4 // indirect - github.com/yuin/goldmark v1.5.5 // indirect + github.com/yuin/goldmark v1.5.6 // indirect github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc // indirect github.com/yusufpapurcu/wmi v1.2.3 // indirect github.com/zeebo/blake3 v0.2.3 // indirect go.etcd.io/bbolt v1.3.7 // indirect - go.mongodb.org/mongo-driver v1.12.1 // indirect + go.mongodb.org/mongo-driver v1.13.1 // indirect go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 // indirect - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 // indirect + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 // indirect go.opentelemetry.io/contrib/propagators/autoprop v0.42.0 // indirect go.opentelemetry.io/contrib/propagators/aws v1.17.0 // indirect go.opentelemetry.io/contrib/propagators/b3 v1.17.0 // indirect go.opentelemetry.io/contrib/propagators/jaeger v1.17.0 // indirect go.opentelemetry.io/contrib/propagators/ot v1.17.0 // indirect - go.opentelemetry.io/otel v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 // indirect - 
go.opentelemetry.io/otel/metric v1.16.0 // indirect - go.opentelemetry.io/otel/sdk v1.16.0 // indirect - go.opentelemetry.io/otel/trace v1.16.0 // indirect - go.opentelemetry.io/proto/otlp v0.19.0 // indirect + go.opentelemetry.io/otel v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 // indirect + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 // indirect + go.opentelemetry.io/otel/metric v1.21.0 // indirect + go.opentelemetry.io/otel/sdk v1.21.0 // indirect + go.opentelemetry.io/otel/trace v1.21.0 // indirect + go.opentelemetry.io/proto/otlp v1.0.0 // indirect go.step.sm/cli-utils v0.8.0 // indirect - go.step.sm/crypto v0.33.0 // indirect - go.step.sm/linkedca v0.20.0 // indirect + go.step.sm/crypto v0.35.1 // indirect + go.step.sm/linkedca v0.20.1 // indirect + go.uber.org/mock v0.3.0 // indirect go.uber.org/multierr v1.11.0 // indirect - golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 // indirect - golang.org/x/mod v0.11.0 // indirect - golang.org/x/sync v0.3.0 // indirect - golang.org/x/sys v0.11.0 // indirect - golang.org/x/term v0.11.0 // indirect - golang.org/x/text v0.12.0 // indirect - golang.org/x/tools v0.10.0 // indirect + golang.org/x/exp v0.0.0-20230905200255-921286631fa9 // indirect + golang.org/x/mod v0.12.0 // indirect + golang.org/x/sync v0.5.0 // indirect + golang.org/x/sys v0.17.0 // indirect + golang.org/x/term v0.17.0 // indirect + golang.org/x/text v0.14.0 // indirect + golang.org/x/tools v0.13.0 // indirect google.golang.org/appengine v1.6.7 // indirect - google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect - google.golang.org/grpc v1.56.2 // indirect - google.golang.org/protobuf v1.31.0 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect + google.golang.org/grpc v1.59.0 // indirect + google.golang.org/protobuf v1.32.0 // indirect gopkg.in/natefinch/lumberjack.v2 v2.2.1 // indirect gopkg.in/square/go-jose.v2 v2.6.0 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect howett.net/plist v1.0.0 // indirect ) diff --git a/go.sum b/go.sum index c1454c49..f005d219 100644 --- a/go.sum +++ b/go.sum @@ -1,12 +1,8 @@ -bazil.org/fuse v0.0.0-20180421153158-65cc252bf669/go.mod h1:Xbm+BRKSBEpa4q4hTSxohYNQpsxXPbPry4JJWOB3LB8= -bitbucket.org/creachadair/shell v0.0.6/go.mod h1:8Qqi/cYk7vPnsOePHroKXDJYmb5x7ENhtiFtfZq8K+M= cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.34.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= cloud.google.com/go v0.38.0/go.mod h1:990N+gfupTy94rShfmMCWGDn0LpTmnzTp2qbd1dvSRU= -cloud.google.com/go v0.39.0/go.mod h1:rVLT6fkc8chs9sfPtFc1SBH6em7n+ZoXaG+87tDISts= cloud.google.com/go v0.44.1/go.mod h1:iSa0KzasP4Uvy3f1mN/7PiObzGgflwredwwASm/v6AU= cloud.google.com/go v0.44.2/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= -cloud.google.com/go v0.44.3/go.mod h1:60680Gw3Yr4ikxnPRS/oxxkBccT6SA1yMk63TGekxKY= cloud.google.com/go v0.45.1/go.mod h1:RpBamKRgapWJb87xiFSdk4g1CME7QZg3uwTez+TSTjc= cloud.google.com/go v0.46.3/go.mod h1:a6bKKbmY7er1mI7TEI4lsAkts/mkhTSZK8w33B4RAg0= cloud.google.com/go v0.50.0/go.mod 
h1:r9sluTvynVuxRIOHXQEHMFffphuXHOMZMycpNR5e6To= @@ -15,166 +11,108 @@ cloud.google.com/go v0.53.0/go.mod h1:fp/UouUEsRkN6ryDKNW/Upv/JBKnv6WDthjR6+vze6 cloud.google.com/go v0.54.0/go.mod h1:1rq2OEkV3YMf6n/9ZvGWI3GWw0VoqH/1x2nd8Is/bPc= cloud.google.com/go v0.56.0/go.mod h1:jr7tqZxxKOVYizybht9+26Z/gUq7tiRzu+ACVAMbKVk= cloud.google.com/go v0.57.0/go.mod h1:oXiQ6Rzq3RAkkY7N6t3TcE6jE+CIBBbA36lwQ1JyzZs= -cloud.google.com/go v0.60.0/go.mod h1:yw2G51M9IfRboUH61Us8GqCeF1PzPblB823Mn2q2eAU= cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOYc= cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY= cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= -cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg= cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8= cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0= -cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY= -cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM= -cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY= -cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ= -cloud.google.com/go v0.92.2/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.92.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI= -cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk= +cloud.google.com/go v0.110.7 h1:rJyC7nWRg2jWGZ4wSJ5nY65GTdYJkg0cd/uXb+ACI6o= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE= cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc= cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg= cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc= cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ= -cloud.google.com/go/compute v1.20.1 h1:6aKEtlUiwEpJzM001l0yFkpXmUVXaN8W+fbkb2AZNbg= -cloud.google.com/go/compute v1.20.1/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= +cloud.google.com/go/compute v1.23.0 h1:tP41Zoavr8ptEqaW6j+LQOnyBBhO7OkOMAGrgLopTwY= +cloud.google.com/go/compute v1.23.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM= cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY= cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA= cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE= cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk= cloud.google.com/go/firestore v1.1.0/go.mod h1:ulACoGHTpvq5r8rxGJ4ddJZBZqakUQqClKRT5SZwBmk= -cloud.google.com/go/iam v1.1.0 h1:67gSqaPukx7O8WLLHMa0PNs3EBGd2eE4d+psbO/CO94= -cloud.google.com/go/kms v1.15.0 h1:xYl5WEaSekKYN5gGRyhjvZKM22GVBBCzegGNVPy+aIs= -cloud.google.com/go/monitoring v0.1.0/go.mod h1:Hpm3XfzJv+UTiXzCG5Ffp0wijzHTC7Cv4eR7o3x/fEE= +cloud.google.com/go/iam v1.1.2 
h1:gacbrBdWcoVmGLozRuStX45YKvJtzIjJdAolzUs1sm4= +cloud.google.com/go/iam v1.1.2/go.mod h1:A5avdyVL2tCppe4unb0951eI9jreack+RJ0/d+KUZOU= +cloud.google.com/go/kms v1.15.2 h1:lh6qra6oC4AyWe5fUUUBe/S27k12OHAleOOOw6KakdE= +cloud.google.com/go/kms v1.15.2/go.mod h1:3hopT4+7ooWRCjc2DxgnpESFxhIraaI2IpAVUEhbT/w= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.5.0/go.mod h1:ZEwJccE3z93Z2HWvstpri00jOg7oO4UZDtKhwDwqF0w= -cloud.google.com/go/spanner v1.7.0/go.mod h1:sd3K2gZ9Fd0vMPLXzeCrF6fq4i63Q7aTLW/lBIfBkIk= -cloud.google.com/go/spanner v1.17.0/go.mod h1:+17t2ixFwRG4lWRwE+5kipDR9Ef07Jkmc8z0IbMDKUs= -cloud.google.com/go/spanner v1.18.0/go.mod h1:LvAjUXPeJRGNuGpikMULjhLj/t9cRvdc+fxRoLiugXA= -cloud.google.com/go/spanner v1.25.0/go.mod h1:kQUft3x355hzzaeFbObjsvkzZDgpDkesp3v75WBnI8w= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= -cloud.google.com/go/trace v0.1.0/go.mod h1:wxEwsoeRVPbeSkt7ZC9nWCgmoKQRAoySN7XHW2AmI7g= -code.gitea.io/sdk/gitea v0.11.3/go.mod h1:z3uwDV/b9Ls47NGukYM9XhnHtqPh/J+t40lsUrR6JDY= -contrib.go.opencensus.io/exporter/aws v0.0.0-20181029163544-2befc13012d0/go.mod h1:uu1P0UCM/6RbsMrgPa98ll8ZcHM858i/AD06a9aLRCA= -contrib.go.opencensus.io/exporter/ocagent v0.5.0/go.mod h1:ImxhfLRpxoYiSq891pBrLVhN+qmP8BTVvdH2YLs7Gl0= -contrib.go.opencensus.io/exporter/stackdriver v0.12.1/go.mod h1:iwB6wGarfphGGe/e5CWqyUk/cLzKnWsOKPVW3no6OTw= -contrib.go.opencensus.io/exporter/stackdriver v0.13.4/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -contrib.go.opencensus.io/exporter/stackdriver v0.13.5/go.mod h1:aXENhDJ1Y4lIg4EUaVTwzvYETVNZk10Pu26tevFKLUc= -contrib.go.opencensus.io/exporter/stackdriver v0.13.8/go.mod h1:huNtlWx75MwO7qMs0KrMxPZXzNNWebav1Sq/pm02JdQ= -contrib.go.opencensus.io/integrations/ocsql v0.1.4/go.mod h1:8DsSdjz3F+APR+0z0WkU1aRorQCFfRxvqjUUPMbF3fE= -contrib.go.opencensus.io/resource v0.1.1/go.mod h1:F361eGI91LCmW1I/Saf+rX0+OFcigGlFvXwEGEnkRLA= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= filippo.io/edwards25519 v1.0.0 h1:0wAIcmJUqRdI8IJ/3eGi5/HwXZWPujYXXlkrQogz0Ek= filippo.io/edwards25519 v1.0.0/go.mod h1:N1IkdkCkiLB6tki+MYJoSx2JTY9NUlxZE7eHn5EwJns= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96 h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M= github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= -github.com/Azure/azure-amqp-common-go/v2 v2.1.0/go.mod h1:R8rea+gJRuJR6QxTir/XuEd+YuKoUiazDC/N96FiDEU= -github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= -github.com/Azure/azure-sdk-for-go v29.0.0+incompatible/go.mod h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-sdk-for-go v30.1.0+incompatible/go.mod 
h1:9XXNKU+eRnpl9moKnB4QOLf1HestfXbmab5FXxiDBjc= -github.com/Azure/azure-service-bus-go v0.9.1/go.mod h1:yzBx6/BUGfjfeqbRZny9AQIbIe3AcV9WZbAdpkoXOa0= -github.com/Azure/azure-storage-blob-go v0.8.0/go.mod h1:lPI3aLPpuLTeUwh1sViKXFxwl2B6teiRqI0deQUvsw0= -github.com/Azure/go-autorest v12.0.0+incompatible/go.mod h1:r+4oMnoxhatjLLJ6zxSWATqVooLgysK6ZNox3g/xq24= github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8= github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ= github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo= -github.com/GoogleCloudPlatform/cloudsql-proxy v0.0.0-20191009163259-e802c2cb94ae/go.mod h1:mjwGPas4yKduTyubHvD1Atl9r1rUq8DfVy+gkVvZ+oo= github.com/Jeffail/gabs v1.4.0 h1://5fYRRTq1edjfIrQGvdkcd22pkYUrHZ5YC/H2GJVAo= github.com/Jeffail/gabs v1.4.0/go.mod h1:6xMvQMK4k33lb7GUUpaAPh6nKMmemQeg5d4gn7/bOXc= github.com/Knetic/govaluate v3.0.1-0.20171022003610-9aa49832a739+incompatible/go.mod h1:r7JcOSlj0wfOMncg0iLm8Leh48TZaKVeNIfJntJ2wa0= -github.com/Masterminds/goutils v1.1.0/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= github.com/Masterminds/goutils v1.1.1 h1:5nUrii3FMTL5diU80unEVvNevw1nH4+ZV4DSLVJLSYI= github.com/Masterminds/goutils v1.1.1/go.mod h1:8cTjp+g8YejhMuvIA5y2vz3BpJxksy863GQaJW2MFNU= -github.com/Masterminds/semver v1.4.2/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y= -github.com/Masterminds/semver/v3 v3.0.3/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= -github.com/Masterminds/semver/v3 v3.1.0/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Masterminds/semver/v3 v3.2.0 h1:3MEsd0SM6jqZojhjLWWeBY+Kcjy9i6MQAeY7YgDP83g= github.com/Masterminds/semver/v3 v3.2.0/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ= -github.com/Masterminds/sprig v2.15.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= -github.com/Masterminds/sprig v2.22.0+incompatible/go.mod h1:y6hNFY5UBTIWBxnzTeuNhlNS5hqE0NB0E6fgfo2Br3o= github.com/Masterminds/sprig/v3 v3.2.3 h1:eL2fZNezLomi0uOLqjQoN6BfsDD+fyLtgbJMAj9n6YA= github.com/Masterminds/sprig/v3 v3.2.3/go.mod h1:rXcFaZ2zZbLRJv/xSysmlgIM1u11eBaRMhvYXJNkGuM= github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg= github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE= github.com/OneOfOne/xxhash v1.2.2 h1:KMrpdQIwFcEqXDklaen+P1axHaj9BSKzvpUUfnHldSE= github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU= -github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0= -github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE= github.com/Shopify/sarama v1.19.0/go.mod h1:FVkBWblsNy7DGZRfXLU0O9RCGt5g3g3yEuWXgklEdEo= github.com/Shopify/toxiproxy v2.1.4+incompatible/go.mod h1:OXgGpZ6Cli1/URJOF1DMxUHB2q5Ap20/P/eIdh4G0pI= +github.com/TwiN/go-away v1.6.12 h1:80AjDyeTjfQaSFYbALzRcDKMAmxKW0a5PoxwXKZlW2A= +github.com/TwiN/go-away v1.6.12/go.mod h1:MpvIC9Li3minq+CGgbgUDvQ9tDaeW35k5IXZrF9MVas= github.com/VividCortex/gohistogram v1.0.0/go.mod h1:Pf5mBqqDxYaXu3hDrrU+w6nw50o/4+TcAqDqk/vUH7g= github.com/afex/hystrix-go 
v0.0.0-20180502004556-fa1af6a1f4f5/go.mod h1:SkGFH1ia65gfNATL8TAiHDNxPzPdmEL5uirI2Uyuz6c= -github.com/alcortesm/tgz v0.0.0-20161220082320-9c5fe88206d7/go.mod h1:6zEj6s6u/ghQa61ZWa/C2Aw3RkjiTBOix7dkqa1VLIs= github.com/alecthomas/assert/v2 v2.2.1 h1:XivOgYcduV98QCahG8T5XTezV5bylXe+lBxLG2K2ink= +github.com/alecthomas/assert/v2 v2.2.1/go.mod h1:pXcQ2Asjp247dahGEmsZ6ru0UVwnkhktn7S0bBDLxvQ= github.com/alecthomas/chroma/v2 v2.2.0/go.mod h1:vf4zrexSH54oEjJ7EdB65tGNHmH3pGZmVkgTP5RHvAs= -github.com/alecthomas/chroma/v2 v2.7.0 h1:hm1rY6c/Ob4eGclpQ7X/A3yhqBOZNUTk9q+yhyLIViI= -github.com/alecthomas/chroma/v2 v2.7.0/go.mod h1:yrkMI9807G1ROx13fhe1v6PN2DDeaR73L3d+1nmYQtw= -github.com/alecthomas/kingpin v2.2.6+incompatible/go.mod h1:59OFYbFVLKQKq+mqrL6Rw5bR0c3ACQaawgXx0QYndlE= +github.com/alecthomas/chroma/v2 v2.9.1 h1:0O3lTQh9FxazJ4BYE/MOi/vDGuHn7B+6Bu902N2UZvU= +github.com/alecthomas/chroma/v2 v2.9.1/go.mod h1:4TQu7gdfuPjSh76j78ietmqh9LiurGF0EpseFXdKMBw= github.com/alecthomas/repr v0.0.0-20220113201626-b1b626ac65ae/go.mod h1:2kn6fqh/zIyPLmm3ugklbEi5hg5wS435eygvNfaDQL8= github.com/alecthomas/repr v0.2.0 h1:HAzS41CIzNW5syS8Mf9UwXhNH1J9aix/BvDRf1Ml2Yk= +github.com/alecthomas/repr v0.2.0/go.mod h1:Fr0507jx4eOXV7AlPV6AVZLYrLIuIeSOWtW57eE/O/4= github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= -github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho= -github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/antihax/optional v0.0.0-20180407024304-ca021399b1a6/go.mod h1:V8iCPQYkqmusNa815XgQio277wI47sdRh1dUOLdyC6Q= github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211106181442-e4c1a74c66bd h1:fjJY1LimH0wVCvOHLX35SCX/MbWomAglET1H2kvz7xc= github.com/antlr/antlr4/runtime/Go/antlr v0.0.0-20211106181442-e4c1a74c66bd/go.mod h1:F7bn7fEU90QkQ3tnmaTx3LTKLEDqnwWODIYppRQ5hnY= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df h1:7RFfzj4SSt6nnvCPbCqijJi1nWCd+TqAT3bYCStRC18= github.com/antlr/antlr4/runtime/Go/antlr/v4 v4.0.0-20230305170008-8188dc5388df/go.mod h1:pSwJ0fSY5KhvocuWSx4fz3BA8OrA1bQn+K1Eli3BRwM= -github.com/aokoli/goutils v1.0.1/go.mod h1:SijmP0QR8LtwsmDs8Yii5Z/S4trXFGFC2oO5g9DP+DQ= -github.com/apache/beam v2.28.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= -github.com/apache/beam v2.32.0+incompatible/go.mod h1:/8NX3Qi8vGstDLLaeaU7+lzVEu/ACaQhYjeefzQ0y1o= github.com/apache/thrift v0.12.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= github.com/apache/thrift v0.13.0/go.mod h1:cp2SuWMxlEZw2r+iP2GNCdIi4C1qmUzdZFSVb+bacwQ= -github.com/apex/log v1.1.4/go.mod h1:AlpoD9aScyQfJDVHmLMEcx4oU6LqzkWp4Mg9GdAcEvQ= -github.com/apex/logs v0.0.4/go.mod h1:XzxuLZ5myVHDy9SAmYpamKKRNApGj54PfYLcFrXqDwo= -github.com/aphistic/golf v0.0.0-20180712155816-02c07f170c5a/go.mod h1:3NqKYiepwy8kCu4PNA+aP7WUV72eXWJeP9/r3/K9aLE= -github.com/aphistic/sweet v0.2.0/go.mod h1:fWDlIh/isSE9n6EPsRmC0det+whmX6dJid3stzu0Xys= github.com/armon/circbuf 
v0.0.0-20150827004946-bbbad097214e/go.mod h1:3U/XgcO3hCbHZ8TKRvWD2dDTCfh9M9ya+I9JpbB7O8o= github.com/armon/consul-api v0.0.0-20180202201655-eb2c6b5be1b6/go.mod h1:grANhF5doyWs3UAsr3K4I6qtAmlQcZDesFNEHPZAzj8= github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da/go.mod h1:Q73ZrmVTwzkszR9V5SSuryQ31EELlFMUz1kKyl939pY= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= -github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/aryann/difflib v0.0.0-20170710044230-e206f873d14a/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b h1:uUXgbcPDK3KpW29o4iy7GtuappbWT0l5NaMo9H9pJDw= github.com/aryann/difflib v0.0.0-20210328193216-ff5ff6dc229b/go.mod h1:DAHtR1m6lCRdSC2Tm3DSWRPvIPr6xNKyeHdqDQSQT+A= -github.com/asaskevich/govalidator v0.0.0-20200907205600-7a23bdc65eef/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 h1:DklsrG3dyBCFEj5IhUbnKptjxatkF07cF2ak3yi77so= github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2/go.mod h1:WaHUgvxTVq04UNunO+XhnAqY/wQc+bxr74GqbsZ/Jqw= github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4= github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI= github.com/aws/aws-lambda-go v1.13.3/go.mod h1:4UKl9IzQMoD+QF79YdCuzCwp8VbmG4VAQwij/eHl5CU= -github.com/aws/aws-sdk-go v1.15.27/go.mod h1:mFuSZ37Z9YOHbQEwBWztmVzqXrEkub65tZoCYDt7FT0= -github.com/aws/aws-sdk-go v1.19.18/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.19.45/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.20.6/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.23.20/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.11/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.25.37/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= github.com/aws/aws-sdk-go v1.27.0/go.mod h1:KmX6BPdI08NWTb3/sm4ZGu5ShLoqVDhKgpiN924inxo= -github.com/aws/aws-sdk-go v1.37.0/go.mod h1:hcU610XS61/+aQV88ixoOzUoG7v3b31pl2zKMmprdro= -github.com/aws/aws-sdk-go v1.44.307 h1:2R0/EPgpZcFSUwZhYImq/srjaOrOfLv5MNRzrFyAM38= +github.com/aws/aws-sdk-go v1.45.12 h1:+bKbbesGNPp+TeGrcqfrWuZoqcIEhjwKyBMHQPp80Jo= +github.com/aws/aws-sdk-go v1.45.12/go.mod h1:aVsgQcEevwlmQ7qHE9I3h+dtQgpqhFB+i8Phjh7fkwI= github.com/aws/aws-sdk-go-v2 v0.18.0/go.mod h1:JWVYvqSMppoMJC0x5wdwiImzgXTI9FuZwxzkQq9wy+g= -github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I= github.com/aymanbagabas/go-osc52 v1.0.3 h1:DTwqENW7X9arYimJrPeGZcV0ln14sGMt3pHZspWD+Mg= github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4= -github.com/benbjohnson/clock v1.0.3/go.mod h1:bGMdMPoPVvcYyt1gHDf4J2KE153Yf9BuiUKYMaxlTDM= github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A= +github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= @@ 
-182,30 +120,20 @@ github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6r github.com/bgentry/speakeasy v0.1.0/go.mod h1:+zsyZBPWlz7T6j88CTgSN5bM796AkVf0kBD4zp0CCIs= github.com/biogo/store v0.0.0-20200525035639-8c94ae1e7c9c h1:qq7IeRYvCmew9ThhcfslD5XD25P/UsBxsTozk6ywF+A= github.com/biogo/store v0.0.0-20200525035639-8c94ae1e7c9c/go.mod h1:wdbXg77soR6ESRprAMEwAQDFtLT6EAGF5o1GRy0cB5k= -github.com/bketelsen/crypt v0.0.3-0.20200106085610-5cbc8cc4026c/go.mod h1:MKsuJmJgSg28kpZDP6UIiPt0e0Oz0kqKNGyRaWEPv84= github.com/bketelsen/crypt v0.0.4/go.mod h1:aI6NrJ0pMGgvZKL1iVgXLnfIFJtfV+bKCoqOes/6LfM= -github.com/blakesmith/ar v0.0.0-20190502131153-809d4375e1fb/go.mod h1:PkYb9DJNAwrSvRx5DYA+gUcOIgTGVMNkfSCbZM8cWpI= github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps= -github.com/caarlos0/ctrlc v1.0.0/go.mod h1:CdXpj4rmq0q/1Eb44M9zi2nKB0QraNKuRGYGrrHhcQw= -github.com/caddyserver/caddy/v2 v2.7.5-0.20230829153420-ed8bb13c5df7 h1:O06TFidxnykPuvDeehwf8hdNQdK1F8bbB5F/4nzMp8A= -github.com/caddyserver/caddy/v2 v2.7.5-0.20230829153420-ed8bb13c5df7/go.mod h1:9hCGdaF1HO0wRkK5MeKRYC/lLT4o6dvXSTtj/PjWgBw= -github.com/caddyserver/certmagic v0.19.2 h1:HZd1AKLx4592MalEGQS39DKs2ZOAJCEM/xYPMQ2/ui0= -github.com/caddyserver/certmagic v0.19.2/go.mod h1:fsL01NomQ6N+kE2j37ZCnig2MFosG+MIO4ztnmG/zz8= -github.com/campoy/unique v0.0.0-20180121183637-88950e537e7e/go.mod h1:9IOqJGCPMSc6E5ydlp5NIonxObaeu/Iub/X03EKPVYo= +github.com/caddyserver/caddy/v2 v2.7.6 h1:w0NymbG2m9PcvKWsrXO6EEkY9Ru4FJK8uQbYcev1p3A= +github.com/caddyserver/caddy/v2 v2.7.6/go.mod h1:JCiwFMnRWjk8lOa7po0wM/75kwd38ccJPMSrXvQCMQ0= +github.com/caddyserver/certmagic v0.20.0 h1:bTw7LcEZAh9ucYCRXyCpIrSAGplplI0vGYJ4BpCQ/Fc= +github.com/caddyserver/certmagic v0.20.0/go.mod h1:N4sXgpICQUskEWpj7zVzvWD41p3NYacrNoZYiRM2jTg= github.com/casbin/casbin/v2 v2.1.2/go.mod h1:YcPU1XXisHhLzuxH9coDNf2FbKpjGlbCg3n9yuLkIJQ= -github.com/cavaliercoder/go-cpio v0.0.0-20180626203310-925f9528c45e/go.mod h1:oDpT4efm8tSYHXV5tHSdRvBet/b/QzxZ+XyyPehvm3A= github.com/cenkalti/backoff v2.2.1+incompatible/go.mod h1:90ReRw6GdpyfrHakVjL/QHaoyV4aDUVVkXQJJJ3NXXM= github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM= github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/census-instrumentation/opencensus-proto v0.2.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= -github.com/certifi/gocertifi v0.0.0-20191021191039-0944d244cd40/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= -github.com/certifi/gocertifi v0.0.0-20200922220541-2c3bb06c6054/go.mod h1:sGbDF6GwGcLpkNXPUTkMRoywsNa/ol15pxFe6ERfguA= github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko= github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc= github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= -github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44= github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= github.com/charmbracelet/bubbles v0.14.0 h1:DJfCwnARfWjZLvMglhSQzo76UZ2gucuHPy9jLWX45Og= @@ -231,51 +159,34 @@ 
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= -github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI= -github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= -github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I= github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= -github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h6jFvWxBdQXxjopDMZyH2UVceIRfR84bdzbkoKrsWNo= -github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA= -github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw= github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U= -github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk= github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= -github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE= github.com/coreos/go-etcd v2.0.0+incompatible/go.mod h1:Jez6KQU2B/sWsbdaef3ED8NzMklzPG4d5KIOhIy30Tk= github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-semver v0.3.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3EedlOD2RNk= github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd v0.0.0-20190620071333-e64a0ec8b42a/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4= -github.com/coreos/go-systemd/v22 v22.1.0/go.mod h1:xO0FLkIi5MaZafQlIrOotqXZ90ih+1atmu1JpKERPPk= github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc= github.com/coreos/pkg v0.0.0-20160727233714-3ac0863d7acf/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= -github.com/coreos/pkg v0.0.0-20180928190104-399ea9e2e55f/go.mod h1:E3G3o1h8I7cfcXa63jLwjI0eiQQMgzzUDFVpN/nH/eA= github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE= github.com/cpuguy83/go-md2man/v2 
v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= github.com/cpuguy83/go-md2man/v2 v2.0.0/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU= -github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= +github.com/cpuguy83/go-md2man/v2 v2.0.3 h1:qMCsGGgs+MAzDFyp9LpAe1Lqy/fY/qCovCm0qnXZOBM= +github.com/cpuguy83/go-md2man/v2 v2.0.3/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY= -github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= -github.com/creack/pty v1.1.11/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4= -github.com/davecgh/go-spew v0.0.0-20161028175848-04cdfd42973b/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbzjuhfU= github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw= -github.com/denisenkom/go-mssqldb v0.9.0/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= -github.com/devigned/tab v0.1.1/go.mod h1:XG9mPq0dFghrYvoBF3xdRrJzSTX1b7IQrvaL9mzjeJY= github.com/dgraph-io/badger v1.6.2 h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8= github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/badger/v2 v2.2007.4 h1:TRWBQg8UrlUhaFdco01nO2uXwzKS7zd+HVdwV/GHc4o= @@ -288,11 +199,10 @@ github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZm github.com/dgryski/go-farm v0.0.0-20190423205320-6a90982ecee2/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13 h1:fAjc9m62+UWV/WAFKLNi6ZS0675eEUC9y3AlwSbQu1Y= github.com/dgryski/go-farm v0.0.0-20200201041132-a6ae2369ad13/go.mod h1:SqUrOPUnsFjfmXRMNPybcSiG0BgUW2AuFH8PAnS2iTw= -github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no= -github.com/dimchansky/utfbom v1.1.0/go.mod h1:rO41eb7gLfo8SF1jd9F8HplJm1Fewwi4mQvIirEdv+8= github.com/dlclark/regexp2 v1.4.0/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc= -github.com/dlclark/regexp2 v1.7.0 h1:7lJfhqlPssTb1WQx4yvTHN0uElPEv52sbaECrAQxjAo= github.com/dlclark/regexp2 v1.7.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= +github.com/dlclark/regexp2 v1.10.0 h1:+/GIL799phkJqYW+3YbOd8LCcbHzT0Pbo8zl70MHsq0= +github.com/dlclark/regexp2 v1.10.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4= github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk= github.com/dustin/go-humanize v0.0.0-20171111073723-bb3d318650d4/go.mod h1:HtrtbFcZ19U5GC7JDqmcUSB87Iq5E25KnS6fMYU6eOk= @@ -303,8 +213,8 @@ github.com/eapache/go-resiliency v1.1.0/go.mod h1:kFI+JgMyC7bLPUVY133qvEBtVayf5m github.com/eapache/go-xerial-snappy v0.0.0-20180814174437-776d5712da21/go.mod h1:+020luEh2TKB4/GOp8oxxtq0Daoen/Cii55CzbTV6DU= 
github.com/eapache/queue v1.1.0/go.mod h1:6eCeP0CKFpHLu8blIFXhExK/dRa7WDZfr6jVFPTqq+I= github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M= -github.com/emirpasic/gods v1.12.0/go.mod h1:YfzfFFoVP/catgzJb4IKIqXjX78Ha8FMSDh3ymbK86o= github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc= +github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ= github.com/envoyproxy/go-control-plane v0.6.9/go.mod h1:SBwIajubJHhxtWwsL9s8ss4safvEdbitLhGGK48rN6g= github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4= @@ -312,190 +222,99 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= -github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= -github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0= -github.com/envoyproxy/protoc-gen-validate v0.0.14/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/envoyproxy/protoc-gen-validate v0.3.0-java/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/etcd-io/gofail v0.0.0-20190801230047-ad7f989257ca/go.mod h1:49H/RkXP8pKaZy4h0d+NW16rSLhyVBt4o6VLJbmOqDE= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= -github.com/fatih/color v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= github.com/felixge/httpsnoop v1.0.3 h1:s/nj+GCswXYzN5v2DpNMuMQYe+0DDwt5WVCU6CWBdXk= github.com/felixge/httpsnoop v1.0.3/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U= -github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= -github.com/form3tech-oss/jwt-go v3.2.2+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/form3tech-oss/jwt-go v3.2.3+incompatible/go.mod h1:pbq4aXjuKjdthFRnoDwaVPLA+WlJuPGy+QneDUgJi2k= -github.com/fortytw2/leaktest v1.2.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= -github.com/fortytw2/leaktest v1.3.0/go.mod h1:jDsjWgpAGjm2CA7WthBh/CdZYEPF31XHquHwclZch5g= github.com/franela/goblin v0.0.0-20200105215937-c9ffbefa60db/go.mod h1:7dvUGVsVBjqR7JHJk0brhHOZYGmfBYOrK0ZhYMEtBr4= github.com/franela/goreq v0.0.0-20171204163338-bcd34c9993f8/go.mod h1:ZhphrRTfi2rbfLwlschooIH4+wKKDR4Pdxhh+TRoA20= +github.com/frankban/quicktest v1.14.3 h1:FJKSZTDHjyhriyC81FLQ0LY93eSai0ZyR/ZIkd3ZUKE= +github.com/frankban/quicktest v1.14.3/go.mod h1:mgiwOwqx65TmIk1wJ6Q7wvnVMocbUorkibMOrVTHZps= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= -github.com/fsnotify/fsnotify v1.6.0/go.mod 
h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= +github.com/fsnotify/fsnotify v1.7.0 h1:8JEhPFa5W2WU7YfeZzPNqzMP6Lwt7L2715Ggo0nosvA= +github.com/fsnotify/fsnotify v1.7.0/go.mod h1:40Bi/Hjc2AVfZrqy+aj+yEI+/bRxZnMJyTJwOpGvigM= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa h1:RDBNVkRviHZtvDvId8XSGPu3rmpmSe+wKRcEWNgsfWU= github.com/fullsailor/pkcs7 v0.0.0-20190404230743-d7302db945fa/go.mod h1:KnogPXtdwXqoenmZCw6S+25EAm2MkxbG0deNDu4cbSA= -github.com/fullstorydev/grpcurl v1.6.0/go.mod h1:ZQ+ayqbKMJNhzLmbpCiurTVlaK2M/3nqZCxaQ2Ze/sM= -github.com/fullstorydev/grpcurl v1.8.0/go.mod h1:Mn2jWbdMrQGJQ8UD62uNyMumT2acsZUCkZIqFxsQf1o= -github.com/fullstorydev/grpcurl v1.8.1/go.mod h1:3BWhvHZwNO7iLXaQlojdg5NA6SxUDePli4ecpK1N7gw= -github.com/fullstorydev/grpcurl v1.8.2/go.mod h1:YvWNT3xRp2KIRuvCphFodG0fKkMXwaxA9CJgKCcyzUQ= -github.com/fxamacker/cbor/v2 v2.4.0 h1:ri0ArlOR+5XunOP8CRUowT0pSJOwhW098ZCUyskZD88= -github.com/fxamacker/cbor/v2 v2.4.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= +github.com/fxamacker/cbor/v2 v2.5.0 h1:oHsG0V/Q6E/wqTS2O1Cozzsy69nqCiguo5Q1a1ADivE= +github.com/fxamacker/cbor/v2 v2.5.0/go.mod h1:TA1xS00nchWmaBnEIxPSE5oHLuJBAVvqrtAnWBwBCVo= github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4= -github.com/getsentry/raven-go v0.2.0/go.mod h1:KungGk8q33+aIAZUIVWZDr2OfAEBsO49PX4NzFV5kcQ= github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04= -github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= -github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= -github.com/gin-gonic/gin v1.5.0/go.mod h1:Nd6IXA8m5kNZdNEHMBd93KT+mdY3+bewLgRvmCsR2Do= -github.com/gin-gonic/gin v1.6.3 h1:ahKqKTFpO5KTPHxWZjEdPScmYaGtLo8Y4DMHoEsnp14= -github.com/gin-gonic/gin v1.6.3/go.mod h1:75u5sXoLsGZoRN5Sgbi1eraJ4GU3++wFwWzhwvtwp4M= -github.com/gliderlabs/ssh v0.2.2/go.mod h1:U7qILu1NlMHj9FlMhZLlkCdDnU1DBEAqr0aevW3Awn0= -github.com/go-chi/chi v4.1.2+incompatible h1:fGFk2Gmi/YKXk0OmGfBh0WgmN3XB8lVnEyNz34tQRec= -github.com/go-chi/chi v4.1.2+incompatible/go.mod h1:eB3wogJHnLi3x/kFX2A+IbTBlXxmMeXJVKy9tTv1XzQ= github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs= +github.com/go-chi/chi/v5 v5.0.10 h1:rLz5avzKpjqxrYwXNfmjkrYYXOyLJd37pz53UFHC6vk= +github.com/go-chi/chi/v5 v5.0.10/go.mod h1:DslCQbL2OYiznFReuXYUmQ2hGd1aDpCnlMNITLSKoi8= github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8= -github.com/go-gorp/gorp/v3 v3.0.2 h1:ULqJXIekoqMx29FI5ekXXFoH1dT2Vc8UhnRzBg+Emz4= -github.com/go-gorp/gorp/v3 v3.0.2/go.mod h1:BJ3q1ejpV8cVALtcXvXaXyTOlMmJhWDxTmncaR6rwBY= -github.com/go-ini/ini v1.25.4/go.mod h1:ByCAeIL28uOIIG0E3PJtZPDL8WnHpFKFOtgjp+3Ies8= +github.com/go-gorp/gorp/v3 v3.1.0 h1:ItKF/Vbuj31dmV4jxA1qblpSwkl9g1typ24xoe70IGs= +github.com/go-gorp/gorp/v3 v3.1.0/go.mod h1:dLEjIyyRNiXvNZ8PSmzpt1GsWAUK8kjVhEpjH8TixEw= github.com/go-kit/kit v0.4.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= github.com/go-kit/kit v0.10.0 h1:dXFJfIHVvUcpSgDOV+Ne6t7jXri8Tfv2uOLHUZ2XNuo= 
github.com/go-kit/kit v0.10.0/go.mod h1:xUsJbQ/Fp4kEt7AFgCuvyX4a71u8h9jB8tj/ORgOZ7o= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= -github.com/go-kit/log v0.2.0/go.mod h1:NwTd00d/i8cPZ3xOwwiv2PO5MOcx78fFErGNcVmBjv0= github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= -github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA= -github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= +github.com/go-logfmt/logfmt v0.6.0 h1:wGYYu3uicYdqXVgoYbvnkrPVXkuLM1p1ifugDMEdRi4= +github.com/go-logfmt/logfmt v0.6.0/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= -github.com/go-logr/logr v1.2.4 h1:g01GSCwiDw2xSZfjJ2/T9M+S6pFdcNtFYsp+Y43HYDQ= -github.com/go-logr/logr v1.2.4/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ= +github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag= github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE= github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY= github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0= -github.com/go-openapi/analysis v0.21.2/go.mod h1:HZwRk4RRisyG8vx2Oe6aqeSQcoxRp47Xkp3+K6q+LdY= -github.com/go-openapi/analysis v0.21.4 h1:ZDFLvSNxpDaomuCueM0BlSXxpANBlFYiBvr+GXrvIHc= -github.com/go-openapi/analysis v0.21.4/go.mod h1:4zQ35W4neeZTqh3ol0rv/O8JBbka9QyAgQRPp9y3pfo= -github.com/go-openapi/errors v0.19.8/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.19.9/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.2/go.mod h1:cM//ZKUKyO06HSwqAelJ5NsEMMcpa6VpXe8DOa1Mi1M= -github.com/go-openapi/errors v0.20.4 h1:unTcVm6PispJsMECE3zWgvG4xTiKda1LIR5rCRWLG6M= -github.com/go-openapi/errors v0.20.4/go.mod h1:Z3FlZ4I8jEGxjUK+bugx3on2mIAk4txuAOhlsB1FSgk= -github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= +github.com/go-openapi/analysis v0.22.2 h1:ZBmNoP2h5omLKr/srIC9bfqrUGzT6g6gNv03HE9Vpj0= +github.com/go-openapi/analysis v0.22.2/go.mod h1:pDF4UbZsQTo/oNuRfAWWd4dAh4yuYf//LYorPTjrpvo= +github.com/go-openapi/errors v0.21.0 h1:FhChC/duCnfoLj1gZ0BgaBmzhJC2SL/sJr8a2vAobSY= +github.com/go-openapi/errors v0.21.0/go.mod h1:jxNTMUxRCKj65yb/okJGEtahVd7uvWnuWfj53bse4ho= github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg= -github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= -github.com/go-openapi/jsonpointer v0.20.0 h1:ESKJdU9ASRfaPNOPRx12IUyA1vn3R9GiE3KYD14BXdQ= -github.com/go-openapi/jsonpointer v0.20.0/go.mod h1:6PGzBjjIIumbLYysB73Klnms1mwnU4G3YHOECG3CedA= -github.com/go-openapi/jsonreference v0.19.6/go.mod h1:diGHMEHg2IqXZGKxqyvWdfWU/aim5Dprw5bqpKkTvns= -github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo= -github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= -github.com/go-openapi/jsonreference v0.20.2/go.mod 
h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/loads v0.21.1/go.mod h1:/DtAMXXneXFjbQMGEtbamCZb+4x7eGwkvZCvBmwUG+g= -github.com/go-openapi/loads v0.21.2 h1:r2a/xFIYeZ4Qd2TnGpWDIQNcP80dIaZgf704za8enro= -github.com/go-openapi/loads v0.21.2/go.mod h1:Jq58Os6SSGz0rzh62ptiu8Z31I+OTHqmULx5e/gJbNw= -github.com/go-openapi/runtime v0.26.0 h1:HYOFtG00FM1UvqrcxbEJg/SwvDRvYLQKGhw2zaQjTcc= -github.com/go-openapi/runtime v0.26.0/go.mod h1:QgRGeZwrUcSHdeh4Ka9Glvo0ug1LC5WyE+EV88plZrQ= -github.com/go-openapi/spec v0.20.4/go.mod h1:faYFR1CvsJZ0mNsmsphTMSoRrNV3TEDoAM7FOEWeq8I= -github.com/go-openapi/spec v0.20.6/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/spec v0.20.9 h1:xnlYNQAwKd2VQRRfwTEI0DcK+2cbuvI/0c7jx3gA8/8= -github.com/go-openapi/spec v0.20.9/go.mod h1:2OpW+JddWPrpXSCIX8eOx7lZ5iyuWj3RYR6VaaBKcWA= -github.com/go-openapi/strfmt v0.21.0/go.mod h1:ZRQ409bWMj+SOgXofQAGTIo2Ebu72Gs+WaRADcS5iNg= -github.com/go-openapi/strfmt v0.21.1/go.mod h1:I/XVKeLc5+MM5oPNN7P6urMOpuLXEcNrCX/rPGuWb0k= -github.com/go-openapi/strfmt v0.21.3/go.mod h1:k+RzNO0Da+k3FrrynSNN8F7n/peCmQQqbbXjtDfvmGg= -github.com/go-openapi/strfmt v0.21.7 h1:rspiXgNWgeUzhjo1YU01do6qsahtJNByjLVbPLNHb8k= -github.com/go-openapi/strfmt v0.21.7/go.mod h1:adeGTkxE44sPyLk0JV235VQAO/ZXUr8KAzYjclFs3ew= +github.com/go-openapi/jsonpointer v0.20.2 h1:mQc3nmndL8ZBzStEo3JYF8wzmeWffDH4VbXz58sAx6Q= +github.com/go-openapi/jsonpointer v0.20.2/go.mod h1:bHen+N0u1KEO3YlmqOjTT9Adn1RfD91Ar825/PuiRVs= +github.com/go-openapi/jsonreference v0.20.4 h1:bKlDxQxQJgwpUSgOENiMPzCTBVuc7vTdXSSgNeAhojU= +github.com/go-openapi/jsonreference v0.20.4/go.mod h1:5pZJyJP2MnYCpoeoMAql78cCHauHj0V9Lhc506VOpw4= +github.com/go-openapi/loads v0.21.5 h1:jDzF4dSoHw6ZFADCGltDb2lE4F6De7aWSpe+IcsRzT0= +github.com/go-openapi/loads v0.21.5/go.mod h1:PxTsnFBoBe+z89riT+wYt3prmSBP6GDAQh2l9H1Flz8= +github.com/go-openapi/runtime v0.27.1 h1:ae53yaOoh+fx/X5Eaq8cRmavHgDma65XPZuvBqvJYto= +github.com/go-openapi/runtime v0.27.1/go.mod h1:fijeJEiEclyS8BRurYE1DE5TLb9/KZl6eAdbzjsrlLU= +github.com/go-openapi/spec v0.20.14 h1:7CBlRnw+mtjFGlPDRZmAMnq35cRzI91xj03HVyUi/Do= +github.com/go-openapi/spec v0.20.14/go.mod h1:8EOhTpBoFiask8rrgwbLC3zmJfz4zsCUueRuPM6GNkw= +github.com/go-openapi/strfmt v0.22.0 h1:Ew9PnEYc246TwrEspvBdDHS4BVKXy/AOVsfqGDgAcaI= +github.com/go-openapi/strfmt v0.22.0/go.mod h1:HzJ9kokGIju3/K6ap8jL+OlGAbjpSv27135Yr9OivU4= github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk= -github.com/go-openapi/swag v0.19.15/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.21.1/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ= -github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU= -github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= -github.com/go-openapi/validate v0.22.1 h1:G+c2ub6q47kfX1sOBLwIQwzBVt8qmOAARyo/9Fqs9NU= -github.com/go-openapi/validate v0.22.1/go.mod h1:rjnrwK57VJ7A8xqfpAOEKRH8yQSGUriMu5/zuPSQ1hg= -github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= -github.com/go-playground/locales v0.12.1/go.mod h1:IUMDtCfWo/w/mtMfIE/IG2K+Ey3ygWanZIBtBW0W2TM= -github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= -github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= 
-github.com/go-playground/universal-translator v0.16.0/go.mod h1:1AnU7NaIRDWWzGEKwgtJRd2xk99HeFyHw3yid4rvQIY= -github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= -github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= -github.com/go-playground/validator/v10 v10.2.0 h1:KgJ0snyC2R9VXYN2rneOtQcw5aHQB1Vv0sFl1UcHBOY= -github.com/go-playground/validator/v10 v10.2.0/go.mod h1:uOYAAleCW8F/7oMFd6aG0GOhaH6EGOAJShg8Id5JGkI= -github.com/go-redis/redis v6.15.8+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= -github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA= +github.com/go-openapi/swag v0.22.9 h1:XX2DssF+mQKM2DHsbgZK74y/zj4mo9I99+89xUmuZCE= +github.com/go-openapi/swag v0.22.9/go.mod h1:3/OXnFfnMAwBD099SwYRk7GD3xOrr1iL7d/XNLXVVwE= +github.com/go-openapi/validate v0.22.6 h1:+NhuwcEYpWdO5Nm4bmvhGLW0rt1Fcc532Mu3wpypXfo= +github.com/go-openapi/validate v0.22.6/go.mod h1:eaddXSqKeTg5XpSmj1dYyFTK/95n/XHwcOY+BMxKMyM= +github.com/go-resty/resty/v2 v2.11.0 h1:i7jMfNOJYMp69lq7qozJP+bjgzfAzeOhuGlyDrqxT/8= +github.com/go-resty/resty/v2 v2.11.0/go.mod h1:iiP/OpA0CkcL3IGt1O0+/SIItFUbkkyw5BGXiVdTu+A= github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= -github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= -github.com/go-sql-driver/mysql v1.7.0 h1:ueSltNNllEqE3qcWBTD0iQd3IpL/6U+mJxLkazJ7YPc= -github.com/go-sql-driver/mysql v1.7.0/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= +github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI= +github.com/go-sql-driver/mysql v1.7.1/go.mod h1:OXbVy3sEdcQ2Doequ6Z5BW6fXNQTmx+9S1MCJN5yJMI= github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEevZMzYi5KSi8KkcZtzBcTgAUUtapy0OI= github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls= -github.com/gobuffalo/attrs v0.0.0-20190224210810-a9411de4debd/go.mod h1:4duuawTqi2wkkpB4ePgWMaai6/Kc6WEz83bhFwpHzj0= -github.com/gobuffalo/depgen v0.0.0-20190329151759-d478694a28d3/go.mod h1:3STtPUQYuzV0gBVOY3vy6CfMm/ljR4pABfrTeHNLHUY= -github.com/gobuffalo/depgen v0.1.0/go.mod h1:+ifsuy7fhi15RWncXQQKjWS9JPkdah5sZvtHc2RXGlg= -github.com/gobuffalo/envy v1.6.15/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/envy v1.7.0/go.mod h1:n7DRkBerg/aorDM8kbduw5dN3oXGswK5liaSCx4T5NI= -github.com/gobuffalo/flect v0.1.0/go.mod h1:d2ehjJqGOH/Kjqcoz+F7jHTBbmDb38yXA598Hb50EGs= -github.com/gobuffalo/flect v0.1.1/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/flect v0.1.3/go.mod h1:8JCgGVbRjJhVgD6399mQr4fx5rRfGKVzFjbj6RE/9UI= -github.com/gobuffalo/genny v0.0.0-20190329151137-27723ad26ef9/go.mod h1:rWs4Z12d1Zbf19rlsn0nurr75KqhYp52EAGGxTbBhNk= -github.com/gobuffalo/genny v0.0.0-20190403191548-3ca520ef0d9e/go.mod h1:80lIj3kVJWwOrXWWMRzzdhW3DsrdjILVil/SFKBzF28= -github.com/gobuffalo/genny v0.1.0/go.mod 
h1:XidbUqzak3lHdS//TPu2OgiFB+51Ur5f7CSnXZ/JDvo= -github.com/gobuffalo/genny v0.1.1/go.mod h1:5TExbEyY48pfunL4QSXxlDOmdsD44RRq4mVZ0Ex28Xk= -github.com/gobuffalo/gitgen v0.0.0-20190315122116-cc086187d211/go.mod h1:vEHJk/E9DmhejeLeNt7UVvlSGv3ziL+djtTr3yyzcOw= -github.com/gobuffalo/gogen v0.0.0-20190315121717-8f38393713f5/go.mod h1:V9QVDIxsgKNZs6L2IYiGR8datgMhB577vzTDqypH360= -github.com/gobuffalo/gogen v0.1.0/go.mod h1:8NTelM5qd8RZ15VjQTFkAW6qOMx5wBbW4dSCS3BY8gg= -github.com/gobuffalo/gogen v0.1.1/go.mod h1:y8iBtmHmGc4qa3urIyo1shvOD8JftTtfcKi+71xfDNE= -github.com/gobuffalo/logger v0.0.0-20190315122211-86e12af44bc2/go.mod h1:QdxcLw541hSGtBnhUc4gaNIXRjiDppFGaDqzbrBd3v8= -github.com/gobuffalo/logger v1.0.6 h1:nnZNpxYo0zx+Aj9RfMPBm+x9zAU2OayFh/xrAWi34HU= -github.com/gobuffalo/logger v1.0.6/go.mod h1:J31TBEHR1QLV2683OXTAItYIg8pv2JMHnF/quuAbMjs= -github.com/gobuffalo/mapi v1.0.1/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/mapi v1.0.2/go.mod h1:4VAGh89y6rVOvm5A8fKFxYG+wIW6LO1FMTG9hnKStFc= -github.com/gobuffalo/packd v0.0.0-20190315124812-a385830c7fc0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v0.1.0/go.mod h1:M2Juc+hhDXf/PnmBANFCqx4DM3wRbgDvnVWeG2RIxq4= -github.com/gobuffalo/packd v1.0.1 h1:U2wXfRr4E9DH8IdsDLlRFwTZTK7hLfq9qT/QHXGVe/0= -github.com/gobuffalo/packd v1.0.1/go.mod h1:PP2POP3p3RXGz7Jh6eYEf93S7vA2za6xM7QT85L4+VY= -github.com/gobuffalo/packr/v2 v2.0.9/go.mod h1:emmyGweYTm6Kdper+iywB6YK5YzuKchGtJQZ0Odn4pQ= -github.com/gobuffalo/packr/v2 v2.2.0/go.mod h1:CaAwI0GPIAv+5wKLtv8Afwl+Cm78K/I/VCm/3ptBN+0= -github.com/gobuffalo/packr/v2 v2.8.3 h1:xE1yzvnO56cUC0sTpKR3DIbxZgB54AftTFMhB2XEWlY= -github.com/gobuffalo/packr/v2 v2.8.3/go.mod h1:0SahksCVcx4IMnigTjiFuyldmTrdTctXsOdiU5KwbKc= -github.com/gobuffalo/syncx v0.0.0-20190224160051-33c29581e754/go.mod h1:HhnNqWY95UYwwW3uSASeV7vtgYkT2t16hJgV3AEPUpw= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee h1:s+21KNqlpePfkah2I+gwHF8xmJWRjooY+5248k6m4A0= -github.com/gobwas/httphead v0.0.0-20180130184737-2c6c146eadee/go.mod h1:L0fX3K22YWvt/FAX9NnzrNzcI4wNYi9Yku4O0LKYflo= -github.com/gobwas/pool v0.2.0 h1:QEmUOlnSjWtnpRGHF3SauEiOsy82Cup83Vf2LcMlnc8= -github.com/gobwas/pool v0.2.0/go.mod h1:q8bcK0KcYlCgd9e7WYLm9LpyS+YeLd8JVDW6WezmKEw= -github.com/gobwas/ws v1.0.2 h1:CoAavW/wd/kulfZmSIBt6p24n4j7tHgNVCjsfHVNUbo= -github.com/gobwas/ws v1.0.2/go.mod h1:szmBTxLgaFppYjEmNtny/v3w89xOydFnnZMcgRRu/EM= -github.com/godbus/dbus/v5 v5.0.3/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= +github.com/gobwas/glob v0.2.3 h1:A4xDbljILXROh+kObIiy5kIaPYD8e96x1tgBhUI5J+Y= +github.com/gobwas/glob v0.2.3/go.mod h1:d3Ez4x06l9bZtSvzIay5+Yzi0fmZzPgnTbPcKjJAkT8= github.com/godbus/dbus/v5 v5.0.4/go.mod h1:xhWf0FNVPg57R7Z0UbKHbJfkEywrmjJnf7w5xrFpKfA= -github.com/godror/godror v0.24.2/go.mod h1:wZv/9vPiUib6tkoDl+AZ/QLf5YZgMravZ7jxH2eQWAE= github.com/gofrs/uuid v4.0.0+incompatible h1:1SD/1F5pU8p29ybwgQSwpQk+mwdRrXCYuPhW6m+TnJw= github.com/gofrs/uuid v4.0.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM= github.com/gogo/googleapis v1.1.0/go.mod h1:gf4bu3Q80BeJ6H1S1vYPm8/ELATdvryBaNFGgqEef3s= github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.0/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= github.com/gogo/protobuf v1.2.1/go.mod h1:hp+jE20tsWTFYpLwKvXlhS1hjn+gTNwPg2I6zVXpSg4= -github.com/gogo/protobuf v1.3.0/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= -github.com/gogo/protobuf v1.3.1/go.mod 
h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang-jwt/jwt/v5 v5.0.0 h1:1n1XNM9hk7O9mnQoNBGolZvzebBQ7p93ULHRc28XJUE= -github.com/golang-jwt/jwt/v5 v5.0.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= -github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1Jw= +github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= -github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4= -github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE= -github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ= +github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo= +github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ= github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -509,9 +328,6 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw= github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4= github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8= -github.com/golang/mock v1.6.0 h1:ErTB+efbowRARo13NNdxyJji2egdxLGQhRaY+DUumQc= -github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= @@ -539,19 +355,13 @@ github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEW github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219/go.mod h1:/X8TswGSh1pIozq4ZwCfxS0WA5JGXguxk94ar/4c87Y= github.com/google/btree v0.0.0-20180813153112-4030bb1f1f0c/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= github.com/google/btree v1.0.0/go.mod h1:lNA+9X1NB3Zf8V7Ke586lFgjr2dZNuvo3lPJSGZ5JPQ= -github.com/google/btree v1.0.1/go.mod h1:xXMiIv4Fb/0kKde4SpL7qlzvu5cMJDRkFDxJfI9uaxA= github.com/google/btree v1.1.2 h1:xf4v41cLI2Z6FxbKm+8Bu+m8ifhj15JuZ9sa0jZCMUU= +github.com/google/btree v1.1.2/go.mod h1:qOPhT0dTNdNzV6Z/lhRX0YXUafgPLFUh+gZMl761Gm4= github.com/google/cel-go v0.15.1 h1:iTgVZor2x9okXtmTrqO8cg4uvqIeaBcWhXtruaWFMYQ= github.com/google/cel-go v0.15.1/go.mod h1:YzWEoI07MC/a/wj9in8GeVatqfypkldgBlwXh9bCwqY= github.com/google/certificate-transparency-go v1.0.21/go.mod 
h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg= -github.com/google/certificate-transparency-go v1.1.1/go.mod h1:FDKqPvSXawb2ecErVRrD+nfy23RCzyl7eqVCEmlT1Zs= -github.com/google/certificate-transparency-go v1.1.2-0.20210422104406-9f33727a7a18/go.mod h1:6CKh9dscIRoqc2kC6YUFICHZMT9NrClyPrRVFrdw1QQ= -github.com/google/certificate-transparency-go v1.1.2-0.20210512142713-bed466244fa6/go.mod h1:aF2dp7Dh81mY8Y/zpzyXps4fQW5zQbDu2CxfpJB6NkI= -github.com/google/certificate-transparency-go v1.1.2/go.mod h1:3OL+HKDqHPUfdKrHVQxO6T8nDLO0HF7LRTlkIWXaWvQ= -github.com/google/certificate-transparency-go v1.1.4 h1:hCyXHDbtqlr/lMXU0D4WgbalXL0Zk4dSWWMbPV8VrqY= -github.com/google/certificate-transparency-go v1.1.4/go.mod h1:D6lvbfwckhNrbM9WVl1EVeMOyzC19mpIjMOI4nxBHtQ= -github.com/google/go-attestation v0.3.2/go.mod h1:N0ADdnY0cr7eLJyZ75o8kofGGTUF2XrZTJuTPo5acwk= -github.com/google/go-attestation v0.4.4-0.20220404204839-8820d49b18d9/go.mod h1:KDsPHk8a2MX9g20kYSdxB21t7je5NghSaFeVn0Zu3Ao= +github.com/google/certificate-transparency-go v1.1.6 h1:SW5K3sr7ptST/pIvNkSVWMiJqemRmkjJPPT0jzXdOOY= +github.com/google/certificate-transparency-go v1.1.6/go.mod h1:0OJjOsOk+wj6aYQgP7FU0ioQ0AJUmnWPFMqTjQeazPQ= github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= @@ -564,120 +374,78 @@ github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/ github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE= -github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-github/v28 v28.1.1/go.mod h1:bsqJWQX05omyWVmc00nEUql9mhQyv38lDZ8kPZcQVoM= -github.com/google/go-licenses v0.0.0-20210329231322-ce1d9163b77d/go.mod h1:+TYOmkVoJOpwnS0wfdsJCV9CoD5nJYsHoFk/0CrTK4M= -github.com/google/go-querystring v1.0.0/go.mod h1:odCYkC5MyYFN7vkCjXpyrEuKhc/BUO6wN/zVPAxq5ck= -github.com/google/go-replayers/grpcreplay v0.1.0/go.mod h1:8Ig2Idjpr6gifRd6pNVggX6TC1Zw6Jx74AKp7QNH2QE= -github.com/google/go-replayers/httpreplay v0.1.0/go.mod h1:YKZViNhiGgqdBlUbI2MwGpq4pXxNmhJLPHQ7cv2b5no= -github.com/google/go-tpm v0.1.2-0.20190725015402-ae6dd98980d4/go.mod h1:H9HbmUG2YgV/PHITkO7p6wxEEj/v5nlsVWIwumwH2NI= -github.com/google/go-tpm v0.3.0/go.mod h1:iVLWvrPp/bHeEkxTFi9WG6K9w0iy2yIszHwZGHPbzAw= -github.com/google/go-tpm v0.3.2/go.mod h1:j71sMBTfp3X5jPHz852ZOfQMUOf65Gb/Th8pRmp7fvg= -github.com/google/go-tpm v0.3.3 h1:P/ZFNBZYXRxc+z7i5uyd8VP7MaDteuLZInzrH2idRGo= -github.com/google/go-tpm v0.3.3/go.mod h1:9Hyn3rgnzWF9XBWVk6ml6A6hNkbWjNFlDQL51BeghL4= -github.com/google/go-tpm-tools v0.0.0-20190906225433-1614c142f845/go.mod h1:AVfHadzbdzHo54inR2x1v640jdi1YSi3NauM2DUsxk0= -github.com/google/go-tpm-tools v0.2.0/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4= -github.com/google/go-tpm-tools v0.2.1/go.mod h1:npUd03rQ60lxN7tzeBJreG38RvWwme2N1reF/eeiBk4= -github.com/google/go-tpm-tools v0.3.1/go.mod h1:PSg+r5hSZI5tP3X7LBQx2sW1VSZUqZHBSrKyDqrB21U= -github.com/google/go-tpm-tools v0.3.9/go.mod h1:22JvWmHcD5w55cs+nMeqDGDxgNS15/2pDq2cLqnc3rc= 
-github.com/google/go-tpm-tools v0.3.12 h1:hpWglH4RaZnGVbgOK3IThI5K++jnFvjQ94EIN34xrUU= -github.com/google/go-tspi v0.2.1-0.20190423175329-115dea689aad/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI= +github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= +github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-tpm v0.9.0 h1:sQF6YqWMi+SCXpsmS3fd21oPy/vSddwZry4JnmltHVk= +github.com/google/go-tpm v0.9.0/go.mod h1:FkNVkc6C+IsvDI9Jw1OveJmxGZUUaKxtrpOS47QWKfU= +github.com/google/go-tpm-tools v0.4.1 h1:gYU6iwRo0tY3V6NDnS6m+XYog+b3g6YFhHQl3sYaUL4= +github.com/google/go-tpm-tools v0.4.1/go.mod h1:w03m0jynhTo7puXTYoyfpNOMqyQ9SB7sixnKWsS/1L0= github.com/google/go-tspi v0.3.0 h1:ADtq8RKfP+jrTyIWIZDIYcKOMecRqNJFOew2IT0Inus= github.com/google/go-tspi v0.3.0/go.mod h1:xfMGI3G0PhxCdNVcYr1C4C+EizojDg/TXuX5by8CiHI= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/licenseclassifier v0.0.0-20210325184830-bb04aff29e72/go.mod h1:qsqn2hxC+vURpyBRygGUuinTO42MFRLcsmQ/P8v94+M= github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= -github.com/google/martian v2.1.1-0.20190517191504-25dcb96d9e51+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.2.1/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200212024743-f11f1df84d12/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200229191704-1ebb73c60ed3/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= -github.com/google/pprof v0.0.0-20200507031123-427632fa3b1c/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= -github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1 h1:K6RDEckDVWvDI9JAJYCmNdQXq6neHJOYx3V6jnqNEec= github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE= 
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= -github.com/google/rpmpack v0.0.0-20191226140753-aa36bfddb3a0/go.mod h1:RaTPr0KUf2K7fnZYLNDrr8rxAamWs3iNywJLtQ2AzBg= -github.com/google/s2a-go v0.1.4 h1:1kZ/sQM3srePvKs3tXAvQzo66XfcReoqFpIpIccE7Oc= -github.com/google/subcommands v1.0.1/go.mod h1:ZjhPrFU+Olkh9WazFPsl27BQ4UPiG37m3yTrtFlrHVk= -github.com/google/trillian v1.3.11/go.mod h1:0tPraVHrSDkA3BO6vKX67zgLXs6SsOAbHEivX+9mPgw= -github.com/google/trillian v1.3.14-0.20210409160123-c5ea3abd4a41/go.mod h1:1dPv0CUjNQVFEDuAUFhZql16pw/VlPgaX8qj+g5pVzQ= -github.com/google/trillian v1.3.14-0.20210511103300-67b5f349eefa/go.mod h1:s4jO3Ai4NSvxucdvqUHON0bCqJyoya32eNw6XJwsmNc= -github.com/google/trillian v1.4.0/go.mod h1:1Bja2nEgMDlEJWWRXBUemSPG9qYw84ZYX2gHRVHlR+g= -github.com/google/uuid v0.0.0-20161128191214-064e2069ce9c/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/s2a-go v0.1.7 h1:60BLSyTrOV4/haCDW4zb1guZItoSq8foHCXrAnjBo/o= +github.com/google/s2a-go v0.1.7/go.mod h1:50CgR4k1jNlWBu4UfS4AcfhVe1r6pdZPygJ3R8F0Qdw= github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/wire v0.3.0/go.mod h1:i1DMg/Lu8Sz5yYl25iOdmc5CT5qusaa+zmRWs16741s= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/enterprise-certificate-proxy v0.2.5 h1:UR4rDjcgpgEnqpIEvkiqTYKBCKLNmlge2eVjoZfySzM= -github.com/googleapis/gax-go v2.0.2+incompatible h1:silFMLAnr330+NRuag/VjIGF7TLp/LBrV2CJKFLWEww= -github.com/googleapis/gax-go v2.0.2+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= +github.com/googleapis/enterprise-certificate-proxy v0.2.5/go.mod h1:RxW0N9901Cko1VOCW3SXCpWP+mlIEkk2tP7jnHy9a3w= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= github.com/googleapis/gax-go/v2 v2.12.0 h1:A+gCJKdRfqXkr+BIRGtZLibNXf0m1f9E4HG56etFpas= +github.com/googleapis/gax-go/v2 v2.12.0/go.mod h1:y+aIqrI5eb1YGMVJfuV3185Ts/D7qKpsEkdD5+I6QGU= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= -github.com/gordonklaus/ineffassign v0.0.0-20200309095847-7953dde2c7bf/go.mod h1:cuNKsD1zp2v6XfE/orVX2QE1LC+i254ceGcVeDT3pTU= -github.com/goreleaser/goreleaser v0.134.0/go.mod h1:ZT6Y2rSYa6NxQzIsdfWWNWAlYGXGbreo66NmE+3X3WQ= -github.com/goreleaser/nfpm v1.2.1/go.mod h1:TtWrABZozuLOttX2uDlYyECfQX7x5XYkVxhjYcR6G9w= github.com/gorilla/context v0.0.0-20160226214623-1ea25387ff6f/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/context v1.1.1/go.mod h1:kBGZzfjB9CEq2AlWe17Uuf7NDRt0dE0s8S51q0aT7Yg= github.com/gorilla/mux v1.4.0/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.6.2/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= github.com/gorilla/mux v1.7.3/go.mod h1:1lud6UwP+6orDFRuTfBEV8e9/aOM/c4fVVCaMa2zaAs= -github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI= github.com/gorilla/mux 
v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So= +github.com/gorilla/mux v1.8.1 h1:TuBL49tXwgrFYWhqrNgrUNEY92u81SPhu7sTdzQEiWY= +github.com/gorilla/mux v1.8.1/go.mod h1:AKf9I4AEqPTmMytcMc0KkNouC66V3BtZ4qD5fmWSiMQ= github.com/gorilla/schema v1.2.0 h1:YufUaxZYCKGFuAq3c96BOhjgd5nmXiOY9NGzF247Tsc= github.com/gorilla/schema v1.2.0/go.mod h1:kgLaKoK1FELgZqMAVxx/5cbj0kT+57qxUrAlIO2eleU= github.com/gorilla/securecookie v1.1.1 h1:miw7JPhV+b/lAHSXz4qd/nN9jRiAFV5FwjeKyCS8BvQ= github.com/gorilla/securecookie v1.1.1/go.mod h1:ra0sb63/xPlUeL+yeDciTfxMRAA+MP+HVt/4epWDjd4= github.com/gorilla/websocket v0.0.0-20170926233335-4201258b820c/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.0/go.mod h1:E7qHFY5m1UJ88s3WnNqhKjPHQ0heANvMoAMk2YaljkQ= -github.com/gorilla/websocket v1.4.1/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= -github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc= -github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= +github.com/gorilla/websocket v1.5.1 h1:gmztn0JnHVt9JZquRuzLw3g4wouNVzKL15iLr/zn/QY= +github.com/gorilla/websocket v1.5.1/go.mod h1:x3kM2JMyaluk02fnUJpQuwD2dCS5NDG2ZHL0uE0tcaY= github.com/groob/finalizer v0.0.0-20170707115354-4c2ed49aabda/go.mod h1:MyndkAZd5rUMdNogn35MWXBX1UiBigrU8eTj8DoAC2c= -github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de4/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= -github.com/grpc-ecosystem/go-grpc-middleware v1.2.2/go.mod h1:EaizFBKfUKtMIF5iaDEhniwNedqGo9FuLFzppDr3uwI= -github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y= github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= -github.com/grpc-ecosystem/grpc-gateway v1.8.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.2/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.12.1/go.mod h1:8XEsbTttt/W+VvjtQhLACqCisSPWTxCZ7sBRjU6iH9c= -github.com/grpc-ecosystem/grpc-gateway v1.14.6/go.mod h1:zdiPV4Yse/1gnckTHtghG4GkDEdKCRJduHpTxT3/jcw= github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0= -github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0 h1:RtRsiaGvWxcwd8y3BiRZxsylPT8hLWZ5SPcfI+3IDNk= +github.com/grpc-ecosystem/grpc-gateway/v2 v2.18.0/go.mod h1:TzP6duP4Py2pHLVPPQp42aoYI92+PCrVotyR5e8Vqlk= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= github.com/hashicorp/consul/api v1.3.0/go.mod h1:MmDNSzIMUjNpY/mQ398R4bk2FnqQLoPndWW5VkKPlCE= github.com/hashicorp/consul/sdk v0.1.1/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/consul/sdk v0.3.0/go.mod h1:VKf9jXwCTEY1QZP2MOLRhb5i/I/ssyNV1vwHyQBF0x8= github.com/hashicorp/errwrap v1.0.0/go.mod 
h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4= github.com/hashicorp/go-cleanhttp v0.5.1/go.mod h1:JpRdi6/HCYpAwUzNwuwqhbovhLtngrth3wmdIIUrZ80= -github.com/hashicorp/go-hclog v0.9.2/go.mod h1:5CU+agLiy3J7N7QjHK5d05KxGsuXiQLrjA0H7acj2lQ= github.com/hashicorp/go-immutable-radix v1.0.0/go.mod h1:0y9vanUI8NX6FsYoO3zeMjhV/C5i9g4Q3DwcSNZ4P60= github.com/hashicorp/go-msgpack v0.5.3/go.mod h1:ahLV/dePpqEmjfWmKiqvPkv/twdG7iPBM1vqhUKIvfM= github.com/hashicorp/go-multierror v1.0.0/go.mod h1:dHtQlpGsu+cZNNAkkCN/P3hoUDHhCYQXV3UM06sGGrk= -github.com/hashicorp/go-retryablehttp v0.6.4/go.mod h1:vAew36LZh98gCBJNLH42IQ1ER/9wtLZZ8meHqQvEYWY= github.com/hashicorp/go-rootcerts v1.0.0/go.mod h1:K6zTfqpRlCUIjkwsN4Z+hiSfzSTQa6eBIzfwKfwNnHU= github.com/hashicorp/go-sockaddr v1.0.0/go.mod h1:7Xibr9yA9JjQq1JpNB2Vw7kxv8xerXegt+ozgdvDeDU= github.com/hashicorp/go-syslog v1.0.0/go.mod h1:qPfqrKkXGihmCqbJM2mZgkZGvKG1dFdvsLplgctolz4= @@ -693,23 +461,19 @@ github.com/hashicorp/mdns v1.0.0/go.mod h1:tL+uN++7HEJ6SQLQ2/p+z2pH24WQKWjBPkE0m github.com/hashicorp/memberlist v0.1.3/go.mod h1:ajVTdAv/9Im8oMAAj5G31PhhMCZJV2pPBoIllUwCN7I= github.com/hashicorp/serf v0.8.2/go.mod h1:6hOLApaqBFA1NXqRQAsxw9QxuDEvNxSQRwA/JwenrHc= github.com/hexops/gotextdiff v1.0.3 h1:gitA9+qJrrTCsiCl7+kh75nPqQt1cx4ZkudSTLoUqJM= +github.com/hexops/gotextdiff v1.0.3/go.mod h1:pSWU5MAI3yDq+fZBTazCSJysOMbxWL1BSow5/V2vxeg= github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= -github.com/huandu/xstrings v1.0.0/go.mod h1:4qWG/gcEcfX4z/mBDHJ++3ReCw9ibxbsNJbcucJdbSo= -github.com/huandu/xstrings v1.2.0/go.mod h1:DvyZB1rfVYsBIigL8HwpZgxHwXozlTgGqn63UyNX5k4= -github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= -github.com/huandu/xstrings v1.3.3 h1:/Gcsuc1x8JVbJ9/rlye4xZnVAbEkGauT8lbebqcQws4= github.com/huandu/xstrings v1.3.3/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= +github.com/huandu/xstrings v1.4.0 h1:D17IlohoQq4UcpqD7fDk80P7l+lwAmlFaBHgOipl2FU= +github.com/huandu/xstrings v1.4.0/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/hudl/fargo v1.3.0/go.mod h1:y3CKSmjA+wD2gak7sUSXTAoopbhU08POFhmITJgmKTg= github.com/iancoleman/strcase v0.2.0 h1:05I4QRnGpI0m37iZQRuskXh+w77mr6Z41lwQzuHLwW0= github.com/iancoleman/strcase v0.2.0/go.mod h1:iwCmte+B7n89clKwxIoIXy/HfoL7AsD47ZCWhYzw7ho= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= -github.com/imdario/mergo v0.3.4/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.8/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= -github.com/imdario/mergo v0.3.9/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= -github.com/imdario/mergo v0.3.12 h1:b6R2BslTbIEToALKP7LxUvijTsNI9TAe80pLWN2g/HU= -github.com/imdario/mergo v0.3.12/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= +github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk= +github.com/imdario/mergo v0.3.13/go.mod h1:4lJ1jqUDcsbIECGy0RUJAXNIhg+6ocWgb1ALK2O4oXg= github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= github.com/inconshreveable/mousetrap v1.1.0/go.mod 
h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw= @@ -768,75 +532,49 @@ github.com/jackc/puddle v1.1.3/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dv github.com/jackc/puddle v1.3.0/go.mod h1:m4B5Dj62Y0fbyuIc15OsIqK0+JU8nkqQjsgx7dvjSWk= github.com/jaevor/go-nanoid v1.3.0 h1:nD+iepesZS6pr3uOVf20vR9GdGgJW1HPaR46gtrxzkg= github.com/jaevor/go-nanoid v1.3.0/go.mod h1:SI+jFaPuddYkqkVQoNGHs81navCtH388TcrH0RqFKgY= -github.com/jarcoal/httpmock v1.0.5/go.mod h1:ATjnClrvW/3tijVmpL/va5Z3aAyGvqU3gCT8nX0Txik= -github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo= github.com/jedib0t/go-pretty/v6 v6.4.3 h1:2n9BZ0YQiXGESUSR+6FLg0WWWE80u+mIz35f0uHWcIE= github.com/jedib0t/go-pretty/v6 v6.4.3/go.mod h1:MgmISkTWDSFu0xOqiZ0mKNntMQ2mDgOcwOkwBEkMDJI= github.com/jeremija/gosubmit v0.2.7 h1:At0OhGCFGPXyjPYAsCchoBUhE099pcBXmsb4iZqROIc= +github.com/jeremija/gosubmit v0.2.7/go.mod h1:Ui+HS073lCFREXBbdfrJzMB57OI/bdxTiLtrDHHhFPI= github.com/jessevdk/go-flags v1.4.0/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI= github.com/jessevdk/go-flags v1.5.0 h1:1jKYvbxEjfUl0fmqTCOfonvskHHXMjBySTLW4y9LFvc= github.com/jessevdk/go-flags v1.5.0/go.mod h1:Fw0T6WPc1dYxT4mKEZRfG5kJhaTDP9pj1c2EWnYs/m4= -github.com/jhump/protoreflect v1.6.1/go.mod h1:RZQ/lnuN+zqeRVpQigTwO6o0AJUkxbnSnpuG7toUTG4= -github.com/jhump/protoreflect v1.8.2/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jhump/protoreflect v1.9.0/go.mod h1:7GcYQDdMU/O/BBrl/cX6PNHpXh6cenjd8pneu5yW7Tg= -github.com/jmespath/go-jmespath v0.0.0-20160202185014-0b12d6b521d8/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.0.0-20180206201540-c2b33e8439af/go.mod h1:Nht3zPeWKUH0NzdCt2Blrr5ys8VGpn0CEB0cQHVjt7k= github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9YPoQUg= github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo= -github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U= github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g= github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ= -github.com/joho/godotenv v1.3.0/go.mod h1:7hK45KPybAkOC6peb+G5yklZfMxEjkZhHbwpqxOKXbg= github.com/jonboulle/clockwork v0.1.0/go.mod h1:Ii8DK3G1RaLaWxj9trq07+26W01tbo22gdxWY5EU2bo= -github.com/jonboulle/clockwork v0.2.0/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= -github.com/jonboulle/clockwork v0.2.2/go.mod h1:Pkfl5aHPm1nk2H9h0bjmnJD/BcgbGXUBGnn1kMkgxc8= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= -github.com/jpillora/backoff v0.0.0-20180909062703-3050d21c67d7/go.mod h1:2iMrUgbbvHEiQClaW2NsSzMyGHqN+rDFqY705q49KG0= -github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4= github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= github.com/json-iterator/go v1.1.7/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.8/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= -github.com/json-iterator/go 
v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= -github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU= github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk= github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU= -github.com/juju/ratelimit v1.0.1/go.mod h1:qapgC/Gy+xNh9UxzV13HGGl/6UXNN+ct+vwSgWNm/qk= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= -github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM= -github.com/karrick/godirwalk v1.8.0/go.mod h1:H5KPZjojv4lE+QYImBI8xVtrBRgYrIVsaRPx4tDPEn4= -github.com/karrick/godirwalk v1.10.3/go.mod h1:RoGL9dQei4vP9ilrpETWE8CLOZ1kiN0LhBygSwrAsHA= -github.com/karrick/godirwalk v1.16.1 h1:DynhcF+bztK8gooS0+NDJFrdNZjJ3gzVzC545UNA9iw= -github.com/karrick/godirwalk v1.16.1/go.mod h1:j4mkqPuvaLI8mp1DroR3P6ad7cyYd4c1qeJ3RV7ULlk= github.com/kataras/go-events v0.0.3 h1:o5YK53uURXtrlg7qE/vovxd/yKOJcLuFtPQbf1rYMC4= github.com/kataras/go-events v0.0.3/go.mod h1:bFBgtzwwzrag7kQmGuU1ZaVxhK2qseYPQomXoVEMsj4= -github.com/kevinburke/ssh_config v0.0.0-20190725054713-01f96b0aa0cd/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM= github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= -github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.10.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/compress v1.12.3/go.mod h1:8dP1Hq4DHOhN9w426knH3Rhby4rFm6D8eO+e+Dq5Gzg= github.com/klauspost/compress v1.13.6/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk= -github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= -github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= +github.com/klauspost/compress v1.17.0 h1:Rnbp4K9EjcDuVuHtd0dgA4qNuv9yKDYKK1ulpJwgrqM= +github.com/klauspost/compress v1.17.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/klauspost/cpuid/v2 v2.0.12/go.mod h1:g2LTdtYhdyuGPqyWyv7qRAmj1WBqxuObKfj5c0PQa7c= github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg= github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= -github.com/kortschak/utter v1.0.1/go.mod h1:vSmSjbyrlKjjsL71193LmzBOKgwePk9DH6uFaWHIInc= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= github.com/kr/pretty v0.2.0/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= -github.com/kr/pretty v0.2.1/go.mod 
h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE= +github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk= github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= @@ -845,18 +583,12 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw= github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg= github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k= -github.com/leodido/go-urn v1.1.0/go.mod h1:+cyI34gQWZcE1eQU7NVgKkkzdXDQHr1dBMtdAPozLkw= -github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= -github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= -github.com/letsencrypt/pkcs11key/v4 v4.0.0/go.mod h1:EFUvBDay26dErnNb70Nd0/VW3tJiIbETBPTl9ATXQag= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.1.1/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= -github.com/lib/pq v1.8.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= -github.com/lib/pq v1.10.2 h1:AqzbZs4ZoCBp+GtejcpCpcxM3zlSMx29dXbUSeVtJb8= github.com/lib/pq v1.10.2/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw= +github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= github.com/libdns/libdns v0.2.1 h1:Wu59T7wSHRgtA0cfxC+n1c/e+O3upJGWytknkmFEDis= github.com/libdns/libdns v0.2.1/go.mod h1:yQCXzk1lEZmmCPa857bnk4TsOiqYasqpyOEeSObbb40= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= @@ -868,69 +600,49 @@ github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a h1:N9zuLhTvBSRt0g github.com/lufia/plan9stats v0.0.0-20230326075908-cb1d2100619a/go.mod h1:JKx41uQRwqlTZabZc+kILPrO/3jlKnQ2Z8b7YiVw5cE= github.com/lyft/protoc-gen-validate v0.0.13/go.mod h1:XbGvPuh87YZc5TdIa2/I4pLk0QoUACkjt2znoq26NVQ= github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= -github.com/magiconair/properties v1.8.1/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ= github.com/magiconair/properties v1.8.5/go.mod h1:y3VJvCyxH9uVvJTWEGAELF3aiYNyPKd5NZ3oSwXrF60= github.com/mailru/easyjson v0.0.0-20190614124828-94de47d64c63/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN94OJF2b5HbByQZoLdCWB1Yqtg26g4irojpc= -github.com/mailru/easyjson v0.7.6/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod 
h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= -github.com/markbates/errx v1.1.0 h1:QDFeR+UP95dO12JgW+tgi2UVfo0V8YBHiUIOaeBPiEI= -github.com/markbates/errx v1.1.0/go.mod h1:PLa46Oex9KNbVDZhKel8v1OT7hD5JZ2eI7AHhA0wswc= -github.com/markbates/oncer v0.0.0-20181203154359-bf2de49a0be2/go.mod h1:Ld9puTsIW75CHf65OeIOkyKbteujpZVXDpWK6YGZbxE= -github.com/markbates/oncer v1.0.0 h1:E83IaVAHygyndzPimgUYJjbshhDTALZyXxvk9FOlQRY= -github.com/markbates/oncer v1.0.0/go.mod h1:Z59JA581E9GP6w96jai+TGqafHPW+cPfRxz2aSZ0mcI= -github.com/markbates/safe v1.0.1 h1:yjZkbvRM6IzKj9tlu/zMJLS0n/V351OZWRnF3QfaUxI= -github.com/markbates/safe v1.0.1/go.mod h1:nAqgmRi7cY2nqMc92/bSEeQA+R4OheNU2T1kNSCBdG0= github.com/mastercactapus/proxyprotocol v0.0.4 h1:qSY75IZF30ZqIU9iW1ip3I7gTnm8wRAnGWqPxCBVgq0= github.com/mastercactapus/proxyprotocol v0.0.4/go.mod h1:X8FRVEDZz9FkrIoL4QYTBF4Ka4ELwTv0sah0/5NxCPw= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU= github.com/mattn/go-colorable v0.1.1/go.mod h1:FuOcm+DKB9mbwrcAfNl7/TZVBZ6rcnceauSikq3lYCQ= github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= -github.com/mattn/go-colorable v0.1.4/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE= github.com/mattn/go-colorable v0.1.6/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc= github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA= github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg= -github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc= github.com/mattn/go-isatty v0.0.3/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNxMWT7Zi4= github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s= github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ= -github.com/mattn/go-isatty v0.0.10/go.mod h1:qgIWMr58cqv1PHHyhnkY9lrL7etaEgOFcMEpPG5Rm84= -github.com/mattn/go-isatty v0.0.11/go.mod h1:PhnuNfih5lzO57/f3n+odYbM4JtupLOxQOAqxQCu2WE= github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94= github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= -github.com/mattn/go-isatty v0.0.18 h1:DOKFKCQ7FNG2L1rbrmstDN4QVRdS89Nkh85u68Uwp98= -github.com/mattn/go-isatty v0.0.18/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= +github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY= +github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y= github.com/mattn/go-localereader v0.0.1 h1:ygSAOl7ZXTx4RdPYinUpg6W99U8jWvWi9Ye2JC/oIi4= github.com/mattn/go-localereader v0.0.1/go.mod h1:8fBrzywKY7BI3czFoHkuzRoWE9C+EiG4R1k4Cjx5p88= -github.com/mattn/go-oci8 v0.1.1/go.mod h1:wjDx6Xm9q7dFtHJvIlrI99JytznLw5wQ4R+9mNXJwGI= 
github.com/mattn/go-runewidth v0.0.2/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.4/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU= -github.com/mattn/go-runewidth v0.0.6/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.7/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= -github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI= github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.12/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk= github.com/mattn/go-runewidth v0.0.13/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU= github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w= -github.com/mattn/go-shellwords v1.0.10/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= -github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/mattn/go-sqlite3 v1.14.6/go.mod h1:NyWgC/yNuGj7Q9rpYnZvas74GogHl5/Z4A/KQRfk6bU= -github.com/mattn/go-sqlite3 v1.14.15 h1:vfoHhTN1af61xCRSWzFIWzx2YskyMTwHLrExkBOjvxI= -github.com/mattn/go-sqlite3 v1.14.15/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= -github.com/mattn/go-zglob v0.0.1/go.mod h1:9fxibJccNxU2cnpIKLRRFA7zX7qhkJIQWBb449FYHOo= -github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y= +github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo= +github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d h1:5PJl274Y63IEHC+7izoQE9x6ikvDFZS2mDVS3drnohI= github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE= @@ -945,12 +657,9 @@ github.com/micromdm/scep/v2 v2.1.0/go.mod h1:BkF7TkPPhmgJAMtHfP+sFTKXmgzNJgLQlvv github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg= github.com/miekg/dns v1.1.55 h1:GoQ4hpsj0nFLYe+bWiCToyrBEJXkQfOOIvFGFy0lEgo= github.com/miekg/dns v1.1.55/go.mod h1:uInx36IzPl7FYnDcMeVWxj9byh7DutNykX4G9Sj60FY= -github.com/miekg/pkcs11 v1.0.2/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= -github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU= github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs= github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc= -github.com/mitchellh/cli v1.1.2/go.mod h1:6iaV0fGdElS6dPBx0EApTxHrcWvmJphyh2n8YBLPPZ4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= github.com/mitchellh/copystructure v1.2.0 h1:vpKXTN4ewci03Vljg/q9QvCGUDttBOGBIa15WveJJGw= github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= 
@@ -963,22 +672,16 @@ github.com/mitchellh/gox v0.4.0/go.mod h1:Sd9lOJ0+aimLBi73mGofS1ycjY8lL3uZM3JPS4 github.com/mitchellh/iochan v1.0.0/go.mod h1:JwYml1nuB7xOzsp52dPpHFffvOCDupsG0QubkSMEySY= github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= -github.com/mitchellh/mapstructure v1.3.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.1/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ= github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= -github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= -github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M= -github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= -github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826/go.mod h1:TaXosZuwdSHYgviHp1DAtfrULt5eUgsSMsZf+YrPgl8= github.com/montanaflynn/stats v0.0.0-20171201202039-1bf9dbcd8cbe/go.mod h1:wL8QJuTMNUDYhXwkmfOly8iTdp5TEcJFWZD2D7SIkUc= github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b h1:1XF24mVaiu7u+CFywTdcDo2ie1pzzhwjt6RHqzpMU34= github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho= @@ -994,10 +697,9 @@ github.com/muesli/termenv v0.13.0 h1:wK20DRpJdDX8b7Ek2QfhvqhRQFZ237RGRO0RQ/Iqdy0 github.com/muesli/termenv v0.13.0/go.mod h1:sP1+uffeLaEYpyOTb8pLCUctGcGLnoFjSn4YJK5e2bc= github.com/muhlemmer/gu v0.3.1 h1:7EAqmFrW7n3hETvuAdmFmn4hS8W+z3LgKtrnow+YzNM= github.com/muhlemmer/gu v0.3.1/go.mod h1:YHtHR+gxM+bKEIIs7Hmi9sPT3ZDUvTN/i88wQpZkrdM= +github.com/muhlemmer/httpforwarded v0.1.0 h1:x4DLrzXdliq8mprgUMR0olDvHGkou5BJsK/vWUetyzY= +github.com/muhlemmer/httpforwarded v0.1.0/go.mod h1:yo9czKedo2pdZhoXe+yDkGVbU0TJ0q9oQ90BVoDEtw0= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/mwitkow/go-proto-validators v0.0.0-20180403085117-0950a7990007/go.mod h1:m2XC9Qq0AlmmVksL6FktJCdTYyLk7V3fKyp0sl1yWQo= -github.com/mwitkow/go-proto-validators v0.2.0/go.mod h1:ZfA1hW+UH/2ZHOWvQ3HnQaU0DtnpXu850MZiy+YUgcc= github.com/nats-io/jwt v0.3.0/go.mod h1:fRYCDE99xlTsqUzISS1Bi75UBJ6ljOJQOAAu5VglpSg= github.com/nats-io/jwt v0.3.2/go.mod 
h1:/euKqTS1ZD+zzjYrY7pseZrTtWQSjujC7xjPc8wL6eU= github.com/nats-io/nats-server/v2 v2.1.2/go.mod h1:Afk+wRZqkMQs/p45uXdrVLuab3gwv3Z8C4HTBu8GD/k= @@ -1006,8 +708,6 @@ github.com/nats-io/nkeys v0.1.0/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxzi github.com/nats-io/nkeys v0.1.3/go.mod h1:xpnFELMwJABBLVhffcfd1MZx6VsNRFpEugbxziKVo7w= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= -github.com/nishanths/predeclared v0.0.0-20190419143655-18a43bb90ffc/go.mod h1:62PewwiQTlm/7Rj+cxVYqZvDIUc+JjZq6GHAC1fsObQ= -github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= @@ -1016,24 +716,20 @@ github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQ github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.1/go.mod h1:vsDQFd/mU46D+Z4whnwzcISnGGzXWMclvtLoiIKAKIo= -github.com/olekukonko/tablewriter v0.0.2/go.mod h1:rSAaSIOAGT9odnlyGlUfAJaoc5w2fSBUmeGDbRWPxyQ= -github.com/olekukonko/tablewriter v0.0.4/go.mod h1:zq6QwlOf5SlnkVbMSr5EoBv3636FWnp+qbPhuoO21uA= -github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY= github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= -github.com/onsi/ginkgo v1.10.3/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.15.0/go.mod h1:hF8qUzuuC8DJGygJH3726JnCZX4MYbRB8yFfISqnKUg= github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/ginkgo/v2 v2.9.5 h1:+6Hr4uxzP4XIUyAkg61dWBw8lb/gc4/X5luuxN/EC+Q= github.com/onsi/ginkgo/v2 v2.9.5/go.mod h1:tvAoo1QUJwNEU2ITftXTpR7R1RbCzoZUOs3RonqW57k= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= -github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48= github.com/onsi/gomega v1.27.6 h1:ENqfyGeS5AX/rlXDd/ETokDz93u0YufY1Pgxuy/PvWE= +github.com/onsi/gomega v1.27.6/go.mod h1:PIQNjfQwkP3aQAH7lf7j87O/5FiNr+ZR8+ipb+qQlhg= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/opentracing-contrib/go-observer v0.0.0-20170622124052-a52f23424492/go.mod h1:Ngi6UdF0k5OKD5t5wlmGhe/EDKPoUM3BXZSSfIuJbis= github.com/opentracing/basictracer-go v1.0.0/go.mod h1:QfBfYuafItcjQuMwinw9GhYKwFXS9KnPs5lxoYwgW74= @@ -1045,47 +741,40 @@ 
github.com/openzipkin-contrib/zipkin-go-opentracing v0.4.5/go.mod h1:/wsWhb9smxS github.com/openzipkin/zipkin-go v0.1.6/go.mod h1:QgAqvLzwWbR/WpD4A3cGpPtJrZXNIiJc5AZX7/PBEpw= github.com/openzipkin/zipkin-go v0.2.1/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= github.com/openzipkin/zipkin-go v0.2.2/go.mod h1:NaW6tEwdmWMaCDZzg8sh+IBNOxHMPnhQw8ySjnjRyN4= -github.com/openziti/channel/v2 v2.0.91 h1:HKD7uniGOfiQfbzaP1AJ0JGvls7oejC+fMj6gLJOlW8= -github.com/openziti/channel/v2 v2.0.91/go.mod h1:2uJzEV3pX4soPPl07l8ScONNGuSQTQzUqAYkgATA3Rk= -github.com/openziti/edge-api v0.25.33 h1:5XaQvUKeG8ZZ3WLhr/8xqZn56p53ZxWmFooR6I/xrvQ= -github.com/openziti/edge-api v0.25.33/go.mod h1:T+g7OHoj2KQi3SrSdgbbFoQdknLrehEIlZRGxpTYObI= +github.com/openziti/channel/v2 v2.0.119 h1:stfSrnDqoTi78LMvQA3+NSivHjQnRrYKrgij5NaOENI= +github.com/openziti/channel/v2 v2.0.119/go.mod h1:lSRJwqmbkE34DgXYEmUhVCzwcQcx65vZGE8nuBNK458= +github.com/openziti/edge-api v0.26.10 h1:LEDuJHZsExi0PBVO9iVuIdZWJ7eFo/i4TJhXoSFmfOU= +github.com/openziti/edge-api v0.26.10/go.mod h1:FQLjav9AfqxQYSL0xKPDZ/JWTSZXApkk7jM2/iczGXM= github.com/openziti/fabric v0.23.26 h1:wEPNh8m3qcq9sw1Zmg5YgFZw1FovsKGu53rRf8qzI7A= github.com/openziti/fabric v0.23.26/go.mod h1:0MtkZqIHs3cJPP4DB88xsWUemDm77nN/GvWBBfq7peo= -github.com/openziti/foundation/v2 v2.0.29 h1:E63p5/esqOJ/OSMePR3fKYHb3Wq2BR4PLkDFynESij8= -github.com/openziti/foundation/v2 v2.0.29/go.mod h1:MpXSCSn4MABvtIXzfTBFqhK5pNsNXHWnR8xxVrfxn0g= -github.com/openziti/identity v1.0.60 h1:6gvBXY9J6F7SbuksdxsUA1t1WmtsFfY61Oqm/00ijGU= -github.com/openziti/identity v1.0.60/go.mod h1:pUfQ1Rf6TJvpBULXKPAO4014Qd6g+uf6V/vqjUscipU= -github.com/openziti/metrics v1.2.31 h1:nEO9FAqCnpfuvxGYfy9/W46p2SaEcZ6vlRyz4fcipoY= -github.com/openziti/metrics v1.2.31/go.mod h1:SU2w3WW36O+ocVy+ka2UHA0cweTg3T8CQiqz4ZT4Umc= -github.com/openziti/sdk-golang v0.20.92 h1:dLN9MwM4oVfczad5wYtA2LMPa9xVZXIeSQM8lOu7uB0= -github.com/openziti/sdk-golang v0.20.92/go.mod h1:SRVFZTo8f5JmwytLi3gBhjLj9epzfCla2S3zI79qP90= -github.com/openziti/secretstream v0.1.10 h1:aLheoP6vVAv96mItwkXxWr9Ym0tTooJ5o9H1j2fAh04= -github.com/openziti/secretstream v0.1.10/go.mod h1:HrS6P9G0jjHNHuKESMaybNdxBHjD1b0SzxSi0rDzshY= +github.com/openziti/foundation/v2 v2.0.37 h1:7pa4vWrlwllEoLXaK2rx91AffLQJ8k5pvc92oWANavA= +github.com/openziti/foundation/v2 v2.0.37/go.mod h1:2NxzCnJbMw35U9RrFcdEaiXdxIMfBHOUNPngpyhvKeY= +github.com/openziti/identity v1.0.70 h1:JNwtJHmIS0DcXookm2xuXyh4z92T1O21GQvuO8PmHWs= +github.com/openziti/identity v1.0.70/go.mod h1:jsKBL4G1BsmDSCIfhK4jha5B3Sevgy1jyZq0GtFKhSk= +github.com/openziti/metrics v1.2.45 h1:+3zqszLWyFdTgzbsQD90V0yJcC9Ek77qKaIGMQXkAXs= +github.com/openziti/metrics v1.2.45/go.mod h1:g6CgAEbFes2UtdfGrsR8AKkuoBVL5dkU61843uQvllM= +github.com/openziti/sdk-golang v0.22.28 h1:s159CT42dXug4GiJiN/kM6/ol+N2LFZ2tUk6bOpbgiI= +github.com/openziti/sdk-golang v0.22.28/go.mod h1:BLaLvcLqAgf3JFoDPWLTj3j3X5rndo6ZejdDdkMlihQ= +github.com/openziti/secretstream v0.1.16 h1:tVanF7OpJL1MJ1gvWaRlR2i+kAbrGsxr3q6EXFOS08U= +github.com/openziti/secretstream v0.1.16/go.mod h1:bvjGBUW/0e5MzD5S3FW3rhGASRNWAi+kTkTENZ9qRDE= github.com/openziti/storage v0.2.6 h1:/pbIRzDwrczMWRVkN75PfwAXFbArplIqhpRsUrsUOBc= github.com/openziti/storage v0.2.6/go.mod h1:JnjCofrnPcajwn6VIB2CgI7pVVUFBL7evbezIsQ4AgA= -github.com/openziti/transport/v2 v2.0.99 h1:+/DYNzaUrzSSaoKEBNFumXJsXMDEUNiWbWZfX48Z2vc= -github.com/openziti/transport/v2 v2.0.99/go.mod h1:bV9XKtxnmqW8crReZB2z+cJhSNLl6EsyoouTkyUY8mk= +github.com/openziti/transport/v2 v2.0.122 h1:XWwZ6JcSO1nvbZgfp6kdf8aR5LEEN343mpZlhSihirk= 
+github.com/openziti/transport/v2 v2.0.122/go.mod h1:07Ak2jMsyZmi7/ECxGNfMXk8cF1Vj5qtKj90FoAnK8A= github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c= github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM= -github.com/otiai10/copy v1.2.0/go.mod h1:rrF5dJ5F0t/EWSYODDu4j9/vEeYHMkc8jt0zJChqQWw= -github.com/otiai10/curr v0.0.0-20150429015615-9b4961190c95/go.mod h1:9qAhocn7zKJG+0mI8eUu6xqkFDYS2kb2saOteoSB3cE= -github.com/otiai10/curr v1.0.0/go.mod h1:LskTG5wDwr8Rs+nNQ+1LlxRjAtTZZjtJW4rMXl6j4vs= -github.com/otiai10/mint v1.3.0/go.mod h1:F5AjcsTsWUqX+Na9fpHb52P8pcRX2CI6A3ctIT91xUo= -github.com/otiai10/mint v1.3.1/go.mod h1:/yxELlJQ0ufhjUwhshSj+wFjZ78CnZ48/1wtmBH1OTc= github.com/pact-foundation/pact-go v1.0.4/go.mod h1:uExwJY4kCzNPcHRj+hCR/HBbOOIwwtUjcrb0b5/5kLM= github.com/parallaxsecond/parsec-client-go v0.0.0-20221025095442-f0a77d263cf9 h1:mOvehYivJ4Aqu2CPe3D3lv8jhqOI9/1o0THxJHBE0qw= github.com/parallaxsecond/parsec-client-go v0.0.0-20221025095442-f0a77d263cf9/go.mod h1:gLH27qo/dvMhLTVVyMELpe3Tut7sOfkiDg7ZpeqKwsw= github.com/pascaldekloe/goe v0.0.0-20180627143212-57f6aae5913c/go.mod h1:lzWF7FIEvWOWxwDKqyGYQf6ZUaNfKdP144TG7ZOy1lc= github.com/pborman/uuid v1.2.0/go.mod h1:X/NO0urCmaxf9VXbdlT7C2Yzkj2IKimNn4k+gtPdI/k= -github.com/pelletier/go-buffruneio v0.2.0/go.mod h1:JkE26KsDizTr40EUHkXVtNPvgGtbSNq5BcowyYOWdKo= github.com/pelletier/go-toml v1.2.0/go.mod h1:5z9KED0ma1S8pY6P1sdut58dfprrGBbd/94hg7ilaic= -github.com/pelletier/go-toml v1.7.0/go.mod h1:vwGMzjaWMwyfHwgIBhI2YUM4fB6nL6lVAvS1LBMMhTE= github.com/pelletier/go-toml v1.9.3/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/performancecopilot/speed v3.0.0+incompatible/go.mod h1:/CLtqpZ5gBg1M9iaPbIdPPGyKcA8hKdoy6hAWba7Yac= github.com/peterbourgon/diskv/v3 v3.0.1 h1:x06SQA46+PKIUftmEujdwSEpIx8kR+M9eLYsUxeYveU= +github.com/peterbourgon/diskv/v3 v3.0.1/go.mod h1:kJ5Ny7vLdARGU3WUuy6uzO6T0nb/2gWcT1JiBvRmb5o= github.com/pierrec/lz4 v1.0.2-0.20190131084431-473cd7ce01a1/go.mod h1:3/3N9NVKO0jef7pBehbT1qWhCMrIgbYNnFAZCqQ5LRc= github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY= -github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA= github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= @@ -1093,69 +782,44 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE github.com/pkg/profile v1.2.1/go.mod h1:hJw3o1OdXxsrSjjVksARp5W95eeEaEfptyVZyv6JUPA= github.com/pkg/profile v1.6.0/go.mod h1:qBsxPvzyUincmltOk6iyRVxHYg4adc0OFOv72ZdLa18= github.com/pkg/sftp v1.10.1/go.mod h1:lYOWFsE0bwd1+KfKJaKeuokY15vzFx25BLbzYYoAxZI= -github.com/pmezard/go-difflib v0.0.0-20151028094244-d8ed2627bdf0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/posener/complete v1.1.1/go.mod h1:em0nMJCgc9GFtwrmVmEMR/ZL6WyhyjMBndrE9hABlRI= github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b h1:0LFwY6Q3gMACTjAbMZBjXAqTOzOwFaj2Ld6cjeQ7Rig= 
github.com/power-devops/perfstat v0.0.0-20221212215047-62379fc7944b/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1 h1:oL4IBbcqwhhNWh31bjOX8C/OCy0zs9906d/VUru+bqg= -github.com/poy/onpar v0.0.0-20190519213022-ee068f8ea4d1/go.mod h1:nSbFQvMj97ZyhFRSJYtut+msi4sOY6zJDGCdSc+/rZU= +github.com/poy/onpar v1.1.2 h1:QaNrNiZx0+Nar5dLgTVp5mXkyoVFIbepjyEoGSnhbAY= +github.com/poy/onpar v1.1.2/go.mod h1:6X8FLNoxyr9kkmnlqpK6LSoiOtrO6MICtWwEuWkLjzg= github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= github.com/prometheus/client_golang v0.9.3-0.20190127221311-3c4408c8b829/go.mod h1:p2iRAGwDERtqlqzRXnrOVns+ignqQo//hLXqYxZYVNs= -github.com/prometheus/client_golang v0.9.3/go.mod h1:/TN21ttK/J9q6uSwhBd54HahCDft0ttaMvbicHlPoso= github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= github.com/prometheus/client_golang v1.3.0/go.mod h1:hJaj2vgQTGQmVCsAACORcieXFeDPbaTKGT+JTgUa3og= -github.com/prometheus/client_golang v1.5.1/go.mod h1:e9GMxYsXl05ICDXkRhurwBS4Q3OK1iX/F2sw+iXX5zU= -github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M= -github.com/prometheus/client_golang v1.10.0/go.mod h1:WJM3cc3yu7XKBKa/I8WeZm+V3eltZnBwfENSU7mdogU= -github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0= -github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY= -github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw= -github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y= +github.com/prometheus/client_golang v1.15.1 h1:8tXpTmJbyH5lydzFPoxSIJ0J46jdh3tylbvM1xCv0LI= +github.com/prometheus/client_golang v1.15.1/go.mod h1:e9yaBhRPU2pPNsZwE+JdQl0KEt1N9XgF6zxWmaC0xOk= github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190115171406-56726106282f/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= github.com/prometheus/client_model v0.1.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= -github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4= -github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w= -github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro= +github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY= +github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU= github.com/prometheus/common v0.2.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= -github.com/prometheus/common v0.4.0/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= -github.com/prometheus/common v0.9.1/go.mod 
h1:yhUN8i9wzaXS3w1O07YhxHEBxD+W35wd8bs7vj7HSQ4= -github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo= -github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= -github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= -github.com/prometheus/common v0.37.0 h1:ccBbHCgIiT9uSoFY0vX8H3zsNR5eLt17/RQLUvn8pXE= -github.com/prometheus/common v0.37.0/go.mod h1:phzohg0JFMnBEFGxTDbfu3QyL5GI8gTQJFhYO5B3mfA= +github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM= +github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= -github.com/prometheus/procfs v0.0.0-20190507164030-5867b95ac084/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= github.com/prometheus/procfs v0.0.8/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+GxbHq6oeK9A= -github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= -github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.8.0 h1:ODq8ZFEaYeCaZOJlZZdJA2AbQR98dSHSM1KW/You5mo= -github.com/prometheus/procfs v0.8.0/go.mod h1:z7EfXMXOkbkqb9IINtpCn86r/to3BnA0uaxHdg830/4= -github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= -github.com/pseudomuto/protoc-gen-doc v1.3.2/go.mod h1:y5+P6n3iGrbKG+9O04V5ld71in3v/bX88wUwgt+U8EA= -github.com/pseudomuto/protoc-gen-doc v1.4.1/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= -github.com/pseudomuto/protoc-gen-doc v1.5.0/go.mod h1:exDTOVwqpp30eV/EDPFLZy3Pwr2sn6hBC1WIYH/UbIg= -github.com/pseudomuto/protokit v0.2.0/go.mod h1:2PdH30hxVHsup8KpBTOXTBeMVhJZVio3Q8ViKSAXT0Q= +github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI= +github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY= github.com/quic-go/qpack v0.4.0 h1:Cr9BXA1sQS2SmDUWjSofMPNKmvF6IiIfDRmgU0w1ZCo= github.com/quic-go/qpack v0.4.0/go.mod h1:UZVnYIfi5GRk+zI9UMaCPsmZ2xKJP7XBUvVyT1Knj9A= -github.com/quic-go/qtls-go1-20 v0.3.2 h1:rRgN3WfnKbyik4dBV8A6girlJVxGand/d+jVKbQq5GI= -github.com/quic-go/qtls-go1-20 v0.3.2/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= -github.com/quic-go/quic-go v0.38.0 h1:T45lASr5q/TrVwt+jrVccmqHhPL2XuSyoCLVCpfOSLc= -github.com/quic-go/quic-go v0.38.0/go.mod h1:MPCuRq7KBK2hNcfKj/1iD1BGuN3eAYMeNxp3T42LRUg= +github.com/quic-go/qtls-go1-20 v0.4.1 h1:D33340mCNDAIKBqXuAvexTNMUByrYmFYVfKfDN5nfFs= +github.com/quic-go/qtls-go1-20 v0.4.1/go.mod h1:X9Nh97ZL80Z+bX/gUXMbipO6OxdiDi58b/fMC9mAL+k= +github.com/quic-go/quic-go v0.40.0 h1:GYd1iznlKm7dpHD7pOVpUvItgMPo/jrMgDWZhMCecqw= +github.com/quic-go/quic-go v0.40.0/go.mod h1:PeN7kuVJ4xZbxSv/4OX6S1USOX8MJvydwpTx31vx60c= github.com/rabbitmq/amqp091-go v1.8.1 h1:RejT1SBUim5doqcL6s7iN6SBmsQqyTgXb1xMlH0h1hA= github.com/rabbitmq/amqp091-go v1.8.1/go.mod 
h1:+jPrT9iY2eLjRaMSRHUhc3z14E/l85kv/f+6luSD3pc= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1166,23 +830,19 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis= github.com/rivo/uniseg v0.4.4/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/rogpeppe/fastuuid v0.0.0-20150106093220-6724a57986af/go.mod h1:XWv6SoW27p1b0cqNHllgS5HIMJraePCO15w5zCzIWYg= -github.com/rogpeppe/fastuuid v1.1.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ= -github.com/rogpeppe/go-internal v1.1.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.2.2/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= -github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE= github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M= -github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU= -github.com/rs/cors v1.8.0/go.mod h1:EBwu+T5AvHOcXwvZIkQFjUN6s8Czyqw12GL/Y0tUyRM= -github.com/rs/cors v1.9.0 h1:l9HGsTsHJcvW14Nk7J9KFz8bzeAWXn3CG6bgt7LsrAE= +github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA= +github.com/rs/cors v1.10.1 h1:L0uuZVXIKlI1SShY2nhFfo44TYvDPQ1w4oFkUJNfhyo= +github.com/rs/cors v1.10.1/go.mod h1:XyqrcTp5zjWr1wsJ8PIRZssZ8b/WMcMf71DJnit4EMU= github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ= github.com/rs/xid v1.5.0 h1:mKX4bl4iPYJtEIxp6CYiUuLQ/8DYMoz0PUdtGgMFRVc= github.com/rs/xid v1.5.0/go.mod h1:trrq9SKmegXys3aeAKXMUTdJsYXVwGY3RLcfgqegfbg= github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU= github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc= -github.com/rubenv/sql-migrate v1.1.2 h1:9M6oj4e//owVVHYrFISmY9LBRw6gzkCNmD9MV36tZeQ= -github.com/rubenv/sql-migrate v1.1.2/go.mod h1:/7TZymwxN8VWumcIxw1jjHEcR1djpdkMHQPT4FWdnbQ= +github.com/rubenv/sql-migrate v1.6.0 h1:IZpcTlAx/VKXphWEpwWJ7BaMq05tYtE80zYz+8a5Il8= +github.com/rubenv/sql-migrate v1.6.0/go.mod h1:m3ilnKP7sNb4eYkLsp6cGdPOl4OBcXM6rcbzU+Oqc5k= github.com/russross/blackfriday v1.5.2/go.mod h1:JO/DiYxRf+HjHt06OyowR9PTA263kcR/rfWxYHBV53g= github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= @@ -1190,29 +850,25 @@ github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/ryanuber/columnize v0.0.0-20160712163229-9b3edd62028f/go.mod h1:sm1tb6uqfes/u+d4ooFouqFdy9/2g9QGwK3SQygK0Ts= github.com/sahilm/fuzzy v0.1.0/go.mod h1:VFvziUEIMCrT6A6tw2RFIXPXXmzXbOsSHF0DOI8ZK9Y= github.com/samuel/go-zookeeper v0.0.0-20190923202752-2cc03de413da/go.mod h1:gi+0XIa01GRL2eRQVjQkKGqKF3SF9vZR/HnPullcV2E= -github.com/sassoftware/go-rpmutils v0.0.0-20190420191620-a8f1baeba37b/go.mod h1:am+Fp8Bt506lA3Rk3QCmSqmYmLMnPDhdDUcosQCAx+I= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/jsonstore v1.1.0 h1:WZBDjgezFS34CHI+myb4s8GGpir3UMpy7vWoCeO0n6E= +github.com/schollz/jsonstore v1.1.0/go.mod 
h1:15c6+9guw8vDRyozGjN3FoILt0wpruJk9Pi66vjaZfg= github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc= -github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo= -github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/sergi/go-diff v1.2.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= -github.com/shirou/gopsutil/v3 v3.23.7 h1:C+fHO8hfIppoJ1WdsVm1RoI0RwXoNdfTK7yWXV0wVj4= -github.com/shirou/gopsutil/v3 v3.23.7/go.mod h1:c4gnmoRC0hQuaLqvxnx1//VXQ0Ms/X9UnJF8pddY5z4= +github.com/shirou/gopsutil/v3 v3.24.1 h1:R3t6ondCEvmARp3wxODhXMTLC/klMa87h2PHUw5m7QI= +github.com/shirou/gopsutil/v3 v3.24.1/go.mod h1:UU7a2MSBQa+kW1uuDq8DeEBS8kmrnQwsv2b5O513rwU= github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM= github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ= github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU= github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k= github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4= -github.com/shopspring/decimal v1.2.0 h1:abSATXmQEYyShuxI4/vyW3tV1MrKAJzCZ/0zLUXYbsQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= +github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8= +github.com/shopspring/decimal v1.3.1/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo= github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc= github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= -github.com/sirupsen/logrus v1.4.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q= github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= -github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88= github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.8.1/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0= github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ= @@ -1220,22 +876,18 @@ github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVs github.com/slackhq/nebula v1.6.1 h1:/OCTR3abj0Sbf2nGoLUrdDXImrCv0ZVFpVPP5qa0DsM= github.com/slackhq/nebula v1.6.1/go.mod h1:UmkqnXe4O53QwToSl/gG7sM4BroQwAB7dd4hUaT6MlI= github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262 h1:unQFBIznI+VYD1/1fApl1A+9VcBk+9dcqGfnePY87LY= -github.com/smallstep/certificates v0.24.3-rc.5 h1:l9N7NmFqW5it5UcDtbyZ4CrrvYYiHRM7Dj7Mk+YC1Io= -github.com/smallstep/certificates v0.24.3-rc.5/go.mod h1:buhMLsuk9tp7JC1uHeN2sYQTXH1OzGn55erpzUIiOGo= -github.com/smallstep/go-attestation v0.4.4-0.20230509120429-e17291421738 h1:h+cZgVniTaE0uuRMdxTThLaJeuxsv4aas6oStz6f5VQ= -github.com/smallstep/go-attestation v0.4.4-0.20230509120429-e17291421738/go.mod h1:mk2hyNbyai1oon+ilW9t42BuBVw7ee8elDdgrPq4394= +github.com/smallstep/assert v0.0.0-20200723003110-82e2b9b3b262/go.mod h1:MyOHs9Po2fbM1LHej6sBUT8ozbxmMOFG+E+rx/GSGuc= +github.com/smallstep/certificates v0.25.0 h1:WWihtjQ7SprnRxDV44mBp8t5SMsNO5EWsQaEwy1rgFg= 
+github.com/smallstep/certificates v0.25.0/go.mod h1:thJmekMKUplKYip+la99Lk4IwQej/oVH/zS9PVMagEE= +github.com/smallstep/go-attestation v0.4.4-0.20230627102604-cf579e53cbd2 h1:UIAS8DTWkeclraEGH2aiJPyNPu16VbT41w4JoBlyFfU= +github.com/smallstep/go-attestation v0.4.4-0.20230627102604-cf579e53cbd2/go.mod h1:vNAduivU014fubg6ewygkAvQC0IQVXqdc8vaGl/0er4= github.com/smallstep/nosql v0.6.0 h1:ur7ysI8s9st0cMXnTvB8tA3+x5Eifmkb6hl4uqNV5jc= github.com/smallstep/nosql v0.6.0/go.mod h1:jOXwLtockXORUPPZ2MCUcIkGR6w0cN1QGZniY9DITQA= github.com/smallstep/truststore v0.12.1 h1:guLUKkc1UlsXeS3t6BuVMa4leOOpdiv02PCRTiy1WdY= github.com/smallstep/truststore v0.12.1/go.mod h1:M4mebeNy28KusGX3lJxpLARIktLcyqBOrj3ZiZ46pqw= github.com/smartystreets/assertions v0.0.0-20180927180507-b2de0cb4f26d/go.mod h1:OnSkiWE9lh6wB0YB77sQom3nweQdgAjqCqsofrRNTgc= -github.com/smartystreets/assertions v1.0.0/go.mod h1:kHHU4qYBaI3q23Pp3VPrmWhuIUrLW/7eUrw0BU5VaoM= -github.com/smartystreets/go-aws-auth v0.0.0-20180515143844-0c1422d1fdb9/go.mod h1:SnhjPscd9TpLiy1LpzGSKh3bXCfxxXuqd9xmQJy3slM= github.com/smartystreets/goconvey v1.6.4/go.mod h1:syvi0/a8iFYH4r/RixwvyeAJjdLS9QV7WQ/tjFTllLA= -github.com/smartystreets/gunit v1.0.0/go.mod h1:qwPWnhz6pn0NnRBP++URONOVyNkPyr4SauJk4cUOwJs= github.com/soheilhy/cmux v0.1.4/go.mod h1:IM3LyeVVIOuxMH7sFAkER9+bJ4dT7Ms6E4xg4kGIyLM= -github.com/soheilhy/cmux v0.1.5-0.20210205191134-5ec6847320e5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= -github.com/soheilhy/cmux v0.1.5/go.mod h1:T7TcVDs9LWfQgPlPsdngu6I6QIoyIFZDDC6sNE1GqG0= github.com/sony/gobreaker v0.4.1/go.mod h1:ZKptC7FHNvhBz7dN2LGjPVBz2sZJmc0/PkyDJOjmxWY= github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA= github.com/spaolacci/murmur3 v1.1.0 h1:7c1g84S4BPRrfL5Xrdp6fOJ206sU9y293DDHaoy0bLI= @@ -1246,16 +898,13 @@ github.com/spf13/afero v1.1.2/go.mod h1:j4pytiNVoe2o6bmDsKpLACNPDBIoEAkihy7loJ1B github.com/spf13/afero v1.6.0/go.mod h1:Ai8FlHk4v/PARR026UzYexafAt9roJ7LcLMAmO6Z93I= github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= github.com/spf13/cast v1.3.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= -github.com/spf13/cast v1.4.1 h1:s0hze+J0196ZfEMTs80N7UlFt0BDuQ7Q+JDnHiMWKdA= -github.com/spf13/cast v1.4.1/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE= +github.com/spf13/cast v1.5.0 h1:rj3WzYc11XZaIZMPKmwP96zkFEnnAmV8s6XbB2aY32w= +github.com/spf13/cast v1.5.0/go.mod h1:SpXXQ5YoyJw6s3/6cMTQuxvgRl3PCJiyaX9p6b155UU= github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ= github.com/spf13/cobra v0.0.5/go.mod h1:3K3wKZymM7VvHMDS9+Akkh4K60UwM26emMESw8tLCHU= -github.com/spf13/cobra v1.0.0/go.mod h1:/6GTrnGXV9HjY+aR4k0oJ5tcvakLuG6EuKReYlHNrgE= -github.com/spf13/cobra v1.1.1/go.mod h1:WnodtKOvamDL/PwE2M4iKs8aMDBZ5Q5klgD3qfVJQMI= -github.com/spf13/cobra v1.1.3/go.mod h1:pGADOWyqRD/YMrPZigI/zbliZ2wVD/23d+is3pSWzOo= github.com/spf13/cobra v1.2.1/go.mod h1:ExllRjgxM/piMAM+3tAZvg8fsklGAf3tPfi+i8t68Nk= -github.com/spf13/cobra v1.7.0 h1:hyqWnYt1ZQShIddO5kBpj3vu05/++x6tJ6dg8EC572I= -github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0= +github.com/spf13/cobra v1.8.0 h1:7aJaZx1B85qltLMc546zn58BxxfZdR/W22ej9CFoEf0= +github.com/spf13/cobra v1.8.0/go.mod h1:WXLWApfZ71AjXPya3WOlMsY9yMs7YeiHhFVlvLyhcho= github.com/spf13/jwalterweatherman v1.0.0/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo= github.com/spf13/jwalterweatherman v1.1.0/go.mod h1:aNWZUN0dPAAO/Ljvb5BEdw96iTZ0EXowPYD95IqWIGo= 
github.com/spf13/pflag v1.0.1/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4= @@ -1263,10 +912,7 @@ github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnIn github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/spf13/viper v1.3.2/go.mod h1:ZiWeW+zYFKm7srdB9IoDzzZXaJaI5eL9QjNiN/DMA2s= -github.com/spf13/viper v1.4.0/go.mod h1:PTJ7Z/lr49W6bUbkmS1V3by4uWynFiR9p7+dSq/yZzE= -github.com/spf13/viper v1.7.0/go.mod h1:8WkrPz2fc9jxqZNCJI/76HCieCp4Q8HaLFoCha5qpdg= github.com/spf13/viper v1.8.1/go.mod h1:o0Pch8wJ9BVSWGQMbra6iw0oQ5oktSIBaujf1rJH9Ns= -github.com/src-d/gcfg v1.4.0/go.mod h1:p/UMsR43ujA89BJY9duynAwIpvqEujIH/jFlfL7jWoI= github.com/stoewer/go-strcase v1.2.0 h1:Z2iHWqGXH00XYgqDmNgQbIBxf3wrNq0F3feEy0ainaU= github.com/stoewer/go-strcase v1.2.0/go.mod h1:IBiWB2sKIp3wVVQ3Y035++gc+knqhUQag1KpM8ahLw8= github.com/streadway/amqp v0.0.0-20190404075320-75d898a42a94/go.mod h1:AZpEONHx3DKn8O/DFsRAY58/XVQiIPMTMB1SddzLXVw= @@ -1277,7 +923,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= -github.com/stretchr/testify v0.0.0-20170130113145-4d4bfba8f1d1/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= @@ -1291,33 +936,16 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= github.com/subosito/gotenv v1.2.0/go.mod h1:N0PQaV/YGNqwC0u51sEeR/aUtSLEXKX9iv69rRypqCw= -github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad h1:JEOo9j4RzDPBJFTU9YZ/QPkLtfV8+6PbZFFOSUx5VP4= -github.com/tailscale/tscert v0.0.0-20230509043813-4e9cb4f2b4ad/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU= -github.com/tidwall/pretty v1.0.0/go.mod h1:XNkn88O1ChpSDQmQeStsy+sBenx6DDtFZJxhVysOjyk= -github.com/tj/assert v0.0.0-20171129193455-018094318fb0/go.mod h1:mZ9/Rh9oLWpLLDRpvE+3b7gP/C2YyLFYxNmcLnPTMe0= -github.com/tj/go-elastic v0.0.0-20171221160941-36157cbbebc2/go.mod h1:WjeM0Oo1eNAjXGDx2yma7uG2XoyRZTq1uv3M/o7imD0= -github.com/tj/go-kinesis v0.0.0-20171128231115-08b17f58cb1b/go.mod h1:/yhzCV0xPfx6jb1bBgRFjl5lytqVqZXEaeqWP8lTEao= -github.com/tj/go-spin v1.1.0/go.mod h1:Mg1mzmePZm4dva8Qz60H2lHwmJ2loum4VIrLgVnKwh4= -github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM= -github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI= -github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms= -github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4= +github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046 h1:8rUlviSVOEe7TMk7W0gIPrW8MqEzYfZHpsNWSf8s2vg= +github.com/tailscale/tscert v0.0.0-20230806124524-28a91b69a046/go.mod h1:kNGUQ3VESx3VZwRwA9MSCUegIl6+saPL8Noq82ozCaU= 
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU= +github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI= +github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk= +github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY= github.com/tmc/grpc-websocket-proxy v0.0.0-20170815181823-89b8d40f7ca8/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20190109142713-0ad062ec5ee5/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20200427203606-3cfed13b9966/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1:ncp9v5uamzpCO7NfCPTXjqaC+bZgJeR0sMTm6dMHP7U= -github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4= -github.com/ugorji/go v1.1.4/go.mod h1:uQMGLiO92mf5W77hV/PUCpI3pbzQx3CRekS0kk+RGrc= -github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= -github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= github.com/ugorji/go/codec v0.0.0-20181204163529-d75b2dcb6bc8/go.mod h1:VFNgLljTbGfSG7qAOspJ7OScBnGdDN/yBr0sguwnwf0= -github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= -github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= -github.com/ulikunitz/xz v0.5.6/go.mod h1:2bypXElzHzzJZwzH67Y6wb67pO62Rzfn7BSiF4ABRW8= -github.com/ulikunitz/xz v0.5.7/go.mod h1:nbz6k7qbPmH4IRqmfOplQw/tblSgqTqBwxkY0oWt/14= github.com/urfave/cli v1.20.0/go.mod h1:70zkFmudgCuE/ngEzBv17Jvp/497gISqfk5gWijbERA= github.com/urfave/cli v1.22.1/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= -github.com/urfave/cli v1.22.4/go.mod h1:Gos4lmkARVdJ6EkW0WaNv/tZAAMe9V7XWyB60NtXRu0= github.com/urfave/cli v1.22.14 h1:ebbhrRiGK2i4naQJr+1Xj92HXZCrK7MsyTS/ob3HnAk= github.com/urfave/cli v1.22.14/go.mod h1:X0eDS6pD6Exaclxm99NJ3FiCDRED7vIHpx2mDOHLvkA= github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc= @@ -1327,16 +955,9 @@ github.com/wneessen/go-mail v0.2.7 h1:4gj1flZjm05htmVj8AS6TbYXLQBYabzuQMmu8pZc/J github.com/wneessen/go-mail v0.2.7/go.mod h1:m25lkU2GYQnlVr6tdwK533/UXxo57V0kLOjaFYmub0E= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= -github.com/xanzy/go-gitlab v0.31.0/go.mod h1:sPLojNBn68fMUWSxIJtdVVIP8uSBYqesTfDUseX11Ug= -github.com/xanzy/ssh-agent v0.2.1/go.mod h1:mLlQY/MoOhWBj+gOGMQkOeiEvkx+8pJSI+0Bx9h2kr4= github.com/xdg-go/pbkdf2 v1.0.0/go.mod h1:jrpuAogTd400dnrH08LKmI/xc1MbPOebTwRqcT5RDeI= -github.com/xdg-go/scram v1.0.2/go.mod h1:1WAq6h33pAW+iRreB34OORO2Nf7qel3VV3fjBj+hCSs= -github.com/xdg-go/scram v1.1.1/go.mod h1:RaEWvsqvNKKvBPvcKeFjrG2cJqOkHTiyTpzz23ni57g= github.com/xdg-go/scram v1.1.2/go.mod h1:RT/sEzTbU5y00aCK8UOx6R7YryM0iF1N2MOmC3kKLN4= -github.com/xdg-go/stringprep v1.0.2/go.mod h1:8F9zXuvzgwmyT5DUm4GUfZGDdT3W+LCvS6+da4O5kxM= -github.com/xdg-go/stringprep v1.0.3/go.mod h1:W3f5j4i+9rC0kuIEJL0ky1VpHXQU3ocBgklLGvcBnW8= github.com/xdg-go/stringprep v1.0.4/go.mod h1:mPGuuIYwz7CmR2bT9j4GbQqutWS1zV24gijq1dTyGkM= -github.com/xi2/xz v0.0.0-20171230120015-48954b6210f8/go.mod h1:HUYIGzjTL3rfEspMxjDjgmT5uz5wzYJKVo23qUhYTos= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod 
h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q= github.com/youmark/pkcs8 v0.0.0-20181117223130-1be2e3e5546d/go.mod h1:rHwXgn7JulP+udvsHwJoVG1YGAP6VLg4y9I5dyZdqmA= @@ -1345,11 +966,10 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= -github.com/yuin/goldmark v1.4.0/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1Zlc8k= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark v1.4.15/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/yuin/goldmark v1.5.5 h1:IJznPe8wOzfIKETmMkd06F8nXkmlhaHqFRM9l1hAGsU= -github.com/yuin/goldmark v1.5.5/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= +github.com/yuin/goldmark v1.5.6 h1:COmQAWTCcGetChm3Ig7G/t8AFAN00t+o8Mt4cf7JpwA= +github.com/yuin/goldmark v1.5.6/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc h1:+IAOyRda+RLrxa1WC7umKOZRsGq4QrFFMYApOeHzQwQ= github.com/yuin/goldmark-highlighting/v2 v2.0.0-20230729083705-37449abec8cc/go.mod h1:ovIvrum6DQJA4QsJSovrkC4saKHQVs7TvcaeO8AIl5I= github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw= @@ -1361,48 +981,20 @@ github.com/zeebo/blake3 v0.2.3/go.mod h1:mjJjZpnsyIVtVgTOSpJ9vmRE4wgDeyt2HU3qXvv github.com/zeebo/pcg v1.0.1 h1:lyqfGeWiv4ahac6ttHs+I5hwtH/+1mrhlCtVNQM2kHo= github.com/zeebo/pcg v1.0.1/go.mod h1:09F0S9iiKrwn9rlI5yjLkmrug154/YRW6KnnXVDM/l4= github.com/zenazn/goji v0.9.0/go.mod h1:7S9M489iMyHBNxwZnk9/EHS098H4/F6TATF2mIxtB1Q= -github.com/zitadel/oidc/v2 v2.7.0 h1:IGX4EDk6tegTjUSsZDWeTfLseFU0BdJ/Glf1tgys2lU= -github.com/zitadel/oidc/v2 v2.7.0/go.mod h1:zkUkVJS0sDVy9m0UA9RgO3f8i/C0rtjvXU36UJj7T+0= -github.com/ziutek/mymysql v1.5.4 h1:GB0qdRGsTwQSBVYuVShFBKaXSnSnYYC2d9knnE1LHFs= -github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= -go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= +github.com/zitadel/oidc/v2 v2.12.0 h1:4aMTAy99/4pqNwrawEyJqhRb3yY3PtcDxnoDSryhpn4= +github.com/zitadel/oidc/v2 v2.12.0/go.mod h1:LrRav74IiThHGapQgCHZOUNtnqJG0tcZKHro/91rtLw= go.etcd.io/bbolt v1.3.3/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU= -go.etcd.io/bbolt v1.3.4/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.5/go.mod h1:G5EMThwa9y8QZGBClrRx5EY+Yw9kAhnjy3bSjsnlVTQ= -go.etcd.io/bbolt v1.3.6/go.mod h1:qXsaaIqmgQH0T+OPdb99Bf+PKfBBQVAdyD6TY9G8XM4= go.etcd.io/bbolt v1.3.7 h1:j+zJOnnEjF/kyHlDDgGnVL/AIqIJPq8UoB2GSNfkUfQ= go.etcd.io/bbolt v1.3.7/go.mod h1:N9Mkw9X8x5fupy0IKsmuqVtoGDyxsaDlbk4Rd05IAQw= go.etcd.io/etcd v0.0.0-20191023171146-3cf2f69b5738/go.mod h1:dnLIgRNXwCJa5e+c6mIZCrds/GIG4ncV9HhK5PX7jPg= -go.etcd.io/etcd v0.0.0-20200513171258-e048e166ab9c/go.mod h1:xCI7ZzBfRuGgBXyXO6yfWfDmlWd35khcWpUa4L0xI/k= -go.etcd.io/etcd/api/v3 v3.5.0-alpha.0/go.mod h1:mPcW6aZJukV6Aa81LSKpBjQXTWlXB5r74ymPoSWa3Sw= go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs= go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g= -go.etcd.io/etcd/client/v2 v2.305.0-alpha.0/go.mod 
h1:kdV+xzCJ3luEBSIeQyB/OEKkWKd8Zkux4sbDeANrosU= go.etcd.io/etcd/client/v2 v2.305.0/go.mod h1:h9puh54ZTgAKtEbut2oe9P4L/oqKCVB6xsXlzd7alYQ= -go.etcd.io/etcd/client/v3 v3.5.0-alpha.0/go.mod h1:wKt7jgDgf/OfKiYmCq5WFGxOFAkVMLxiiXgLDFhECr8= -go.etcd.io/etcd/client/v3 v3.5.0/go.mod h1:AIKXXVX/DQXtfTEqBryiLTUXwON+GuvO6Z7lLS/oTh0= -go.etcd.io/etcd/etcdctl/v3 v3.5.0-alpha.0/go.mod h1:YPwSaBciV5G6Gpt435AasAG3ROetZsKNUzibRa/++oo= -go.etcd.io/etcd/etcdctl/v3 v3.5.0/go.mod h1:vGTfKdsh87RI7kA2JHFBEGxjQEYx+pi299wqEOdi34M= -go.etcd.io/etcd/etcdutl/v3 v3.5.0/go.mod h1:o98rKMCibbFAG8QS9KmvlYDGDShmmIbmRE8vSofzYNg= -go.etcd.io/etcd/pkg/v3 v3.5.0-alpha.0/go.mod h1:tV31atvwzcybuqejDoY3oaNRTtlD2l/Ot78Pc9w7DMY= -go.etcd.io/etcd/pkg/v3 v3.5.0/go.mod h1:UzJGatBQ1lXChBkQF0AuAtkRQMYnHubxAEYIrC3MSsE= -go.etcd.io/etcd/raft/v3 v3.5.0-alpha.0/go.mod h1:FAwse6Zlm5v4tEWZaTjmNhe17Int4Oxbu7+2r0DiD3w= -go.etcd.io/etcd/raft/v3 v3.5.0/go.mod h1:UFOHSIvO/nKwd4lhkwabrTD3cqW5yVyYYf/KlD00Szc= -go.etcd.io/etcd/server/v3 v3.5.0-alpha.0/go.mod h1:tsKetYpt980ZTpzl/gb+UOJj9RkIyCb1u4wjzMg90BQ= -go.etcd.io/etcd/server/v3 v3.5.0/go.mod h1:3Ah5ruV+M+7RZr0+Y/5mNLwC+eQlni+mQmOVdCRJoS4= -go.etcd.io/etcd/tests/v3 v3.5.0-alpha.0/go.mod h1:HnrHxjyCuZ8YDt8PYVyQQ5d1ZQfzJVEtQWllr5Vp/30= -go.etcd.io/etcd/tests/v3 v3.5.0/go.mod h1:f+mtZ1bE1YPvgKdOJV2BKy4JQW0nAFnQehgOE7+WyJE= -go.etcd.io/etcd/v3 v3.5.0-alpha.0/go.mod h1:JZ79d3LV6NUfPjUxXrpiFAYcjhT+06qqw+i28snx8To= -go.etcd.io/etcd/v3 v3.5.0/go.mod h1:FldM0/VzcxYWLvWx1sdA7ghKw7C3L2DvUTzGrcEtsC4= -go.mongodb.org/mongo-driver v1.7.3/go.mod h1:NqaYOwnXWr5Pm7AOpO5QFxKJ503nbMse/R79oO62zWg= -go.mongodb.org/mongo-driver v1.7.5/go.mod h1:VXEWRZ6URJIkUq2SCAyapmhH0ZLRBP+FT4xhp5Zvxng= -go.mongodb.org/mongo-driver v1.10.0/go.mod h1:wsihk0Kdgv8Kqu1Anit4sfK+22vSFbUrAVEYRhCXrA8= -go.mongodb.org/mongo-driver v1.12.1 h1:nLkghSU8fQNaK7oUmDhQFsnrtcoNy7Z6LVFKsEecqgE= -go.mongodb.org/mongo-driver v1.12.1/go.mod h1:/rGBTebI3XYboVmgz+Wv3Bcbl3aD0QF9zl6kDDw18rQ= +go.mongodb.org/mongo-driver v1.13.1 h1:YIc7HTYsKndGK4RFzJ3covLz1byri52x0IoMB0Pt/vk= +go.mongodb.org/mongo-driver v1.13.1/go.mod h1:wcDf1JBCXy2mOW0bWHwO/IOYqdca1MPCwDtFu/Z9+eo= go.mozilla.org/pkcs7 v0.0.0-20210730143726-725912489c62/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352 h1:CCriYyAfq1Br1aIYettdHZTy8mBTIPo7We18TuO/bak= go.mozilla.org/pkcs7 v0.0.0-20210826202110-33d05740a352/go.mod h1:SNgMg+EgDFwmvSmLRTNKC5fegJjB7v23qTQ0XLGUNHk= -go.opencensus.io v0.15.0/go.mod h1:UffZAU+4sDEINUGP/B7UfBBkq4fqLu9zXAX7ke6CHW0= go.opencensus.io v0.20.1/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.20.2/go.mod h1:6WKK9ahsWS3RSO+PY9ZHZUfv2irvY6gN279GOPZjmmk= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= @@ -1411,13 +1003,11 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk= -go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= -go.opentelemetry.io/contrib v0.20.0/go.mod h1:G/EtFaa6qaN7+LxqfIAT3GiZa7Wv5DTBUzl5H4LY0Kc= -go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.20.0/go.mod 
h1:oVGt1LRbBOBq1A5BQLlUg9UaU/54aiHw8cgjV3aWZ/E= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0 h1:pginetY7+onl4qN1vl0xW/V/v6OBZ0vVdH+esuJgvmM= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.42.0/go.mod h1:XiYsayHc36K3EByOO6nbAXnAWbrUxdjUROCEeeROOH8= +go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0 h1:x8Z78aZx8cOF0+Kkazoc7lwUNMGy0LrzEMxTm4BbTxg= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.45.0/go.mod h1:62CPTSry9QZtOaSsE3tOzhx6LzDhHnXJ6xHeMNNiM6Q= go.opentelemetry.io/contrib/propagators/autoprop v0.42.0 h1:s2RzYOAqHVgG23q8fPWYChobUoZM6rJZ98EnylJr66w= go.opentelemetry.io/contrib/propagators/autoprop v0.42.0/go.mod h1:Mv/tWNtZn+NbALDb2XcItP0OM3lWWZjAfSroINxfW+Y= go.opentelemetry.io/contrib/propagators/aws v1.17.0 h1:IX8d7l2uRw61BlmZBOTQFaK+y22j6vytMVTs9wFrO+c= @@ -1428,48 +1018,38 @@ go.opentelemetry.io/contrib/propagators/jaeger v1.17.0 h1:Zbpbmwav32Ea5jSotpmkWE go.opentelemetry.io/contrib/propagators/jaeger v1.17.0/go.mod h1:tcTUAlmO8nuInPDSBVfG+CP6Mzjy5+gNV4mPxMbL0IA= go.opentelemetry.io/contrib/propagators/ot v1.17.0 h1:ufo2Vsz8l76eI47jFjuVyjyB3Ae2DmfiCV/o6Vc8ii0= go.opentelemetry.io/contrib/propagators/ot v1.17.0/go.mod h1:SbKPj5XGp8K/sGm05XblaIABgMgw2jDczP8gGeuaVLk= -go.opentelemetry.io/otel v0.20.0/go.mod h1:Y3ugLH2oa81t5QO+Lty+zXf8zC9L26ax4Nzoxm/dooo= -go.opentelemetry.io/otel v1.16.0 h1:Z7GVAX/UkAXPKsy94IU+i6thsQS4nb7LviLpnaNeW8s= -go.opentelemetry.io/otel v1.16.0/go.mod h1:vl0h9NUa1D5s1nv3A5vZOYWn8av4K8Ml6JDeHrT/bx4= -go.opentelemetry.io/otel/exporters/otlp v0.20.0/go.mod h1:YIieizyaN77rtLJra0buKiNBOm9XQfkPEKBeuhoMwAM= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0 h1:t4ZwRPU+emrcvM2e9DHd0Fsf0JTPVcbfa/BhTDF03d0= -go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.16.0/go.mod h1:vLarbg68dH2Wa77g71zmKQqlQ8+8Rq3GRG31uc0WcWI= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0 h1:cbsD4cUcviQGXdw8+bo5x2wazq10SKz8hEbtCRPcU78= -go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.16.0/go.mod h1:JgXSGah17croqhJfhByOLVY719k1emAXC8MVhCIJlRs= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0 h1:TVQp/bboR4mhZSav+MdgXB8FaRho1RC8UwVn3T0vjVc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.16.0/go.mod h1:I33vtIe0sR96wfrUcilIzLoA3mLHhRmz9S9Te0S3gDo= -go.opentelemetry.io/otel/metric v0.20.0/go.mod h1:598I5tYlH1vzBjn+BTuhzTCSb/9debfNp6R3s7Pr1eU= -go.opentelemetry.io/otel/metric v1.16.0 h1:RbrpwVG1Hfv85LgnZ7+txXioPDoh6EdbZHo26Q3hqOo= -go.opentelemetry.io/otel/metric v1.16.0/go.mod h1:QE47cpOmkwipPiefDwo2wDzwJrlfxxNYodqc4xnGCo4= -go.opentelemetry.io/otel/oteltest v0.20.0/go.mod h1:L7bgKf9ZB7qCwT9Up7i9/pn0PWIa9FqQ2IQ8LoxiGnw= -go.opentelemetry.io/otel/sdk v0.20.0/go.mod h1:g/IcepuwNsoiX5Byy2nNV0ySUF1em498m7hBWC279Yc= -go.opentelemetry.io/otel/sdk v1.16.0 h1:Z1Ok1YsijYL0CSJpHt4cS3wDDh7p572grzNrBMiMWgE= -go.opentelemetry.io/otel/sdk v1.16.0/go.mod h1:tMsIuKXuuIWPBAOrH+eHtvhTL+SntFtXF9QD68aP6p4= -go.opentelemetry.io/otel/sdk/export/metric v0.20.0/go.mod h1:h7RBNMsDJ5pmI1zExLi+bJK+Dr8NQCh0qGhm1KDnNlE= -go.opentelemetry.io/otel/sdk/metric v0.20.0/go.mod h1:knxiS8Xd4E/N+ZqKmUPf3gTTZ4/0TjTXukfxjzSTpHE= -go.opentelemetry.io/otel/trace v0.20.0/go.mod h1:6GjCW8zgDjwGHGa6GkyeB8+/5vjT16gUEi0Nf1iBdgw= -go.opentelemetry.io/otel/trace v1.16.0 h1:8JRpaObFoW0pxuVPapkgH8UhHQj+bJW8jJsCZEu5MQs= -go.opentelemetry.io/otel/trace v1.16.0/go.mod 
h1:Yt9vYq1SdNz3xdjZZK7wcXv1qv2pwLkqr2QVwea0ef0= -go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= -go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw= -go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U= +go.opentelemetry.io/otel v1.21.0 h1:hzLeKBZEL7Okw2mGzZ0cc4k/A7Fta0uoPgaJCr8fsFc= +go.opentelemetry.io/otel v1.21.0/go.mod h1:QZzNPQPm1zLX4gZK4cMi+71eaorMSGT3A4znnUvNNEo= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0 h1:cl5P5/GIfFh4t6xyruOgJP5QiA1pw4fYYdv6nc6CBWw= +go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.21.0/go.mod h1:zgBdWWAu7oEEMC06MMKc5NLbA/1YDXV1sMpSqEeLQLg= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0 h1:tIqheXEFWAZ7O8A7m+J0aPTmpJN3YQ7qetUAdkkkKpk= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.21.0/go.mod h1:nUeKExfxAQVbiVFn32YXpXZZHZ61Cc3s3Rn1pDBGAb0= +go.opentelemetry.io/otel/metric v1.21.0 h1:tlYWfeo+Bocx5kLEloTjbcDwBuELRrIFxwdQ36PlJu4= +go.opentelemetry.io/otel/metric v1.21.0/go.mod h1:o1p3CA8nNHW8j5yuQLdc1eeqEaPfzug24uvsyIEJRWM= +go.opentelemetry.io/otel/sdk v1.21.0 h1:FTt8qirL1EysG6sTQRZ5TokkU8d0ugCj8htOgThZXQ8= +go.opentelemetry.io/otel/sdk v1.21.0/go.mod h1:Nna6Yv7PWTdgJHVRD9hIYywQBRx7pbox6nwBnZIxl/E= +go.opentelemetry.io/otel/trace v1.21.0 h1:WD9i5gzvoUPuXIXH24ZNBudiarZDKuekPqi/E8fpfLc= +go.opentelemetry.io/otel/trace v1.21.0/go.mod h1:LGbsEB0f9LGjN+OZaQQ26sohbOmiMR+BaslueVtS/qQ= +go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I= +go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM= go.step.sm/cli-utils v0.8.0 h1:b/Tc1/m3YuQq+u3ghTFP7Dz5zUekZj6GUmd5pCvkEXQ= go.step.sm/cli-utils v0.8.0/go.mod h1:S77aISrC0pKuflqiDfxxJlUbiXcAanyJ4POOnzFSxD4= -go.step.sm/crypto v0.33.0 h1:fP8awo6YkZ0/rrLhzbHYA3U8g24VnWEebZRnGwUobRo= -go.step.sm/crypto v0.33.0/go.mod h1:rMETKeIA1ZsLBiKT6phQ2IIeBH3GL+XqimeobcqUw1g= -go.step.sm/linkedca v0.20.0 h1:bH41rvyDm3nSSJ5xgGsKUZOpzJcq5x2zacMIeqtq9oI= -go.step.sm/linkedca v0.20.0/go.mod h1:eybHw6ZTpuFmkUQnTBRWM2SPIGaP0VbYeo1bupfPT70= +go.step.sm/crypto v0.35.1 h1:QAZZ7Q8xaM4TdungGSAYw/zxpyH4fMYTkfaXVV9H7pY= +go.step.sm/crypto v0.35.1/go.mod h1:vn8Vkx/Mbqgoe7AG8btC0qZ995Udm3e+JySuDS1LCJA= +go.step.sm/linkedca v0.20.1 h1:bHDn1+UG1NgRrERkWbbCiAIvv4lD5NOFaswPDTyO5vU= +go.step.sm/linkedca v0.20.1/go.mod h1:Vaq4+Umtjh7DLFI1KuIxeo598vfBzgSYZUjgVJ7Syxw= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= go.uber.org/atomic v1.7.0/go.mod h1:fEN4uk6kAWBTFdckzkM89CLk9XfWZrxpCo0nPH17wJc= -go.uber.org/goleak v1.1.10/go.mod h1:8a7PlsEVH3e/a/GLqe5IIrQx6GzcnRmZEufDUTk4A7A= -go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A= go.uber.org/goleak v1.2.1/go.mod h1:qlT2yGI9QafXHhZZLxlSuNsMw3FFLxBr+tBRlmO1xH4= +go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto= +go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= +go.uber.org/mock v0.3.0 h1:3mUxI1No2/60yUYax92Pt8eNOEecx2D3lcXZh2NEZJo= +go.uber.org/mock v0.3.0/go.mod h1:a6FSlNadKUHUa9IP5Vyt1zh4fC7uAwxMutEAscFbkZc= go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0= go.uber.org/multierr 
v1.3.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= -go.uber.org/multierr v1.4.0/go.mod h1:VgVr7evmIr6uPjLBxg28wmKNXyqE9akIJ5XnfpiKl+4= go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= go.uber.org/multierr v1.6.0/go.mod h1:cdWPpRnG4AhwMwsgIHip0KRBQjJy5kYEpYjJxpXp9iU= go.uber.org/multierr v1.11.0 h1:blXXJkSxSSfBVBlC76pxqeO+LN3aDfLQo+309xJstO0= @@ -1478,45 +1058,33 @@ go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod h1:vJERXedbb3MVM5f9E go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q= go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= -go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= go.uber.org/zap v1.17.0/go.mod h1:MXVU+bhUf/A7Xi2HNOnopQOrmycQ5Ih87HtOu4q5SSo= go.uber.org/zap v1.25.0 h1:4Hvk6GtkucQ790dqmj7l1eEnRdKm3k3ZUrUMS2d5+5c= go.uber.org/zap v1.25.0/go.mod h1:JIAUzQIH94IC4fOJQm7gMmBJP5k7wQfdcnYdPoEXJYk= -gocloud.dev v0.19.0/go.mod h1:SmKwiR8YwIMMJvQBKLsC3fHNyMwXLw3PMDO+VVteJMI= -golang.org/x/crypto v0.0.0-20180501155221-613d6eafa307/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181029021203-45a5f77698d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181203042331-505ab145d0a9/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= -golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= -golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190422162423-af44ce270edf/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE= -golang.org/x/crypto v0.0.0-20190426145343-a29dc8fdc734/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190701094942-4def268fd1a4/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191002192127-34f69633bfdc/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= -golang.org/x/crypto v0.0.0-20191117063200-497ca9f6d64f/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20201002170205-7f63de1d35b0/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20201203163018-be400aefbc4c/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= 
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= -golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210616213533-5ff15b29337e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.1.0/go.mod h1:RecgLatLF4+eUMCP1PoPZQb+cVrJcOPbHkTkbkB9sbw= golang.org/x/crypto v0.3.0/go.mod h1:hebNnKkNXi2UzZN1eVRvBB7co0a+JxK6XbPiWVs/3J4= golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= -golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= -golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= +golang.org/x/crypto v0.14.0/go.mod h1:MVFd36DqK4CsrnJYDkBA3VC4m2GkXAM0PvzMCn4JQf4= +golang.org/x/crypto v0.18.0 h1:PGVlW0xEltQnzFZ55hkuX5+KLyrMYhHld1YHO4AKcdc= +golang.org/x/crypto v0.18.0/go.mod h1:R0j02AL6hcrfOiy9T4ZYp/rcWeMxM3L6QYxlOuEG1mg= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -1527,9 +1095,8 @@ golang.org/x/exp v0.0.0-20191227195350-da58074b4299/go.mod h1:2RIsYlXP63K8oxa1u0 golang.org/x/exp v0.0.0-20200119233911-0405dc783f0a/go.mod h1:2RIsYlXP63K8oxa1u096TMicItID8zy7Y6sNkU49FU4= golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EHIKF9dgMWnmCNThgcyBT1FY9mM= golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU= -golang.org/x/exp v0.0.0-20200331195152-e8c3332aa8e5/go.mod h1:4M0jN8W1tt0AVLNr8HDosyJCDCDuyL9N9+3m7wDWgKw= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53 h1:5llv2sWeaMSnA3w2kS57ouQQ4pudlXrR0dCgw51QK9o= -golang.org/x/exp v0.0.0-20230425010034-47ecfdc1ba53/go.mod h1:V1LtkGg67GoY2N1AnLN78QLrzxkLyJw7RJb1gzOOz9w= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9 h1:GoHiUyI/Tp2nVkLI2mCxVkOjsbSXD66ic0XW0js0R9g= +golang.org/x/exp v0.0.0-20230905200255-921286631fa9/go.mod h1:S2oDrQGGwySpoQPVqRShND87VCbxmc6bL1Yd2oYrm6k= golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js= golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= @@ -1556,14 +1123,14 @@ golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4= -golang.org/x/mod v0.11.0 h1:bUO06HqtnRcc/7l71XBe4WcqTZ+3AH1J59zWDDwLKgU= -golang.org/x/mod v0.11.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= 
+golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= +golang.org/x/mod v0.12.0 h1:rmsUpXtvNzj340zd98LZ4KntptpfRHwpFOHG188oHXc= +golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20170726083632-f5079bd7f6f7/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181023162649-9b4f9f5ad519/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20181108082009-03003ca0c849/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181201002055-351d144fa1fc/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= @@ -1574,17 +1141,12 @@ golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190522155817-f3200d17e092/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190619014844-b5b0513f8c1b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190813141303-74dc4d7220e7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20190923162816-aa69164e4478/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191002035440-2ec189313ef0/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20191119073136-fc4aabc6c914/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -1592,7 +1154,6 @@ golang.org/x/net v0.0.0-20200222125558-5a598a2470a0/go.mod h1:z5CRVTTTmAJ677TzLL golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= 
-golang.org/x/net v0.0.0-20200421231249-e086a090c8fd/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200501053045-e0ff5e5a1de5/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200506145744-7e3656a0809f/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= golang.org/x/net v0.0.0-20200513185701-a91f0712d120/go.mod h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= @@ -1606,53 +1167,37 @@ golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwY golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= -golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= -golang.org/x/net v0.0.0-20210421230115-4e50805a0758/go.mod h1:72T/g9IO56b78aLF+1Kcs5dz7/ng1VjMUvfKvpfy+jM= -golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= -golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY= golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs= -golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14= -golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI= +golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg= +golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE= +golang.org/x/net v0.20.0 h1:aCL9BSgETF1k+blQaYUBx9hJ9LOGP3gAVemcZlf1Kpo= +golang.org/x/net v0.20.0/go.mod h1:z8BVo6PvndSri0LbOE3hAn0apkU+1YvI6E70E9jsnvY= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= -golang.org/x/oauth2 v0.0.0-20181106182150-f42d05182288/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= -golang.org/x/oauth2 v0.0.0-20190402181905-9f3314589c9a/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 
v0.0.0-20191202225959-858c2ad4c8b6/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210126194326-f9ce19ea3013/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210402161424-2e8d93401602/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210413134643-5e61552d6c78/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc= -golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8= -golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI= +golang.org/x/oauth2 v0.16.0 h1:aDkGMBSYxElaoP81NpoUoz2oo2R2wHdZpGToUxfyQrQ= +golang.org/x/oauth2 v0.16.0/go.mod h1:hqZ+0LWXsiVoZpeld6jVt06P3adbS2Uu911W1SsJv2o= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190227155943-e225da77a7e6/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.0.0-20190412183630-56d357773e84/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -1661,8 +1206,9 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= -golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E= 
-golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y= +golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.5.0 h1:60k92dhOjHxJkrqnwsfl8KuaHbn/5dl0lUPUklKo3qE= +golang.org/x/sync v0.5.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sys v0.0.0-20170728174421-0f826bdd13b5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= @@ -1674,18 +1220,14 @@ golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20181122145206-62eef0e2fa9b/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181205085412-a5c9d58dba9a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= -golang.org/x/sys v0.0.0-20190221075227-b4e8571b14e0/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190403152447-81d4e9dc473e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190419153524-e8e3143a4f4a/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190531175056-4c3a928424d2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190606165138-5da285871e9c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20190620070143-6f217b454f45/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190624142023-c5567b49c5d0/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190626221950-04f50cda93cb/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20190726091711-fc99dfbffb4e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1695,14 +1237,11 @@ golang.org/x/sys v0.0.0-20190904154756-749cb33beabd/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191005200804-aed5e4c7ecf9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191008105621-543471e840be/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20191119060738-e882bf8e40c2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191120155948-bd437916bb0e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191220142924-d4481acd189f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1712,51 +1251,31 @@ golang.org/x/sys v0.0.0-20200223170610-d5e6a3e2c0ae/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200302150141-5c8b2ff67527/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200331124033-c3d80250170d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200420163511-1957bb5e6d1f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200826173525-f9321e4c35a6/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20200923182605-d9f96fdee20d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201009025420-dfb3f7c4e634/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20201207223542-d4d67f95c62d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys 
v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210309074719-68d13333faf2/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210316092937-0b90fd5c4c48/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210403161142-5e06dd20ab57/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210412220455-f1c623a9e750/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210420072515-93ed5bcd2bfe/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= -golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210629170331-7dc0b73dc9fb/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20210809222454-d867a43fc93e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220204135822-1c1b9b1eba6a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220310020820-b874c991c1a5/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1764,22 +1283,26 @@ golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= 
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.13.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= +golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.2.0/go.mod h1:TVmDHMZPmdnySmBfhjOoOdhjzdE1h4u1VwSiw2l1Nuc= golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k= -golang.org/x/term v0.11.0 h1:F9tnn/DA/Im8nCwm+fX+1/eBwi4qFjRT++MhtVC4ZX0= -golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU= +golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo= +golang.org/x/term v0.13.0/go.mod h1:LTmsnFJwVN6bCy1rVCoS+qHT1HhALEFxKncY3WNNh4U= +golang.org/x/term v0.17.0 h1:mkTF7LCd6WGJNL3K1Ad7kwxNfYAW6a8a8QqtMblp/4U= +golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -1792,14 +1315,14 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= -golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= +golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= +golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time 
v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= @@ -1807,38 +1330,28 @@ golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= -golang.org/x/tools v0.0.0-20181030221726-6c7e314b6563/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY= golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312151545-0bb0c0a6e846/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190312170243-e65039ee4138/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190329151228-23e29df326fe/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190416151739-9c9e1878f421/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190420181800-aa740d480789/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= -golang.org/x/tools v0.0.0-20190422233926-fe54fb35175b/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= golang.org/x/tools v0.0.0-20190425150028-36563e24a262/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190425163242-31fd60d6bfdc/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190506145303-2d16b83fe98c/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q= -golang.org/x/tools v0.0.0-20190531172133-b3315ee88b7d/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190606124116-d0a3d012864b/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= golang.org/x/tools v0.0.0-20190628153133-6cdbf07be9d0/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= -golang.org/x/tools v0.0.0-20190729092621-ff9f1409240a/go.mod h1:jcCCGcm9btYwXyDqrUWc6MKQKKGJCWEQ3AfLSRIbEuI= golang.org/x/tools v0.0.0-20190816200558-6889da9d5479/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190823170909-c4a336ef6a2f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20190911174233-4f2ddba30aff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191010075000-0337d82405ff/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191012152004-8de300cfc20a/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= 
golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191108193012-7d206e10da11/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191112195655-aa38f8e97acc/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191113191852-77e3bb0ad9e7/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191115202509-3a792d9c32b2/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= -golang.org/x/tools v0.0.0-20191118222007-07fc4c7f2b98/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191125144606-a911d9008d1f/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191130070609-6e064ea0cf2d/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= @@ -1856,39 +1369,27 @@ golang.org/x/tools v0.0.0-20200227222343-706bc42d1f0d/go.mod h1:TB2adYChydJhpapK golang.org/x/tools v0.0.0-20200304193943-95d2e580d8eb/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200312045724-11d5b4c81c7d/go.mod h1:o4KQGtdN14AW+yjsvvwRTJJuXz8XRtIHtEnmAXLyFUw= golang.org/x/tools v0.0.0-20200331025713-a30bf2db82d4/go.mod h1:Sl4aGygMT6LrqrWclx+PTx3U+LnKx/seiNR+3G19Ar8= -golang.org/x/tools v0.0.0-20200426102838-f3a5411a4c3b/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200501065659-ab2804fb9c9d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200512131952-2bc93b1c0c88/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200515010526-7d3b6ebf133d/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200522201501-cb1345f3a375/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200618134242-20370b0cb4b2/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200626171337-aa94e735be7f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200630154851-b2d8b0336632/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= -golang.org/x/tools v0.0.0-20200706234117-b22de6825cf7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= -golang.org/x/tools v0.0.0-20200717024301-6ddee64345a6/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= -golang.org/x/tools v0.0.0-20201014170642-d1624618ad65/go.mod h1:z6u4i615ZeAfBE4XtMziQW1fSVJXACjjbWkB/mvPzlU= golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod 
h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= -golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk= -golang.org/x/tools v0.1.7/go.mod h1:LGqMHiF4EqQNHR1JncWGqT5BVaXmza+X+BDGol+dOxo= golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= -golang.org/x/tools v0.10.0 h1:tvDr/iQoUqNdohiYm0LmmKcBk+q86lb9EprIUFhHHGg= -golang.org/x/tools v0.10.0/go.mod h1:UJwyiVBsOA2uwvK/e5OY3GTpDUJriEd+/YlqAwLPmyM= +golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU= +golang.org/x/tools v0.13.0 h1:Iey4qkscZuv0VvIt8E0neZjtPVQFSc870HQ448QgEmQ= +golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58= golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= @@ -1897,12 +1398,9 @@ golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= google.golang.org/api v0.3.1/go.mod h1:6wY9I6uQWHQ8EM57III9mq/AjF+i8G65rmVagqKMtkk= google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.5.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE= -google.golang.org/api v0.6.0/go.mod h1:btoxGiFvQNVUZQ8W08zLtrVS08CNpINPEfxXxgJL1Q4= google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M= google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.9.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= -google.golang.org/api v0.10.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg= google.golang.org/api v0.13.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.14.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= google.golang.org/api v0.15.0/go.mod h1:iLdEw5Ide6rF15KTC1Kkl0iskquN2gFfn9o9XIsbkAI= @@ -1917,44 +1415,30 @@ google.golang.org/api v0.29.0/go.mod h1:Lcubydp8VUV7KeIHD9z2Bys/sm/vGKnG1UHuDBSr google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz5138Fc= google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= -google.golang.org/api v0.37.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= google.golang.org/api v0.41.0/go.mod h1:RkxM5lITDfTzmyKFPt+wGrCJbVfniCr2ool8kTBzRTU= google.golang.org/api 
v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk/94= google.golang.org/api v0.44.0/go.mod h1:EBOGZqzyhtvMDoxwS97ctnh0zUmYY6CxqXsc1AvkYD8= -google.golang.org/api v0.45.0/go.mod h1:ISLIJCedJolbZvDfAk+Ctuq5hf+aJ33WgtUsfyFoLXA= -google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I= -google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo= -google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4= -google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw= -google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU= -google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k= -google.golang.org/api v0.132.0 h1:8t2/+qZ26kAOGSmOiHwVycqVaDg7q3JDILrNi/Z6rvc= +google.golang.org/api v0.142.0 h1:mf+7EJ94fi5ZcnpPy+m0Yv2dkz8bKm+UL0snTCuwXlY= +google.golang.org/api v0.142.0/go.mod h1:zJAN5o6HRqR7O+9qJUFOWrZkYE66RH+efPBdTLA4xBA= google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.2.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.3.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= -google.golang.org/appengine v1.6.2/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww/cMBSeb0= google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/genproto v0.0.0-20170818010345-ee236bd376b0/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= -google.golang.org/genproto v0.0.0-20181107211654-5fc9ac540362/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= -google.golang.org/genproto v0.0.0-20190508193815-b515fa19cec8/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE= google.golang.org/genproto v0.0.0-20190530194941-fb225487d101/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= -google.golang.org/genproto v0.0.0-20190620144150-6af8c5fc6601/go.mod h1:z3L6/3dTEVtUr6QSP8miRzeRqwQOioJ9I66odjN4I7s= google.golang.org/genproto v0.0.0-20190801165951-fa694d86fc64/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20190911173649-1774047e7e51/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= -google.golang.org/genproto 
v0.0.0-20190927181202-20e1ac93f88c/go.mod h1:IbNlFCBrqXvoKpeg0TB2l7cyZUmoaFKYIwrEpbDKLA8= google.golang.org/genproto v0.0.0-20191108220845-16a3f7862a1a/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191115194625-c23dd37a84c9/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= google.golang.org/genproto v0.0.0-20191216164720-4f79533eabd1/go.mod h1:n3cpQtvxv34hfy77yVDNjmbRyujviMdxYliBSkLhpCc= @@ -1968,15 +1452,12 @@ google.golang.org/genproto v0.0.0-20200228133532-8c2c7df3a383/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200305110556-506484158171/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= -google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= -google.golang.org/genproto v0.0.0-20200626011028-ee7919e894b5/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20200707001353-8e8330bf89df/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200729003335-053ba62fc06f/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= @@ -1985,36 +1466,18 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210126160654-44e461bb6506/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20210331142528-b7513248f0ba/go.mod 
h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= google.golang.org/genproto v0.0.0-20210402141018-6c239bbf2bb1/go.mod h1:9lPAdzaEmUacj36I+k7YKbEc5CXzPIeORRgDAUOu28A= -google.golang.org/genproto v0.0.0-20210413151531-c14fb6ef47c3/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210427215850-f767ed18ee4d/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210429181445-86c259c2b4ab/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= -google.golang.org/genproto v0.0.0-20210513213006-bf773b8c8384/go.mod h1:P3QM42oQyzQSnHPnZ/vqoCdDmzH28fzWByN9asMeM8A= google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0= -google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24= -google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k= -google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48= -google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w= -google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY= -google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130 h1:Au6te5hbKUV8pIYWHqOUZ1pva5qK/rwbIhoXEUB9Lu8= -google.golang.org/genproto v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:O9kGHb51iE/nOGvQaDUuadVYqovW56s5emA88lQnj6Y= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130 h1:XVeBY8d/FaK4848myy41HBqnDwvxeV3zMZhwN1TvAMU= -google.golang.org/genproto/googleapis/api v0.0.0-20230706204954-ccb25ca9f130/go.mod h1:mPBs5jNgx2GuQGvFwUvVKqtn6HsUw9nP64BedgvqEsQ= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U= -google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM= -google.golang.org/grpc v1.8.0/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a h1:fwgW9j3vHirt4ObdHoYNwuO24BEZjSzbh+zPaNWoiY8= +google.golang.org/genproto v0.0.0-20231012201019-e917dd12ba7a/go.mod h1:EMfReVxb80Dq1hhioy0sOsY9jCE46YDgHlJ7fWVUWRE= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k= +google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo= +google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc= google.golang.org/grpc v1.17.0/go.mod 
h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM= @@ -2024,34 +1487,24 @@ google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ij google.golang.org/grpc v1.22.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.23.1/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= -google.golang.org/grpc v1.24.0/go.mod h1:XDChyiUovWa60DnaeDeZmSW86xtLtjtZbwvSiRnRtcA= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= google.golang.org/grpc v1.26.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk= google.golang.org/grpc v1.28.0/go.mod h1:rpkK4SK4GF4Ach/+MFLZUBavHOvF2JJB5uozKKal+60= -google.golang.org/grpc v1.29.0/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk= google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= -google.golang.org/grpc v1.32.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= -google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE= -google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= -google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU= -google.golang.org/grpc v1.56.2 h1:fVRFRnXvU+x6C4IlHZewvJOVHoOv1TUuQyoRsYnB4bI= -google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s= -google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= +google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk= +google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= @@ -2062,40 
+1515,28 @@ google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2 google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4= google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= -google.golang.org/protobuf v1.25.1-0.20200805231151-a709e31e5d12/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c= google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= -google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8= -google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= +google.golang.org/protobuf v1.32.0 h1:pPC6BG5ex8PDFnkbrGU3EixyhKcQ2aDuBS36lqK/C7I= +google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos= gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200902074654-038fdea0a05b/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= gopkg.in/cheggaaa/pb.v1 v1.0.25/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= -gopkg.in/cheggaaa/pb.v1 v1.0.28/go.mod h1:V/YB90LKu/1FcN3WVnfiiE5oMCibMjukxqG/qStrOgw= gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= gopkg.in/gcfg.v1 v1.2.3/go.mod h1:yesOnuUOFQAhST5vPY4nbZsb/huCgGGXlipJsBn0b3o= -gopkg.in/go-playground/assert.v1 v1.2.1/go.mod h1:9RXL0bg/zibRAgZUYszZSwO/z8Y/a8bDuhia5mkpMnE= -gopkg.in/go-playground/validator.v9 v9.29.1/go.mod h1:+c9/zcJMFNgbLvly1L1V+PpxWdVbfP1avr/N00E2vyQ= gopkg.in/inconshreveable/log15.v2 v2.0.0-20180818164646-67afb5ed74ec/go.mod h1:aPpfJ7XW+gOuirDoZ8gHhLh3kZ1B08FtV2bbmy7Jv3s= -gopkg.in/ini.v1 v1.51.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= gopkg.in/ini.v1 v1.62.0/go.mod h1:pNLf8WUiyNEtQjuu5G5vTm06TEv9tsIgeAvK8hOrP4k= -gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= gopkg.in/natefinch/lumberjack.v2 v2.2.1 h1:bBRl1b0OH9s/DuPhuXpNl+VtCaJXFZ5/uEFST95x9zc= gopkg.in/natefinch/lumberjack.v2 v2.2.1/go.mod h1:YD8tP3GAjkrDg1eZH7EGmyESg/lsYskCTPBJVb9jqSc= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= gopkg.in/square/go-jose.v2 v2.6.0 h1:NGk74WTnPKBNUhNzQX7PYcTLUjoq7mzKk2OKbvwk2iI= gopkg.in/square/go-jose.v2 v2.6.0/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI= -gopkg.in/src-d/go-billy.v4 v4.3.2/go.mod 
h1:nDjArDMp+XMs1aFAESLRjfGSgfvoYN0hDfzEk0GjC98= -gopkg.in/src-d/go-git-fixtures.v3 v3.5.0/go.mod h1:dLBcvytrw/TYZsNTWCnkNF2DSIlzWYqTe3rJR56Ac7g= -gopkg.in/src-d/go-git.v4 v4.13.1/go.mod h1:nx5NYcxdKxq5fpltdHnPa2Exj4Sx0EclMWZQbYDu2z8= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI= @@ -2105,16 +1546,12 @@ gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.2.6/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200605160147-a5ece683394c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= +gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= @@ -2127,12 +1564,10 @@ honnef.co/go/tools v0.0.1-2020.1.3/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9 honnef.co/go/tools v0.0.1-2020.1.4/go.mod h1:X/FiERA/W4tHapMX5mGpAtMSVEeEUOyHaw9vFzvIQ3k= howett.net/plist v1.0.0 h1:7CrbWYbPPO/PyNy38b2EB/+gYbjCe2DXBxgtOOZbSQM= howett.net/plist v1.0.0/go.mod h1:lqaXoTrLY4hg8tnEzNru53gicrbv7rrk+2xJA/7hw9g= -nhooyr.io/websocket v1.8.7 h1:usjR2uOr/zjjkVMy0lW+PPohFok7PCow5sDjLgX4P4g= -nhooyr.io/websocket v1.8.7/go.mod h1:B70DZP8IakI65RVQ51MsWP/8jndNma26DVA/nFSCgW0= -pack.ag/amqp v0.11.2/go.mod h1:4/cbmt4EJXSKlG6LCfWHoqmN0uFdy5i/+YFz+fTfhV4= +nhooyr.io/websocket v1.8.10 h1:mv4p+MnGrLDcPlBoWsvPP7XCzTYMXP9F9eIGoKbgx7Q= +nhooyr.io/websocket v1.8.10/go.mod h1:rN9OFWIUwuxg4fR5tELlYC04bXYowCP9GX47ivo2l+c= rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8= rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0= rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA= sigs.k8s.io/yaml v1.1.0/go.mod h1:UJmg0vDUVViEyp3mgSv9WPwZCDxu4rQW1olrI1uml+o= -sigs.k8s.io/yaml v1.2.0/go.mod h1:yfXDCHCao9+ENCvLSE62v9VSji2MKu5jeNfTrofGhJc= sourcegraph.com/sourcegraph/appdash v0.0.0-20190731080439-ebfcffb1b5c0/go.mod h1:hI742Nqp5OhwiqlzhgfbWU4mW4yO10fP+LoT9WOswdU= diff --git a/nfpm/zrok-share.bash b/nfpm/zrok-share.bash index 38fefa0f..c99463f8 100644 --- a/nfpm/zrok-share.bash +++ b/nfpm/zrok-share.bash @@ -25,6 +25,10 @@ fi # set HOME to the 
first colon-sep dir in STATE_DIRECTORY inherited from systemd (/var/lib/zrok-share) or docker (/mnt) export HOME="${STATE_DIRECTORY%:*}" +: "${ZROK_SHARE_RESERVED:=true}" + +echo "DEBUG: ZROK_SHARE_RESERVED=${ZROK_SHARE_RESERVED}" + if (( $# )); then if [[ -s "$1" ]]; then echo "INFO: reading share configuration from $1" @@ -60,9 +64,14 @@ elif [[ -s ~/.zrok/reserved.json ]]; then exit 1 else echo "INFO: zrok backend is already reserved: ${ZROK_RESERVED_TOKEN}" - ZITI_CMD="${ZROK_RESERVED_TOKEN} ${ZROK_TARGET}" - ZITI_CMD+=" ${ZROK_VERBOSE:-} ${ZROK_INSECURE:-}" - share_reserved ${ZITI_CMD} + ZROK_CMD="${ZROK_RESERVED_TOKEN} ${ZROK_TARGET}" + ZROK_CMD+=" ${ZROK_VERBOSE:-} ${ZROK_INSECURE:-}" + if [[ "${ZROK_SHARE_RESERVED}" == true ]]; then + share_reserved ${ZROK_CMD} + else + echo "INFO: finished reserving zrok backend, continuing without sharing" + exit 0 + fi fi else ZROK_CMD="reserve public --json-output ${ZROK_VERBOSE:-}" @@ -110,6 +119,10 @@ case "${ZROK_BACKEND_MODE}" in ;; esac +[[ -n "${ZROK_UNIQUE_NAME:-}" ]] && { + ZROK_CMD+=" --unique-name ${ZROK_UNIQUE_NAME}" +} + ZROK_CMD+=" --backend-mode ${ZROK_BACKEND_MODE} ${ZROK_TARGET}" if [[ -n "${ZROK_SHARE_OPTS:-}" ]]; then @@ -154,6 +167,11 @@ else fi ZROK_CMD="${ZROK_RESERVED_TOKEN} ${ZROK_TARGET}" ZROK_CMD+=" ${ZROK_VERBOSE:-} ${ZROK_INSECURE:-}" - share_reserved ${ZROK_CMD} + if [[ "${ZROK_SHARE_RESERVED}" == true ]]; then + share_reserved ${ZROK_CMD} + else + echo "INFO: finished reserving zrok backend, continuing without sharing" + exit 0 + fi fi fi diff --git a/nfpm/zrok-share.env b/nfpm/zrok-share.env index d7eb30d7..55df7d33 100644 --- a/nfpm/zrok-share.env +++ b/nfpm/zrok-share.env @@ -56,7 +56,7 @@ ZROK_BACKEND_MODE="proxy" #ZROK_VERBOSE="--verbose" # you MAY set additional command-line options for the share; see "zrok reserve public --help" for hints -# WARNING: changing this value requires provisioning a new frontend URL +# WARNING: changes take effect the next time the frontend URL is reserved # NOTE: basic auth and oauth are mutually exclusive ZROK_SHARE_OPTS="" @@ -64,20 +64,25 @@ ZROK_SHARE_OPTS="" ## ZROK FRONTEND # +# you MAY customize the share token that is used to construct the reserved subdomain; if not set a random +# subdomain is reserved +# WARNING: changes take effect the next time the frontend URL is reserved +#ZROK_UNIQUE_NAME="" + # you MAY set one OAuth2/OIDC provider; "google" and "github" are valid for the default instance api.zrok.io -# WARNING: changing this value requires provisioning a new frontend URL +# WARNING: changes take effect the next time the frontend URL is reserved # NOTE: basic auth and oauth are mutually exclusive #ZROK_OAUTH_PROVIDER="google" # you MAY restrict access to one or more email addresses or domains; must be a space-separate list -# WARNING: changing this value requires provisioning a new frontend URL +# WARNING: changes take effect the next time the frontend URL is reserved #ZROK_OAUTH_EMAILS="bob@acme.example.com alice@forge.example.com @corp.example.com" # you MAY require a password with HTTP basic authentication -# WARNING: changing this value requires provisioning a new frontend URL +# WARNING: changes take effect the next time the frontend URL is reserved # NOTE: basic auth and oauth are mutually exclusive #ZROK_BASIC_AUTH="" # set if self-hosting zrok and not using only the default frontend name 'public'; must be a space-separated list -# WARNING: changing this value requires provisioning a new frontend URL +# WARNING: changes take effect the next time the frontend 
URL is reserved #ZROK_FRONTENDS="public" diff --git a/rest_client_zrok/account/account_client.go b/rest_client_zrok/account/account_client.go index 3df2ebbc..17323ed5 100644 --- a/rest_client_zrok/account/account_client.go +++ b/rest_client_zrok/account/account_client.go @@ -30,6 +30,8 @@ type ClientOption func(*runtime.ClientOperation) // ClientService is the interface for Client methods type ClientService interface { + ChangePassword(params *ChangePasswordParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ChangePasswordOK, error) + Invite(params *InviteParams, opts ...ClientOption) (*InviteCreated, error) Login(params *LoginParams, opts ...ClientOption) (*LoginOK, error) @@ -40,11 +42,52 @@ type ClientService interface { ResetPasswordRequest(params *ResetPasswordRequestParams, opts ...ClientOption) (*ResetPasswordRequestCreated, error) + ResetToken(params *ResetTokenParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ResetTokenOK, error) + Verify(params *VerifyParams, opts ...ClientOption) (*VerifyOK, error) SetTransport(transport runtime.ClientTransport) } +/* +ChangePassword change password API +*/ +func (a *Client) ChangePassword(params *ChangePasswordParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ChangePasswordOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewChangePasswordParams() + } + op := &runtime.ClientOperation{ + ID: "changePassword", + Method: "POST", + PathPattern: "/changePassword", + ProducesMediaTypes: []string{"application/zrok.v1+json"}, + ConsumesMediaTypes: []string{"application/zrok.v1+json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ChangePasswordReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ChangePasswordOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for changePassword: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* Invite invite API */ @@ -235,6 +278,45 @@ func (a *Client) ResetPasswordRequest(params *ResetPasswordRequestParams, opts . 
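Taken together, the zrok-share.bash and zrok-share.env changes above add two knobs: ZROK_UNIQUE_NAME is appended to the reserve command as --unique-name so the reserved subdomain is predictable instead of a random share token, and ZROK_SHARE_RESERVED (default "true") controls whether the script goes on to run share_reserved once the reservation exists. A minimal sketch of an env file exercising both follows; the values are illustrative, not packaged defaults.

# zrok-share.env (sketch; values are illustrative, not defaults)
ZROK_UNIQUE_NAME="myapp"      # reserve a stable "myapp" subdomain instead of a random share token
ZROK_SHARE_RESERVED="false"   # any value other than "true": reserve the backend, then exit without sharing

With ZROK_SHARE_RESERVED left at its default of "true", the script behaves as before and calls share_reserved with the reserved token and target.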
panic(msg) } +/* +ResetToken reset token API +*/ +func (a *Client) ResetToken(params *ResetTokenParams, authInfo runtime.ClientAuthInfoWriter, opts ...ClientOption) (*ResetTokenOK, error) { + // TODO: Validate the params before sending + if params == nil { + params = NewResetTokenParams() + } + op := &runtime.ClientOperation{ + ID: "resetToken", + Method: "POST", + PathPattern: "/resetToken", + ProducesMediaTypes: []string{"application/zrok.v1+json"}, + ConsumesMediaTypes: []string{"application/zrok.v1+json"}, + Schemes: []string{"http"}, + Params: params, + Reader: &ResetTokenReader{formats: a.formats}, + AuthInfo: authInfo, + Context: params.Context, + Client: params.HTTPClient, + } + for _, opt := range opts { + opt(op) + } + + result, err := a.transport.Submit(op) + if err != nil { + return nil, err + } + success, ok := result.(*ResetTokenOK) + if ok { + return success, nil + } + // unexpected success response + // safeguard: normally, absent a default response, unknown success responses return an error above: so this is a codegen issue + msg := fmt.Sprintf("unexpected success response for resetToken: API contract not enforced by server. Client expected to get an error, but got: %T", result) + panic(msg) +} + /* Verify verify API */ diff --git a/rest_client_zrok/account/change_password_parameters.go b/rest_client_zrok/account/change_password_parameters.go new file mode 100644 index 00000000..8ab0d86c --- /dev/null +++ b/rest_client_zrok/account/change_password_parameters.go @@ -0,0 +1,150 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// NewChangePasswordParams creates a new ChangePasswordParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewChangePasswordParams() *ChangePasswordParams { + return &ChangePasswordParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewChangePasswordParamsWithTimeout creates a new ChangePasswordParams object +// with the ability to set a timeout on a request. +func NewChangePasswordParamsWithTimeout(timeout time.Duration) *ChangePasswordParams { + return &ChangePasswordParams{ + timeout: timeout, + } +} + +// NewChangePasswordParamsWithContext creates a new ChangePasswordParams object +// with the ability to set a context for a request. +func NewChangePasswordParamsWithContext(ctx context.Context) *ChangePasswordParams { + return &ChangePasswordParams{ + Context: ctx, + } +} + +// NewChangePasswordParamsWithHTTPClient creates a new ChangePasswordParams object +// with the ability to set a custom HTTPClient for a request. +func NewChangePasswordParamsWithHTTPClient(client *http.Client) *ChangePasswordParams { + return &ChangePasswordParams{ + HTTPClient: client, + } +} + +/* +ChangePasswordParams contains all the parameters to send to the API endpoint + + for the change password operation. + + Typically these are written to a http.Request. +*/ +type ChangePasswordParams struct { + + // Body. 
+ Body *rest_model_zrok.ChangePasswordRequest + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the change password params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ChangePasswordParams) WithDefaults() *ChangePasswordParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the change password params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ChangePasswordParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the change password params +func (o *ChangePasswordParams) WithTimeout(timeout time.Duration) *ChangePasswordParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the change password params +func (o *ChangePasswordParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the change password params +func (o *ChangePasswordParams) WithContext(ctx context.Context) *ChangePasswordParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the change password params +func (o *ChangePasswordParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the change password params +func (o *ChangePasswordParams) WithHTTPClient(client *http.Client) *ChangePasswordParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the change password params +func (o *ChangePasswordParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the change password params +func (o *ChangePasswordParams) WithBody(body *rest_model_zrok.ChangePasswordRequest) *ChangePasswordParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the change password params +func (o *ChangePasswordParams) SetBody(body *rest_model_zrok.ChangePasswordRequest) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ChangePasswordParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if o.Body != nil { + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/rest_client_zrok/account/change_password_responses.go b/rest_client_zrok/account/change_password_responses.go new file mode 100644 index 00000000..195afe8b --- /dev/null +++ b/rest_client_zrok/account/change_password_responses.go @@ -0,0 +1,349 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// ChangePasswordReader is a Reader for the ChangePassword structure. +type ChangePasswordReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ChangePasswordReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewChangePasswordOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 400: + result := NewChangePasswordBadRequest() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 401: + result := NewChangePasswordUnauthorized() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 422: + result := NewChangePasswordUnprocessableEntity() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewChangePasswordInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /changePassword] changePassword", response, response.Code()) + } +} + +// NewChangePasswordOK creates a ChangePasswordOK with default headers values +func NewChangePasswordOK() *ChangePasswordOK { + return &ChangePasswordOK{} +} + +/* +ChangePasswordOK describes a response with status code 200, with default header values. + +changed password +*/ +type ChangePasswordOK struct { +} + +// IsSuccess returns true when this change password o k response has a 2xx status code +func (o *ChangePasswordOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this change password o k response has a 3xx status code +func (o *ChangePasswordOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this change password o k response has a 4xx status code +func (o *ChangePasswordOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this change password o k response has a 5xx status code +func (o *ChangePasswordOK) IsServerError() bool { + return false +} + +// IsCode returns true when this change password o k response a status code equal to that given +func (o *ChangePasswordOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the change password o k response +func (o *ChangePasswordOK) Code() int { + return 200 +} + +func (o *ChangePasswordOK) Error() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordOK ", 200) +} + +func (o *ChangePasswordOK) String() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordOK ", 200) +} + +func (o *ChangePasswordOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewChangePasswordBadRequest creates a ChangePasswordBadRequest with default headers values +func NewChangePasswordBadRequest() *ChangePasswordBadRequest { + return &ChangePasswordBadRequest{} +} + +/* +ChangePasswordBadRequest describes a response with status code 400, with default header values. 
+ +password not changed +*/ +type ChangePasswordBadRequest struct { +} + +// IsSuccess returns true when this change password bad request response has a 2xx status code +func (o *ChangePasswordBadRequest) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this change password bad request response has a 3xx status code +func (o *ChangePasswordBadRequest) IsRedirect() bool { + return false +} + +// IsClientError returns true when this change password bad request response has a 4xx status code +func (o *ChangePasswordBadRequest) IsClientError() bool { + return true +} + +// IsServerError returns true when this change password bad request response has a 5xx status code +func (o *ChangePasswordBadRequest) IsServerError() bool { + return false +} + +// IsCode returns true when this change password bad request response a status code equal to that given +func (o *ChangePasswordBadRequest) IsCode(code int) bool { + return code == 400 +} + +// Code gets the status code for the change password bad request response +func (o *ChangePasswordBadRequest) Code() int { + return 400 +} + +func (o *ChangePasswordBadRequest) Error() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordBadRequest ", 400) +} + +func (o *ChangePasswordBadRequest) String() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordBadRequest ", 400) +} + +func (o *ChangePasswordBadRequest) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewChangePasswordUnauthorized creates a ChangePasswordUnauthorized with default headers values +func NewChangePasswordUnauthorized() *ChangePasswordUnauthorized { + return &ChangePasswordUnauthorized{} +} + +/* +ChangePasswordUnauthorized describes a response with status code 401, with default header values. 
+ +unauthorized +*/ +type ChangePasswordUnauthorized struct { +} + +// IsSuccess returns true when this change password unauthorized response has a 2xx status code +func (o *ChangePasswordUnauthorized) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this change password unauthorized response has a 3xx status code +func (o *ChangePasswordUnauthorized) IsRedirect() bool { + return false +} + +// IsClientError returns true when this change password unauthorized response has a 4xx status code +func (o *ChangePasswordUnauthorized) IsClientError() bool { + return true +} + +// IsServerError returns true when this change password unauthorized response has a 5xx status code +func (o *ChangePasswordUnauthorized) IsServerError() bool { + return false +} + +// IsCode returns true when this change password unauthorized response a status code equal to that given +func (o *ChangePasswordUnauthorized) IsCode(code int) bool { + return code == 401 +} + +// Code gets the status code for the change password unauthorized response +func (o *ChangePasswordUnauthorized) Code() int { + return 401 +} + +func (o *ChangePasswordUnauthorized) Error() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordUnauthorized ", 401) +} + +func (o *ChangePasswordUnauthorized) String() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordUnauthorized ", 401) +} + +func (o *ChangePasswordUnauthorized) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewChangePasswordUnprocessableEntity creates a ChangePasswordUnprocessableEntity with default headers values +func NewChangePasswordUnprocessableEntity() *ChangePasswordUnprocessableEntity { + return &ChangePasswordUnprocessableEntity{} +} + +/* +ChangePasswordUnprocessableEntity describes a response with status code 422, with default header values. 
+ +password validation failure +*/ +type ChangePasswordUnprocessableEntity struct { + Payload rest_model_zrok.ErrorMessage +} + +// IsSuccess returns true when this change password unprocessable entity response has a 2xx status code +func (o *ChangePasswordUnprocessableEntity) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this change password unprocessable entity response has a 3xx status code +func (o *ChangePasswordUnprocessableEntity) IsRedirect() bool { + return false +} + +// IsClientError returns true when this change password unprocessable entity response has a 4xx status code +func (o *ChangePasswordUnprocessableEntity) IsClientError() bool { + return true +} + +// IsServerError returns true when this change password unprocessable entity response has a 5xx status code +func (o *ChangePasswordUnprocessableEntity) IsServerError() bool { + return false +} + +// IsCode returns true when this change password unprocessable entity response a status code equal to that given +func (o *ChangePasswordUnprocessableEntity) IsCode(code int) bool { + return code == 422 +} + +// Code gets the status code for the change password unprocessable entity response +func (o *ChangePasswordUnprocessableEntity) Code() int { + return 422 +} + +func (o *ChangePasswordUnprocessableEntity) Error() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ChangePasswordUnprocessableEntity) String() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordUnprocessableEntity %+v", 422, o.Payload) +} + +func (o *ChangePasswordUnprocessableEntity) GetPayload() rest_model_zrok.ErrorMessage { + return o.Payload +} + +func (o *ChangePasswordUnprocessableEntity) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + // response payload + if err := consumer.Consume(response.Body(), &o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewChangePasswordInternalServerError creates a ChangePasswordInternalServerError with default headers values +func NewChangePasswordInternalServerError() *ChangePasswordInternalServerError { + return &ChangePasswordInternalServerError{} +} + +/* +ChangePasswordInternalServerError describes a response with status code 500, with default header values. 
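The generated ChangePassword client corresponds to a plain POST /changePassword request. As a rough illustration only: the curl sketch below assumes an /api/v1 path prefix, an x-token auth header, and email/oldPassword/newPassword body fields, none of which are defined in this diff; the diff itself only fixes the path, the application/zrok.v1+json media type, and the 200/400/401/422/500 response codes.

# hypothetical request; path prefix, auth header, and field names are assumptions, not taken from this diff
curl -sS -X POST "https://zrok.example.com/api/v1/changePassword" \
  -H "Content-Type: application/zrok.v1+json" \
  -H "x-token: ${ZROK_ACCOUNT_TOKEN}" \
  -d '{"email":"user@example.com","oldPassword":"old-secret","newPassword":"new-secret"}'
# per the reader above: 200 changed password, 400 password not changed, 401 unauthorized,
# 422 password validation failure, 500 internal server error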
+ +internal server error +*/ +type ChangePasswordInternalServerError struct { +} + +// IsSuccess returns true when this change password internal server error response has a 2xx status code +func (o *ChangePasswordInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this change password internal server error response has a 3xx status code +func (o *ChangePasswordInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this change password internal server error response has a 4xx status code +func (o *ChangePasswordInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this change password internal server error response has a 5xx status code +func (o *ChangePasswordInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this change password internal server error response a status code equal to that given +func (o *ChangePasswordInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the change password internal server error response +func (o *ChangePasswordInternalServerError) Code() int { + return 500 +} + +func (o *ChangePasswordInternalServerError) Error() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordInternalServerError ", 500) +} + +func (o *ChangePasswordInternalServerError) String() string { + return fmt.Sprintf("[POST /changePassword][%d] changePasswordInternalServerError ", 500) +} + +func (o *ChangePasswordInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} diff --git a/rest_client_zrok/account/invite_responses.go b/rest_client_zrok/account/invite_responses.go index 87d13542..a07d98a3 100644 --- a/rest_client_zrok/account/invite_responses.go +++ b/rest_client_zrok/account/invite_responses.go @@ -48,7 +48,7 @@ func (o *InviteReader) ReadResponse(response runtime.ClientResponse, consumer ru } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /invite] invite", response, response.Code()) } } @@ -90,6 +90,11 @@ func (o *InviteCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the invite created response +func (o *InviteCreated) Code() int { + return 201 +} + func (o *InviteCreated) Error() string { return fmt.Sprintf("[POST /invite][%d] inviteCreated ", 201) } @@ -142,6 +147,11 @@ func (o *InviteBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the invite bad request response +func (o *InviteBadRequest) Code() int { + return 400 +} + func (o *InviteBadRequest) Error() string { return fmt.Sprintf("[POST /invite][%d] inviteBadRequest %+v", 400, o.Payload) } @@ -202,6 +212,11 @@ func (o *InviteUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the invite unauthorized response +func (o *InviteUnauthorized) Code() int { + return 401 +} + func (o *InviteUnauthorized) Error() string { return fmt.Sprintf("[POST /invite][%d] inviteUnauthorized ", 401) } @@ -253,6 +268,11 @@ func (o *InviteInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the invite internal server error response +func (o *InviteInternalServerError) Code() 
int { + return 500 +} + func (o *InviteInternalServerError) Error() string { return fmt.Sprintf("[POST /invite][%d] inviteInternalServerError ", 500) } diff --git a/rest_client_zrok/account/login_responses.go b/rest_client_zrok/account/login_responses.go index 08ef25b8..534f6a4f 100644 --- a/rest_client_zrok/account/login_responses.go +++ b/rest_client_zrok/account/login_responses.go @@ -36,7 +36,7 @@ func (o *LoginReader) ReadResponse(response runtime.ClientResponse, consumer run } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /login] login", response, response.Code()) } } @@ -79,6 +79,11 @@ func (o *LoginOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the login o k response +func (o *LoginOK) Code() int { + return 200 +} + func (o *LoginOK) Error() string { return fmt.Sprintf("[POST /login][%d] loginOK %+v", 200, o.Payload) } @@ -139,6 +144,11 @@ func (o *LoginUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the login unauthorized response +func (o *LoginUnauthorized) Code() int { + return 401 +} + func (o *LoginUnauthorized) Error() string { return fmt.Sprintf("[POST /login][%d] loginUnauthorized ", 401) } diff --git a/rest_client_zrok/account/register_responses.go b/rest_client_zrok/account/register_responses.go index 82225bbe..2c510334 100644 --- a/rest_client_zrok/account/register_responses.go +++ b/rest_client_zrok/account/register_responses.go @@ -48,7 +48,7 @@ func (o *RegisterReader) ReadResponse(response runtime.ClientResponse, consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /register] register", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *RegisterOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the register o k response +func (o *RegisterOK) Code() int { + return 200 +} + func (o *RegisterOK) Error() string { return fmt.Sprintf("[POST /register][%d] registerOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *RegisterNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the register not found response +func (o *RegisterNotFound) Code() int { + return 404 +} + func (o *RegisterNotFound) Error() string { return fmt.Sprintf("[POST /register][%d] registerNotFound ", 404) } @@ -205,6 +215,11 @@ func (o *RegisterUnprocessableEntity) IsCode(code int) bool { return code == 422 } +// Code gets the status code for the register unprocessable entity response +func (o *RegisterUnprocessableEntity) Code() int { + return 422 +} + func (o *RegisterUnprocessableEntity) Error() string { return fmt.Sprintf("[POST /register][%d] registerUnprocessableEntity %+v", 422, o.Payload) } @@ -265,6 +280,11 @@ func (o *RegisterInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the register internal server error response +func (o *RegisterInternalServerError) Code() int { + return 500 +} + func (o *RegisterInternalServerError) Error() string { return fmt.Sprintf("[POST /register][%d] registerInternalServerError ", 500) } diff --git a/rest_client_zrok/account/reset_password_request_responses.go 
b/rest_client_zrok/account/reset_password_request_responses.go index 2f0bb971..7e05c525 100644 --- a/rest_client_zrok/account/reset_password_request_responses.go +++ b/rest_client_zrok/account/reset_password_request_responses.go @@ -41,7 +41,7 @@ func (o *ResetPasswordRequestReader) ReadResponse(response runtime.ClientRespons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /resetPasswordRequest] resetPasswordRequest", response, response.Code()) } } @@ -83,6 +83,11 @@ func (o *ResetPasswordRequestCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the reset password request created response +func (o *ResetPasswordRequestCreated) Code() int { + return 201 +} + func (o *ResetPasswordRequestCreated) Error() string { return fmt.Sprintf("[POST /resetPasswordRequest][%d] resetPasswordRequestCreated ", 201) } @@ -134,6 +139,11 @@ func (o *ResetPasswordRequestBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the reset password request bad request response +func (o *ResetPasswordRequestBadRequest) Code() int { + return 400 +} + func (o *ResetPasswordRequestBadRequest) Error() string { return fmt.Sprintf("[POST /resetPasswordRequest][%d] resetPasswordRequestBadRequest ", 400) } @@ -185,6 +195,11 @@ func (o *ResetPasswordRequestInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the reset password request internal server error response +func (o *ResetPasswordRequestInternalServerError) Code() int { + return 500 +} + func (o *ResetPasswordRequestInternalServerError) Error() string { return fmt.Sprintf("[POST /resetPasswordRequest][%d] resetPasswordRequestInternalServerError ", 500) } diff --git a/rest_client_zrok/account/reset_password_responses.go b/rest_client_zrok/account/reset_password_responses.go index fb106b41..01c83c7b 100644 --- a/rest_client_zrok/account/reset_password_responses.go +++ b/rest_client_zrok/account/reset_password_responses.go @@ -48,7 +48,7 @@ func (o *ResetPasswordReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /resetPassword] resetPassword", response, response.Code()) } } @@ -90,6 +90,11 @@ func (o *ResetPasswordOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the reset password o k response +func (o *ResetPasswordOK) Code() int { + return 200 +} + func (o *ResetPasswordOK) Error() string { return fmt.Sprintf("[POST /resetPassword][%d] resetPasswordOK ", 200) } @@ -141,6 +146,11 @@ func (o *ResetPasswordNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the reset password not found response +func (o *ResetPasswordNotFound) Code() int { + return 404 +} + func (o *ResetPasswordNotFound) Error() string { return fmt.Sprintf("[POST /resetPassword][%d] resetPasswordNotFound ", 404) } @@ -193,6 +203,11 @@ func (o *ResetPasswordUnprocessableEntity) IsCode(code int) bool { return code == 422 } +// Code gets the status code for the reset password unprocessable entity response +func (o *ResetPasswordUnprocessableEntity) Code() int { + return 422 +} + func (o 
*ResetPasswordUnprocessableEntity) Error() string { return fmt.Sprintf("[POST /resetPassword][%d] resetPasswordUnprocessableEntity %+v", 422, o.Payload) } @@ -253,6 +268,11 @@ func (o *ResetPasswordInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the reset password internal server error response +func (o *ResetPasswordInternalServerError) Code() int { + return 500 +} + func (o *ResetPasswordInternalServerError) Error() string { return fmt.Sprintf("[POST /resetPassword][%d] resetPasswordInternalServerError ", 500) } diff --git a/rest_client_zrok/account/reset_token_parameters.go b/rest_client_zrok/account/reset_token_parameters.go new file mode 100644 index 00000000..a8eae203 --- /dev/null +++ b/rest_client_zrok/account/reset_token_parameters.go @@ -0,0 +1,146 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "net/http" + "time" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + cr "github.com/go-openapi/runtime/client" + "github.com/go-openapi/strfmt" +) + +// NewResetTokenParams creates a new ResetTokenParams object, +// with the default timeout for this client. +// +// Default values are not hydrated, since defaults are normally applied by the API server side. +// +// To enforce default values in parameter, use SetDefaults or WithDefaults. +func NewResetTokenParams() *ResetTokenParams { + return &ResetTokenParams{ + timeout: cr.DefaultTimeout, + } +} + +// NewResetTokenParamsWithTimeout creates a new ResetTokenParams object +// with the ability to set a timeout on a request. +func NewResetTokenParamsWithTimeout(timeout time.Duration) *ResetTokenParams { + return &ResetTokenParams{ + timeout: timeout, + } +} + +// NewResetTokenParamsWithContext creates a new ResetTokenParams object +// with the ability to set a context for a request. +func NewResetTokenParamsWithContext(ctx context.Context) *ResetTokenParams { + return &ResetTokenParams{ + Context: ctx, + } +} + +// NewResetTokenParamsWithHTTPClient creates a new ResetTokenParams object +// with the ability to set a custom HTTPClient for a request. +func NewResetTokenParamsWithHTTPClient(client *http.Client) *ResetTokenParams { + return &ResetTokenParams{ + HTTPClient: client, + } +} + +/* +ResetTokenParams contains all the parameters to send to the API endpoint + + for the reset token operation. + + Typically these are written to a http.Request. +*/ +type ResetTokenParams struct { + + // Body. + Body ResetTokenBody + + timeout time.Duration + Context context.Context + HTTPClient *http.Client +} + +// WithDefaults hydrates default values in the reset token params (not the query body). +// +// All values with no default are reset to their zero value. +func (o *ResetTokenParams) WithDefaults() *ResetTokenParams { + o.SetDefaults() + return o +} + +// SetDefaults hydrates default values in the reset token params (not the query body). +// +// All values with no default are reset to their zero value. 
+func (o *ResetTokenParams) SetDefaults() { + // no default values defined for this parameter +} + +// WithTimeout adds the timeout to the reset token params +func (o *ResetTokenParams) WithTimeout(timeout time.Duration) *ResetTokenParams { + o.SetTimeout(timeout) + return o +} + +// SetTimeout adds the timeout to the reset token params +func (o *ResetTokenParams) SetTimeout(timeout time.Duration) { + o.timeout = timeout +} + +// WithContext adds the context to the reset token params +func (o *ResetTokenParams) WithContext(ctx context.Context) *ResetTokenParams { + o.SetContext(ctx) + return o +} + +// SetContext adds the context to the reset token params +func (o *ResetTokenParams) SetContext(ctx context.Context) { + o.Context = ctx +} + +// WithHTTPClient adds the HTTPClient to the reset token params +func (o *ResetTokenParams) WithHTTPClient(client *http.Client) *ResetTokenParams { + o.SetHTTPClient(client) + return o +} + +// SetHTTPClient adds the HTTPClient to the reset token params +func (o *ResetTokenParams) SetHTTPClient(client *http.Client) { + o.HTTPClient = client +} + +// WithBody adds the body to the reset token params +func (o *ResetTokenParams) WithBody(body ResetTokenBody) *ResetTokenParams { + o.SetBody(body) + return o +} + +// SetBody adds the body to the reset token params +func (o *ResetTokenParams) SetBody(body ResetTokenBody) { + o.Body = body +} + +// WriteToRequest writes these params to a swagger request +func (o *ResetTokenParams) WriteToRequest(r runtime.ClientRequest, reg strfmt.Registry) error { + + if err := r.SetTimeout(o.timeout); err != nil { + return err + } + var res []error + if err := r.SetBodyParam(o.Body); err != nil { + return err + } + + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/rest_client_zrok/account/reset_token_responses.go b/rest_client_zrok/account/reset_token_responses.go new file mode 100644 index 00000000..ed973e6a --- /dev/null +++ b/rest_client_zrok/account/reset_token_responses.go @@ -0,0 +1,303 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + "fmt" + "io" + + "github.com/go-openapi/runtime" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ResetTokenReader is a Reader for the ResetToken structure. +type ResetTokenReader struct { + formats strfmt.Registry +} + +// ReadResponse reads a server response into the received o. 
+func (o *ResetTokenReader) ReadResponse(response runtime.ClientResponse, consumer runtime.Consumer) (interface{}, error) { + switch response.Code() { + case 200: + result := NewResetTokenOK() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return result, nil + case 404: + result := NewResetTokenNotFound() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + case 500: + result := NewResetTokenInternalServerError() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result + default: + return nil, runtime.NewAPIError("[POST /resetToken] resetToken", response, response.Code()) + } +} + +// NewResetTokenOK creates a ResetTokenOK with default headers values +func NewResetTokenOK() *ResetTokenOK { + return &ResetTokenOK{} +} + +/* +ResetTokenOK describes a response with status code 200, with default header values. + +token reset +*/ +type ResetTokenOK struct { + Payload *ResetTokenOKBody +} + +// IsSuccess returns true when this reset token o k response has a 2xx status code +func (o *ResetTokenOK) IsSuccess() bool { + return true +} + +// IsRedirect returns true when this reset token o k response has a 3xx status code +func (o *ResetTokenOK) IsRedirect() bool { + return false +} + +// IsClientError returns true when this reset token o k response has a 4xx status code +func (o *ResetTokenOK) IsClientError() bool { + return false +} + +// IsServerError returns true when this reset token o k response has a 5xx status code +func (o *ResetTokenOK) IsServerError() bool { + return false +} + +// IsCode returns true when this reset token o k response a status code equal to that given +func (o *ResetTokenOK) IsCode(code int) bool { + return code == 200 +} + +// Code gets the status code for the reset token o k response +func (o *ResetTokenOK) Code() int { + return 200 +} + +func (o *ResetTokenOK) Error() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenOK %+v", 200, o.Payload) +} + +func (o *ResetTokenOK) String() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenOK %+v", 200, o.Payload) +} + +func (o *ResetTokenOK) GetPayload() *ResetTokenOKBody { + return o.Payload +} + +func (o *ResetTokenOK) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + o.Payload = new(ResetTokenOKBody) + + // response payload + if err := consumer.Consume(response.Body(), o.Payload); err != nil && err != io.EOF { + return err + } + + return nil +} + +// NewResetTokenNotFound creates a ResetTokenNotFound with default headers values +func NewResetTokenNotFound() *ResetTokenNotFound { + return &ResetTokenNotFound{} +} + +/* +ResetTokenNotFound describes a response with status code 404, with default header values. 
+ +account not found +*/ +type ResetTokenNotFound struct { +} + +// IsSuccess returns true when this reset token not found response has a 2xx status code +func (o *ResetTokenNotFound) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this reset token not found response has a 3xx status code +func (o *ResetTokenNotFound) IsRedirect() bool { + return false +} + +// IsClientError returns true when this reset token not found response has a 4xx status code +func (o *ResetTokenNotFound) IsClientError() bool { + return true +} + +// IsServerError returns true when this reset token not found response has a 5xx status code +func (o *ResetTokenNotFound) IsServerError() bool { + return false +} + +// IsCode returns true when this reset token not found response a status code equal to that given +func (o *ResetTokenNotFound) IsCode(code int) bool { + return code == 404 +} + +// Code gets the status code for the reset token not found response +func (o *ResetTokenNotFound) Code() int { + return 404 +} + +func (o *ResetTokenNotFound) Error() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenNotFound ", 404) +} + +func (o *ResetTokenNotFound) String() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenNotFound ", 404) +} + +func (o *ResetTokenNotFound) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +// NewResetTokenInternalServerError creates a ResetTokenInternalServerError with default headers values +func NewResetTokenInternalServerError() *ResetTokenInternalServerError { + return &ResetTokenInternalServerError{} +} + +/* +ResetTokenInternalServerError describes a response with status code 500, with default header values. + +internal server error +*/ +type ResetTokenInternalServerError struct { +} + +// IsSuccess returns true when this reset token internal server error response has a 2xx status code +func (o *ResetTokenInternalServerError) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this reset token internal server error response has a 3xx status code +func (o *ResetTokenInternalServerError) IsRedirect() bool { + return false +} + +// IsClientError returns true when this reset token internal server error response has a 4xx status code +func (o *ResetTokenInternalServerError) IsClientError() bool { + return false +} + +// IsServerError returns true when this reset token internal server error response has a 5xx status code +func (o *ResetTokenInternalServerError) IsServerError() bool { + return true +} + +// IsCode returns true when this reset token internal server error response a status code equal to that given +func (o *ResetTokenInternalServerError) IsCode(code int) bool { + return code == 500 +} + +// Code gets the status code for the reset token internal server error response +func (o *ResetTokenInternalServerError) Code() int { + return 500 +} + +func (o *ResetTokenInternalServerError) Error() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenInternalServerError ", 500) +} + +func (o *ResetTokenInternalServerError) String() string { + return fmt.Sprintf("[POST /resetToken][%d] resetTokenInternalServerError ", 500) +} + +func (o *ResetTokenInternalServerError) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + +/* +ResetTokenBody reset token body +swagger:model ResetTokenBody +*/ +type ResetTokenBody struct { + + // email address + EmailAddress string 
`json:"emailAddress,omitempty"` +} + +// Validate validates this reset token body +func (o *ResetTokenBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this reset token body based on context it is used +func (o *ResetTokenBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ResetTokenBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ResetTokenBody) UnmarshalBinary(b []byte) error { + var res ResetTokenBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +/* +ResetTokenOKBody reset token o k body +swagger:model ResetTokenOKBody +*/ +type ResetTokenOKBody struct { + + // token + Token string `json:"token,omitempty"` +} + +// Validate validates this reset token o k body +func (o *ResetTokenOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this reset token o k body based on context it is used +func (o *ResetTokenOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ResetTokenOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ResetTokenOKBody) UnmarshalBinary(b []byte) error { + var res ResetTokenOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/rest_client_zrok/account/verify_responses.go b/rest_client_zrok/account/verify_responses.go index f333aeaa..ee26b595 100644 --- a/rest_client_zrok/account/verify_responses.go +++ b/rest_client_zrok/account/verify_responses.go @@ -42,7 +42,7 @@ func (o *VerifyReader) ReadResponse(response runtime.ClientResponse, consumer ru } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /verify] verify", response, response.Code()) } } @@ -85,6 +85,11 @@ func (o *VerifyOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the verify o k response +func (o *VerifyOK) Code() int { + return 200 +} + func (o *VerifyOK) Error() string { return fmt.Sprintf("[POST /verify][%d] verifyOK %+v", 200, o.Payload) } @@ -147,6 +152,11 @@ func (o *VerifyNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the verify not found response +func (o *VerifyNotFound) Code() int { + return 404 +} + func (o *VerifyNotFound) Error() string { return fmt.Sprintf("[POST /verify][%d] verifyNotFound ", 404) } @@ -198,6 +208,11 @@ func (o *VerifyInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the verify internal server error response +func (o *VerifyInternalServerError) Code() int { + return 500 +} + func (o *VerifyInternalServerError) Error() string { return fmt.Sprintf("[POST /verify][%d] verifyInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/create_frontend_responses.go b/rest_client_zrok/admin/create_frontend_responses.go index 35689cc5..69423f99 100644 --- a/rest_client_zrok/admin/create_frontend_responses.go +++ b/rest_client_zrok/admin/create_frontend_responses.go 
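// --- Illustrative usage sketch (editorial aside, not part of the generated diff) ---
// Exercising the new resetToken operation defined in reset_token_parameters.go and
// reset_token_responses.go above. The transport and facade wiring (httptransport.New,
// the Zrok facade with an Account service), the host, the base path, and the absence
// of an auth writer on this call follow the usual go-swagger client layout assumed for
// this repository; they are assumptions, not taken from this diff.
package main

import (
	"fmt"
	"log"

	httptransport "github.com/go-openapi/runtime/client"
	"github.com/go-openapi/strfmt"

	rest_client_zrok "github.com/openziti/zrok/rest_client_zrok"
	"github.com/openziti/zrok/rest_client_zrok/account"
)

func main() {
	// assumed host and base path for the zrok controller API
	transport := httptransport.New("zrok.example.com", "/api/v1", nil)
	zrok := rest_client_zrok.New(transport, strfmt.Default)

	params := account.NewResetTokenParams()
	params.Body = account.ResetTokenBody{EmailAddress: "user@example.com"}

	ok, err := zrok.Account.ResetToken(params)
	if err != nil {
		// 404 (*account.ResetTokenNotFound) and 500 (*account.ResetTokenInternalServerError)
		// are returned as errors by ResetTokenReader.ReadResponse; any undeclared status
		// surfaces as a *runtime.APIError carrying the "[POST /resetToken] resetToken" message.
		log.Fatalf("resetToken failed: %v", err)
	}
	fmt.Println("new account token:", ok.Payload.Token) // ResetTokenOKBody.Token
}
// --- end editorial aside ---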
@@ -54,7 +54,7 @@ func (o *CreateFrontendReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /frontend] createFrontend", response, response.Code()) } } @@ -97,6 +97,11 @@ func (o *CreateFrontendCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the create frontend created response +func (o *CreateFrontendCreated) Code() int { + return 201 +} + func (o *CreateFrontendCreated) Error() string { return fmt.Sprintf("[POST /frontend][%d] createFrontendCreated %+v", 201, o.Payload) } @@ -159,6 +164,11 @@ func (o *CreateFrontendBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the create frontend bad request response +func (o *CreateFrontendBadRequest) Code() int { + return 400 +} + func (o *CreateFrontendBadRequest) Error() string { return fmt.Sprintf("[POST /frontend][%d] createFrontendBadRequest ", 400) } @@ -210,6 +220,11 @@ func (o *CreateFrontendUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the create frontend unauthorized response +func (o *CreateFrontendUnauthorized) Code() int { + return 401 +} + func (o *CreateFrontendUnauthorized) Error() string { return fmt.Sprintf("[POST /frontend][%d] createFrontendUnauthorized ", 401) } @@ -261,6 +276,11 @@ func (o *CreateFrontendNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the create frontend not found response +func (o *CreateFrontendNotFound) Code() int { + return 404 +} + func (o *CreateFrontendNotFound) Error() string { return fmt.Sprintf("[POST /frontend][%d] createFrontendNotFound ", 404) } @@ -312,6 +332,11 @@ func (o *CreateFrontendInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the create frontend internal server error response +func (o *CreateFrontendInternalServerError) Code() int { + return 500 +} + func (o *CreateFrontendInternalServerError) Error() string { return fmt.Sprintf("[POST /frontend][%d] createFrontendInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/create_identity_responses.go b/rest_client_zrok/admin/create_identity_responses.go index b41af209..17c7457b 100644 --- a/rest_client_zrok/admin/create_identity_responses.go +++ b/rest_client_zrok/admin/create_identity_responses.go @@ -42,7 +42,7 @@ func (o *CreateIdentityReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /identity] createIdentity", response, response.Code()) } } @@ -85,6 +85,11 @@ func (o *CreateIdentityCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the create identity created response +func (o *CreateIdentityCreated) Code() int { + return 201 +} + func (o *CreateIdentityCreated) Error() string { return fmt.Sprintf("[POST /identity][%d] createIdentityCreated %+v", 201, o.Payload) } @@ -147,6 +152,11 @@ func (o *CreateIdentityUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the create identity unauthorized response +func (o *CreateIdentityUnauthorized) Code() int { 
+ return 401 +} + func (o *CreateIdentityUnauthorized) Error() string { return fmt.Sprintf("[POST /identity][%d] createIdentityUnauthorized ", 401) } @@ -198,6 +208,11 @@ func (o *CreateIdentityInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the create identity internal server error response +func (o *CreateIdentityInternalServerError) Code() int { + return 500 +} + func (o *CreateIdentityInternalServerError) Error() string { return fmt.Sprintf("[POST /identity][%d] createIdentityInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/delete_frontend_responses.go b/rest_client_zrok/admin/delete_frontend_responses.go index 20942835..4c37fb23 100644 --- a/rest_client_zrok/admin/delete_frontend_responses.go +++ b/rest_client_zrok/admin/delete_frontend_responses.go @@ -45,7 +45,7 @@ func (o *DeleteFrontendReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /frontend] deleteFrontend", response, response.Code()) } } @@ -87,6 +87,11 @@ func (o *DeleteFrontendOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the delete frontend o k response +func (o *DeleteFrontendOK) Code() int { + return 200 +} + func (o *DeleteFrontendOK) Error() string { return fmt.Sprintf("[DELETE /frontend][%d] deleteFrontendOK ", 200) } @@ -138,6 +143,11 @@ func (o *DeleteFrontendUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the delete frontend unauthorized response +func (o *DeleteFrontendUnauthorized) Code() int { + return 401 +} + func (o *DeleteFrontendUnauthorized) Error() string { return fmt.Sprintf("[DELETE /frontend][%d] deleteFrontendUnauthorized ", 401) } @@ -189,6 +199,11 @@ func (o *DeleteFrontendNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the delete frontend not found response +func (o *DeleteFrontendNotFound) Code() int { + return 404 +} + func (o *DeleteFrontendNotFound) Error() string { return fmt.Sprintf("[DELETE /frontend][%d] deleteFrontendNotFound ", 404) } @@ -240,6 +255,11 @@ func (o *DeleteFrontendInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the delete frontend internal server error response +func (o *DeleteFrontendInternalServerError) Code() int { + return 500 +} + func (o *DeleteFrontendInternalServerError) Error() string { return fmt.Sprintf("[DELETE /frontend][%d] deleteFrontendInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/invite_token_generate_responses.go b/rest_client_zrok/admin/invite_token_generate_responses.go index c1394db8..c2c61e65 100644 --- a/rest_client_zrok/admin/invite_token_generate_responses.go +++ b/rest_client_zrok/admin/invite_token_generate_responses.go @@ -45,7 +45,7 @@ func (o *InviteTokenGenerateReader) ReadResponse(response runtime.ClientResponse } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /invite/token/generate] inviteTokenGenerate", response, response.Code()) } } @@ -87,6 +87,11 @@ func (o *InviteTokenGenerateCreated) IsCode(code int) bool { return code == 201 } +// Code gets the 
status code for the invite token generate created response +func (o *InviteTokenGenerateCreated) Code() int { + return 201 +} + func (o *InviteTokenGenerateCreated) Error() string { return fmt.Sprintf("[POST /invite/token/generate][%d] inviteTokenGenerateCreated ", 201) } @@ -138,6 +143,11 @@ func (o *InviteTokenGenerateBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the invite token generate bad request response +func (o *InviteTokenGenerateBadRequest) Code() int { + return 400 +} + func (o *InviteTokenGenerateBadRequest) Error() string { return fmt.Sprintf("[POST /invite/token/generate][%d] inviteTokenGenerateBadRequest ", 400) } @@ -189,6 +199,11 @@ func (o *InviteTokenGenerateUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the invite token generate unauthorized response +func (o *InviteTokenGenerateUnauthorized) Code() int { + return 401 +} + func (o *InviteTokenGenerateUnauthorized) Error() string { return fmt.Sprintf("[POST /invite/token/generate][%d] inviteTokenGenerateUnauthorized ", 401) } @@ -240,6 +255,11 @@ func (o *InviteTokenGenerateInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the invite token generate internal server error response +func (o *InviteTokenGenerateInternalServerError) Code() int { + return 500 +} + func (o *InviteTokenGenerateInternalServerError) Error() string { return fmt.Sprintf("[POST /invite/token/generate][%d] inviteTokenGenerateInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/list_frontends_responses.go b/rest_client_zrok/admin/list_frontends_responses.go index fe9d15fb..e03e7d17 100644 --- a/rest_client_zrok/admin/list_frontends_responses.go +++ b/rest_client_zrok/admin/list_frontends_responses.go @@ -42,7 +42,7 @@ func (o *ListFrontendsReader) ReadResponse(response runtime.ClientResponse, cons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /frontends] listFrontends", response, response.Code()) } } @@ -85,6 +85,11 @@ func (o *ListFrontendsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the list frontends o k response +func (o *ListFrontendsOK) Code() int { + return 200 +} + func (o *ListFrontendsOK) Error() string { return fmt.Sprintf("[GET /frontends][%d] listFrontendsOK %+v", 200, o.Payload) } @@ -145,6 +150,11 @@ func (o *ListFrontendsUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the list frontends unauthorized response +func (o *ListFrontendsUnauthorized) Code() int { + return 401 +} + func (o *ListFrontendsUnauthorized) Error() string { return fmt.Sprintf("[GET /frontends][%d] listFrontendsUnauthorized ", 401) } @@ -196,6 +206,11 @@ func (o *ListFrontendsInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the list frontends internal server error response +func (o *ListFrontendsInternalServerError) Code() int { + return 500 +} + func (o *ListFrontendsInternalServerError) Error() string { return fmt.Sprintf("[GET /frontends][%d] listFrontendsInternalServerError ", 500) } diff --git a/rest_client_zrok/admin/update_frontend_responses.go b/rest_client_zrok/admin/update_frontend_responses.go index f267e101..f1c633be 100644 --- a/rest_client_zrok/admin/update_frontend_responses.go +++ 
b/rest_client_zrok/admin/update_frontend_responses.go @@ -45,7 +45,7 @@ func (o *UpdateFrontendReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /frontend] updateFrontend", response, response.Code()) } } @@ -87,6 +87,11 @@ func (o *UpdateFrontendOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the update frontend o k response +func (o *UpdateFrontendOK) Code() int { + return 200 +} + func (o *UpdateFrontendOK) Error() string { return fmt.Sprintf("[PATCH /frontend][%d] updateFrontendOK ", 200) } @@ -138,6 +143,11 @@ func (o *UpdateFrontendUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the update frontend unauthorized response +func (o *UpdateFrontendUnauthorized) Code() int { + return 401 +} + func (o *UpdateFrontendUnauthorized) Error() string { return fmt.Sprintf("[PATCH /frontend][%d] updateFrontendUnauthorized ", 401) } @@ -189,6 +199,11 @@ func (o *UpdateFrontendNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the update frontend not found response +func (o *UpdateFrontendNotFound) Code() int { + return 404 +} + func (o *UpdateFrontendNotFound) Error() string { return fmt.Sprintf("[PATCH /frontend][%d] updateFrontendNotFound ", 404) } @@ -240,6 +255,11 @@ func (o *UpdateFrontendInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the update frontend internal server error response +func (o *UpdateFrontendInternalServerError) Code() int { + return 500 +} + func (o *UpdateFrontendInternalServerError) Error() string { return fmt.Sprintf("[PATCH /frontend][%d] updateFrontendInternalServerError ", 500) } diff --git a/rest_client_zrok/environment/disable_responses.go b/rest_client_zrok/environment/disable_responses.go index 7fd6a29d..60fb2fa9 100644 --- a/rest_client_zrok/environment/disable_responses.go +++ b/rest_client_zrok/environment/disable_responses.go @@ -39,7 +39,7 @@ func (o *DisableReader) ReadResponse(response runtime.ClientResponse, consumer r } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /disable] disable", response, response.Code()) } } @@ -81,6 +81,11 @@ func (o *DisableOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the disable o k response +func (o *DisableOK) Code() int { + return 200 +} + func (o *DisableOK) Error() string { return fmt.Sprintf("[POST /disable][%d] disableOK ", 200) } @@ -132,6 +137,11 @@ func (o *DisableUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the disable unauthorized response +func (o *DisableUnauthorized) Code() int { + return 401 +} + func (o *DisableUnauthorized) Error() string { return fmt.Sprintf("[POST /disable][%d] disableUnauthorized ", 401) } @@ -183,6 +193,11 @@ func (o *DisableInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the disable internal server error response +func (o *DisableInternalServerError) Code() int { + return 500 +} + func (o *DisableInternalServerError) Error() string { return fmt.Sprintf("[POST 
/disable][%d] disableInternalServerError ", 500) } diff --git a/rest_client_zrok/environment/enable_responses.go b/rest_client_zrok/environment/enable_responses.go index 7a3c461a..a79daa5a 100644 --- a/rest_client_zrok/environment/enable_responses.go +++ b/rest_client_zrok/environment/enable_responses.go @@ -48,7 +48,7 @@ func (o *EnableReader) ReadResponse(response runtime.ClientResponse, consumer ru } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /enable] enable", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *EnableCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the enable created response +func (o *EnableCreated) Code() int { + return 201 +} + func (o *EnableCreated) Error() string { return fmt.Sprintf("[POST /enable][%d] enableCreated %+v", 201, o.Payload) } @@ -153,6 +158,11 @@ func (o *EnableUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the enable unauthorized response +func (o *EnableUnauthorized) Code() int { + return 401 +} + func (o *EnableUnauthorized) Error() string { return fmt.Sprintf("[POST /enable][%d] enableUnauthorized ", 401) } @@ -204,6 +214,11 @@ func (o *EnableNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the enable not found response +func (o *EnableNotFound) Code() int { + return 404 +} + func (o *EnableNotFound) Error() string { return fmt.Sprintf("[POST /enable][%d] enableNotFound ", 404) } @@ -255,6 +270,11 @@ func (o *EnableInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the enable internal server error response +func (o *EnableInternalServerError) Code() int { + return 500 +} + func (o *EnableInternalServerError) Error() string { return fmt.Sprintf("[POST /enable][%d] enableInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/configuration_responses.go b/rest_client_zrok/metadata/configuration_responses.go index 22e28b70..792cd003 100644 --- a/rest_client_zrok/metadata/configuration_responses.go +++ b/rest_client_zrok/metadata/configuration_responses.go @@ -30,7 +30,7 @@ func (o *ConfigurationReader) ReadResponse(response runtime.ClientResponse, cons } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /configuration] configuration", response, response.Code()) } } @@ -73,6 +73,11 @@ func (o *ConfigurationOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the configuration o k response +func (o *ConfigurationOK) Code() int { + return 200 +} + func (o *ConfigurationOK) Error() string { return fmt.Sprintf("[GET /configuration][%d] configurationOK %+v", 200, o.Payload) } diff --git a/rest_client_zrok/metadata/get_account_detail_responses.go b/rest_client_zrok/metadata/get_account_detail_responses.go index 2d51df0d..119c3335 100644 --- a/rest_client_zrok/metadata/get_account_detail_responses.go +++ b/rest_client_zrok/metadata/get_account_detail_responses.go @@ -36,7 +36,7 @@ func (o *GetAccountDetailReader) ReadResponse(response runtime.ClientResponse, c } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any 
response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /detail/account] getAccountDetail", response, response.Code()) } } @@ -79,6 +79,11 @@ func (o *GetAccountDetailOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get account detail o k response +func (o *GetAccountDetailOK) Code() int { + return 200 +} + func (o *GetAccountDetailOK) Error() string { return fmt.Sprintf("[GET /detail/account][%d] getAccountDetailOK %+v", 200, o.Payload) } @@ -139,6 +144,11 @@ func (o *GetAccountDetailInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get account detail internal server error response +func (o *GetAccountDetailInternalServerError) Code() int { + return 500 +} + func (o *GetAccountDetailInternalServerError) Error() string { return fmt.Sprintf("[GET /detail/account][%d] getAccountDetailInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_account_metrics_responses.go b/rest_client_zrok/metadata/get_account_metrics_responses.go index 5613b3a5..6cc34746 100644 --- a/rest_client_zrok/metadata/get_account_metrics_responses.go +++ b/rest_client_zrok/metadata/get_account_metrics_responses.go @@ -42,7 +42,7 @@ func (o *GetAccountMetricsReader) ReadResponse(response runtime.ClientResponse, } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /metrics/account] getAccountMetrics", response, response.Code()) } } @@ -85,6 +85,11 @@ func (o *GetAccountMetricsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get account metrics o k response +func (o *GetAccountMetricsOK) Code() int { + return 200 +} + func (o *GetAccountMetricsOK) Error() string { return fmt.Sprintf("[GET /metrics/account][%d] getAccountMetricsOK %+v", 200, o.Payload) } @@ -147,6 +152,11 @@ func (o *GetAccountMetricsBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get account metrics bad request response +func (o *GetAccountMetricsBadRequest) Code() int { + return 400 +} + func (o *GetAccountMetricsBadRequest) Error() string { return fmt.Sprintf("[GET /metrics/account][%d] getAccountMetricsBadRequest ", 400) } @@ -198,6 +208,11 @@ func (o *GetAccountMetricsInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get account metrics internal server error response +func (o *GetAccountMetricsInternalServerError) Code() int { + return 500 +} + func (o *GetAccountMetricsInternalServerError) Error() string { return fmt.Sprintf("[GET /metrics/account][%d] getAccountMetricsInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_environment_detail_responses.go b/rest_client_zrok/metadata/get_environment_detail_responses.go index 35d86c4c..7247746c 100644 --- a/rest_client_zrok/metadata/get_environment_detail_responses.go +++ b/rest_client_zrok/metadata/get_environment_detail_responses.go @@ -48,7 +48,7 @@ func (o *GetEnvironmentDetailReader) ReadResponse(response runtime.ClientRespons } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET 
/detail/environment/{envZId}] getEnvironmentDetail", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *GetEnvironmentDetailOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get environment detail o k response +func (o *GetEnvironmentDetailOK) Code() int { + return 200 +} + func (o *GetEnvironmentDetailOK) Error() string { return fmt.Sprintf("[GET /detail/environment/{envZId}][%d] getEnvironmentDetailOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *GetEnvironmentDetailUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the get environment detail unauthorized response +func (o *GetEnvironmentDetailUnauthorized) Code() int { + return 401 +} + func (o *GetEnvironmentDetailUnauthorized) Error() string { return fmt.Sprintf("[GET /detail/environment/{envZId}][%d] getEnvironmentDetailUnauthorized ", 401) } @@ -204,6 +214,11 @@ func (o *GetEnvironmentDetailNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get environment detail not found response +func (o *GetEnvironmentDetailNotFound) Code() int { + return 404 +} + func (o *GetEnvironmentDetailNotFound) Error() string { return fmt.Sprintf("[GET /detail/environment/{envZId}][%d] getEnvironmentDetailNotFound ", 404) } @@ -255,6 +270,11 @@ func (o *GetEnvironmentDetailInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get environment detail internal server error response +func (o *GetEnvironmentDetailInternalServerError) Code() int { + return 500 +} + func (o *GetEnvironmentDetailInternalServerError) Error() string { return fmt.Sprintf("[GET /detail/environment/{envZId}][%d] getEnvironmentDetailInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_environment_metrics_responses.go b/rest_client_zrok/metadata/get_environment_metrics_responses.go index e0fb3eac..0e331725 100644 --- a/rest_client_zrok/metadata/get_environment_metrics_responses.go +++ b/rest_client_zrok/metadata/get_environment_metrics_responses.go @@ -48,7 +48,7 @@ func (o *GetEnvironmentMetricsReader) ReadResponse(response runtime.ClientRespon } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /metrics/environment/{envId}] getEnvironmentMetrics", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *GetEnvironmentMetricsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get environment metrics o k response +func (o *GetEnvironmentMetricsOK) Code() int { + return 200 +} + func (o *GetEnvironmentMetricsOK) Error() string { return fmt.Sprintf("[GET /metrics/environment/{envId}][%d] getEnvironmentMetricsOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *GetEnvironmentMetricsBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get environment metrics bad request response +func (o *GetEnvironmentMetricsBadRequest) Code() int { + return 400 +} + func (o *GetEnvironmentMetricsBadRequest) Error() string { return fmt.Sprintf("[GET /metrics/environment/{envId}][%d] getEnvironmentMetricsBadRequest ", 400) } @@ -204,6 +214,11 @@ func (o *GetEnvironmentMetricsUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the get environment metrics unauthorized response +func (o 
*GetEnvironmentMetricsUnauthorized) Code() int { + return 401 +} + func (o *GetEnvironmentMetricsUnauthorized) Error() string { return fmt.Sprintf("[GET /metrics/environment/{envId}][%d] getEnvironmentMetricsUnauthorized ", 401) } @@ -255,6 +270,11 @@ func (o *GetEnvironmentMetricsInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get environment metrics internal server error response +func (o *GetEnvironmentMetricsInternalServerError) Code() int { + return 500 +} + func (o *GetEnvironmentMetricsInternalServerError) Error() string { return fmt.Sprintf("[GET /metrics/environment/{envId}][%d] getEnvironmentMetricsInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_frontend_detail_responses.go b/rest_client_zrok/metadata/get_frontend_detail_responses.go index 90058d0b..14020476 100644 --- a/rest_client_zrok/metadata/get_frontend_detail_responses.go +++ b/rest_client_zrok/metadata/get_frontend_detail_responses.go @@ -48,7 +48,7 @@ func (o *GetFrontendDetailReader) ReadResponse(response runtime.ClientResponse, } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /detail/frontend/{feId}] getFrontendDetail", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *GetFrontendDetailOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get frontend detail o k response +func (o *GetFrontendDetailOK) Code() int { + return 200 +} + func (o *GetFrontendDetailOK) Error() string { return fmt.Sprintf("[GET /detail/frontend/{feId}][%d] getFrontendDetailOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *GetFrontendDetailUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the get frontend detail unauthorized response +func (o *GetFrontendDetailUnauthorized) Code() int { + return 401 +} + func (o *GetFrontendDetailUnauthorized) Error() string { return fmt.Sprintf("[GET /detail/frontend/{feId}][%d] getFrontendDetailUnauthorized ", 401) } @@ -204,6 +214,11 @@ func (o *GetFrontendDetailNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get frontend detail not found response +func (o *GetFrontendDetailNotFound) Code() int { + return 404 +} + func (o *GetFrontendDetailNotFound) Error() string { return fmt.Sprintf("[GET /detail/frontend/{feId}][%d] getFrontendDetailNotFound ", 404) } @@ -255,6 +270,11 @@ func (o *GetFrontendDetailInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get frontend detail internal server error response +func (o *GetFrontendDetailInternalServerError) Code() int { + return 500 +} + func (o *GetFrontendDetailInternalServerError) Error() string { return fmt.Sprintf("[GET /detail/frontend/{feId}][%d] getFrontendDetailInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_share_detail_responses.go b/rest_client_zrok/metadata/get_share_detail_responses.go index bc4407b0..420efde5 100644 --- a/rest_client_zrok/metadata/get_share_detail_responses.go +++ b/rest_client_zrok/metadata/get_share_detail_responses.go @@ -48,7 +48,7 @@ func (o *GetShareDetailReader) ReadResponse(response runtime.ClientResponse, con } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this 
endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /detail/share/{shrToken}] getShareDetail", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *GetShareDetailOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get share detail o k response +func (o *GetShareDetailOK) Code() int { + return 200 +} + func (o *GetShareDetailOK) Error() string { return fmt.Sprintf("[GET /detail/share/{shrToken}][%d] getShareDetailOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *GetShareDetailUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the get share detail unauthorized response +func (o *GetShareDetailUnauthorized) Code() int { + return 401 +} + func (o *GetShareDetailUnauthorized) Error() string { return fmt.Sprintf("[GET /detail/share/{shrToken}][%d] getShareDetailUnauthorized ", 401) } @@ -204,6 +214,11 @@ func (o *GetShareDetailNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the get share detail not found response +func (o *GetShareDetailNotFound) Code() int { + return 404 +} + func (o *GetShareDetailNotFound) Error() string { return fmt.Sprintf("[GET /detail/share/{shrToken}][%d] getShareDetailNotFound ", 404) } @@ -255,6 +270,11 @@ func (o *GetShareDetailInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get share detail internal server error response +func (o *GetShareDetailInternalServerError) Code() int { + return 500 +} + func (o *GetShareDetailInternalServerError) Error() string { return fmt.Sprintf("[GET /detail/share/{shrToken}][%d] getShareDetailInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/get_share_metrics_responses.go b/rest_client_zrok/metadata/get_share_metrics_responses.go index d6d0500d..1a189a63 100644 --- a/rest_client_zrok/metadata/get_share_metrics_responses.go +++ b/rest_client_zrok/metadata/get_share_metrics_responses.go @@ -48,7 +48,7 @@ func (o *GetShareMetricsReader) ReadResponse(response runtime.ClientResponse, co } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /metrics/share/{shrToken}] getShareMetrics", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *GetShareMetricsOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the get share metrics o k response +func (o *GetShareMetricsOK) Code() int { + return 200 +} + func (o *GetShareMetricsOK) Error() string { return fmt.Sprintf("[GET /metrics/share/{shrToken}][%d] getShareMetricsOK %+v", 200, o.Payload) } @@ -153,6 +158,11 @@ func (o *GetShareMetricsBadRequest) IsCode(code int) bool { return code == 400 } +// Code gets the status code for the get share metrics bad request response +func (o *GetShareMetricsBadRequest) Code() int { + return 400 +} + func (o *GetShareMetricsBadRequest) Error() string { return fmt.Sprintf("[GET /metrics/share/{shrToken}][%d] getShareMetricsBadRequest ", 400) } @@ -204,6 +214,11 @@ func (o *GetShareMetricsUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the get share metrics unauthorized response +func (o *GetShareMetricsUnauthorized) Code() int { + return 401 +} + func (o *GetShareMetricsUnauthorized) Error() string { return fmt.Sprintf("[GET /metrics/share/{shrToken}][%d] 
getShareMetricsUnauthorized ", 401) } @@ -255,6 +270,11 @@ func (o *GetShareMetricsInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the get share metrics internal server error response +func (o *GetShareMetricsInternalServerError) Code() int { + return 500 +} + func (o *GetShareMetricsInternalServerError) Error() string { return fmt.Sprintf("[GET /metrics/share/{shrToken}][%d] getShareMetricsInternalServerError ", 500) } diff --git a/rest_client_zrok/metadata/overview_responses.go b/rest_client_zrok/metadata/overview_responses.go index c16b2a21..9b3b6be4 100644 --- a/rest_client_zrok/metadata/overview_responses.go +++ b/rest_client_zrok/metadata/overview_responses.go @@ -36,7 +36,7 @@ func (o *OverviewReader) ReadResponse(response runtime.ClientResponse, consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /overview] overview", response, response.Code()) } } @@ -79,6 +79,11 @@ func (o *OverviewOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the overview o k response +func (o *OverviewOK) Code() int { + return 200 +} + func (o *OverviewOK) Error() string { return fmt.Sprintf("[GET /overview][%d] overviewOK %+v", 200, o.Payload) } @@ -142,6 +147,11 @@ func (o *OverviewInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the overview internal server error response +func (o *OverviewInternalServerError) Code() int { + return 500 +} + func (o *OverviewInternalServerError) Error() string { return fmt.Sprintf("[GET /overview][%d] overviewInternalServerError %+v", 500, o.Payload) } diff --git a/rest_client_zrok/metadata/version_responses.go b/rest_client_zrok/metadata/version_responses.go index b1388fed..97a23fb1 100644 --- a/rest_client_zrok/metadata/version_responses.go +++ b/rest_client_zrok/metadata/version_responses.go @@ -30,7 +30,7 @@ func (o *VersionReader) ReadResponse(response runtime.ClientResponse, consumer r } return result, nil default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[GET /version] version", response, response.Code()) } } @@ -73,6 +73,11 @@ func (o *VersionOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the version o k response +func (o *VersionOK) Code() int { + return 200 +} + func (o *VersionOK) Error() string { return fmt.Sprintf("[GET /version][%d] versionOK %+v", 200, o.Payload) } diff --git a/rest_client_zrok/share/access_responses.go b/rest_client_zrok/share/access_responses.go index 8d1e8e18..3e505933 100644 --- a/rest_client_zrok/share/access_responses.go +++ b/rest_client_zrok/share/access_responses.go @@ -48,7 +48,7 @@ func (o *AccessReader) ReadResponse(response runtime.ClientResponse, consumer ru } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /access] access", response, response.Code()) } } @@ -91,6 +91,11 @@ func (o *AccessCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the access created response +func (o 
*AccessCreated) Code() int { + return 201 +} + func (o *AccessCreated) Error() string { return fmt.Sprintf("[POST /access][%d] accessCreated %+v", 201, o.Payload) } @@ -153,6 +158,11 @@ func (o *AccessUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the access unauthorized response +func (o *AccessUnauthorized) Code() int { + return 401 +} + func (o *AccessUnauthorized) Error() string { return fmt.Sprintf("[POST /access][%d] accessUnauthorized ", 401) } @@ -204,6 +214,11 @@ func (o *AccessNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the access not found response +func (o *AccessNotFound) Code() int { + return 404 +} + func (o *AccessNotFound) Error() string { return fmt.Sprintf("[POST /access][%d] accessNotFound ", 404) } @@ -255,6 +270,11 @@ func (o *AccessInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the access internal server error response +func (o *AccessInternalServerError) Code() int { + return 500 +} + func (o *AccessInternalServerError) Error() string { return fmt.Sprintf("[POST /access][%d] accessInternalServerError ", 500) } diff --git a/rest_client_zrok/share/share_responses.go b/rest_client_zrok/share/share_responses.go index 8e1c6150..8af0e374 100644 --- a/rest_client_zrok/share/share_responses.go +++ b/rest_client_zrok/share/share_responses.go @@ -41,6 +41,12 @@ func (o *ShareReader) ReadResponse(response runtime.ClientResponse, consumer run return nil, err } return nil, result + case 409: + result := NewShareConflict() + if err := result.readResponse(response, consumer, o.formats); err != nil { + return nil, err + } + return nil, result case 422: result := NewShareUnprocessableEntity() if err := result.readResponse(response, consumer, o.formats); err != nil { @@ -54,7 +60,7 @@ func (o *ShareReader) ReadResponse(response runtime.ClientResponse, consumer run } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[POST /share] share", response, response.Code()) } } @@ -97,6 +103,11 @@ func (o *ShareCreated) IsCode(code int) bool { return code == 201 } +// Code gets the status code for the share created response +func (o *ShareCreated) Code() int { + return 201 +} + func (o *ShareCreated) Error() string { return fmt.Sprintf("[POST /share][%d] shareCreated %+v", 201, o.Payload) } @@ -159,6 +170,11 @@ func (o *ShareUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the share unauthorized response +func (o *ShareUnauthorized) Code() int { + return 401 +} + func (o *ShareUnauthorized) Error() string { return fmt.Sprintf("[POST /share][%d] shareUnauthorized ", 401) } @@ -210,6 +226,11 @@ func (o *ShareNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the share not found response +func (o *ShareNotFound) Code() int { + return 404 +} + func (o *ShareNotFound) Error() string { return fmt.Sprintf("[POST /share][%d] shareNotFound ", 404) } @@ -223,6 +244,62 @@ func (o *ShareNotFound) readResponse(response runtime.ClientResponse, consumer r return nil } +// NewShareConflict creates a ShareConflict with default headers values +func NewShareConflict() *ShareConflict { + return &ShareConflict{} +} + +/* +ShareConflict describes a response with status code 409, with default header values. 
+ +conflict +*/ +type ShareConflict struct { +} + +// IsSuccess returns true when this share conflict response has a 2xx status code +func (o *ShareConflict) IsSuccess() bool { + return false +} + +// IsRedirect returns true when this share conflict response has a 3xx status code +func (o *ShareConflict) IsRedirect() bool { + return false +} + +// IsClientError returns true when this share conflict response has a 4xx status code +func (o *ShareConflict) IsClientError() bool { + return true +} + +// IsServerError returns true when this share conflict response has a 5xx status code +func (o *ShareConflict) IsServerError() bool { + return false +} + +// IsCode returns true when this share conflict response a status code equal to that given +func (o *ShareConflict) IsCode(code int) bool { + return code == 409 +} + +// Code gets the status code for the share conflict response +func (o *ShareConflict) Code() int { + return 409 +} + +func (o *ShareConflict) Error() string { + return fmt.Sprintf("[POST /share][%d] shareConflict ", 409) +} + +func (o *ShareConflict) String() string { + return fmt.Sprintf("[POST /share][%d] shareConflict ", 409) +} + +func (o *ShareConflict) readResponse(response runtime.ClientResponse, consumer runtime.Consumer, formats strfmt.Registry) error { + + return nil +} + // NewShareUnprocessableEntity creates a ShareUnprocessableEntity with default headers values func NewShareUnprocessableEntity() *ShareUnprocessableEntity { return &ShareUnprocessableEntity{} @@ -261,6 +338,11 @@ func (o *ShareUnprocessableEntity) IsCode(code int) bool { return code == 422 } +// Code gets the status code for the share unprocessable entity response +func (o *ShareUnprocessableEntity) Code() int { + return 422 +} + func (o *ShareUnprocessableEntity) Error() string { return fmt.Sprintf("[POST /share][%d] shareUnprocessableEntity ", 422) } @@ -313,6 +395,11 @@ func (o *ShareInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the share internal server error response +func (o *ShareInternalServerError) Code() int { + return 500 +} + func (o *ShareInternalServerError) Error() string { return fmt.Sprintf("[POST /share][%d] shareInternalServerError %+v", 500, o.Payload) } diff --git a/rest_client_zrok/share/unaccess_responses.go b/rest_client_zrok/share/unaccess_responses.go index 853a66af..cf0ce393 100644 --- a/rest_client_zrok/share/unaccess_responses.go +++ b/rest_client_zrok/share/unaccess_responses.go @@ -45,7 +45,7 @@ func (o *UnaccessReader) ReadResponse(response runtime.ClientResponse, consumer } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /unaccess] unaccess", response, response.Code()) } } @@ -87,6 +87,11 @@ func (o *UnaccessOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the unaccess o k response +func (o *UnaccessOK) Code() int { + return 200 +} + func (o *UnaccessOK) Error() string { return fmt.Sprintf("[DELETE /unaccess][%d] unaccessOK ", 200) } @@ -138,6 +143,11 @@ func (o *UnaccessUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the unaccess unauthorized response +func (o *UnaccessUnauthorized) Code() int { + return 401 +} + func (o *UnaccessUnauthorized) Error() string { return fmt.Sprintf("[DELETE /unaccess][%d] unaccessUnauthorized ", 401) } @@ -189,6 +199,11 @@ 
func (o *UnaccessNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the unaccess not found response +func (o *UnaccessNotFound) Code() int { + return 404 +} + func (o *UnaccessNotFound) Error() string { return fmt.Sprintf("[DELETE /unaccess][%d] unaccessNotFound ", 404) } @@ -240,6 +255,11 @@ func (o *UnaccessInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the unaccess internal server error response +func (o *UnaccessInternalServerError) Code() int { + return 500 +} + func (o *UnaccessInternalServerError) Error() string { return fmt.Sprintf("[DELETE /unaccess][%d] unaccessInternalServerError ", 500) } diff --git a/rest_client_zrok/share/unshare_responses.go b/rest_client_zrok/share/unshare_responses.go index e30e2e50..71910bf5 100644 --- a/rest_client_zrok/share/unshare_responses.go +++ b/rest_client_zrok/share/unshare_responses.go @@ -48,7 +48,7 @@ func (o *UnshareReader) ReadResponse(response runtime.ClientResponse, consumer r } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[DELETE /unshare] unshare", response, response.Code()) } } @@ -90,6 +90,11 @@ func (o *UnshareOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the unshare o k response +func (o *UnshareOK) Code() int { + return 200 +} + func (o *UnshareOK) Error() string { return fmt.Sprintf("[DELETE /unshare][%d] unshareOK ", 200) } @@ -141,6 +146,11 @@ func (o *UnshareUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the unshare unauthorized response +func (o *UnshareUnauthorized) Code() int { + return 401 +} + func (o *UnshareUnauthorized) Error() string { return fmt.Sprintf("[DELETE /unshare][%d] unshareUnauthorized ", 401) } @@ -192,6 +202,11 @@ func (o *UnshareNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the unshare not found response +func (o *UnshareNotFound) Code() int { + return 404 +} + func (o *UnshareNotFound) Error() string { return fmt.Sprintf("[DELETE /unshare][%d] unshareNotFound ", 404) } @@ -244,6 +259,11 @@ func (o *UnshareInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the unshare internal server error response +func (o *UnshareInternalServerError) Code() int { + return 500 +} + func (o *UnshareInternalServerError) Error() string { return fmt.Sprintf("[DELETE /unshare][%d] unshareInternalServerError %+v", 500, o.Payload) } diff --git a/rest_client_zrok/share/update_share_responses.go b/rest_client_zrok/share/update_share_responses.go index e18e45bd..96726ef6 100644 --- a/rest_client_zrok/share/update_share_responses.go +++ b/rest_client_zrok/share/update_share_responses.go @@ -45,7 +45,7 @@ func (o *UpdateShareReader) ReadResponse(response runtime.ClientResponse, consum } return nil, result default: - return nil, runtime.NewAPIError("response status code does not match any response statuses defined for this endpoint in the swagger spec", response, response.Code()) + return nil, runtime.NewAPIError("[PATCH /share] updateShare", response, response.Code()) } } @@ -87,6 +87,11 @@ func (o *UpdateShareOK) IsCode(code int) bool { return code == 200 } +// Code gets the status code for the update share o k response +func (o *UpdateShareOK) Code() int { + return 200 +} + func (o 
*UpdateShareOK) Error() string { return fmt.Sprintf("[PATCH /share][%d] updateShareOK ", 200) } @@ -138,6 +143,11 @@ func (o *UpdateShareUnauthorized) IsCode(code int) bool { return code == 401 } +// Code gets the status code for the update share unauthorized response +func (o *UpdateShareUnauthorized) Code() int { + return 401 +} + func (o *UpdateShareUnauthorized) Error() string { return fmt.Sprintf("[PATCH /share][%d] updateShareUnauthorized ", 401) } @@ -189,6 +199,11 @@ func (o *UpdateShareNotFound) IsCode(code int) bool { return code == 404 } +// Code gets the status code for the update share not found response +func (o *UpdateShareNotFound) Code() int { + return 404 +} + func (o *UpdateShareNotFound) Error() string { return fmt.Sprintf("[PATCH /share][%d] updateShareNotFound ", 404) } @@ -240,6 +255,11 @@ func (o *UpdateShareInternalServerError) IsCode(code int) bool { return code == 500 } +// Code gets the status code for the update share internal server error response +func (o *UpdateShareInternalServerError) Code() int { + return 500 +} + func (o *UpdateShareInternalServerError) Error() string { return fmt.Sprintf("[PATCH /share][%d] updateShareInternalServerError ", 500) } diff --git a/rest_model_zrok/change_password_request.go b/rest_model_zrok/change_password_request.go new file mode 100644 index 00000000..962219d1 --- /dev/null +++ b/rest_model_zrok/change_password_request.go @@ -0,0 +1,56 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package rest_model_zrok + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "context" + + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" +) + +// ChangePasswordRequest change password request +// +// swagger:model changePasswordRequest +type ChangePasswordRequest struct { + + // email + Email string `json:"email,omitempty"` + + // new password + NewPassword string `json:"newPassword,omitempty"` + + // old password + OldPassword string `json:"oldPassword,omitempty"` +} + +// Validate validates this change password request +func (m *ChangePasswordRequest) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this change password request based on context it is used +func (m *ChangePasswordRequest) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (m *ChangePasswordRequest) MarshalBinary() ([]byte, error) { + if m == nil { + return nil, nil + } + return swag.WriteJSON(m) +} + +// UnmarshalBinary interface implementation +func (m *ChangePasswordRequest) UnmarshalBinary(b []byte) error { + var res ChangePasswordRequest + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *m = res + return nil +} diff --git a/rest_model_zrok/configuration.go b/rest_model_zrok/configuration.go index fad8c738..680a1254 100644 --- a/rest_model_zrok/configuration.go +++ b/rest_model_zrok/configuration.go @@ -87,6 +87,11 @@ func (m *Configuration) ContextValidate(ctx context.Context, formats strfmt.Regi func (m *Configuration) contextValidatePasswordRequirements(ctx context.Context, formats strfmt.Registry) error { if m.PasswordRequirements != nil { + + if swag.IsZero(m.PasswordRequirements) { // not required + return nil + } + if err := m.PasswordRequirements.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("passwordRequirements") diff --git 
a/rest_model_zrok/environment_and_resources.go b/rest_model_zrok/environment_and_resources.go index 493d2818..b48a9ac2 100644 --- a/rest_model_zrok/environment_and_resources.go +++ b/rest_model_zrok/environment_and_resources.go @@ -128,6 +128,11 @@ func (m *EnvironmentAndResources) ContextValidate(ctx context.Context, formats s func (m *EnvironmentAndResources) contextValidateEnvironment(ctx context.Context, formats strfmt.Registry) error { if m.Environment != nil { + + if swag.IsZero(m.Environment) { // not required + return nil + } + if err := m.Environment.ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("environment") diff --git a/rest_model_zrok/environments.go b/rest_model_zrok/environments.go index f98b0dcf..3789676a 100644 --- a/rest_model_zrok/environments.go +++ b/rest_model_zrok/environments.go @@ -54,6 +54,11 @@ func (m Environments) ContextValidate(ctx context.Context, formats strfmt.Regist for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/rest_model_zrok/frontend.go b/rest_model_zrok/frontend.go index 3185dcaf..5e13f2c9 100644 --- a/rest_model_zrok/frontend.go +++ b/rest_model_zrok/frontend.go @@ -26,6 +26,9 @@ type Frontend struct { // shr token ShrToken string `json:"shrToken,omitempty"` + // token + Token string `json:"token,omitempty"` + // updated at UpdatedAt int64 `json:"updatedAt,omitempty"` diff --git a/rest_model_zrok/frontends.go b/rest_model_zrok/frontends.go index 62f23b2a..0cdd8c52 100644 --- a/rest_model_zrok/frontends.go +++ b/rest_model_zrok/frontends.go @@ -54,6 +54,11 @@ func (m Frontends) ContextValidate(ctx context.Context, formats strfmt.Registry) for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/rest_model_zrok/metrics.go b/rest_model_zrok/metrics.go index 1aa24172..7c069ad6 100644 --- a/rest_model_zrok/metrics.go +++ b/rest_model_zrok/metrics.go @@ -91,6 +91,11 @@ func (m *Metrics) contextValidateSamples(ctx context.Context, formats strfmt.Reg for i := 0; i < len(m.Samples); i++ { if m.Samples[i] != nil { + + if swag.IsZero(m.Samples[i]) { // not required + return nil + } + if err := m.Samples[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("samples" + "." + strconv.Itoa(i)) diff --git a/rest_model_zrok/overview.go b/rest_model_zrok/overview.go index fc2dbeb0..a9fd4f6f 100644 --- a/rest_model_zrok/overview.go +++ b/rest_model_zrok/overview.go @@ -85,6 +85,11 @@ func (m *Overview) contextValidateEnvironments(ctx context.Context, formats strf for i := 0; i < len(m.Environments); i++ { if m.Environments[i] != nil { + + if swag.IsZero(m.Environments[i]) { // not required + return nil + } + if err := m.Environments[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("environments" + "." 
+ strconv.Itoa(i)) diff --git a/rest_model_zrok/public_frontend_list.go b/rest_model_zrok/public_frontend_list.go index af910b1d..7f294bc5 100644 --- a/rest_model_zrok/public_frontend_list.go +++ b/rest_model_zrok/public_frontend_list.go @@ -54,6 +54,11 @@ func (m PublicFrontendList) ContextValidate(ctx context.Context, formats strfmt. for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/rest_model_zrok/share_request.go b/rest_model_zrok/share_request.go index 8fddf367..6125bd95 100644 --- a/rest_model_zrok/share_request.go +++ b/rest_model_zrok/share_request.go @@ -28,7 +28,7 @@ type ShareRequest struct { AuthUsers []*AuthUser `json:"authUsers"` // backend mode - // Enum: [proxy web tcpTunnel udpTunnel caddy drive] + // Enum: [proxy web tcpTunnel udpTunnel caddy drive socks] BackendMode string `json:"backendMode,omitempty"` // backend proxy endpoint @@ -56,6 +56,9 @@ type ShareRequest struct { // share mode // Enum: [public private] ShareMode string `json:"shareMode,omitempty"` + + // unique name + UniqueName string `json:"uniqueName,omitempty"` } // Validate validates this share request @@ -114,7 +117,7 @@ var shareRequestTypeBackendModePropEnum []interface{} func init() { var res []string - if err := json.Unmarshal([]byte(`["proxy","web","tcpTunnel","udpTunnel","caddy","drive"]`), &res); err != nil { + if err := json.Unmarshal([]byte(`["proxy","web","tcpTunnel","udpTunnel","caddy","drive","socks"]`), &res); err != nil { panic(err) } for _, v := range res { @@ -141,6 +144,9 @@ const ( // ShareRequestBackendModeDrive captures enum value "drive" ShareRequestBackendModeDrive string = "drive" + + // ShareRequestBackendModeSocks captures enum value "socks" + ShareRequestBackendModeSocks string = "socks" ) // prop value enum @@ -267,6 +273,11 @@ func (m *ShareRequest) contextValidateAuthUsers(ctx context.Context, formats str for i := 0; i < len(m.AuthUsers); i++ { if m.AuthUsers[i] != nil { + + if swag.IsZero(m.AuthUsers[i]) { // not required + return nil + } + if err := m.AuthUsers[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName("authUsers" + "." 
+ strconv.Itoa(i)) diff --git a/rest_model_zrok/shares.go b/rest_model_zrok/shares.go index 42b60acd..527919f9 100644 --- a/rest_model_zrok/shares.go +++ b/rest_model_zrok/shares.go @@ -54,6 +54,11 @@ func (m Shares) ContextValidate(ctx context.Context, formats strfmt.Registry) er for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/rest_model_zrok/spark_data.go b/rest_model_zrok/spark_data.go index 1d0f6635..8faf73bd 100644 --- a/rest_model_zrok/spark_data.go +++ b/rest_model_zrok/spark_data.go @@ -54,6 +54,11 @@ func (m SparkData) ContextValidate(ctx context.Context, formats strfmt.Registry) for i := 0; i < len(m); i++ { if m[i] != nil { + + if swag.IsZero(m[i]) { // not required + return nil + } + if err := m[i].ContextValidate(ctx, formats); err != nil { if ve, ok := err.(*errors.Validation); ok { return ve.ValidateName(strconv.Itoa(i)) diff --git a/rest_server_zrok/embedded_spec.go b/rest_server_zrok/embedded_spec.go index 67e51899..0997d670 100644 --- a/rest_server_zrok/embedded_spec.go +++ b/rest_server_zrok/embedded_spec.go @@ -74,6 +74,48 @@ func init() { } } }, + "/changePassword": { + "post": { + "security": [ + { + "key": [] + } + ], + "tags": [ + "account" + ], + "operationId": "changePassword", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/changePasswordRequest" + } + } + ], + "responses": { + "200": { + "description": "changed password" + }, + "400": { + "description": "password not changed" + }, + "401": { + "description": "unauthorized" + }, + "422": { + "description": "password validation failure", + "schema": { + "$ref": "#/definitions/errorMessage" + } + }, + "500": { + "description": "internal server error" + } + } + } + }, "/configuration": { "get": { "tags": [ @@ -832,6 +874,50 @@ func init() { } } }, + "/resetToken": { + "post": { + "security": [ + { + "key": [] + } + ], + "tags": [ + "account" + ], + "operationId": "resetToken", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "properties": { + "emailAddress": { + "type": "string" + } + } + } + } + ], + "responses": { + "200": { + "description": "token reset", + "schema": { + "properties": { + "token": { + "type": "string" + } + } + } + }, + "404": { + "description": "account not found" + }, + "500": { + "description": "internal server error" + } + } + } + }, "/share": { "post": { "security": [ @@ -865,6 +951,9 @@ func init() { "404": { "description": "not found" }, + "409": { + "description": "conflict" + }, "422": { "description": "unprocessable" }, @@ -1068,6 +1157,20 @@ func init() { } } }, + "changePasswordRequest": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "newPassword": { + "type": "string" + }, + "oldPassword": { + "type": "string" + } + } + }, "configuration": { "type": "object", "properties": { @@ -1215,6 +1318,9 @@ func init() { "shrToken": { "type": "string" }, + "token": { + "type": "string" + }, "updatedAt": { "type": "integer" }, @@ -1473,7 +1579,8 @@ func init() { "tcpTunnel", "udpTunnel", "caddy", - "drive" + "drive", + "socks" ] }, "backendProxyEndpoint": { @@ -1513,6 +1620,9 @@ func init() { "public", "private" ] + }, + "uniqueName": { + "type": "string" } } }, @@ -1691,6 +1801,48 @@ func init() { } } }, + "/changePassword": { + "post": { + "security": [ + { + "key": [] + } + ], + 
"tags": [ + "account" + ], + "operationId": "changePassword", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "$ref": "#/definitions/changePasswordRequest" + } + } + ], + "responses": { + "200": { + "description": "changed password" + }, + "400": { + "description": "password not changed" + }, + "401": { + "description": "unauthorized" + }, + "422": { + "description": "password validation failure", + "schema": { + "$ref": "#/definitions/errorMessage" + } + }, + "500": { + "description": "internal server error" + } + } + } + }, "/configuration": { "get": { "tags": [ @@ -2449,6 +2601,50 @@ func init() { } } }, + "/resetToken": { + "post": { + "security": [ + { + "key": [] + } + ], + "tags": [ + "account" + ], + "operationId": "resetToken", + "parameters": [ + { + "name": "body", + "in": "body", + "schema": { + "properties": { + "emailAddress": { + "type": "string" + } + } + } + } + ], + "responses": { + "200": { + "description": "token reset", + "schema": { + "properties": { + "token": { + "type": "string" + } + } + } + }, + "404": { + "description": "account not found" + }, + "500": { + "description": "internal server error" + } + } + } + }, "/share": { "post": { "security": [ @@ -2482,6 +2678,9 @@ func init() { "404": { "description": "not found" }, + "409": { + "description": "conflict" + }, "422": { "description": "unprocessable" }, @@ -2685,6 +2884,20 @@ func init() { } } }, + "changePasswordRequest": { + "type": "object", + "properties": { + "email": { + "type": "string" + }, + "newPassword": { + "type": "string" + }, + "oldPassword": { + "type": "string" + } + } + }, "configuration": { "type": "object", "properties": { @@ -2832,6 +3045,9 @@ func init() { "shrToken": { "type": "string" }, + "token": { + "type": "string" + }, "updatedAt": { "type": "integer" }, @@ -3090,7 +3306,8 @@ func init() { "tcpTunnel", "udpTunnel", "caddy", - "drive" + "drive", + "socks" ] }, "backendProxyEndpoint": { @@ -3130,6 +3347,9 @@ func init() { "public", "private" ] + }, + "uniqueName": { + "type": "string" } } }, diff --git a/rest_server_zrok/operations/account/change_password.go b/rest_server_zrok/operations/account/change_password.go new file mode 100644 index 00000000..5324b58e --- /dev/null +++ b/rest_server_zrok/operations/account/change_password.go @@ -0,0 +1,71 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime/middleware" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// ChangePasswordHandlerFunc turns a function with the right signature into a change password handler +type ChangePasswordHandlerFunc func(ChangePasswordParams, *rest_model_zrok.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ChangePasswordHandlerFunc) Handle(params ChangePasswordParams, principal *rest_model_zrok.Principal) middleware.Responder { + return fn(params, principal) +} + +// ChangePasswordHandler interface for that can handle valid change password params +type ChangePasswordHandler interface { + Handle(ChangePasswordParams, *rest_model_zrok.Principal) middleware.Responder +} + +// NewChangePassword creates a new http.Handler for the change password operation +func NewChangePassword(ctx *middleware.Context, handler ChangePasswordHandler) *ChangePassword { + return &ChangePassword{Context: ctx, Handler: handler} +} + +/* + ChangePassword swagger:route POST /changePassword account changePassword + +ChangePassword change password API +*/ +type ChangePassword struct { + Context *middleware.Context + Handler ChangePasswordHandler +} + +func (o *ChangePassword) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewChangePasswordParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *rest_model_zrok.Principal + if uprinc != nil { + principal = uprinc.(*rest_model_zrok.Principal) // this is really a rest_model_zrok.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} diff --git a/rest_server_zrok/operations/account/change_password_parameters.go b/rest_server_zrok/operations/account/change_password_parameters.go new file mode 100644 index 00000000..5afcfb6e --- /dev/null +++ b/rest_server_zrok/operations/account/change_password_parameters.go @@ -0,0 +1,76 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// NewChangePasswordParams creates a new ChangePasswordParams object +// +// There are no default values defined in the spec. 
+func NewChangePasswordParams() ChangePasswordParams { + + return ChangePasswordParams{} +} + +// ChangePasswordParams contains all the bound params for the change password operation +// typically these are obtained from a http.Request +// +// swagger:parameters changePassword +type ChangePasswordParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + In: body + */ + Body *rest_model_zrok.ChangePasswordRequest +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewChangePasswordParams() beforehand. +func (o *ChangePasswordParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body rest_model_zrok.ChangePasswordRequest + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = &body + } + } + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/rest_server_zrok/operations/account/change_password_responses.go b/rest_server_zrok/operations/account/change_password_responses.go new file mode 100644 index 00000000..020b9d64 --- /dev/null +++ b/rest_server_zrok/operations/account/change_password_responses.go @@ -0,0 +1,157 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// ChangePasswordOKCode is the HTTP code returned for type ChangePasswordOK +const ChangePasswordOKCode int = 200 + +/* +ChangePasswordOK changed password + +swagger:response changePasswordOK +*/ +type ChangePasswordOK struct { +} + +// NewChangePasswordOK creates ChangePasswordOK with default headers values +func NewChangePasswordOK() *ChangePasswordOK { + + return &ChangePasswordOK{} +} + +// WriteResponse to the client +func (o *ChangePasswordOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(200) +} + +// ChangePasswordBadRequestCode is the HTTP code returned for type ChangePasswordBadRequest +const ChangePasswordBadRequestCode int = 400 + +/* +ChangePasswordBadRequest password not changed + +swagger:response changePasswordBadRequest +*/ +type ChangePasswordBadRequest struct { +} + +// NewChangePasswordBadRequest creates ChangePasswordBadRequest with default headers values +func NewChangePasswordBadRequest() *ChangePasswordBadRequest { + + return &ChangePasswordBadRequest{} +} + +// WriteResponse to the client +func (o *ChangePasswordBadRequest) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(400) +} + +// ChangePasswordUnauthorizedCode is the HTTP code returned for type ChangePasswordUnauthorized +const ChangePasswordUnauthorizedCode int = 401 + +/* +ChangePasswordUnauthorized unauthorized + +swagger:response changePasswordUnauthorized +*/ +type ChangePasswordUnauthorized struct { +} + +// NewChangePasswordUnauthorized creates ChangePasswordUnauthorized with default headers values +func NewChangePasswordUnauthorized() *ChangePasswordUnauthorized { + + return &ChangePasswordUnauthorized{} +} + +// WriteResponse to the client +func (o *ChangePasswordUnauthorized) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(401) +} + +// ChangePasswordUnprocessableEntityCode is the HTTP code returned for type ChangePasswordUnprocessableEntity +const ChangePasswordUnprocessableEntityCode int = 422 + +/* +ChangePasswordUnprocessableEntity password validation failure + +swagger:response changePasswordUnprocessableEntity +*/ +type ChangePasswordUnprocessableEntity struct { + + /* + In: Body + */ + Payload rest_model_zrok.ErrorMessage `json:"body,omitempty"` +} + +// NewChangePasswordUnprocessableEntity creates ChangePasswordUnprocessableEntity with default headers values +func NewChangePasswordUnprocessableEntity() *ChangePasswordUnprocessableEntity { + + return &ChangePasswordUnprocessableEntity{} +} + +// WithPayload adds the payload to the change password unprocessable entity response +func (o *ChangePasswordUnprocessableEntity) WithPayload(payload rest_model_zrok.ErrorMessage) *ChangePasswordUnprocessableEntity { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the change password unprocessable entity response +func (o *ChangePasswordUnprocessableEntity) SetPayload(payload rest_model_zrok.ErrorMessage) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ChangePasswordUnprocessableEntity) 
WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(422) + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } +} + +// ChangePasswordInternalServerErrorCode is the HTTP code returned for type ChangePasswordInternalServerError +const ChangePasswordInternalServerErrorCode int = 500 + +/* +ChangePasswordInternalServerError internal server error + +swagger:response changePasswordInternalServerError +*/ +type ChangePasswordInternalServerError struct { +} + +// NewChangePasswordInternalServerError creates ChangePasswordInternalServerError with default headers values +func NewChangePasswordInternalServerError() *ChangePasswordInternalServerError { + + return &ChangePasswordInternalServerError{} +} + +// WriteResponse to the client +func (o *ChangePasswordInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(500) +} diff --git a/rest_server_zrok/operations/account/change_password_urlbuilder.go b/rest_server_zrok/operations/account/change_password_urlbuilder.go new file mode 100644 index 00000000..1601a8a7 --- /dev/null +++ b/rest_server_zrok/operations/account/change_password_urlbuilder.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ChangePasswordURL generates an URL for the change password operation +type ChangePasswordURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ChangePasswordURL) WithBasePath(bp string) *ChangePasswordURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
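The generated handler, parameter, and response types above give the new `changePassword` operation its full server-side surface; the wiring into `ZrokAPI` appears further down in this diff. As a hedged sketch (the actual credential verification and persistence are not part of this change), a concrete handler registration could look roughly like this:

```go
// Sketch only: registering a handler for the new changePassword operation using
// the generated types shown above. Real password checks and storage are elided.
package example

import (
	"github.com/go-openapi/runtime/middleware"

	"github.com/openziti/zrok/rest_model_zrok"
	"github.com/openziti/zrok/rest_server_zrok/operations"
	"github.com/openziti/zrok/rest_server_zrok/operations/account"
)

func registerChangePassword(api *operations.ZrokAPI) {
	api.AccountChangePasswordHandler = account.ChangePasswordHandlerFunc(
		func(params account.ChangePasswordParams, principal *rest_model_zrok.Principal) middleware.Responder {
			if params.Body == nil {
				return account.NewChangePasswordBadRequest()
			}
			// verify params.Body.OldPassword for the authenticated principal and
			// enforce the configured password requirements; on validation failure
			// respond with NewChangePasswordUnprocessableEntity().WithPayload(...),
			// otherwise persist params.Body.NewPassword and report success.
			return account.NewChangePasswordOK()
		})
}
```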
+// When the value of the base path is an empty string +func (o *ChangePasswordURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ChangePasswordURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/changePassword" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ChangePasswordURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ChangePasswordURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ChangePasswordURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ChangePasswordURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ChangePasswordURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ChangePasswordURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/rest_server_zrok/operations/account/reset_token.go b/rest_server_zrok/operations/account/reset_token.go new file mode 100644 index 00000000..1c278616 --- /dev/null +++ b/rest_server_zrok/operations/account/reset_token.go @@ -0,0 +1,148 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the generate command + +import ( + "context" + "net/http" + + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/strfmt" + "github.com/go-openapi/swag" + + "github.com/openziti/zrok/rest_model_zrok" +) + +// ResetTokenHandlerFunc turns a function with the right signature into a reset token handler +type ResetTokenHandlerFunc func(ResetTokenParams, *rest_model_zrok.Principal) middleware.Responder + +// Handle executing the request and returning a response +func (fn ResetTokenHandlerFunc) Handle(params ResetTokenParams, principal *rest_model_zrok.Principal) middleware.Responder { + return fn(params, principal) +} + +// ResetTokenHandler interface for that can handle valid reset token params +type ResetTokenHandler interface { + Handle(ResetTokenParams, *rest_model_zrok.Principal) middleware.Responder +} + +// NewResetToken creates a new http.Handler for the reset token operation +func NewResetToken(ctx *middleware.Context, handler ResetTokenHandler) *ResetToken { + return &ResetToken{Context: ctx, Handler: handler} +} + +/* + ResetToken swagger:route POST /resetToken account resetToken + +ResetToken reset token API +*/ +type ResetToken struct { + Context *middleware.Context + Handler ResetTokenHandler +} + +func (o *ResetToken) ServeHTTP(rw http.ResponseWriter, r *http.Request) { + route, rCtx, _ := o.Context.RouteInfo(r) + if rCtx != nil { + *r = *rCtx + } + var Params = NewResetTokenParams() + uprinc, aCtx, err := o.Context.Authorize(r, route) + if err != nil { + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + if aCtx != nil { + *r = *aCtx + } + var principal *rest_model_zrok.Principal + if uprinc != nil { + principal = uprinc.(*rest_model_zrok.Principal) // this is really a rest_model_zrok.Principal, I promise + } + + if err := o.Context.BindValidRequest(r, route, &Params); err != nil { // bind params + o.Context.Respond(rw, r, route.Produces, route, err) + return + } + + res := o.Handler.Handle(Params, principal) // actually handle the request + o.Context.Respond(rw, r, route.Produces, route, res) + +} + +// ResetTokenBody reset token body +// +// swagger:model ResetTokenBody +type ResetTokenBody struct { + + // email address + EmailAddress string `json:"emailAddress,omitempty"` +} + +// Validate validates this reset token body +func (o *ResetTokenBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this reset token body based on context it is used +func (o *ResetTokenBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// MarshalBinary interface implementation +func (o *ResetTokenBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ResetTokenBody) UnmarshalBinary(b []byte) error { + var res ResetTokenBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} + +// ResetTokenOKBody reset token o k body +// +// swagger:model ResetTokenOKBody +type ResetTokenOKBody struct { + + // token + Token string `json:"token,omitempty"` +} + +// Validate validates this reset token o k body +func (o *ResetTokenOKBody) Validate(formats strfmt.Registry) error { + return nil +} + +// ContextValidate validates this reset token o k body based on context it is used +func (o *ResetTokenOKBody) ContextValidate(ctx context.Context, formats strfmt.Registry) error { + return nil +} + +// 
MarshalBinary interface implementation +func (o *ResetTokenOKBody) MarshalBinary() ([]byte, error) { + if o == nil { + return nil, nil + } + return swag.WriteJSON(o) +} + +// UnmarshalBinary interface implementation +func (o *ResetTokenOKBody) UnmarshalBinary(b []byte) error { + var res ResetTokenOKBody + if err := swag.ReadJSON(b, &res); err != nil { + return err + } + *o = res + return nil +} diff --git a/rest_server_zrok/operations/account/reset_token_parameters.go b/rest_server_zrok/operations/account/reset_token_parameters.go new file mode 100644 index 00000000..76decd3a --- /dev/null +++ b/rest_server_zrok/operations/account/reset_token_parameters.go @@ -0,0 +1,74 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/errors" + "github.com/go-openapi/runtime" + "github.com/go-openapi/runtime/middleware" + "github.com/go-openapi/validate" +) + +// NewResetTokenParams creates a new ResetTokenParams object +// +// There are no default values defined in the spec. +func NewResetTokenParams() ResetTokenParams { + + return ResetTokenParams{} +} + +// ResetTokenParams contains all the bound params for the reset token operation +// typically these are obtained from a http.Request +// +// swagger:parameters resetToken +type ResetTokenParams struct { + + // HTTP Request Object + HTTPRequest *http.Request `json:"-"` + + /* + In: body + */ + Body ResetTokenBody +} + +// BindRequest both binds and validates a request, it assumes that complex things implement a Validatable(strfmt.Registry) error interface +// for simple values it will use straight method calls. +// +// To ensure default values, the struct must have been initialized with NewResetTokenParams() beforehand. +func (o *ResetTokenParams) BindRequest(r *http.Request, route *middleware.MatchedRoute) error { + var res []error + + o.HTTPRequest = r + + if runtime.HasBody(r) { + defer r.Body.Close() + var body ResetTokenBody + if err := route.Consumer.Consume(r.Body, &body); err != nil { + res = append(res, errors.NewParseError("body", "body", "", err)) + } else { + // validate body object + if err := body.Validate(route.Formats); err != nil { + res = append(res, err) + } + + ctx := validate.WithOperationRequest(r.Context()) + if err := body.ContextValidate(ctx, route.Formats); err != nil { + res = append(res, err) + } + + if len(res) == 0 { + o.Body = body + } + } + } + if len(res) > 0 { + return errors.CompositeValidationError(res...) + } + return nil +} diff --git a/rest_server_zrok/operations/account/reset_token_responses.go b/rest_server_zrok/operations/account/reset_token_responses.go new file mode 100644 index 00000000..f67e0e0e --- /dev/null +++ b/rest_server_zrok/operations/account/reset_token_responses.go @@ -0,0 +1,107 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. 
+// Editing this file might prove futile when you re-run the swagger generate command + +import ( + "net/http" + + "github.com/go-openapi/runtime" +) + +// ResetTokenOKCode is the HTTP code returned for type ResetTokenOK +const ResetTokenOKCode int = 200 + +/* +ResetTokenOK token reset + +swagger:response resetTokenOK +*/ +type ResetTokenOK struct { + + /* + In: Body + */ + Payload *ResetTokenOKBody `json:"body,omitempty"` +} + +// NewResetTokenOK creates ResetTokenOK with default headers values +func NewResetTokenOK() *ResetTokenOK { + + return &ResetTokenOK{} +} + +// WithPayload adds the payload to the reset token o k response +func (o *ResetTokenOK) WithPayload(payload *ResetTokenOKBody) *ResetTokenOK { + o.Payload = payload + return o +} + +// SetPayload sets the payload to the reset token o k response +func (o *ResetTokenOK) SetPayload(payload *ResetTokenOKBody) { + o.Payload = payload +} + +// WriteResponse to the client +func (o *ResetTokenOK) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.WriteHeader(200) + if o.Payload != nil { + payload := o.Payload + if err := producer.Produce(rw, payload); err != nil { + panic(err) // let the recovery middleware deal with this + } + } +} + +// ResetTokenNotFoundCode is the HTTP code returned for type ResetTokenNotFound +const ResetTokenNotFoundCode int = 404 + +/* +ResetTokenNotFound account not found + +swagger:response resetTokenNotFound +*/ +type ResetTokenNotFound struct { +} + +// NewResetTokenNotFound creates ResetTokenNotFound with default headers values +func NewResetTokenNotFound() *ResetTokenNotFound { + + return &ResetTokenNotFound{} +} + +// WriteResponse to the client +func (o *ResetTokenNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(404) +} + +// ResetTokenInternalServerErrorCode is the HTTP code returned for type ResetTokenInternalServerError +const ResetTokenInternalServerErrorCode int = 500 + +/* +ResetTokenInternalServerError internal server error + +swagger:response resetTokenInternalServerError +*/ +type ResetTokenInternalServerError struct { +} + +// NewResetTokenInternalServerError creates ResetTokenInternalServerError with default headers values +func NewResetTokenInternalServerError() *ResetTokenInternalServerError { + + return &ResetTokenInternalServerError{} +} + +// WriteResponse to the client +func (o *ResetTokenInternalServerError) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(500) +} diff --git a/rest_server_zrok/operations/account/reset_token_urlbuilder.go b/rest_server_zrok/operations/account/reset_token_urlbuilder.go new file mode 100644 index 00000000..aacabb41 --- /dev/null +++ b/rest_server_zrok/operations/account/reset_token_urlbuilder.go @@ -0,0 +1,87 @@ +// Code generated by go-swagger; DO NOT EDIT. + +package account + +// This file was generated by the swagger tool. +// Editing this file might prove futile when you re-run the generate command + +import ( + "errors" + "net/url" + golangswaggerpaths "path" +) + +// ResetTokenURL generates an URL for the reset token operation +type ResetTokenURL struct { + _basePath string +} + +// WithBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. 
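`resetToken` follows the same handler pattern but returns a payload on success: `ResetTokenOKBody` carries the regenerated account token back to the caller. Under the same assumptions as the previous sketch (the account lookup and token generation below are placeholders, not zrok's actual logic):

```go
// Sketch only: registering a handler for the new resetToken operation. The
// token generation callback is a placeholder standing in for zrok's real logic.
package example

import (
	"github.com/go-openapi/runtime/middleware"

	"github.com/openziti/zrok/rest_model_zrok"
	"github.com/openziti/zrok/rest_server_zrok/operations"
	"github.com/openziti/zrok/rest_server_zrok/operations/account"
)

func registerResetToken(api *operations.ZrokAPI, regenerate func(emailAddress string) (string, bool)) {
	api.AccountResetTokenHandler = account.ResetTokenHandlerFunc(
		func(params account.ResetTokenParams, principal *rest_model_zrok.Principal) middleware.Responder {
			token, found := regenerate(params.Body.EmailAddress)
			if !found {
				return account.NewResetTokenNotFound()
			}
			return account.NewResetTokenOK().WithPayload(&account.ResetTokenOKBody{Token: token})
		})
}
```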
+// When the value of the base path is an empty string +func (o *ResetTokenURL) WithBasePath(bp string) *ResetTokenURL { + o.SetBasePath(bp) + return o +} + +// SetBasePath sets the base path for this url builder, only required when it's different from the +// base path specified in the swagger spec. +// When the value of the base path is an empty string +func (o *ResetTokenURL) SetBasePath(bp string) { + o._basePath = bp +} + +// Build a url path and query string +func (o *ResetTokenURL) Build() (*url.URL, error) { + var _result url.URL + + var _path = "/resetToken" + + _basePath := o._basePath + if _basePath == "" { + _basePath = "/api/v1" + } + _result.Path = golangswaggerpaths.Join(_basePath, _path) + + return &_result, nil +} + +// Must is a helper function to panic when the url builder returns an error +func (o *ResetTokenURL) Must(u *url.URL, err error) *url.URL { + if err != nil { + panic(err) + } + if u == nil { + panic("url can't be nil") + } + return u +} + +// String returns the string representation of the path with query string +func (o *ResetTokenURL) String() string { + return o.Must(o.Build()).String() +} + +// BuildFull builds a full url with scheme, host, path and query string +func (o *ResetTokenURL) BuildFull(scheme, host string) (*url.URL, error) { + if scheme == "" { + return nil, errors.New("scheme is required for a full url on ResetTokenURL") + } + if host == "" { + return nil, errors.New("host is required for a full url on ResetTokenURL") + } + + base, err := o.Build() + if err != nil { + return nil, err + } + + base.Scheme = scheme + base.Host = host + return base, nil +} + +// StringFull returns the string representation of a complete url +func (o *ResetTokenURL) StringFull(scheme, host string) string { + return o.Must(o.BuildFull(scheme, host)).String() +} diff --git a/rest_server_zrok/operations/share/share_responses.go b/rest_server_zrok/operations/share/share_responses.go index d49c044e..9bf46588 100644 --- a/rest_server_zrok/operations/share/share_responses.go +++ b/rest_server_zrok/operations/share/share_responses.go @@ -108,6 +108,31 @@ func (o *ShareNotFound) WriteResponse(rw http.ResponseWriter, producer runtime.P rw.WriteHeader(404) } +// ShareConflictCode is the HTTP code returned for type ShareConflict +const ShareConflictCode int = 409 + +/* +ShareConflict conflict + +swagger:response shareConflict +*/ +type ShareConflict struct { +} + +// NewShareConflict creates ShareConflict with default headers values +func NewShareConflict() *ShareConflict { + + return &ShareConflict{} +} + +// WriteResponse to the client +func (o *ShareConflict) WriteResponse(rw http.ResponseWriter, producer runtime.Producer) { + + rw.Header().Del(runtime.HeaderContentType) //Remove Content-Type on empty responses + + rw.WriteHeader(409) +} + // ShareUnprocessableEntityCode is the HTTP code returned for type ShareUnprocessableEntity const ShareUnprocessableEntityCode int = 422 diff --git a/rest_server_zrok/operations/zrok_api.go b/rest_server_zrok/operations/zrok_api.go index 158e03c3..8de85d53 100644 --- a/rest_server_zrok/operations/zrok_api.go +++ b/rest_server_zrok/operations/zrok_api.go @@ -52,6 +52,9 @@ func NewZrokAPI(spec *loads.Document) *ZrokAPI { ShareAccessHandler: share.AccessHandlerFunc(func(params share.AccessParams, principal *rest_model_zrok.Principal) middleware.Responder { return middleware.NotImplemented("operation share.Access has not yet been implemented") }), + AccountChangePasswordHandler: account.ChangePasswordHandlerFunc(func(params 
account.ChangePasswordParams, principal *rest_model_zrok.Principal) middleware.Responder { + return middleware.NotImplemented("operation account.ChangePassword has not yet been implemented") + }), MetadataConfigurationHandler: metadata.ConfigurationHandlerFunc(func(params metadata.ConfigurationParams) middleware.Responder { return middleware.NotImplemented("operation metadata.Configuration has not yet been implemented") }), @@ -115,6 +118,9 @@ func NewZrokAPI(spec *loads.Document) *ZrokAPI { AccountResetPasswordRequestHandler: account.ResetPasswordRequestHandlerFunc(func(params account.ResetPasswordRequestParams) middleware.Responder { return middleware.NotImplemented("operation account.ResetPasswordRequest has not yet been implemented") }), + AccountResetTokenHandler: account.ResetTokenHandlerFunc(func(params account.ResetTokenParams, principal *rest_model_zrok.Principal) middleware.Responder { + return middleware.NotImplemented("operation account.ResetToken has not yet been implemented") + }), ShareShareHandler: share.ShareHandlerFunc(func(params share.ShareParams, principal *rest_model_zrok.Principal) middleware.Responder { return middleware.NotImplemented("operation share.Share has not yet been implemented") }), @@ -188,6 +194,8 @@ type ZrokAPI struct { // ShareAccessHandler sets the operation handler for the access operation ShareAccessHandler share.AccessHandler + // AccountChangePasswordHandler sets the operation handler for the change password operation + AccountChangePasswordHandler account.ChangePasswordHandler // MetadataConfigurationHandler sets the operation handler for the configuration operation MetadataConfigurationHandler metadata.ConfigurationHandler // AdminCreateFrontendHandler sets the operation handler for the create frontend operation @@ -230,6 +238,8 @@ type ZrokAPI struct { AccountResetPasswordHandler account.ResetPasswordHandler // AccountResetPasswordRequestHandler sets the operation handler for the reset password request operation AccountResetPasswordRequestHandler account.ResetPasswordRequestHandler + // AccountResetTokenHandler sets the operation handler for the reset token operation + AccountResetTokenHandler account.ResetTokenHandler // ShareShareHandler sets the operation handler for the share operation ShareShareHandler share.ShareHandler // ShareUnaccessHandler sets the operation handler for the unaccess operation @@ -328,6 +338,9 @@ func (o *ZrokAPI) Validate() error { if o.ShareAccessHandler == nil { unregistered = append(unregistered, "share.AccessHandler") } + if o.AccountChangePasswordHandler == nil { + unregistered = append(unregistered, "account.ChangePasswordHandler") + } if o.MetadataConfigurationHandler == nil { unregistered = append(unregistered, "metadata.ConfigurationHandler") } @@ -391,6 +404,9 @@ func (o *ZrokAPI) Validate() error { if o.AccountResetPasswordRequestHandler == nil { unregistered = append(unregistered, "account.ResetPasswordRequestHandler") } + if o.AccountResetTokenHandler == nil { + unregistered = append(unregistered, "account.ResetTokenHandler") + } if o.ShareShareHandler == nil { unregistered = append(unregistered, "share.ShareHandler") } @@ -515,6 +531,10 @@ func (o *ZrokAPI) initHandlerCache() { o.handlers["POST"] = make(map[string]http.Handler) } o.handlers["POST"]["/access"] = share.NewAccess(o.context, o.ShareAccessHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } + o.handlers["POST"]["/changePassword"] = account.NewChangePassword(o.context, 
o.AccountChangePasswordHandler) if o.handlers["GET"] == nil { o.handlers["GET"] = make(map[string]http.Handler) } @@ -602,6 +622,10 @@ if o.handlers["POST"] == nil { o.handlers["POST"] = make(map[string]http.Handler) } + o.handlers["POST"]["/resetToken"] = account.NewResetToken(o.context, o.AccountResetTokenHandler) + if o.handlers["POST"] == nil { + o.handlers["POST"] = make(map[string]http.Handler) + } o.handlers["POST"]["/share"] = share.NewShare(o.context, o.ShareShareHandler) if o.handlers["DELETE"] == nil { o.handlers["DELETE"] = make(map[string]http.Handler) @@ -668,6 +692,6 @@ func (o *ZrokAPI) AddMiddlewareFor(method, path string, builder middleware.Build } o.Init() if h, ok := o.handlers[um][path]; ok { - o.handlers[method][path] = builder(h) + o.handlers[um][path] = builder(h) } } diff --git a/sdk/golang/examples/http-server/README.md b/sdk/golang/examples/http-server/README.md new file mode 100644 index 00000000..e37b2239 --- /dev/null +++ b/sdk/golang/examples/http-server/README.md @@ -0,0 +1,59 @@ +# "http-server" SDK Example + +This `http-server` example is a minimal `zrok` application that surfaces a basic http server over a public zrok share. + +## Implementation + +```go + root, err := environment.LoadRoot() + if err != nil { + panic(err) + } +``` + +The `root` is a structure that contains all of the user's environment details and allows the SDK application to access the `zrok` service instance and the underlying OpenZiti network. + +```go + shr, err := sdk.CreateShare(root, &sdk.ShareRequest{ + BackendMode: sdk.TcpTunnelBackendMode, + ShareMode: sdk.PublicShareMode, + Frontends: []string{"public"}, + Target: "http-server", + }) + + if err != nil { + panic(err) + } + defer func() { + if err := sdk.DeleteShare(root, shr); err != nil { + panic(err) + } + }() + ... + fmt.Println("Access server at the following endpoints: ", strings.Join(shr.FrontendEndpoints, "\n")) + +``` + +The `sdk.CreateShare` call uses the loaded `environment` root along with the details of the share request (`sdk.ShareRequest`) to create the share that will be used to access the `http-server`. + +We use `sdk.TcpTunnelBackendMode` to handle TCP traffic, and `sdk.PublicShareMode` so the share is reachable through a running public frontend. We also specify which frontends to listen on; here we use the configured `public` frontend. + +Further down we print the endpoints where the service can be accessed. + +Then we create a listener and use it to serve our HTTP server: + +```go +conn, err := sdk.NewListener(shr.Token, root) + if err != nil { + panic(err) + } + defer conn.Close() + + ...
+ + http.HandleFunc("/", helloZrok) + + if err := http.Serve(conn, nil); err != nil { + panic(err) + } +``` diff --git a/sdk/golang/examples/http-server/cmd/http-server/main.go b/sdk/golang/examples/http-server/cmd/http-server/main.go new file mode 100644 index 00000000..5f51d023 --- /dev/null +++ b/sdk/golang/examples/http-server/cmd/http-server/main.go @@ -0,0 +1,51 @@ +package main + +import ( + "fmt" + "github.com/openziti/zrok/environment" + "github.com/openziti/zrok/sdk/golang/sdk" + "io" + "net/http" + "strings" +) + +func helloZrok(w http.ResponseWriter, r *http.Request) { + io.WriteString(w, "Hello zrok!\n") +} + +func main() { + root, err := environment.LoadRoot() + if err != nil { + panic(err) + } + + shr, err := sdk.CreateShare(root, &sdk.ShareRequest{ + BackendMode: sdk.TcpTunnelBackendMode, + ShareMode: sdk.PublicShareMode, + Frontends: []string{"public"}, + Target: "http-server", + }) + + if err != nil { + panic(err) + } + defer func() { + if err := sdk.DeleteShare(root, shr); err != nil { + panic(err) + } + }() + + conn, err := sdk.NewListener(shr.Token, root) + if err != nil { + panic(err) + } + defer conn.Close() + + fmt.Println("Access server at the following endpoints: ", strings.Join(shr.FrontendEndpoints, "\n")) + + http.HandleFunc("/", helloZrok) + + if err := http.Serve(conn, nil); err != nil { + panic(err) + } +} diff --git a/sdk/golang/sdk/listener.go b/sdk/golang/sdk/listener.go index 4aece922..0a41fcc5 100644 --- a/sdk/golang/sdk/listener.go +++ b/sdk/golang/sdk/listener.go @@ -9,7 +9,10 @@ import ( ) func NewListener(shrToken string, root env_core.Root) (edge.Listener, error) { - return NewListenerWithOptions(shrToken, root, &ziti.ListenOptions{ConnectTimeout: 30 * time.Second, MaxConnections: 64}) + return NewListenerWithOptions(shrToken, root, &ziti.ListenOptions{ + ConnectTimeout: 30 * time.Second, + WaitForNEstablishedListeners: 1, + }) } func NewListenerWithOptions(shrToken string, root env_core.Root, opts *ziti.ListenOptions) (edge.Listener, error) { diff --git a/sdk/golang/sdk/model.go b/sdk/golang/sdk/model.go index 4eed621b..07a85764 100644 --- a/sdk/golang/sdk/model.go +++ b/sdk/golang/sdk/model.go @@ -22,13 +22,14 @@ const ( type ShareRequest struct { Reserved bool + UniqueName string BackendMode BackendMode ShareMode ShareMode Target string Frontends []string BasicAuth []string OauthProvider string - OauthEmailDomains []string + OauthEmailAddressPatterns []string OauthAuthorizationCheckInterval time.Duration } diff --git a/sdk/golang/sdk/share.go b/sdk/golang/sdk/share.go index 45feb57f..04b25eed 100644 --- a/sdk/golang/sdk/share.go +++ b/sdk/golang/sdk/share.go @@ -26,6 +26,9 @@ func CreateShare(root env_core.Root, request *ShareRequest) (*Share, error) { return nil, errors.Errorf("unknown share mode '%v'", request.ShareMode) } out.Body.Reserved = request.Reserved + if request.Reserved { + out.Body.UniqueName = request.UniqueName + } if len(request.BasicAuth) > 0 { out.Body.AuthScheme = string(Basic) @@ -81,7 +84,7 @@ func newPublicShare(root env_core.Root, request *ShareRequest) *share.ShareParam BackendMode: string(request.BackendMode), BackendProxyEndpoint: request.Target, AuthScheme: string(None), - OauthEmailDomains: request.OauthEmailDomains, + OauthEmailDomains: request.OauthEmailAddressPatterns, OauthProvider: request.OauthProvider, OauthAuthorizationCheckInterval: request.OauthAuthorizationCheckInterval.String(), } diff --git a/sdk/python/examples/http-server/README.md b/sdk/python/examples/http-server/README.md new file mode 100644 index 
00000000..25b35367 --- /dev/null +++ b/sdk/python/examples/http-server/README.md @@ -0,0 +1,54 @@ +# "http-server" SDK Example + +This `http-server` example is a minimal `zrok` application that surfaces a basic HTTP server over a public zrok share. + +## Implementation + +```python + root = zrok.environment.root.Load() +``` + +The `root` is a structure that contains all of the user's environment details and allows the SDK application to access the `zrok` service instance and the underlying OpenZiti network. + +```python + try: + shr = zrok.share.CreateShare(root=root, request=ShareRequest( + BackendMode=zrok.model.TCP_TUNNEL_BACKEND_MODE, + ShareMode=zrok.model.PUBLIC_SHARE_MODE, + Frontends=['public'], + Target="http-server" + )) + shrToken = shr.Token + print("Access server at the following endpoints: ", "\n".join(shr.FrontendEndpoints)) + + def removeShare(): + zrok.share.DeleteShare(root=root, shr=shr) + print("Deleted share") + atexit.register(removeShare) + except Exception as e: + print("unable to create share", e) + sys.exit(1) + +``` + +The `zrok.share.CreateShare` call uses the loaded `environment` root along with the details of the share request (`ShareRequest`) to create the share that will be used to access the `http-server`. + +We use `zrok.model.TCP_TUNNEL_BACKEND_MODE` to handle the raw TCP traffic and `zrok.model.PUBLIC_SHARE_MODE` so that the share is served through a running public frontend. The `Frontends` field selects which configured frontends to listen on; here that is `public`. + + +Next we populate the `cfg` options for our decorator: + +```python +zrok_opts['cfg'] = zrok.decor.Opts(root=root, shrToken=shrToken, bindPort=bindPort) +``` + +Next we run the server, which ends up calling the following: + +```python +@zrok.decor.zrok(opts=zrok_opts) +def runApp(): + from waitress import serve + # the port is only used to integrate Zrok with frameworks that expect a "hostname:port" combo + serve(app, port=bindPort) +``` + diff --git a/sdk/python/examples/http-server/requirements.txt b/sdk/python/examples/http-server/requirements.txt new file mode 100644 index 00000000..23ab2518 --- /dev/null +++ b/sdk/python/examples/http-server/requirements.txt @@ -0,0 +1,3 @@ +Flask==3.0.0 +waitress==2.1.2 +zrok \ No newline at end of file diff --git a/sdk/python/examples/http-server/server.py b/sdk/python/examples/http-server/server.py new file mode 100755 index 00000000..50c82b01 --- /dev/null +++ b/sdk/python/examples/http-server/server.py @@ -0,0 +1,48 @@ +#!python3 +from flask import Flask +import sys +import zrok +from zrok.model import ShareRequest +import atexit + +app = Flask(__name__) +zrok_opts = {} +bindPort = 18081 + + +@zrok.decor.zrok(opts=zrok_opts) +def runApp(): + from waitress import serve + # the port is only used to integrate Zrok with frameworks that expect a "hostname:port" combo + serve(app, port=bindPort) + + +@app.route('/') +def hello_world(): + print("received a request to /") + return "Look! It's zrok!"
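+ +# The __main__ block below loads the zrok environment root, creates a public share, +# registers an atexit hook that deletes the share on shutdown, and passes the share +# token to the zrok decorator (via zrok_opts) so that runApp() serves the Flask app +# over the zrok share.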
+ + +if __name__ == '__main__': + root = zrok.environment.root.Load() + try: + shr = zrok.share.CreateShare(root=root, request=ShareRequest( + BackendMode=zrok.model.TCP_TUNNEL_BACKEND_MODE, + ShareMode=zrok.model.PUBLIC_SHARE_MODE, + Frontends=['public'], + Target="http-server" + )) + shrToken = shr.Token + print("Access server at the following endpoints: ", "\n".join(shr.FrontendEndpoints)) + + def removeShare(): + zrok.share.DeleteShare(root=root, shr=shr) + print("Deleted share") + atexit.register(removeShare) + except Exception as e: + print("unable to create share", e) + sys.exit(1) + + zrok_opts['cfg'] = zrok.decor.Opts(root=root, shrToken=shrToken, bindPort=bindPort) + + runApp() diff --git a/sdk/python/examples/README.md b/sdk/python/examples/pastebin/README.md similarity index 100% rename from sdk/python/examples/README.md rename to sdk/python/examples/pastebin/README.md diff --git a/sdk/python/examples/pastebin.py b/sdk/python/examples/pastebin/pastebin.py similarity index 76% rename from sdk/python/examples/pastebin.py rename to sdk/python/examples/pastebin/pastebin.py index 6466052b..85c8fd89 100755 --- a/sdk/python/examples/pastebin.py +++ b/sdk/python/examples/pastebin/pastebin.py @@ -1,5 +1,6 @@ #!python3 import argparse +import atexit import sys import os import zrok @@ -29,20 +30,23 @@ class copyto: print("unable to create share", e) sys.exit(1) + def removeShare(): + try: + zrok.share.DeleteShare(root, shr) + except Exception as e: + print("unable to delete share", e) + sys.exit(1) + atexit.register(removeShare) + data = self.loadData() print("access your pastebin using 'pastebin.py pastefrom " + shr.Token + "'") - try: - with zrok.listener.Listener(shr.Token, root) as server: - while not exit_signal.is_set(): - conn, peer = server.accept() - with conn: - conn.sendall(data.encode('utf-8')) + with zrok.listener.Listener(shr.Token, root) as server: + while not exit_signal.is_set(): + conn, peer = server.accept() + with conn: + conn.sendall(data.encode('utf-8')) - except KeyboardInterrupt: - pass - - zrok.share.DeleteShare(root, shr) print("Server stopped.") @@ -62,15 +66,18 @@ def pastefrom(options): except Exception as e: print("unable to create access", e) sys.exit(1) + + def removeAccess(): + try: + zrok.access.DeleteAccess(root, acc) + except Exception as e: + print("unable to delete access", e) + sys.exit(1) + atexit.register(removeAccess) client = zrok.dialer.Dialer(options.shrToken, root) data = client.recv(1024) print(data.decode('utf-8')) - try: - zrok.access.DeleteAccess(root, acc) - except Exception as e: - print("unable to delete access", e) - sys.exit(1) if __name__ == "__main__": parser = argparse.ArgumentParser() diff --git a/sdk/python/examples/requirements.txt b/sdk/python/examples/pastebin/requirements.txt similarity index 80% rename from sdk/python/examples/requirements.txt rename to sdk/python/examples/pastebin/requirements.txt index 24666918..b927021b 100644 --- a/sdk/python/examples/requirements.txt +++ b/sdk/python/examples/pastebin/requirements.txt @@ -1,3 +1,3 @@ openziti==0.8.1 requests==2.31.0 -zrok-sdk \ No newline at end of file +zrok \ No newline at end of file diff --git a/sdk/python/sdk/zrok/.gitattributes b/sdk/python/sdk/zrok/.gitattributes new file mode 100644 index 00000000..4abb03b2 --- /dev/null +++ b/sdk/python/sdk/zrok/.gitattributes @@ -0,0 +1 @@ +zrok/_version.py export-subst diff --git a/sdk/python/sdk/zrok/.swagger-codegen-ignore b/sdk/python/sdk/zrok/.swagger-codegen-ignore index 10ba119a..cbe3e7fe 100644 --- 
a/sdk/python/sdk/zrok/.swagger-codegen-ignore +++ b/sdk/python/sdk/zrok/.swagger-codegen-ignore @@ -28,4 +28,5 @@ tox.ini test-requirements.txt test/ docs/ -README.md \ No newline at end of file +README.md +setup.py \ No newline at end of file diff --git a/sdk/python/sdk/zrok/.swagger-codegen/VERSION b/sdk/python/sdk/zrok/.swagger-codegen/VERSION index 48e69894..97e616f0 100644 --- a/sdk/python/sdk/zrok/.swagger-codegen/VERSION +++ b/sdk/python/sdk/zrok/.swagger-codegen/VERSION @@ -1 +1 @@ -3.0.50 \ No newline at end of file +3.0.52 \ No newline at end of file diff --git a/sdk/python/sdk/zrok/__init__.py b/sdk/python/sdk/zrok/__init__.py index bc7cfc7b..e6b36ad8 100644 --- a/sdk/python/sdk/zrok/__init__.py +++ b/sdk/python/sdk/zrok/__init__.py @@ -1 +1 @@ -from . import environment \ No newline at end of file +from . import environment # noqa diff --git a/sdk/python/sdk/zrok/setup.cfg b/sdk/python/sdk/zrok/setup.cfg new file mode 100644 index 00000000..2194d4d2 --- /dev/null +++ b/sdk/python/sdk/zrok/setup.cfg @@ -0,0 +1,33 @@ +[metadata] +name = openziti +author = OpenZiti Developers +author_email = developers@openziti.org +description = Ziti Python SDK +long_description = file: README.md +long_description_content_type = text/markdown +url = https://github.com/openziti/zrok +license = Apache 2.0 +project_urls = + Source = https://github.com/openziti/zrok + Tracker = https://github.com/openziti/zrok/issues + Discussion = https://openziti.discourse.group/ + +[options] +package_dir = + = . +packages = find: + +[options.packages.find] +where = . + +[flake8] +exclude = zrok_api, build +max-line-length = 120 + +[versioneer] +VCS = git +style = pep440-pre +versionfile_source = zrok/_version.py +versionfile_build = zrok/_version.py +tag_prefix = v +parentdir_prefix = zrok- diff --git a/sdk/python/sdk/zrok/setup.py b/sdk/python/sdk/zrok/setup.py index e8eb84d5..801f64a3 100644 --- a/sdk/python/sdk/zrok/setup.py +++ b/sdk/python/sdk/zrok/setup.py @@ -1,12 +1,10 @@ from setuptools import setup, find_packages # noqa: H301 import os +import versioneer -NAME = "zrok_sdk" -VERSION = "0.0.0.dev" -try: - VERSION = os.environ['ZROK_VERSION'] -except KeyError: - pass +# optionally upload to TestPyPi with alternative name in testing repo +NAME = os.getenv('ZROK_PY_NAME', "zrok") +VERSION = "1.0.0" # To install the library, run the following # # python setup.py install @@ -14,17 +12,18 @@ except KeyError: # prerequisite: setuptools # http://pypi.python.org/pypi/setuptools -REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil"] +REQUIRES = ["urllib3 >= 1.15", "six >= 1.10", "certifi", "python-dateutil", "openziti >= 0.8.1"] setup( name=NAME, - version=VERSION, + cmdclass=versioneer.get_cmdclass(dict()), + version=versioneer.get_version(), description="zrok", - author_email="cameron.otts@netfoundry.io", - url="https://zrok.io", - python_requires='>=3.10', + author_email="", + url="", keywords=["Swagger", "zrok"], install_requires=REQUIRES, + python_requires='>3.10.0', packages=find_packages(), include_package_data=True, long_description="""\ diff --git a/sdk/python/sdk/zrok/versioneer.py b/sdk/python/sdk/zrok/versioneer.py new file mode 100644 index 00000000..1e3753e6 --- /dev/null +++ b/sdk/python/sdk/zrok/versioneer.py @@ -0,0 +1,2277 @@ + +# Version: 0.29 + +"""The Versioneer - like a rocketeer, but for versions. + +The Versioneer +============== + +* like a rocketeer, but for versions! 
+* https://github.com/python-versioneer/python-versioneer +* Brian Warner +* License: Public Domain (Unlicense) +* Compatible with: Python 3.7, 3.8, 3.9, 3.10, 3.11 and pypy3 +* [![Latest Version][pypi-image]][pypi-url] +* [![Build Status][travis-image]][travis-url] + +This is a tool for managing a recorded version number in setuptools-based +python projects. The goal is to remove the tedious and error-prone "update +the embedded version string" step from your release process. Making a new +release should be as easy as recording a new tag in your version-control +system, and maybe making new tarballs. + + +## Quick Install + +Versioneer provides two installation modes. The "classic" vendored mode installs +a copy of versioneer into your repository. The experimental build-time dependency mode +is intended to allow you to skip this step and simplify the process of upgrading. + +### Vendored mode + +* `pip install versioneer` to somewhere in your $PATH + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) + * Note that you will need to add `tomli; python_version < "3.11"` to your + build-time dependencies if you use `pyproject.toml` +* run `versioneer install --vendor` in your source tree, commit the results +* verify version information with `python setup.py version` + +### Build-time dependency mode + +* `pip install versioneer` to somewhere in your $PATH + * A [conda-forge recipe](https://github.com/conda-forge/versioneer-feedstock) is + available, so you can also use `conda install -c conda-forge versioneer` +* add a `[tool.versioneer]` section to your `pyproject.toml` or a + `[versioneer]` section to your `setup.cfg` (see [Install](INSTALL.md)) +* add `versioneer` (with `[toml]` extra, if configuring in `pyproject.toml`) + to the `requires` key of the `build-system` table in `pyproject.toml`: + ```toml + [build-system] + requires = ["setuptools", "versioneer[toml]"] + build-backend = "setuptools.build_meta" + ``` +* run `versioneer install --no-vendor` in your source tree, commit the results +* verify version information with `python setup.py version` + +## Version Identifiers + +Source trees come from a variety of places: + +* a version-control system checkout (mostly used by developers) +* a nightly tarball, produced by build automation +* a snapshot tarball, produced by a web-based VCS browser, like github's + "tarball from tag" feature +* a release tarball, produced by "setup.py sdist", distributed through PyPI + +Within each source tree, the version identifier (either a string or a number, +this tool is format-agnostic) can come from a variety of places: + +* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows + about recent "tags" and an absolute revision-id +* the name of the directory into which the tarball was unpacked +* an expanded VCS keyword ($Id$, etc) +* a `_version.py` created by some earlier build step + +For released software, the version identifier is closely related to a VCS +tag. Some projects use tag names that include more than just the version +string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool +needs to strip the tag prefix to extract the version identifier. 
For +unreleased software (between tags), the version identifier should provide +enough information to help developers recreate the same tree, while also +giving them an idea of roughly how old the tree is (after version 1.2, before +version 1.3). Many VCS systems can report a description that captures this, +for example `git describe --tags --dirty --always` reports things like +"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the +0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has +uncommitted changes). + +The version identifier is used for multiple purposes: + +* to allow the module to self-identify its version: `myproject.__version__` +* to choose a name and prefix for a 'setup.py sdist' tarball + +## Theory of Operation + +Versioneer works by adding a special `_version.py` file into your source +tree, where your `__init__.py` can import it. This `_version.py` knows how to +dynamically ask the VCS tool for version information at import time. + +`_version.py` also contains `$Revision$` markers, and the installation +process marks `_version.py` to have this marker rewritten with a tag name +during the `git archive` command. As a result, generated tarballs will +contain enough information to get the proper version. + +To allow `setup.py` to compute a version too, a `versioneer.py` is added to +the top level of your source tree, next to `setup.py` and the `setup.cfg` +that configures it. This overrides several distutils/setuptools commands to +compute the version when invoked, and changes `setup.py build` and `setup.py +sdist` to replace `_version.py` with a small static file that contains just +the generated version data. + +## Installation + +See [INSTALL.md](./INSTALL.md) for detailed installation instructions. + +## Version-String Flavors + +Code which uses Versioneer can learn about its version string at runtime by +importing `_version` from your main `__init__.py` file and running the +`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can +import the top-level `versioneer.py` and run `get_versions()`. + +Both functions return a dictionary with different flavors of version +information: + +* `['version']`: A condensed version string, rendered using the selected + style. This is the most commonly used value for the project's version + string. The default "pep440" style yields strings like `0.11`, + `0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section + below for alternative styles. + +* `['full-revisionid']`: detailed revision identifier. For Git, this is the + full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac". + +* `['date']`: Date and time of the latest `HEAD` commit. For Git, it is the + commit date in ISO 8601 format. This will be None if the date is not + available. + +* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that + this is only accurate if run in a VCS checkout, otherwise it is likely to + be False or None + +* `['error']`: if the version string could not be computed, this will be set + to a string describing the problem, otherwise it will be None. It may be + useful to throw an exception in setup.py if this is set, to avoid e.g. + creating tarballs with a version string of "unknown". + +Some variants are more useful than others. Including `full-revisionid` in a +bug report should allow developers to reconstruct the exact code being tested +(or indicate the presence of local changes that should be shared with the +developers). 
`version` is suitable for display in an "about" box or a CLI +`--version` output: it can be easily compared against release notes and lists +of bugs fixed in various releases. + +The installer adds the following text to your `__init__.py` to place a basic +version in `YOURPROJECT.__version__`: + + from ._version import get_versions + __version__ = get_versions()['version'] + del get_versions + +## Styles + +The setup.cfg `style=` configuration controls how the VCS information is +rendered into a version string. + +The default style, "pep440", produces a PEP440-compliant string, equal to the +un-prefixed tag name for actual releases, and containing an additional "local +version" section with more detail for in-between builds. For Git, this is +TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags +--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the +tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and +that this commit is two revisions ("+2") beyond the "0.11" tag. For released +software (exactly equal to a known tag), the identifier will only contain the +stripped tag, e.g. "0.11". + +Other styles are available. See [details.md](details.md) in the Versioneer +source tree for descriptions. + +## Debugging + +Versioneer tries to avoid fatal errors: if something goes wrong, it will tend +to return a version of "0+unknown". To investigate the problem, run `setup.py +version`, which will run the version-lookup code in a verbose mode, and will +display the full contents of `get_versions()` (including the `error` string, +which may help identify what went wrong). + +## Known Limitations + +Some situations are known to cause problems for Versioneer. This details the +most significant ones. More can be found on Github +[issues page](https://github.com/python-versioneer/python-versioneer/issues). + +### Subprojects + +Versioneer has limited support for source trees in which `setup.py` is not in +the root directory (e.g. `setup.py` and `.git/` are *not* siblings). The are +two common reasons why `setup.py` might not be in the root: + +* Source trees which contain multiple subprojects, such as + [Buildbot](https://github.com/buildbot/buildbot), which contains both + "master" and "slave" subprojects, each with their own `setup.py`, + `setup.cfg`, and `tox.ini`. Projects like these produce multiple PyPI + distributions (and upload multiple independently-installable tarballs). +* Source trees whose main purpose is to contain a C library, but which also + provide bindings to Python (and perhaps other languages) in subdirectories. + +Versioneer will look for `.git` in parent directories, and most operations +should get the right version string. However `pip` and `setuptools` have bugs +and implementation details which frequently cause `pip install .` from a +subproject directory to fail to find a correct version string (so it usually +defaults to `0+unknown`). + +`pip install --editable .` should work correctly. `setup.py install` might +work too. + +Pip-8.1.1 is known to have this problem, but hopefully it will get fixed in +some later version. + +[Bug #38](https://github.com/python-versioneer/python-versioneer/issues/38) is tracking +this issue. The discussion in +[PR #61](https://github.com/python-versioneer/python-versioneer/pull/61) describes the +issue from the Versioneer side in more detail. 
+[pip PR#3176](https://github.com/pypa/pip/pull/3176) and +[pip PR#3615](https://github.com/pypa/pip/pull/3615) contain work to improve +pip to let Versioneer work correctly. + +Versioneer-0.16 and earlier only looked for a `.git` directory next to the +`setup.cfg`, so subprojects were completely unsupported with those releases. + +### Editable installs with setuptools <= 18.5 + +`setup.py develop` and `pip install --editable .` allow you to install a +project into a virtualenv once, then continue editing the source code (and +test) without re-installing after every change. + +"Entry-point scripts" (`setup(entry_points={"console_scripts": ..})`) are a +convenient way to specify executable scripts that should be installed along +with the python package. + +These both work as expected when using modern setuptools. When using +setuptools-18.5 or earlier, however, certain operations will cause +`pkg_resources.DistributionNotFound` errors when running the entrypoint +script, which must be resolved by re-installing the package. This happens +when the install happens with one version, then the egg_info data is +regenerated while a different version is checked out. Many setup.py commands +cause egg_info to be rebuilt (including `sdist`, `wheel`, and installing into +a different virtualenv), so this can be surprising. + +[Bug #83](https://github.com/python-versioneer/python-versioneer/issues/83) describes +this one, but upgrading to a newer version of setuptools should probably +resolve it. + + +## Updating Versioneer + +To upgrade your project to a new release of Versioneer, do the following: + +* install the new Versioneer (`pip install -U versioneer` or equivalent) +* edit `setup.cfg` and `pyproject.toml`, if necessary, + to include any new configuration settings indicated by the release notes. + See [UPGRADING](./UPGRADING.md) for details. +* re-run `versioneer install --[no-]vendor` in your source tree, to replace + `SRC/_version.py` +* commit any changed files + +## Future Directions + +This tool is designed to make it easily extended to other version-control +systems: all VCS-specific components are in separate directories like +src/git/ . The top-level `versioneer.py` script is assembled from these +components by running make-versioneer.py . In the future, make-versioneer.py +will take a VCS name as an argument, and will construct a version of +`versioneer.py` that is specific to the given VCS. It might also take the +configuration arguments that are currently provided manually during +installation by editing setup.py . Alternatively, it might go the other +direction and include code from all supported VCS systems, reducing the +number of intermediate scripts. + +## Similar projects + +* [setuptools_scm](https://github.com/pypa/setuptools_scm/) - a non-vendored build-time + dependency +* [minver](https://github.com/jbweston/miniver) - a lightweight reimplementation of + versioneer +* [versioningit](https://github.com/jwodder/versioningit) - a PEP 518-based setuptools + plugin + +## License + +To make Versioneer easier to embed, all its code is dedicated to the public +domain. The `_version.py` that it creates is also in the public domain. +Specifically, both are released under the "Unlicense", as described in +https://unlicense.org/. 
+ +[pypi-image]: https://img.shields.io/pypi/v/versioneer.svg +[pypi-url]: https://pypi.python.org/pypi/versioneer/ +[travis-image]: +https://img.shields.io/travis/com/python-versioneer/python-versioneer.svg +[travis-url]: https://travis-ci.com/github/python-versioneer/python-versioneer + +""" +# pylint:disable=invalid-name,import-outside-toplevel,missing-function-docstring +# pylint:disable=missing-class-docstring,too-many-branches,too-many-statements +# pylint:disable=raise-missing-from,too-many-lines,too-many-locals,import-error +# pylint:disable=too-few-public-methods,redefined-outer-name,consider-using-with +# pylint:disable=attribute-defined-outside-init,too-many-arguments + +import configparser +import errno +import json +import os +import re +import subprocess +import sys +from pathlib import Path +from typing import Any, Callable, cast, Dict, List, Optional, Tuple, Union +from typing import NoReturn +import functools + +have_tomllib = True +if sys.version_info >= (3, 11): + import tomllib +else: + try: + import tomli as tomllib + except ImportError: + have_tomllib = False + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + VCS: str + style: str + tag_prefix: str + versionfile_source: str + versionfile_build: Optional[str] + parentdir_prefix: Optional[str] + verbose: Optional[bool] + + +def get_root() -> str: + """Get the project root directory. + + We require that all commands are run from the project root, i.e. the + directory that contains setup.py, setup.cfg, and versioneer.py . + """ + root = os.path.realpath(os.path.abspath(os.getcwd())) + setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") + versioneer_py = os.path.join(root, "versioneer.py") + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): + # allow 'python path/to/setup.py COMMAND' + root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0]))) + setup_py = os.path.join(root, "setup.py") + pyproject_toml = os.path.join(root, "pyproject.toml") + versioneer_py = os.path.join(root, "versioneer.py") + if not ( + os.path.exists(setup_py) + or os.path.exists(pyproject_toml) + or os.path.exists(versioneer_py) + ): + err = ("Versioneer was unable to run the project root directory. " + "Versioneer requires setup.py to be executed from " + "its immediate directory (like 'python setup.py COMMAND'), " + "or in a way that lets it use sys.argv[0] to find the root " + "(like 'python path/to/setup.py COMMAND').") + raise VersioneerBadRootError(err) + try: + # Certain runtime workflows (setup.py install/develop in a setuptools + # tree) execute all dependencies in a single python process, so + # "versioneer" may be imported multiple times, and python's shared + # module-import table will cache the first one. So we can't use + # os.path.dirname(__file__), as that will find whichever + # versioneer.py was first imported, even in later projects. 
+ my_path = os.path.realpath(os.path.abspath(__file__)) + me_dir = os.path.normcase(os.path.splitext(my_path)[0]) + vsr_dir = os.path.normcase(os.path.splitext(versioneer_py)[0]) + if me_dir != vsr_dir and "VERSIONEER_PEP518" not in globals(): + print("Warning: build in %s is using versioneer.py from %s" + % (os.path.dirname(my_path), versioneer_py)) + except NameError: + pass + return root + + +def get_config_from_root(root: str) -> VersioneerConfig: + """Read the project setup.cfg file to determine Versioneer config.""" + # This might raise OSError (if setup.cfg is missing), or + # configparser.NoSectionError (if it lacks a [versioneer] section), or + # configparser.NoOptionError (if it lacks "VCS="). See the docstring at + # the top of versioneer.py for instructions on writing your setup.cfg . + root_pth = Path(root) + pyproject_toml = root_pth / "pyproject.toml" + setup_cfg = root_pth / "setup.cfg" + section: Union[Dict[str, Any], configparser.SectionProxy, None] = None + if pyproject_toml.exists() and have_tomllib: + try: + with open(pyproject_toml, 'rb') as fobj: + pp = tomllib.load(fobj) + section = pp['tool']['versioneer'] + except (tomllib.TOMLDecodeError, KeyError) as e: + print(f"Failed to load config from {pyproject_toml}: {e}") + print("Try to load it from setup.cfg") + if not section: + parser = configparser.ConfigParser() + with open(setup_cfg) as cfg_file: + parser.read_file(cfg_file) + parser.get("versioneer", "VCS") # raise error if missing + + section = parser["versioneer"] + + # `cast`` really shouldn't be used, but its simplest for the + # common VersioneerConfig users at the moment. We verify against + # `None` values elsewhere where it matters + + cfg = VersioneerConfig() + cfg.VCS = section['VCS'] + cfg.style = section.get("style", "") + cfg.versionfile_source = cast(str, section.get("versionfile_source")) + cfg.versionfile_build = section.get("versionfile_build") + cfg.tag_prefix = cast(str, section.get("tag_prefix")) + if cfg.tag_prefix in ("''", '""', None): + cfg.tag_prefix = "" + cfg.parentdir_prefix = section.get("parentdir_prefix") + if isinstance(section, configparser.SectionProxy): + # Make sure configparser translates to bool + cfg.verbose = section.getboolean("verbose") + else: + cfg.verbose = section.get("verbose") + + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +# these dictionaries contain VCS-specific tools +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} + + +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: + """Store f in HANDLERS[vcs][method].""" + HANDLERS.setdefault(vcs, {})[method] = f + return f + return decorate + + +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + try: + dispcmd = str([command] + args) + # remember shell=False, so use git.cmd 
on windows, not just git + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) + break + except OSError as e: + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, process.returncode + return stdout, process.returncode + + +LONG_VERSION_PY['git'] = r''' +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. +# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools + + +def get_keywords() -> Dict[str, str]: + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). + git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s" + git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s" + git_date = "%(DOLLAR)sFormat:%%ci%(DOLLAR)s" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool + + +def get_config() -> VersioneerConfig: + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "%(STYLE)s" + cfg.tag_prefix = "%(TAG_PREFIX)s" + cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s" + cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} + + +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides 
the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + try: + dispcmd = str([command] + args) + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) + break + except OSError as e: + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %%s" %% dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %%s" %% (commands,)) + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print("unable to run %%s (error)" %% dispcmd) + print("stdout was %%s" %% stdout) + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %%s but none started with prefix %%s" %% + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords: Dict[str, str] = {} + try: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%%cI", which expands to an ISO-8601 -compliant + # datestamp. 
However we prefer "%%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %%d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r'\d', r)} + if verbose: + print("discarding '%%s', no digits" %% ",".join(refs - tags)) + if verbose: + print("likely tags: %%s" %% ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %%s" %% r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. 
+ env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) + if rc != 0: + if verbose: + print("Directory %%s not under git control" %% root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces: Dict[str, Any] = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? 
+ pieces["error"] = ("unable to parse git-describe output: '%%s'" + %% describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%%s' doesn't start with prefix '%%s'" + print(fmt %% (full_tag, tag_prefix)) + pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'" + %% (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces: Dict[str, Any]) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces: Dict[str, Any]) -> str: + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%%d.g%%s" %% (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 
0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%%d.dev%%d" %% (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%%d" %% (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = "0.post0.dev%%d" %% pieces["distance"] + return rendered + + +def render_pep440_post(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + return rendered + + +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%%s" %% pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%%d" %% pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces: Dict[str, Any]) -> str: + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces: Dict[str, Any]) -> str: + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. 
HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%%s'" %% style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions() -> Dict[str, Any]: + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. + for _ in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} +''' + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. 
+ keywords: Dict[str, str] = {} + try: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". + tags = {r for r in refs if re.search(r'\d', r)} + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. 
"2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces: Dict[str, Any] = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. + branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. 
+ branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def do_vcs_install(versionfile_source: str, ipy: Optional[str]) -> None: + """Git-specific installation logic for Versioneer. + + For Git, this means creating/changing .gitattributes to mark _version.py + for export-subst keyword substitution. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + files = [versionfile_source] + if ipy: + files.append(ipy) + if "VERSIONEER_PEP518" not in globals(): + try: + my_path = __file__ + if my_path.endswith((".pyc", ".pyo")): + my_path = os.path.splitext(my_path)[0] + ".py" + versioneer_file = os.path.relpath(my_path) + except NameError: + versioneer_file = "versioneer.py" + files.append(versioneer_file) + present = False + try: + with open(".gitattributes", "r") as fobj: + for line in fobj: + if line.strip().startswith(versionfile_source): + if "export-subst" in line.strip().split()[1:]: + present = True + break + except OSError: + pass + if not present: + with open(".gitattributes", "a+") as fobj: + fobj.write(f"{versionfile_source} export-subst\n") + files.append(".gitattributes") + run_command(GITS, ["add", "--"] + files) + + +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. 
We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +SHORT_VERSION_PY = """ +# This file was generated by 'versioneer.py' (0.29) from +# revision-control system data, or from the parent directory name of an +# unpacked source archive. Distribution tarballs contain a pre-generated copy +# of this file. + +import json + +version_json = ''' +%s +''' # END VERSION_JSON + + +def get_versions(): + return json.loads(version_json) +""" + + +def versions_from_file(filename: str) -> Dict[str, Any]: + """Try to determine the version from _version.py if present.""" + try: + with open(filename) as f: + contents = f.read() + except OSError: + raise NotThisMethod("unable to read _version.py") + mo = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + mo = re.search(r"version_json = '''\r\n(.*)''' # END VERSION_JSON", + contents, re.M | re.S) + if not mo: + raise NotThisMethod("no version_json in _version.py") + return json.loads(mo.group(1)) + + +def write_to_version_file(filename: str, versions: Dict[str, Any]) -> None: + """Write the given version number to the given _version.py file.""" + contents = json.dumps(versions, sort_keys=True, + indent=1, separators=(",", ": ")) + with open(filename, "w") as f: + f.write(SHORT_VERSION_PY % contents) + + print("set %s to '%s'" % (filename, versions["version"])) + + +def plus_or_dot(pieces: Dict[str, Any]) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces: Dict[str, Any]) -> str: + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). + + Exceptions: + 1: no tags. 
0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces: Dict[str, Any]) -> str: + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces: Dict[str, Any]) -> str: + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +class VersioneerBadRootError(Exception): + """The project root directory is unknown or missing key files.""" + + +def get_versions(verbose: bool = False) -> Dict[str, Any]: + """Get the project version from whatever source is available. + + Returns dict with two keys: 'version' and 'full'. + """ + if "versioneer" in sys.modules: + # see the discussion in cmdclass.py:get_cmdclass() + del sys.modules["versioneer"] + + root = get_root() + cfg = get_config_from_root(root) + + assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg" + handlers = HANDLERS.get(cfg.VCS) + assert handlers, "unrecognized VCS '%s'" % cfg.VCS + verbose = verbose or bool(cfg.verbose) # `bool()` used to avoid `None` + assert cfg.versionfile_source is not None, \ + "please set versioneer.versionfile_source" + assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix" + + versionfile_abs = os.path.join(root, cfg.versionfile_source) + + # extract version from first of: _version.py, VCS command (e.g. 'git + # describe'), parentdir. 
This is meant to work for developers using a + # source checkout, for users of a tarball created by 'setup.py sdist', + # and for users of a tarball/zipball created by 'git archive' or github's + # download-from-tag feature or the equivalent in other VCSes. + + get_keywords_f = handlers.get("get_keywords") + from_keywords_f = handlers.get("keywords") + if get_keywords_f and from_keywords_f: + try: + keywords = get_keywords_f(versionfile_abs) + ver = from_keywords_f(keywords, cfg.tag_prefix, verbose) + if verbose: + print("got version from expanded keyword %s" % ver) + return ver + except NotThisMethod: + pass + + try: + ver = versions_from_file(versionfile_abs) + if verbose: + print("got version from file %s %s" % (versionfile_abs, ver)) + return ver + except NotThisMethod: + pass + + from_vcs_f = handlers.get("pieces_from_vcs") + if from_vcs_f: + try: + pieces = from_vcs_f(cfg.tag_prefix, root, verbose) + ver = render(pieces, cfg.style) + if verbose: + print("got version from VCS %s" % ver) + return ver + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + if verbose: + print("got version from parentdir %s" % ver) + return ver + except NotThisMethod: + pass + + if verbose: + print("unable to compute version") + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, "error": "unable to compute version", + "date": None} + + +def get_version() -> str: + """Get the short version string for this project.""" + return get_versions()["version"] + + +def get_cmdclass(cmdclass: Optional[Dict[str, Any]] = None): + """Get the custom setuptools subclasses used by Versioneer. + + If the package uses a different cmdclass (e.g. one from numpy), it + should be provide as an argument. + """ + if "versioneer" in sys.modules: + del sys.modules["versioneer"] + # this fixes the "python setup.py develop" case (also 'install' and + # 'easy_install .'), in which subdependencies of the main project are + # built (using setup.py bdist_egg) in the same python process. Assume + # a main project A and a dependency B, which use different versions + # of Versioneer. A's setup.py imports A's Versioneer, leaving it in + # sys.modules by the time B's setup.py is executed, causing B to run + # with the wrong versioneer. Setuptools wraps the sub-dep builds in a + # sandbox that restores sys.modules to it's pre-build state, so the + # parent is protected against the child's "import versioneer". By + # removing ourselves from sys.modules here, before the child build + # happens, we protect the child from the parent's versioneer too. 
+ # Also see https://github.com/python-versioneer/python-versioneer/issues/52 + + cmds = {} if cmdclass is None else cmdclass.copy() + + # we add "version" to setuptools + from setuptools import Command + + class cmd_version(Command): + description = "report generated version string" + user_options: List[Tuple[str, str, str]] = [] + boolean_options: List[str] = [] + + def initialize_options(self) -> None: + pass + + def finalize_options(self) -> None: + pass + + def run(self) -> None: + vers = get_versions(verbose=True) + print("Version: %s" % vers["version"]) + print(" full-revisionid: %s" % vers.get("full-revisionid")) + print(" dirty: %s" % vers.get("dirty")) + print(" date: %s" % vers.get("date")) + if vers["error"]: + print(" error: %s" % vers["error"]) + cmds["version"] = cmd_version + + # we override "build_py" in setuptools + # + # most invocation pathways end up running build_py: + # distutils/build -> build_py + # distutils/install -> distutils/build ->.. + # setuptools/bdist_wheel -> distutils/install ->.. + # setuptools/bdist_egg -> distutils/install_lib -> build_py + # setuptools/install -> bdist_egg ->.. + # setuptools/develop -> ? + # pip install: + # copies source tree to a tempdir before running egg_info/etc + # if .git isn't copied too, 'git describe' will fail + # then does setup.py bdist_wheel, or sometimes setup.py install + # setup.py egg_info -> ? + + # pip install -e . and setuptool/editable_wheel will invoke build_py + # but the build_py command is not expected to copy any files. + + # we override different "build_py" commands for both environments + if 'build_py' in cmds: + _build_py: Any = cmds['build_py'] + else: + from setuptools.command.build_py import build_py as _build_py + + class cmd_build_py(_build_py): + def run(self) -> None: + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_py.run(self) + if getattr(self, "editable_mode", False): + # During editable installs `.py` and data files are + # not copied to build_lib + return + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if cfg.versionfile_build: + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_py"] = cmd_build_py + + if 'build_ext' in cmds: + _build_ext: Any = cmds['build_ext'] + else: + from setuptools.command.build_ext import build_ext as _build_ext + + class cmd_build_ext(_build_ext): + def run(self) -> None: + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + _build_ext.run(self) + if self.inplace: + # build_ext --inplace will only build extensions in + # build/lib<..> dir with no _version.py to write to. + # As in place builds will already have a _version.py + # in the module dir, we do not need to write one. + return + # now locate _version.py in the new build/ directory and replace + # it with an updated value + if not cfg.versionfile_build: + return + target_versionfile = os.path.join(self.build_lib, + cfg.versionfile_build) + if not os.path.exists(target_versionfile): + print(f"Warning: {target_versionfile} does not exist, skipping " + "version update. This can happen if you are running build_ext " + "without first running build_py.") + return + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + cmds["build_ext"] = cmd_build_ext + + if "cx_Freeze" in sys.modules: # cx_freeze enabled? 
+ from cx_Freeze.dist import build_exe as _build_exe # type: ignore + # nczeczulin reports that py2exe won't like the pep440-style string + # as FILEVERSION, but it can be used for PRODUCTVERSION, e.g. + # setup(console=[{ + # "version": versioneer.get_version().split("+", 1)[0], # FILEVERSION + # "product_version": versioneer.get_version(), + # ... + + class cmd_build_exe(_build_exe): + def run(self) -> None: + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _build_exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["build_exe"] = cmd_build_exe + del cmds["build_py"] + + if 'py2exe' in sys.modules: # py2exe enabled? + try: + from py2exe.setuptools_buildexe import py2exe as _py2exe # type: ignore + except ImportError: + from py2exe.distutils_buildexe import py2exe as _py2exe # type: ignore + + class cmd_py2exe(_py2exe): + def run(self) -> None: + root = get_root() + cfg = get_config_from_root(root) + versions = get_versions() + target_versionfile = cfg.versionfile_source + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, versions) + + _py2exe.run(self) + os.unlink(target_versionfile) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % + {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + cmds["py2exe"] = cmd_py2exe + + # sdist farms its file list building out to egg_info + if 'egg_info' in cmds: + _egg_info: Any = cmds['egg_info'] + else: + from setuptools.command.egg_info import egg_info as _egg_info + + class cmd_egg_info(_egg_info): + def find_sources(self) -> None: + # egg_info.find_sources builds the manifest list and writes it + # in one shot + super().find_sources() + + # Modify the filelist and normalize it + root = get_root() + cfg = get_config_from_root(root) + self.filelist.append('versioneer.py') + if cfg.versionfile_source: + # There are rare cases where versionfile_source might not be + # included by default, so we must be explicit + self.filelist.append(cfg.versionfile_source) + self.filelist.sort() + self.filelist.remove_duplicates() + + # The write method is hidden in the manifest_maker instance that + # generated the filelist and was thrown away + # We will instead replicate their final normalization (to unicode, + # and POSIX-style paths) + from setuptools import unicode_utils + normalized = [unicode_utils.filesys_decode(f).replace(os.sep, '/') + for f in self.filelist.files] + + manifest_filename = os.path.join(self.egg_info, 'SOURCES.txt') + with open(manifest_filename, 'w') as fobj: + fobj.write('\n'.join(normalized)) + + cmds['egg_info'] = cmd_egg_info + + # we override different "sdist" commands for both environments + if 'sdist' in cmds: + _sdist: Any = cmds['sdist'] + else: + from setuptools.command.sdist import sdist as _sdist + + class cmd_sdist(_sdist): + def run(self) -> None: + versions = get_versions() + self._versioneer_generated_versions = versions + # unless we update this, the command will keep using the old + # 
version + self.distribution.metadata.version = versions["version"] + return _sdist.run(self) + + def make_release_tree(self, base_dir: str, files: List[str]) -> None: + root = get_root() + cfg = get_config_from_root(root) + _sdist.make_release_tree(self, base_dir, files) + # now locate _version.py in the new base_dir directory + # (remembering that it may be a hardlink) and replace it with an + # updated value + target_versionfile = os.path.join(base_dir, cfg.versionfile_source) + print("UPDATING %s" % target_versionfile) + write_to_version_file(target_versionfile, + self._versioneer_generated_versions) + cmds["sdist"] = cmd_sdist + + return cmds + + +CONFIG_ERROR = """ +setup.cfg is missing the necessary Versioneer configuration. You need +a section like: + + [versioneer] + VCS = git + style = pep440 + versionfile_source = src/myproject/_version.py + versionfile_build = myproject/_version.py + tag_prefix = + parentdir_prefix = myproject- + +You will also need to edit your setup.py to use the results: + + import versioneer + setup(version=versioneer.get_version(), + cmdclass=versioneer.get_cmdclass(), ...) + +Please read the docstring in ./versioneer.py for configuration instructions, +edit setup.cfg, and re-run the installer or 'python versioneer.py setup'. +""" + +SAMPLE_CONFIG = """ +# See the docstring in versioneer.py for instructions. Note that you must +# re-run 'versioneer.py setup' after changing this section, and commit the +# resulting files. + +[versioneer] +#VCS = git +#style = pep440 +#versionfile_source = +#versionfile_build = +#tag_prefix = +#parentdir_prefix = + +""" + +OLD_SNIPPET = """ +from ._version import get_versions +__version__ = get_versions()['version'] +del get_versions +""" + +INIT_PY_SNIPPET = """ +from . import {0} +__version__ = {0}.get_versions()['version'] +""" + + +def do_setup() -> int: + """Do main VCS-independent setup function for installing Versioneer.""" + root = get_root() + try: + cfg = get_config_from_root(root) + except (OSError, configparser.NoSectionError, + configparser.NoOptionError) as e: + if isinstance(e, (OSError, configparser.NoSectionError)): + print("Adding sample versioneer config to setup.cfg", + file=sys.stderr) + with open(os.path.join(root, "setup.cfg"), "a") as f: + f.write(SAMPLE_CONFIG) + print(CONFIG_ERROR, file=sys.stderr) + return 1 + + print(" creating %s" % cfg.versionfile_source) + with open(cfg.versionfile_source, "w") as f: + LONG = LONG_VERSION_PY[cfg.VCS] + f.write(LONG % {"DOLLAR": "$", + "STYLE": cfg.style, + "TAG_PREFIX": cfg.tag_prefix, + "PARENTDIR_PREFIX": cfg.parentdir_prefix, + "VERSIONFILE_SOURCE": cfg.versionfile_source, + }) + + ipy = os.path.join(os.path.dirname(cfg.versionfile_source), + "__init__.py") + maybe_ipy: Optional[str] = ipy + if os.path.exists(ipy): + try: + with open(ipy, "r") as f: + old = f.read() + except OSError: + old = "" + module = os.path.splitext(os.path.basename(cfg.versionfile_source))[0] + snippet = INIT_PY_SNIPPET.format(module) + if OLD_SNIPPET in old: + print(" replacing boilerplate in %s" % ipy) + with open(ipy, "w") as f: + f.write(old.replace(OLD_SNIPPET, snippet)) + elif snippet not in old: + print(" appending to %s" % ipy) + with open(ipy, "a") as f: + f.write(snippet) + else: + print(" %s unmodified" % ipy) + else: + print(" %s doesn't exist, ok" % ipy) + maybe_ipy = None + + # Make VCS-specific changes. For git, this means creating/changing + # .gitattributes to mark _version.py for export-subst keyword + # substitution. 
+ do_vcs_install(cfg.versionfile_source, maybe_ipy) + return 0 + + +def scan_setup_py() -> int: + """Validate the contents of setup.py against Versioneer's expectations.""" + found = set() + setters = False + errors = 0 + with open("setup.py", "r") as f: + for line in f.readlines(): + if "import versioneer" in line: + found.add("import") + if "versioneer.get_cmdclass()" in line: + found.add("cmdclass") + if "versioneer.get_version()" in line: + found.add("get_version") + if "versioneer.VCS" in line: + setters = True + if "versioneer.versionfile_source" in line: + setters = True + if len(found) != 3: + print("") + print("Your setup.py appears to be missing some important items") + print("(but I might be wrong). Please make sure it has something") + print("roughly like the following:") + print("") + print(" import versioneer") + print(" setup( version=versioneer.get_version(),") + print(" cmdclass=versioneer.get_cmdclass(), ...)") + print("") + errors += 1 + if setters: + print("You should remove lines like 'versioneer.VCS = ' and") + print("'versioneer.versionfile_source = ' . This configuration") + print("now lives in setup.cfg, and should be removed from setup.py") + print("") + errors += 1 + return errors + + +def setup_command() -> NoReturn: + """Set up Versioneer and exit with appropriate error code.""" + errors = do_setup() + errors += scan_setup_py() + sys.exit(1 if errors else 0) + + +if __name__ == "__main__": + cmd = sys.argv[1] + if cmd == "setup": + setup_command() diff --git a/sdk/python/sdk/zrok/zrok/__init__.py b/sdk/python/sdk/zrok/zrok/__init__.py index 9a61a593..3ae70bb1 100644 --- a/sdk/python/sdk/zrok/zrok/__init__.py +++ b/sdk/python/sdk/zrok/zrok/__init__.py @@ -1,2 +1,4 @@ -from . import environment -from . import access, model, share, overview \ No newline at end of file +from . import environment # noqa +from . import access, decor, model, share, overview # noqa +from . import _version +__version__ = _version.get_versions()['version'] diff --git a/sdk/python/sdk/zrok/zrok/_version.py b/sdk/python/sdk/zrok/zrok/_version.py new file mode 100644 index 00000000..b5a79645 --- /dev/null +++ b/sdk/python/sdk/zrok/zrok/_version.py @@ -0,0 +1,683 @@ + +# This file helps to compute a version number in source trees obtained from +# git-archive tarball (such as those provided by githubs download-from-tag +# feature). Distribution tarballs (built by setup.py sdist) and build +# directories (produced by setup.py build) will contain a much shorter file +# that just contains the computed version number. + +# This file is released into the public domain. +# Generated by versioneer-0.29 +# https://github.com/python-versioneer/python-versioneer + +"""Git implementation of _version.py.""" + +import errno +import os +import re +import subprocess +import sys +from typing import Any, Callable, Dict, List, Optional, Tuple +import functools + + +def get_keywords() -> Dict[str, str]: + """Get the keywords needed to look up the version information.""" + # these strings will be replaced by git during git-archive. + # setup.py/versioneer.py will grep for the variable names, so they must + # each be defined on a line of their own. _version.py will just call + # get_keywords(). 
+ git_refnames = "$Format:%d$" + git_full = "$Format:%H$" + git_date = "$Format:%ci$" + keywords = {"refnames": git_refnames, "full": git_full, "date": git_date} + return keywords + + +class VersioneerConfig: + """Container for Versioneer configuration parameters.""" + + VCS: str + style: str + tag_prefix: str + parentdir_prefix: str + versionfile_source: str + verbose: bool + + +def get_config() -> VersioneerConfig: + """Create, populate and return the VersioneerConfig() object.""" + # these strings are filled in when 'setup.py versioneer' creates + # _version.py + cfg = VersioneerConfig() + cfg.VCS = "git" + cfg.style = "pep440-pre" + cfg.tag_prefix = "v" + cfg.parentdir_prefix = "zrok-" + cfg.versionfile_source = "zrok/_version.py" + cfg.verbose = False + return cfg + + +class NotThisMethod(Exception): + """Exception raised if a method is not valid for the current scenario.""" + + +LONG_VERSION_PY: Dict[str, str] = {} +HANDLERS: Dict[str, Dict[str, Callable]] = {} + + +def register_vcs_handler(vcs: str, method: str) -> Callable: # decorator + """Create decorator to mark a method as the handler of a VCS.""" + def decorate(f: Callable) -> Callable: + """Store f in HANDLERS[vcs][method].""" + if vcs not in HANDLERS: + HANDLERS[vcs] = {} + HANDLERS[vcs][method] = f + return f + return decorate + + +def run_command( + commands: List[str], + args: List[str], + cwd: Optional[str] = None, + verbose: bool = False, + hide_stderr: bool = False, + env: Optional[Dict[str, str]] = None, +) -> Tuple[Optional[str], Optional[int]]: + """Call the given command(s).""" + assert isinstance(commands, list) + process = None + + popen_kwargs: Dict[str, Any] = {} + if sys.platform == "win32": + # This hides the console window if pythonw.exe is used + startupinfo = subprocess.STARTUPINFO() + startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW + popen_kwargs["startupinfo"] = startupinfo + + for command in commands: + try: + dispcmd = str([command] + args) + # remember shell=False, so use git.cmd on windows, not just git + process = subprocess.Popen([command] + args, cwd=cwd, env=env, + stdout=subprocess.PIPE, + stderr=(subprocess.PIPE if hide_stderr + else None), **popen_kwargs) + break + except OSError as e: + if e.errno == errno.ENOENT: + continue + if verbose: + print("unable to run %s" % dispcmd) + print(e) + return None, None + else: + if verbose: + print("unable to find command, tried %s" % (commands,)) + return None, None + stdout = process.communicate()[0].strip().decode() + if process.returncode != 0: + if verbose: + print("unable to run %s (error)" % dispcmd) + print("stdout was %s" % stdout) + return None, process.returncode + return stdout, process.returncode + + +def versions_from_parentdir( + parentdir_prefix: str, + root: str, + verbose: bool, +) -> Dict[str, Any]: + """Try to determine the version from the parent directory name. + + Source tarballs conventionally unpack into a directory that includes both + the project name and a version string. 
We will also support searching up + two directory levels for an appropriately named parent directory + """ + rootdirs = [] + + for _ in range(3): + dirname = os.path.basename(root) + if dirname.startswith(parentdir_prefix): + return {"version": dirname[len(parentdir_prefix):], + "full-revisionid": None, + "dirty": False, "error": None, "date": None} + rootdirs.append(root) + root = os.path.dirname(root) # up a level + + if verbose: + print("Tried directories %s but none started with prefix %s" % + (str(rootdirs), parentdir_prefix)) + raise NotThisMethod("rootdir doesn't start with parentdir_prefix") + + +@register_vcs_handler("git", "get_keywords") +def git_get_keywords(versionfile_abs: str) -> Dict[str, str]: + """Extract version information from the given file.""" + # the code embedded in _version.py can just fetch the value of these + # keywords. When used from setup.py, we don't want to import _version.py, + # so we do it with a regexp instead. This function is not used from + # _version.py. + keywords: Dict[str, str] = {} + try: + with open(versionfile_abs, "r") as fobj: + for line in fobj: + if line.strip().startswith("git_refnames ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["refnames"] = mo.group(1) + if line.strip().startswith("git_full ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["full"] = mo.group(1) + if line.strip().startswith("git_date ="): + mo = re.search(r'=\s*"(.*)"', line) + if mo: + keywords["date"] = mo.group(1) + except OSError: + pass + return keywords + + +@register_vcs_handler("git", "keywords") +def git_versions_from_keywords( + keywords: Dict[str, str], + tag_prefix: str, + verbose: bool, +) -> Dict[str, Any]: + """Get version information from git keywords.""" + if "refnames" not in keywords: + raise NotThisMethod("Short version file found") + date = keywords.get("date") + if date is not None: + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + + # git-2.2.0 added "%cI", which expands to an ISO-8601 -compliant + # datestamp. However we prefer "%ci" (which expands to an "ISO-8601 + # -like" string, which we must then edit to make compliant), because + # it's been around since git-1.5.3, and it's too difficult to + # discover which version we're using, or to work around using an + # older one. + date = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + refnames = keywords["refnames"].strip() + if refnames.startswith("$Format"): + if verbose: + print("keywords are unexpanded, not using") + raise NotThisMethod("unexpanded keywords, not a git-archive tarball") + refs = {r.strip() for r in refnames.strip("()").split(",")} + # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of + # just "foo-1.0". If we see a "tag: " prefix, prefer those. + TAG = "tag: " + tags = {r[len(TAG):] for r in refs if r.startswith(TAG)} + if not tags: + # Either we're using git < 1.8.3, or there really are no tags. We use + # a heuristic: assume all version tags have a digit. The old git %d + # expansion behaves like git log --decorate=short and strips out the + # refs/heads/ and refs/tags/ prefixes that would let us distinguish + # between branches and tags. By ignoring refnames without digits, we + # filter out many common branch names like "release" and + # "stabilization", as well as "HEAD" and "master". 
+ tags = {r for r in refs if re.search(r'\d', r)} + if verbose: + print("discarding '%s', no digits" % ",".join(refs - tags)) + if verbose: + print("likely tags: %s" % ",".join(sorted(tags))) + for ref in sorted(tags): + # sorting will prefer e.g. "2.0" over "2.0rc1" + if ref.startswith(tag_prefix): + r = ref[len(tag_prefix):] + # Filter out refs that exactly match prefix or that don't start + # with a number once the prefix is stripped (mostly a concern + # when prefix is '') + if not re.match(r'\d', r): + continue + if verbose: + print("picking %s" % r) + return {"version": r, + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": None, + "date": date} + # no suitable tags, so version is "0+unknown", but full hex is still there + if verbose: + print("no suitable tags, using unknown + full revision id") + return {"version": "0+unknown", + "full-revisionid": keywords["full"].strip(), + "dirty": False, "error": "no suitable tags", "date": None} + + +@register_vcs_handler("git", "pieces_from_vcs") +def git_pieces_from_vcs( + tag_prefix: str, + root: str, + verbose: bool, + runner: Callable = run_command +) -> Dict[str, Any]: + """Get version from 'git describe' in the root of the source tree. + + This only gets called if the git-archive 'subst' keywords were *not* + expanded, and _version.py hasn't already been rewritten with a short + version string, meaning we're inside a checked out source tree. + """ + GITS = ["git"] + if sys.platform == "win32": + GITS = ["git.cmd", "git.exe"] + + # GIT_DIR can interfere with correct operation of Versioneer. + # It may be intended to be passed to the Versioneer-versioned project, + # but that should not change where we get our version from. + env = os.environ.copy() + env.pop("GIT_DIR", None) + runner = functools.partial(runner, env=env) + + _, rc = runner(GITS, ["rev-parse", "--git-dir"], cwd=root, + hide_stderr=not verbose) + if rc != 0: + if verbose: + print("Directory %s not under git control" % root) + raise NotThisMethod("'git rev-parse --git-dir' returned error") + + # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty] + # if there isn't one, this yields HEX[-dirty] (no NUM) + describe_out, rc = runner(GITS, [ + "describe", "--tags", "--dirty", "--always", "--long", + "--match", f"{tag_prefix}[[:digit:]]*" + ], cwd=root) + # --long was added in git-1.5.5 + if describe_out is None: + raise NotThisMethod("'git describe' failed") + describe_out = describe_out.strip() + full_out, rc = runner(GITS, ["rev-parse", "HEAD"], cwd=root) + if full_out is None: + raise NotThisMethod("'git rev-parse' failed") + full_out = full_out.strip() + + pieces: Dict[str, Any] = {} + pieces["long"] = full_out + pieces["short"] = full_out[:7] # maybe improved later + pieces["error"] = None + + branch_name, rc = runner(GITS, ["rev-parse", "--abbrev-ref", "HEAD"], + cwd=root) + # --abbrev-ref was added in git-1.6.3 + if rc != 0 or branch_name is None: + raise NotThisMethod("'git rev-parse --abbrev-ref' returned error") + branch_name = branch_name.strip() + + if branch_name == "HEAD": + # If we aren't exactly on a branch, pick a branch which represents + # the current commit. If all else fails, we are on a branchless + # commit. 
+ branches, rc = runner(GITS, ["branch", "--contains"], cwd=root) + # --contains was added in git-1.5.4 + if rc != 0 or branches is None: + raise NotThisMethod("'git branch --contains' returned error") + branches = branches.split("\n") + + # Remove the first line if we're running detached + if "(" in branches[0]: + branches.pop(0) + + # Strip off the leading "* " from the list of branches. + branches = [branch[2:] for branch in branches] + if "master" in branches: + branch_name = "master" + elif not branches: + branch_name = None + else: + # Pick the first branch that is returned. Good or bad. + branch_name = branches[0] + + pieces["branch"] = branch_name + + # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty] + # TAG might have hyphens. + git_describe = describe_out + + # look for -dirty suffix + dirty = git_describe.endswith("-dirty") + pieces["dirty"] = dirty + if dirty: + git_describe = git_describe[:git_describe.rindex("-dirty")] + + # now we have TAG-NUM-gHEX or HEX + + if "-" in git_describe: + # TAG-NUM-gHEX + mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe) + if not mo: + # unparsable. Maybe git-describe is misbehaving? + pieces["error"] = ("unable to parse git-describe output: '%s'" + % describe_out) + return pieces + + # tag + full_tag = mo.group(1) + if not full_tag.startswith(tag_prefix): + if verbose: + fmt = "tag '%s' doesn't start with prefix '%s'" + print(fmt % (full_tag, tag_prefix)) + pieces["error"] = ("tag '%s' doesn't start with prefix '%s'" + % (full_tag, tag_prefix)) + return pieces + pieces["closest-tag"] = full_tag[len(tag_prefix):] + + # distance: number of commits since tag + pieces["distance"] = int(mo.group(2)) + + # commit: short hex revision ID + pieces["short"] = mo.group(3) + + else: + # HEX: no tags + pieces["closest-tag"] = None + out, rc = runner(GITS, ["rev-list", "HEAD", "--left-right"], cwd=root) + pieces["distance"] = len(out.split()) # total number of commits + + # commit date: see ISO-8601 comment in git_versions_from_keywords() + date = runner(GITS, ["show", "-s", "--format=%ci", "HEAD"], cwd=root)[0].strip() + # Use only the last line. Previous lines may contain GPG signature + # information. + date = date.splitlines()[-1] + pieces["date"] = date.strip().replace(" ", "T", 1).replace(" ", "", 1) + + return pieces + + +def plus_or_dot(pieces: Dict[str, Any]) -> str: + """Return a + if we don't already have one, else return a .""" + if "+" in pieces.get("closest-tag", ""): + return "." + return "+" + + +def render_pep440(pieces: Dict[str, Any]) -> str: + """Build up version string, with post-release "local version identifier". + + Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you + get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty + + Exceptions: + 1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_branch(pieces: Dict[str, Any]) -> str: + """TAG[[.dev0]+DISTANCE.gHEX[.dirty]] . + + The ".dev0" means not master branch. Note that .dev0 sorts backwards + (a feature branch will appear "older" than the master branch). 
+ + Exceptions: + 1: no tags. 0[.dev0]+untagged.DISTANCE.gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "%d.g%s" % (pieces["distance"], pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0" + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+untagged.%d.g%s" % (pieces["distance"], + pieces["short"]) + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def pep440_split_post(ver: str) -> Tuple[str, Optional[int]]: + """Split pep440 version string at the post-release segment. + + Returns the release segments before the post-release and the + post-release version number (or -1 if no post-release segment is present). + """ + vc = str.split(ver, ".post") + return vc[0], int(vc[1] or 0) if len(vc) == 2 else None + + +def render_pep440_pre(pieces: Dict[str, Any]) -> str: + """TAG[.postN.devDISTANCE] -- No -dirty. + + Exceptions: + 1: no tags. 0.post0.devDISTANCE + """ + if pieces["closest-tag"]: + if pieces["distance"]: + # update the post release segment + tag_version, post_version = pep440_split_post(pieces["closest-tag"]) + rendered = tag_version + if post_version is not None: + rendered += ".post%d.dev%d" % (post_version + 1, pieces["distance"]) + else: + rendered += ".post0.dev%d" % (pieces["distance"]) + else: + # no commits, use the tag as the version + rendered = pieces["closest-tag"] + else: + # exception #1 + rendered = "0.post0.dev%d" % pieces["distance"] + return rendered + + +def render_pep440_post(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX] . + + The ".dev0" means dirty. Note that .dev0 sorts backwards + (a dirty tree will appear "older" than the corresponding clean one), + but you shouldn't be releasing software with -dirty anyways. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + return rendered + + +def render_pep440_post_branch(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]+gHEX[.dirty]] . + + The ".dev0" means not master branch. + + Exceptions: + 1: no tags. 0.postDISTANCE[.dev0]+gHEX[.dirty] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += plus_or_dot(pieces) + rendered += "g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["branch"] != "master": + rendered += ".dev0" + rendered += "+g%s" % pieces["short"] + if pieces["dirty"]: + rendered += ".dirty" + return rendered + + +def render_pep440_old(pieces: Dict[str, Any]) -> str: + """TAG[.postDISTANCE[.dev0]] . + + The ".dev0" means dirty. + + Exceptions: + 1: no tags. 
0.postDISTANCE[.dev0] + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"] or pieces["dirty"]: + rendered += ".post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + else: + # exception #1 + rendered = "0.post%d" % pieces["distance"] + if pieces["dirty"]: + rendered += ".dev0" + return rendered + + +def render_git_describe(pieces: Dict[str, Any]) -> str: + """TAG[-DISTANCE-gHEX][-dirty]. + + Like 'git describe --tags --dirty --always'. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + if pieces["distance"]: + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render_git_describe_long(pieces: Dict[str, Any]) -> str: + """TAG-DISTANCE-gHEX[-dirty]. + + Like 'git describe --tags --dirty --always -long'. + The distance/hash is unconditional. + + Exceptions: + 1: no tags. HEX[-dirty] (note: no 'g' prefix) + """ + if pieces["closest-tag"]: + rendered = pieces["closest-tag"] + rendered += "-%d-g%s" % (pieces["distance"], pieces["short"]) + else: + # exception #1 + rendered = pieces["short"] + if pieces["dirty"]: + rendered += "-dirty" + return rendered + + +def render(pieces: Dict[str, Any], style: str) -> Dict[str, Any]: + """Render the given version pieces into the requested style.""" + if pieces["error"]: + return {"version": "unknown", + "full-revisionid": pieces.get("long"), + "dirty": None, + "error": pieces["error"], + "date": None} + + if not style or style == "default": + style = "pep440" # the default + + if style == "pep440": + rendered = render_pep440(pieces) + elif style == "pep440-branch": + rendered = render_pep440_branch(pieces) + elif style == "pep440-pre": + rendered = render_pep440_pre(pieces) + elif style == "pep440-post": + rendered = render_pep440_post(pieces) + elif style == "pep440-post-branch": + rendered = render_pep440_post_branch(pieces) + elif style == "pep440-old": + rendered = render_pep440_old(pieces) + elif style == "git-describe": + rendered = render_git_describe(pieces) + elif style == "git-describe-long": + rendered = render_git_describe_long(pieces) + else: + raise ValueError("unknown style '%s'" % style) + + return {"version": rendered, "full-revisionid": pieces["long"], + "dirty": pieces["dirty"], "error": None, + "date": pieces.get("date")} + + +def get_versions() -> Dict[str, Any]: + """Get version information or return default if unable to do so.""" + # I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have + # __file__, we can work backwards from there to the root. Some + # py2exe/bbfreeze/non-CPython implementations don't do __file__, in which + # case we can only use expanded keywords. + + cfg = get_config() + verbose = cfg.verbose + + try: + return git_versions_from_keywords(get_keywords(), cfg.tag_prefix, + verbose) + except NotThisMethod: + pass + + try: + root = os.path.realpath(__file__) + # versionfile_source is the relative path from the top of the source + # tree (where the .git directory might live) to this file. Invert + # this to find the root from __file__. 
+ for _ in cfg.versionfile_source.split('/'): + root = os.path.dirname(root) + except NameError: + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to find root of source tree", + "date": None} + + try: + pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose) + return render(pieces, cfg.style) + except NotThisMethod: + pass + + try: + if cfg.parentdir_prefix: + return versions_from_parentdir(cfg.parentdir_prefix, root, verbose) + except NotThisMethod: + pass + + return {"version": "0+unknown", "full-revisionid": None, + "dirty": None, + "error": "unable to compute version", "date": None} diff --git a/sdk/python/sdk/zrok/zrok/access.py b/sdk/python/sdk/zrok/zrok/access.py index 078e27e0..dc3a1dfd 100644 --- a/sdk/python/sdk/zrok/zrok/access.py +++ b/sdk/python/sdk/zrok/zrok/access.py @@ -3,6 +3,24 @@ from zrok_api.models import AccessRequest, UnaccessRequest from zrok_api.api import ShareApi from zrok import model + +class Access(): + root: Root + request: model.AccessRequest + access: model.Access + + def __init__(self, root: Root, request: model.AccessRequest): + self.root = root + self.request = request + + def __enter__(self) -> model.Access: + self.access = CreateAccess(root=self.root, request=self.request) + return self.access + + def __exit__(self, exception_type, exception_value, exception_traceback): + DeleteAccess(root=self.root, acc=self.access) + + def CreateAccess(root: Root, request: model.AccessRequest) -> model.Access: if not root.IsEnabled(): raise Exception("environment is not enabled; enable with 'zrok enable' first!") @@ -19,20 +37,21 @@ def CreateAccess(root: Root, request: model.AccessRequest) -> model.Access: except Exception as e: raise Exception("unable to create access", e) return model.Access(Token=res.frontend_token, - ShareToken=request.ShareToken, - BackendMode=res.backend_mode) + ShareToken=request.ShareToken, + BackendMode=res.backend_mode) + def DeleteAccess(root: Root, acc: model.Access): req = UnaccessRequest(frontend_token=acc.Token, shr_token=acc.ShareToken, env_zid=root.env.ZitiIdentity) - + try: zrok = root.Client() except Exception as e: raise Exception("error getting zrok client", e) - + try: ShareApi(zrok).unaccess(body=req) except Exception as e: - raise Exception("error deleting access", e) \ No newline at end of file + raise Exception("error deleting access", e) diff --git a/sdk/python/sdk/zrok/zrok/decor.py b/sdk/python/sdk/zrok/zrok/decor.py new file mode 100644 index 00000000..afaca880 --- /dev/null +++ b/sdk/python/sdk/zrok/zrok/decor.py @@ -0,0 +1,33 @@ +from dataclasses import dataclass +import openziti +from zrok.environment.root import Root + + +@dataclass +class Opts: + root: Root + shrToken: str + bindPort: int + bindHost: str = "" + + +class MonkeyPatch(openziti.monkeypatch): + def __init__(self, opts: {}, *args, **kwargs): + zif = opts['cfg'].root.ZitiIdentityNamed(opts['cfg'].root.EnvironmentIdentityName()) + cfg = dict(ztx=openziti.load(zif), service=opts['cfg'].shrToken) + super(MonkeyPatch, self).__init__(bindings={(opts['cfg'].bindHost, opts['cfg'].bindPort): cfg}, *args, **kwargs) + + def __enter__(self): + return self + + def __exit__(self, exc_type, exc_val, exc_tb): + super(MonkeyPatch, self).__exit__(exc_type, exc_val, exc_tb) + + +def zrok(opts: {}, *zargs, **zkwargs): + def zrockify_func(func): + def zrockified(*args, **kwargs): + with MonkeyPatch(opts=opts, *zargs, **zkwargs): + func(*args, **kwargs) + return zrockified + return zrockify_func diff --git 
a/sdk/python/sdk/zrok/zrok/dialer.py b/sdk/python/sdk/zrok/zrok/dialer.py index 37d7fdd0..adf3c632 100644 --- a/sdk/python/sdk/zrok/zrok/dialer.py +++ b/sdk/python/sdk/zrok/zrok/dialer.py @@ -2,8 +2,9 @@ from zrok.environment.root import Root import openziti from socket import SOCK_STREAM + def Dialer(shrToken: str, root: Root) -> openziti.zitisock.ZitiSocket: openziti.load(root.ZitiIdentityNamed(root.EnvironmentIdentityName())) - client = openziti.socket(type = SOCK_STREAM) + client = openziti.socket(type=SOCK_STREAM) client.connect((shrToken, 1)) - return client \ No newline at end of file + return client diff --git a/sdk/python/sdk/zrok/zrok/environment/__init__.py b/sdk/python/sdk/zrok/zrok/environment/__init__.py index da433371..c8f5e5a1 100644 --- a/sdk/python/sdk/zrok/zrok/environment/__init__.py +++ b/sdk/python/sdk/zrok/zrok/environment/__init__.py @@ -1 +1 @@ -from . import dirs, root \ No newline at end of file +from . import dirs, root # noqa diff --git a/sdk/python/sdk/zrok/zrok/environment/dirs.py b/sdk/python/sdk/zrok/zrok/environment/dirs.py index d269fddd..4b53d292 100644 --- a/sdk/python/sdk/zrok/zrok/environment/dirs.py +++ b/sdk/python/sdk/zrok/zrok/environment/dirs.py @@ -1,26 +1,32 @@ from pathlib import Path import os + def rootDir() -> str: home = str(Path.home()) return os.path.join(home, ".zrok") + def metadataFile() -> str: zrd = rootDir() return os.path.join(zrd, "metadata.json") + def configFile() -> str: zrd = rootDir() return os.path.join(zrd, "config.json") + def environmentFile() -> str: zrd = rootDir() return os.path.join(zrd, "environment.json") + def identitiesDir() -> str: zrd = rootDir() return os.path.join(zrd, "identities") + def identityFile(name: str) -> str: idd = identitiesDir() - return os.path.join(idd, name + ".json") \ No newline at end of file + return os.path.join(idd, name + ".json") diff --git a/sdk/python/sdk/zrok/zrok/environment/root.py b/sdk/python/sdk/zrok/zrok/environment/root.py index 242ef51a..65e483cb 100644 --- a/sdk/python/sdk/zrok/zrok/environment/root.py +++ b/sdk/python/sdk/zrok/zrok/environment/root.py @@ -1,6 +1,6 @@ from dataclasses import dataclass, field from typing import NamedTuple -from .dirs import * +from .dirs import identityFile, rootDir, configFile, environmentFile, metadataFile import os import json import zrok_api as zrok @@ -9,25 +9,30 @@ import re V = "v0.4" + @dataclass class Metadata: V: str = "" RootPath: str = "" + @dataclass class Config: ApiEndpoint: str = "" + @dataclass class Environment: Token: str = "" ZitiIdentity: str = "" ApiEndpoint: str = "" + class ApiEndpoint(NamedTuple): endpoint: str frm: str + @dataclass class Root: meta: Metadata = field(default_factory=Metadata) @@ -36,7 +41,7 @@ class Root: def HasConfig(self) -> bool: return self.cfg != Config() - + def Client(self) -> zrok.ApiClient: apiEndpoint = self.ApiEndpoint() @@ -45,14 +50,13 @@ class Root: cfg.api_key["x-token"] = self.env.Token cfg.api_key_prefix['Authorization'] = 'Bearer' - zrock_client = zrok.ApiClient(configuration=cfg) v = zrok.MetadataApi(zrock_client).version() # allow reported version string to be optionally prefixed with - # "refs/heads/" or "refs/tags/" + # "refs/heads/" or "refs/tags/" rxp = re.compile("^(refs/(heads|tags)/)?" 
+ V) if not rxp.match(v): - raise Exception("Expected a '" + V + "' version, received: '" + v+ "'") + raise Exception("expected a '" + V + "' version, received: '" + v + "'") return zrock_client def ApiEndpoint(self) -> ApiEndpoint: @@ -73,25 +77,27 @@ class Root: frm = "env" return ApiEndpoint(apiEndpoint.rstrip("/"), frm) - + def IsEnabled(self) -> bool: return self.env != Environment() - + def PublicIdentityName(self) -> str: return "public" - + def EnvironmentIdentityName(self) -> str: return "environment" - + def ZitiIdentityNamed(self, name: str) -> str: return identityFile(name) + def Default() -> Root: r = Root() root = rootDir() r.meta = Metadata(V=V, RootPath=root) return r + def Assert() -> bool: exists = __rootExists() if exists: @@ -99,6 +105,7 @@ def Assert() -> bool: return meta.V == V return False + def Load() -> Root: r = Root() if __rootExists(): @@ -109,31 +116,40 @@ def Load() -> Root: r = Default() return r + def __rootExists() -> bool: mf = metadataFile() return os.path.isfile(mf) + def __assertMetadata(): pass + def __loadMetadata() -> Metadata: mf = metadataFile() with open(mf) as f: data = json.load(f) return Metadata(V=data["v"]) - + + def __loadConfig() -> Config: cf = configFile() with open(cf) as f: data = json.load(f) return Config(ApiEndpoint=data["api_endpoint"]) - + + def isEnabled() -> bool: ef = environmentFile() return os.path.isfile(ef) + def __loadEnvironment() -> Environment: ef = environmentFile() with open(ef) as f: data = json.load(f) - return Environment(Token=data["zrok_token"], ZitiIdentity=data["ziti_identity"], ApiEndpoint=data["api_endpoint"]) \ No newline at end of file + return Environment( + Token=data["zrok_token"], + ZitiIdentity=data["ziti_identity"], + ApiEndpoint=data["api_endpoint"]) diff --git a/sdk/python/sdk/zrok/zrok/listener.py b/sdk/python/sdk/zrok/zrok/listener.py index bce83cb7..5f85f58d 100644 --- a/sdk/python/sdk/zrok/zrok/listener.py +++ b/sdk/python/sdk/zrok/zrok/listener.py @@ -1,6 +1,7 @@ -from zrok.environment.root import Root import openziti - +from zrok.environment.root import Root + + class Listener(): shrToken: str root: Root @@ -9,7 +10,9 @@ class Listener(): def __init__(self, shrToken: str, root: Root): self.shrToken = shrToken self.root = root - ztx = openziti.load(self.root.ZitiIdentityNamed(self.root.EnvironmentIdentityName())) + ztx = openziti.load( + self.root.ZitiIdentityNamed( + self.root.EnvironmentIdentityName())) self.__server = ztx.bind(self.shrToken) def __enter__(self) -> openziti.zitisock.ZitiSocket: @@ -23,4 +26,4 @@ class Listener(): self.__server.listen() def close(self): - self.__server.close() \ No newline at end of file + self.__server.close() diff --git a/sdk/python/sdk/zrok/zrok/model.py b/sdk/python/sdk/zrok/zrok/model.py index 28efecca..8a835623 100644 --- a/sdk/python/sdk/zrok/zrok/model.py +++ b/sdk/python/sdk/zrok/zrok/model.py @@ -13,6 +13,7 @@ ShareMode = str PRIVATE_SHARE_MODE: ShareMode = "private" PUBLIC_SHARE_MODE: ShareMode = "public" + @dataclass class ShareRequest: BackendMode: BackendMode @@ -21,37 +22,45 @@ class ShareRequest: Frontends: list[str] = field(default_factory=list[str]) BasicAuth: list[str] = field(default_factory=list[str]) OauthProvider: str = "" - OauthEmailDomains: list[str] = field(default_factory=list[str]) + OauthEmailAddressPatterns: list[str] = field(default_factory=list[str]) OauthAuthorizationCheckInterval: str = "" + Reserved: bool = False + UniqueName: str = "" + @dataclass class Share: Token: str FrontendEndpoints: list[str] + @dataclass class 
AccessRequest: ShareToken: str + @dataclass class Access: Token: str ShareToken: str BackendMode: BackendMode + @dataclass class SessionMetrics: BytesRead: int BytesWritten: int LastUpdate: int + @dataclass class Metrics: Namespace: str Sessions: dict[str, SessionMetrics] + AuthScheme = str AUTH_SCHEME_NONE: AuthScheme = "none" AUTH_SCHEME_BASIC: AuthScheme = "basic" -AUTH_SCHEME_OAUTH: AuthScheme = "oauth" \ No newline at end of file +AUTH_SCHEME_OAUTH: AuthScheme = "oauth" diff --git a/sdk/python/sdk/zrok/zrok/overview.py b/sdk/python/sdk/zrok/zrok/overview.py index 1df57639..b8d64649 100644 --- a/sdk/python/sdk/zrok/zrok/overview.py +++ b/sdk/python/sdk/zrok/zrok/overview.py @@ -1,9 +1,11 @@ from zrok.environment.root import Root import urllib3 + + def Overview(root: Root) -> str: if not root.IsEnabled(): raise Exception("environment is not enabled; enable with 'zrok enable' first!") - + http = urllib3.PoolManager() apiEndpoint = root.ApiEndpoint().endpoint try: @@ -15,4 +17,4 @@ def Overview(root: Root) -> str: }) except Exception as e: raise Exception("unable to get account overview", e) - return response.data.decode('utf-8') \ No newline at end of file + return response.data.decode('utf-8') diff --git a/sdk/python/sdk/zrok/zrok/share.py b/sdk/python/sdk/zrok/zrok/share.py index 2bdc07eb..3ab2c9f7 100644 --- a/sdk/python/sdk/zrok/zrok/share.py +++ b/sdk/python/sdk/zrok/zrok/share.py @@ -3,10 +3,29 @@ from zrok_api.models import ShareRequest, UnshareRequest, AuthUser from zrok_api.api import ShareApi from zrok import model + +class Share(): + root: Root + request: model.ShareRequest + share: model.Share + + def __init__(self, root: Root, request: model.ShareRequest): + self.root = root + self.request = request + + def __enter__(self) -> model.Share: + self.share = CreateShare(root=self.root, request=self.request) + return self.share + + def __exit__(self, exception_type, exception_value, exception_traceback): + if not self.request.Reserved: + DeleteShare(root=self.root, shr=self.share) + + def CreateShare(root: Root, request: model.ShareRequest) -> model.Share: if not root.IsEnabled(): raise Exception("environment is not enabled; enable with 'zrok enable' first!") - + match request.ShareMode: case model.PRIVATE_SHARE_MODE: out = __newPrivateShare(root, request) @@ -14,7 +33,10 @@ def CreateShare(root: Root, request: model.ShareRequest) -> model.Share: out = __newPublicShare(root, request) case _: raise Exception("unknown share mode " + request.ShareMode) - + out.reserved = request.Reserved + if request.Reserved: + out.unique_name = request.UniqueName + if len(request.BasicAuth) > 0: out.auth_scheme = model.AUTH_SCHEME_BASIC for pair in request.BasicAuth: @@ -22,7 +44,7 @@ def CreateShare(root: Root, request: model.ShareRequest) -> model.Share: if len(tokens) == 2: out.auth_users.append(AuthUser(username=tokens[0].strip(), password=tokens[1].strip())) else: - raise Exception("invalid username:password pair: " + pair) + raise Exception("invalid username:password pair: " + pair) if request.OauthProvider != "": out.auth_scheme = model.AUTH_SCHEME_OAUTH @@ -35,29 +57,30 @@ def CreateShare(root: Root, request: model.ShareRequest) -> model.Share: res = ShareApi(zrok).share(body=out) except Exception as e: raise Exception("unable to create share", e) - + return model.Share(Token=res.shr_token, FrontendEndpoints=res.frontend_proxy_endpoints) - - + + def __newPrivateShare(root: Root, request: model.ShareRequest) -> ShareRequest: return ShareRequest(env_zid=root.env.ZitiIdentity, - 
share_mode=request.ShareMode, - backend_mode=request.BackendMode, - backend_proxy_endpoint=request.Target, - auth_scheme=model.AUTH_SCHEME_NONE - ) + share_mode=request.ShareMode, + backend_mode=request.BackendMode, + backend_proxy_endpoint=request.Target, + auth_scheme=model.AUTH_SCHEME_NONE + ) + def __newPublicShare(root: Root, request: model.ShareRequest) -> ShareRequest: - ret= ShareRequest(env_zid=root.env.ZitiIdentity, - share_mode=request.ShareMode, - frontend_selection=request.Frontends, - backend_mode=request.BackendMode, - backend_proxy_endpoint=request.Target, - auth_scheme=model.AUTH_SCHEME_NONE, - oauth_email_domains=request.OauthEmailDomains, - oauth_authorization_check_interval=request.OauthAuthorizationCheckInterval - ) + ret = ShareRequest(env_zid=root.env.ZitiIdentity, + share_mode=request.ShareMode, + frontend_selection=request.Frontends, + backend_mode=request.BackendMode, + backend_proxy_endpoint=request.Target, + auth_scheme=model.AUTH_SCHEME_NONE, + oauth_email_domains=request.OauthEmailAddressPatterns, + oauth_authorization_check_interval=request.OauthAuthorizationCheckInterval + ) if request.OauthProvider != "": ret.oauth_provider = request.OauthProvider @@ -67,13 +90,13 @@ def __newPublicShare(root: Root, request: model.ShareRequest) -> ShareRequest: def DeleteShare(root: Root, shr: model.Share): req = UnshareRequest(env_zid=root.env.ZitiIdentity, shr_token=shr.Token) - + try: zrok = root.Client() except Exception as e: raise Exception("error getting zrok client", e) - + try: ShareApi(zrok).unshare(body=req) except Exception as e: - raise Exception("error deleting share", e) \ No newline at end of file + raise Exception("error deleting share", e) diff --git a/sdk/python/sdk/zrok/zrok_api/__init__.py b/sdk/python/sdk/zrok/zrok_api/__init__.py index fd150b7b..87130c7c 100644 --- a/sdk/python/sdk/zrok/zrok_api/__init__.py +++ b/sdk/python/sdk/zrok/zrok_api/__init__.py @@ -27,6 +27,7 @@ from zrok_api.configuration import Configuration from zrok_api.models.access_request import AccessRequest from zrok_api.models.access_response import AccessResponse from zrok_api.models.auth_user import AuthUser +from zrok_api.models.change_password_request import ChangePasswordRequest from zrok_api.models.configuration import Configuration from zrok_api.models.create_frontend_request import CreateFrontendRequest from zrok_api.models.create_frontend_response import CreateFrontendResponse @@ -41,6 +42,7 @@ from zrok_api.models.error_message import ErrorMessage from zrok_api.models.frontend import Frontend from zrok_api.models.frontends import Frontends from zrok_api.models.identity_body import IdentityBody +from zrok_api.models.inline_response200 import InlineResponse200 from zrok_api.models.inline_response201 import InlineResponse201 from zrok_api.models.invite_request import InviteRequest from zrok_api.models.invite_token_generate_request import InviteTokenGenerateRequest @@ -57,6 +59,7 @@ from zrok_api.models.register_request import RegisterRequest from zrok_api.models.register_response import RegisterResponse from zrok_api.models.reset_password_request import ResetPasswordRequest from zrok_api.models.reset_password_request_body import ResetPasswordRequestBody +from zrok_api.models.reset_token_body import ResetTokenBody from zrok_api.models.share import Share from zrok_api.models.share_request import ShareRequest from zrok_api.models.share_response import ShareResponse diff --git a/sdk/python/sdk/zrok/zrok_api/api/account_api.py b/sdk/python/sdk/zrok/zrok_api/api/account_api.py 
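The access.py and share.py changes in this patch wrap CreateShare/DeleteShare and CreateAccess/DeleteAccess in context managers. A minimal usage sketch (illustrative only, not part of the patch), assuming an environment already enabled with `zrok enable`; the Target URL/port are placeholders:

    from zrok.environment.root import Load
    from zrok import model
    from zrok.share import Share
    from zrok.access import Access

    env = Load()  # loads the ~/.zrok metadata/config/environment state shown in root.py above

    # a private proxy share pointing at a local service (Target is an assumed example)
    req = model.ShareRequest(
        BackendMode="proxy",
        ShareMode=model.PRIVATE_SHARE_MODE,
        Target="http://localhost:8080",
    )

    with Share(root=env, request=req) as shr:
        print("created share:", shr.Token)
        # bind that share so it can be reached through a local access frontend
        with Access(root=env, request=model.AccessRequest(ShareToken=shr.Token)) as acc:
            print("created access frontend:", acc.Token)
        # leaving the blocks deletes the access and the (non-reserved) share again

Per the Share.__exit__ added above, setting Reserved=True (optionally with a UniqueName) on the request keeps the share alive when the block exits instead of deleting it.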
index 4682090f..dac28edb 100644 --- a/sdk/python/sdk/zrok/zrok_api/api/account_api.py +++ b/sdk/python/sdk/zrok/zrok_api/api/account_api.py @@ -32,6 +32,99 @@ class AccountApi(object): api_client = ApiClient() self.api_client = api_client + def change_password(self, **kwargs): # noqa: E501 + """change_password # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + >>> thread = api.change_password(async_req=True) + >>> result = thread.get() + + :param async_req bool + :param ChangePasswordRequest body: + :return: None + If the method is called asynchronously, + returns the request thread. + """ + kwargs['_return_http_data_only'] = True + if kwargs.get('async_req'): + return self.change_password_with_http_info(**kwargs) # noqa: E501 + else: + (data) = self.change_password_with_http_info(**kwargs) # noqa: E501 + return data + + def change_password_with_http_info(self, **kwargs): # noqa: E501 + """change_password # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + >>> thread = api.change_password_with_http_info(async_req=True) + >>> result = thread.get() + + :param async_req bool + :param ChangePasswordRequest body: + :return: None + If the method is called asynchronously, + returns the request thread. + """ + + all_params = ['body'] # noqa: E501 + all_params.append('async_req') + all_params.append('_return_http_data_only') + all_params.append('_preload_content') + all_params.append('_request_timeout') + + params = locals() + for key, val in six.iteritems(params['kwargs']): + if key not in all_params: + raise TypeError( + "Got an unexpected keyword argument '%s'" + " to method change_password" % key + ) + params[key] = val + del params['kwargs'] + + collection_formats = {} + + path_params = {} + + query_params = [] + + header_params = {} + + form_params = [] + local_var_files = {} + + body_params = None + if 'body' in params: + body_params = params['body'] + # HTTP header `Accept` + header_params['Accept'] = self.api_client.select_header_accept( + ['application/zrok.v1+json']) # noqa: E501 + + # HTTP header `Content-Type` + header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 + ['application/zrok.v1+json']) # noqa: E501 + + # Authentication setting + auth_settings = ['key'] # noqa: E501 + + return self.api_client.call_api( + '/changePassword', 'POST', + path_params, + query_params, + header_params, + body=body_params, + post_params=form_params, + files=local_var_files, + response_type=None, # noqa: E501 + auth_settings=auth_settings, + async_req=params.get('async_req'), + _return_http_data_only=params.get('_return_http_data_only'), + _preload_content=params.get('_preload_content', True), + _request_timeout=params.get('_request_timeout'), + collection_formats=collection_formats) + def invite(self, **kwargs): # noqa: E501 """invite # noqa: E501 @@ -493,6 +586,99 @@ class AccountApi(object): _request_timeout=params.get('_request_timeout'), collection_formats=collection_formats) + def reset_token(self, **kwargs): # noqa: E501 + """reset_token # noqa: E501 + + This method makes a synchronous HTTP request by default. 
To make an + asynchronous HTTP request, please pass async_req=True + >>> thread = api.reset_token(async_req=True) + >>> result = thread.get() + + :param async_req bool + :param ResetTokenBody body: + :return: InlineResponse200 + If the method is called asynchronously, + returns the request thread. + """ + kwargs['_return_http_data_only'] = True + if kwargs.get('async_req'): + return self.reset_token_with_http_info(**kwargs) # noqa: E501 + else: + (data) = self.reset_token_with_http_info(**kwargs) # noqa: E501 + return data + + def reset_token_with_http_info(self, **kwargs): # noqa: E501 + """reset_token # noqa: E501 + + This method makes a synchronous HTTP request by default. To make an + asynchronous HTTP request, please pass async_req=True + >>> thread = api.reset_token_with_http_info(async_req=True) + >>> result = thread.get() + + :param async_req bool + :param ResetTokenBody body: + :return: InlineResponse200 + If the method is called asynchronously, + returns the request thread. + """ + + all_params = ['body'] # noqa: E501 + all_params.append('async_req') + all_params.append('_return_http_data_only') + all_params.append('_preload_content') + all_params.append('_request_timeout') + + params = locals() + for key, val in six.iteritems(params['kwargs']): + if key not in all_params: + raise TypeError( + "Got an unexpected keyword argument '%s'" + " to method reset_token" % key + ) + params[key] = val + del params['kwargs'] + + collection_formats = {} + + path_params = {} + + query_params = [] + + header_params = {} + + form_params = [] + local_var_files = {} + + body_params = None + if 'body' in params: + body_params = params['body'] + # HTTP header `Accept` + header_params['Accept'] = self.api_client.select_header_accept( + ['application/zrok.v1+json']) # noqa: E501 + + # HTTP header `Content-Type` + header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501 + ['application/zrok.v1+json']) # noqa: E501 + + # Authentication setting + auth_settings = ['key'] # noqa: E501 + + return self.api_client.call_api( + '/resetToken', 'POST', + path_params, + query_params, + header_params, + body=body_params, + post_params=form_params, + files=local_var_files, + response_type='InlineResponse200', # noqa: E501 + auth_settings=auth_settings, + async_req=params.get('async_req'), + _return_http_data_only=params.get('_return_http_data_only'), + _preload_content=params.get('_preload_content', True), + _request_timeout=params.get('_request_timeout'), + collection_formats=collection_formats) + def verify(self, **kwargs): # noqa: E501 """verify # noqa: E501 diff --git a/sdk/python/sdk/zrok/zrok_api/configuration.py b/sdk/python/sdk/zrok/zrok_api/configuration.py index 9fd585c4..35e5b1f6 100644 --- a/sdk/python/sdk/zrok/zrok_api/configuration.py +++ b/sdk/python/sdk/zrok/zrok_api/configuration.py @@ -219,9 +219,12 @@ class Configuration(six.with_metaclass(TypeWithDefault, object)): :return: The token for basic HTTP authentication. """ - return urllib3.util.make_headers( - basic_auth=self.username + ':' + self.password - ).get('authorization') + token = "" + if self.username or self.password: + token = urllib3.util.make_headers( + basic_auth=self.username + ':' + self.password + ).get('authorization') + return token def auth_settings(self): """Gets Auth Settings dict for api client. 
diff --git a/sdk/python/sdk/zrok/zrok_api/models/__init__.py b/sdk/python/sdk/zrok/zrok_api/models/__init__.py index dc6bd93d..b792df01 100644 --- a/sdk/python/sdk/zrok/zrok_api/models/__init__.py +++ b/sdk/python/sdk/zrok/zrok_api/models/__init__.py @@ -17,6 +17,7 @@ from __future__ import absolute_import from zrok_api.models.access_request import AccessRequest from zrok_api.models.access_response import AccessResponse from zrok_api.models.auth_user import AuthUser +from zrok_api.models.change_password_request import ChangePasswordRequest from zrok_api.models.configuration import Configuration from zrok_api.models.create_frontend_request import CreateFrontendRequest from zrok_api.models.create_frontend_response import CreateFrontendResponse @@ -31,6 +32,7 @@ from zrok_api.models.error_message import ErrorMessage from zrok_api.models.frontend import Frontend from zrok_api.models.frontends import Frontends from zrok_api.models.identity_body import IdentityBody +from zrok_api.models.inline_response200 import InlineResponse200 from zrok_api.models.inline_response201 import InlineResponse201 from zrok_api.models.invite_request import InviteRequest from zrok_api.models.invite_token_generate_request import InviteTokenGenerateRequest @@ -47,6 +49,7 @@ from zrok_api.models.register_request import RegisterRequest from zrok_api.models.register_response import RegisterResponse from zrok_api.models.reset_password_request import ResetPasswordRequest from zrok_api.models.reset_password_request_body import ResetPasswordRequestBody +from zrok_api.models.reset_token_body import ResetTokenBody from zrok_api.models.share import Share from zrok_api.models.share_request import ShareRequest from zrok_api.models.share_response import ShareResponse diff --git a/sdk/python/sdk/zrok/zrok_api/models/change_password_request.py b/sdk/python/sdk/zrok/zrok_api/models/change_password_request.py new file mode 100644 index 00000000..fe3f4862 --- /dev/null +++ b/sdk/python/sdk/zrok/zrok_api/models/change_password_request.py @@ -0,0 +1,162 @@ +# coding: utf-8 + +""" + zrok + + zrok client access # noqa: E501 + + OpenAPI spec version: 0.3.0 + + Generated by: https://github.com/swagger-api/swagger-codegen.git +""" + +import pprint +import re # noqa: F401 + +import six + +class ChangePasswordRequest(object): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + """ + Attributes: + swagger_types (dict): The key is attribute name + and the value is attribute type. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + """ + swagger_types = { + 'email': 'str', + 'old_password': 'str', + 'new_password': 'str' + } + + attribute_map = { + 'email': 'email', + 'old_password': 'oldPassword', + 'new_password': 'newPassword' + } + + def __init__(self, email=None, old_password=None, new_password=None): # noqa: E501 + """ChangePasswordRequest - a model defined in Swagger""" # noqa: E501 + self._email = None + self._old_password = None + self._new_password = None + self.discriminator = None + if email is not None: + self.email = email + if old_password is not None: + self.old_password = old_password + if new_password is not None: + self.new_password = new_password + + @property + def email(self): + """Gets the email of this ChangePasswordRequest. # noqa: E501 + + + :return: The email of this ChangePasswordRequest. 
# noqa: E501 + :rtype: str + """ + return self._email + + @email.setter + def email(self, email): + """Sets the email of this ChangePasswordRequest. + + + :param email: The email of this ChangePasswordRequest. # noqa: E501 + :type: str + """ + + self._email = email + + @property + def old_password(self): + """Gets the old_password of this ChangePasswordRequest. # noqa: E501 + + + :return: The old_password of this ChangePasswordRequest. # noqa: E501 + :rtype: str + """ + return self._old_password + + @old_password.setter + def old_password(self, old_password): + """Sets the old_password of this ChangePasswordRequest. + + + :param old_password: The old_password of this ChangePasswordRequest. # noqa: E501 + :type: str + """ + + self._old_password = old_password + + @property + def new_password(self): + """Gets the new_password of this ChangePasswordRequest. # noqa: E501 + + + :return: The new_password of this ChangePasswordRequest. # noqa: E501 + :rtype: str + """ + return self._new_password + + @new_password.setter + def new_password(self, new_password): + """Sets the new_password of this ChangePasswordRequest. + + + :param new_password: The new_password of this ChangePasswordRequest. # noqa: E501 + :type: str + """ + + self._new_password = new_password + + def to_dict(self): + """Returns the model properties as a dict""" + result = {} + + for attr, _ in six.iteritems(self.swagger_types): + value = getattr(self, attr) + if isinstance(value, list): + result[attr] = list(map( + lambda x: x.to_dict() if hasattr(x, "to_dict") else x, + value + )) + elif hasattr(value, "to_dict"): + result[attr] = value.to_dict() + elif isinstance(value, dict): + result[attr] = dict(map( + lambda item: (item[0], item[1].to_dict()) + if hasattr(item[1], "to_dict") else item, + value.items() + )) + else: + result[attr] = value + if issubclass(ChangePasswordRequest, dict): + for key, value in self.items(): + result[key] = value + + return result + + def to_str(self): + """Returns the string representation of the model""" + return pprint.pformat(self.to_dict()) + + def __repr__(self): + """For `print` and `pprint`""" + return self.to_str() + + def __eq__(self, other): + """Returns true if both objects are equal""" + if not isinstance(other, ChangePasswordRequest): + return False + + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Returns true if both objects are not equal""" + return not self == other diff --git a/sdk/python/sdk/zrok/zrok_api/models/frontend.py b/sdk/python/sdk/zrok/zrok_api/models/frontend.py index fe85cc8c..f45b26a3 100644 --- a/sdk/python/sdk/zrok/zrok_api/models/frontend.py +++ b/sdk/python/sdk/zrok/zrok_api/models/frontend.py @@ -29,6 +29,7 @@ class Frontend(object): """ swagger_types = { 'id': 'int', + 'token': 'str', 'shr_token': 'str', 'z_id': 'str', 'created_at': 'int', @@ -37,15 +38,17 @@ class Frontend(object): attribute_map = { 'id': 'id', + 'token': 'token', 'shr_token': 'shrToken', 'z_id': 'zId', 'created_at': 'createdAt', 'updated_at': 'updatedAt' } - def __init__(self, id=None, shr_token=None, z_id=None, created_at=None, updated_at=None): # noqa: E501 + def __init__(self, id=None, token=None, shr_token=None, z_id=None, created_at=None, updated_at=None): # noqa: E501 """Frontend - a model defined in Swagger""" # noqa: E501 self._id = None + self._token = None self._shr_token = None self._z_id = None self._created_at = None @@ -53,6 +56,8 @@ class Frontend(object): self.discriminator = None if id is not None: self.id = id + if token is not None: + self.token = 
token if shr_token is not None: self.shr_token = shr_token if z_id is not None: @@ -83,6 +88,27 @@ class Frontend(object): self._id = id + @property + def token(self): + """Gets the token of this Frontend. # noqa: E501 + + + :return: The token of this Frontend. # noqa: E501 + :rtype: str + """ + return self._token + + @token.setter + def token(self, token): + """Sets the token of this Frontend. + + + :param token: The token of this Frontend. # noqa: E501 + :type: str + """ + + self._token = token + @property def shr_token(self): """Gets the shr_token of this Frontend. # noqa: E501 diff --git a/sdk/python/sdk/zrok/zrok_api/models/inline_response200.py b/sdk/python/sdk/zrok/zrok_api/models/inline_response200.py new file mode 100644 index 00000000..a972a3fb --- /dev/null +++ b/sdk/python/sdk/zrok/zrok_api/models/inline_response200.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + zrok + + zrok client access # noqa: E501 + + OpenAPI spec version: 0.3.0 + + Generated by: https://github.com/swagger-api/swagger-codegen.git +""" + +import pprint +import re # noqa: F401 + +import six + +class InlineResponse200(object): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + """ + Attributes: + swagger_types (dict): The key is attribute name + and the value is attribute type. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + """ + swagger_types = { + 'token': 'str' + } + + attribute_map = { + 'token': 'token' + } + + def __init__(self, token=None): # noqa: E501 + """InlineResponse200 - a model defined in Swagger""" # noqa: E501 + self._token = None + self.discriminator = None + if token is not None: + self.token = token + + @property + def token(self): + """Gets the token of this InlineResponse200. # noqa: E501 + + + :return: The token of this InlineResponse200. # noqa: E501 + :rtype: str + """ + return self._token + + @token.setter + def token(self, token): + """Sets the token of this InlineResponse200. + + + :param token: The token of this InlineResponse200. 
# noqa: E501 + :type: str + """ + + self._token = token + + def to_dict(self): + """Returns the model properties as a dict""" + result = {} + + for attr, _ in six.iteritems(self.swagger_types): + value = getattr(self, attr) + if isinstance(value, list): + result[attr] = list(map( + lambda x: x.to_dict() if hasattr(x, "to_dict") else x, + value + )) + elif hasattr(value, "to_dict"): + result[attr] = value.to_dict() + elif isinstance(value, dict): + result[attr] = dict(map( + lambda item: (item[0], item[1].to_dict()) + if hasattr(item[1], "to_dict") else item, + value.items() + )) + else: + result[attr] = value + if issubclass(InlineResponse200, dict): + for key, value in self.items(): + result[key] = value + + return result + + def to_str(self): + """Returns the string representation of the model""" + return pprint.pformat(self.to_dict()) + + def __repr__(self): + """For `print` and `pprint`""" + return self.to_str() + + def __eq__(self, other): + """Returns true if both objects are equal""" + if not isinstance(other, InlineResponse200): + return False + + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Returns true if both objects are not equal""" + return not self == other diff --git a/sdk/python/sdk/zrok/zrok_api/models/reset_token_body.py b/sdk/python/sdk/zrok/zrok_api/models/reset_token_body.py new file mode 100644 index 00000000..a7004ed3 --- /dev/null +++ b/sdk/python/sdk/zrok/zrok_api/models/reset_token_body.py @@ -0,0 +1,110 @@ +# coding: utf-8 + +""" + zrok + + zrok client access # noqa: E501 + + OpenAPI spec version: 0.3.0 + + Generated by: https://github.com/swagger-api/swagger-codegen.git +""" + +import pprint +import re # noqa: F401 + +import six + +class ResetTokenBody(object): + """NOTE: This class is auto generated by the swagger code generator program. + + Do not edit the class manually. + """ + """ + Attributes: + swagger_types (dict): The key is attribute name + and the value is attribute type. + attribute_map (dict): The key is attribute name + and the value is json key in definition. + """ + swagger_types = { + 'email_address': 'str' + } + + attribute_map = { + 'email_address': 'emailAddress' + } + + def __init__(self, email_address=None): # noqa: E501 + """ResetTokenBody - a model defined in Swagger""" # noqa: E501 + self._email_address = None + self.discriminator = None + if email_address is not None: + self.email_address = email_address + + @property + def email_address(self): + """Gets the email_address of this ResetTokenBody. # noqa: E501 + + + :return: The email_address of this ResetTokenBody. # noqa: E501 + :rtype: str + """ + return self._email_address + + @email_address.setter + def email_address(self, email_address): + """Sets the email_address of this ResetTokenBody. + + + :param email_address: The email_address of this ResetTokenBody. 
# noqa: E501 + :type: str + """ + + self._email_address = email_address + + def to_dict(self): + """Returns the model properties as a dict""" + result = {} + + for attr, _ in six.iteritems(self.swagger_types): + value = getattr(self, attr) + if isinstance(value, list): + result[attr] = list(map( + lambda x: x.to_dict() if hasattr(x, "to_dict") else x, + value + )) + elif hasattr(value, "to_dict"): + result[attr] = value.to_dict() + elif isinstance(value, dict): + result[attr] = dict(map( + lambda item: (item[0], item[1].to_dict()) + if hasattr(item[1], "to_dict") else item, + value.items() + )) + else: + result[attr] = value + if issubclass(ResetTokenBody, dict): + for key, value in self.items(): + result[key] = value + + return result + + def to_str(self): + """Returns the string representation of the model""" + return pprint.pformat(self.to_dict()) + + def __repr__(self): + """For `print` and `pprint`""" + return self.to_str() + + def __eq__(self, other): + """Returns true if both objects are equal""" + if not isinstance(other, ResetTokenBody): + return False + + return self.__dict__ == other.__dict__ + + def __ne__(self, other): + """Returns true if both objects are not equal""" + return not self == other diff --git a/sdk/python/sdk/zrok/zrok_api/models/share_request.py b/sdk/python/sdk/zrok/zrok_api/models/share_request.py index fdbc7477..331fa41a 100644 --- a/sdk/python/sdk/zrok/zrok_api/models/share_request.py +++ b/sdk/python/sdk/zrok/zrok_api/models/share_request.py @@ -38,7 +38,8 @@ class ShareRequest(object): 'oauth_provider': 'str', 'oauth_email_domains': 'list[str]', 'oauth_authorization_check_interval': 'str', - 'reserved': 'bool' + 'reserved': 'bool', + 'unique_name': 'str' } attribute_map = { @@ -52,10 +53,11 @@ class ShareRequest(object): 'oauth_provider': 'oauthProvider', 'oauth_email_domains': 'oauthEmailDomains', 'oauth_authorization_check_interval': 'oauthAuthorizationCheckInterval', - 'reserved': 'reserved' + 'reserved': 'reserved', + 'unique_name': 'uniqueName' } - def __init__(self, env_zid=None, share_mode=None, frontend_selection=None, backend_mode=None, backend_proxy_endpoint=None, auth_scheme=None, auth_users=None, oauth_provider=None, oauth_email_domains=None, oauth_authorization_check_interval=None, reserved=None): # noqa: E501 + def __init__(self, env_zid=None, share_mode=None, frontend_selection=None, backend_mode=None, backend_proxy_endpoint=None, auth_scheme=None, auth_users=None, oauth_provider=None, oauth_email_domains=None, oauth_authorization_check_interval=None, reserved=None, unique_name=None): # noqa: E501 """ShareRequest - a model defined in Swagger""" # noqa: E501 self._env_zid = None self._share_mode = None @@ -68,6 +70,7 @@ class ShareRequest(object): self._oauth_email_domains = None self._oauth_authorization_check_interval = None self._reserved = None + self._unique_name = None self.discriminator = None if env_zid is not None: self.env_zid = env_zid @@ -91,6 +94,8 @@ class ShareRequest(object): self.oauth_authorization_check_interval = oauth_authorization_check_interval if reserved is not None: self.reserved = reserved + if unique_name is not None: + self.unique_name = unique_name @property def env_zid(self): @@ -179,7 +184,7 @@ class ShareRequest(object): :param backend_mode: The backend_mode of this ShareRequest. 
# noqa: E501 :type: str """ - allowed_values = ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy"] # noqa: E501 + allowed_values = ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive", "socks"] # noqa: E501 if backend_mode not in allowed_values: raise ValueError( "Invalid value for `backend_mode` ({0}), must be one of {1}" # noqa: E501 @@ -341,6 +346,27 @@ class ShareRequest(object): self._reserved = reserved + @property + def unique_name(self): + """Gets the unique_name of this ShareRequest. # noqa: E501 + + + :return: The unique_name of this ShareRequest. # noqa: E501 + :rtype: str + """ + return self._unique_name + + @unique_name.setter + def unique_name(self, unique_name): + """Sets the unique_name of this ShareRequest. + + + :param unique_name: The unique_name of this ShareRequest. # noqa: E501 + :type: str + """ + + self._unique_name = unique_name + def to_dict(self): """Returns the model properties as a dict""" result = {} diff --git a/sdk/python/sdk/zrok/zrok_api/rest.py b/sdk/python/sdk/zrok/zrok_api/rest.py index 21bc091b..311a61cb 100644 --- a/sdk/python/sdk/zrok/zrok_api/rest.py +++ b/sdk/python/sdk/zrok/zrok_api/rest.py @@ -42,11 +42,11 @@ class RESTResponse(io.IOBase): def getheaders(self): """Returns a dictionary of the response headers.""" - return self.urllib3_response.getheaders() + return self.urllib3_response.headers def getheader(self, name, default=None): """Returns a given response header.""" - return self.urllib3_response.getheader(name, default) + return self.urllib3_response.headers.get(name, default) class RESTClientObject(object): diff --git a/specs/zrok.yml b/specs/zrok.yml index 1f5ebeb8..74b522bb 100644 --- a/specs/zrok.yml +++ b/specs/zrok.yml @@ -15,6 +15,32 @@ paths: # # account # + /changePassword: + post: + tags: + - account + security: + - key: [] + operationId: changePassword + parameters: + - name: body + in: body + schema: + $ref: '#/definitions/changePasswordRequest' + responses: + 200: + description: changed password + 400: + description: password not changed + 401: + description: unauthorized + 422: + description: password validation failure + schema: + $ref: '#/definitions/errorMessage' + 500: + description: internal server error + /invite: post: tags: @@ -121,6 +147,32 @@ paths: 500: description: internal server error + /resetToken: + post: + tags: + - account + security: + - key: [] + operationId: resetToken + parameters: + - name: body + in: body + schema: + properties: + emailAddress: + type: string + responses: + 200: + description: token reset + schema: + properties: + token: + type: string + 404: + description: account not found + 500: + description: internal server error + /verify: post: tags: @@ -577,6 +629,8 @@ paths: description: unauthorized 404: description: not found + 409: + description: conflict 422: description: unprocessable 500: @@ -675,6 +729,16 @@ definitions: password: type: string + changePasswordRequest: + type: object + properties: + email: + type: string + oldPassword: + type: string + newPassword: + type: string + configuration: type: object properties: @@ -778,6 +842,8 @@ definitions: properties: id: type: integer + token: + type: string shrToken: type: string zId: @@ -971,7 +1037,7 @@ definitions: type: string backendMode: type: string - enum: ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive"] + enum: ["proxy", "web", "tcpTunnel", "udpTunnel", "caddy", "drive", "socks"] backendProxyEndpoint: type: string authScheme: @@ -991,6 +1057,8 @@ definitions: type: string reserved: type: boolean + uniqueName: + 
type: string shareResponse: type: object diff --git a/ui/package-lock.json b/ui/package-lock.json index f391254f..41e6f732 100644 --- a/ui/package-lock.json +++ b/ui/package-lock.json @@ -6197,9 +6197,9 @@ } }, "node_modules/caniuse-lite": { - "version": "1.0.30001519", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001519.tgz", - "integrity": "sha512-0QHgqR+Jv4bxHMp8kZ1Kn8CH55OikjKJ6JmKkZYP1F3D7w+lnFXF70nG5eNfsZS89jadi5Ywy5UCSKLAglIRkg==", + "version": "1.0.30001587", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001587.tgz", + "integrity": "sha512-HMFNotUmLXn71BQxg8cijvqxnIAofforZOwGsxyXJ0qugTdspUF4sPSJ2vhgprHCB996tIDzEq1ubumPDV8ULA==", "funding": [ { "type": "opencollective", @@ -24422,9 +24422,9 @@ } }, "caniuse-lite": { - "version": "1.0.30001519", - "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001519.tgz", - "integrity": "sha512-0QHgqR+Jv4bxHMp8kZ1Kn8CH55OikjKJ6JmKkZYP1F3D7w+lnFXF70nG5eNfsZS89jadi5Ywy5UCSKLAglIRkg==" + "version": "1.0.30001587", + "resolved": "https://registry.npmjs.org/caniuse-lite/-/caniuse-lite-1.0.30001587.tgz", + "integrity": "sha512-HMFNotUmLXn71BQxg8cijvqxnIAofforZOwGsxyXJ0qugTdspUF4sPSJ2vhgprHCB996tIDzEq1ubumPDV8ULA==" }, "canvas-color-tracker": { "version": "1.1.6", diff --git a/ui/src/App.js b/ui/src/App.js index aed64f78..a7d14e2d 100644 --- a/ui/src/App.js +++ b/ui/src/App.js @@ -9,12 +9,21 @@ const App = () => { const [user, setUser] = useState(); useEffect(() => { - const localUser = localStorage.getItem("user"); - if(localUser) { - setUser(JSON.parse(localUser)); - console.log("reloaded user", localUser); + function checkUserData() { + const localUser = localStorage.getItem("user"); + if(localUser) { + console.log(localUser) + setUser(JSON.parse(localUser)); + console.log("reloaded user", localUser); + } } - }, []); + + document.addEventListener('storage', checkUserData) + + return () => { + document.removeEventListener('storage', checkUserData) + } + }, []); const logout = () => { setUser(null); diff --git a/ui/src/api/account.js b/ui/src/api/account.js new file mode 100644 index 00000000..81447ad0 --- /dev/null +++ b/ui/src/api/account.js @@ -0,0 +1,181 @@ +/** @module account */ +// Auto-generated, edits will be overwritten +import * as gateway from './gateway' + +/** + * @param {object} options Optional options + * @param {module:types.changePasswordRequest} [options.body] + * @return {Promise} changed password + */ +export function changePassword(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(changePasswordOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {module:types.inviteRequest} [options.body] + * @return {Promise} invitation created + */ +export function invite(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(inviteOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {module:types.loginRequest} [options.body] + * @return {Promise} login successful + */ +export function login(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(loginOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {module:types.registerRequest} [options.body] + * @return {Promise} account created + */ +export function register(options) { + if 
(!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(registerOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {module:types.resetPasswordRequest} [options.body] + * @return {Promise} password reset + */ +export function resetPassword(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(resetPasswordOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {object} [options.body] + * @return {Promise} forgot password request created + */ +export function resetPasswordRequest(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(resetPasswordRequestOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {object} [options.body] + * @return {Promise} token reset + */ +export function resetToken(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(resetTokenOperation, parameters) +} + +/** + * @param {object} options Optional options + * @param {module:types.verifyRequest} [options.body] + * @return {Promise} token ready + */ +export function verify(options) { + if (!options) options = {} + const parameters = { + body: { + body: options.body + } + } + return gateway.request(verifyOperation, parameters) +} + +const changePasswordOperation = { + path: '/changePassword', + contentTypes: ['application/zrok.v1+json'], + method: 'post', + security: [ + { + id: 'key' + } + ] +} + +const inviteOperation = { + path: '/invite', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} + +const loginOperation = { + path: '/login', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} + +const registerOperation = { + path: '/register', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} + +const resetPasswordOperation = { + path: '/resetPassword', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} + +const resetPasswordRequestOperation = { + path: '/resetPasswordRequest', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} + +const resetTokenOperation = { + path: '/resetToken', + contentTypes: ['application/zrok.v1+json'], + method: 'post', + security: [ + { + id: 'key' + } + ] +} + +const verifyOperation = { + path: '/verify', + contentTypes: ['application/zrok.v1+json'], + method: 'post' +} diff --git a/ui/src/api/types.js b/ui/src/api/types.js new file mode 100644 index 00000000..bcadc5f9 --- /dev/null +++ b/ui/src/api/types.js @@ -0,0 +1,336 @@ +/** @module types */ +// Auto-generated, edits will be overwritten + +/** + * @typedef accessRequest + * @memberof module:types + * + * @property {string} envZId + * @property {string} shrToken + */ + +/** + * @typedef accessResponse + * @memberof module:types + * + * @property {string} frontendToken + * @property {string} backendMode + */ + +/** + * @typedef authUser + * @memberof module:types + * + * @property {string} username + * @property {string} password + */ + +/** + * @typedef changePasswordRequest + * @memberof module:types + * + * @property {string} email + * @property {string} oldPassword + * @property {string} newPassword + */ + +/** + * @typedef configuration + * @memberof module:types + * + * @property {string} version + * @property {string} touLink + * @property {boolean} invitesOpen + * @property {boolean} 
requiresInviteToken + * @property {string} inviteTokenContact + * @property {module:types.passwordRequirements} passwordRequirements + */ + +/** + * @typedef createFrontendRequest + * @memberof module:types + * + * @property {string} zId + * @property {string} url_template + * @property {string} public_name + */ + +/** + * @typedef createFrontendResponse + * @memberof module:types + * + * @property {string} token + */ + +/** + * @typedef deleteFrontendRequest + * @memberof module:types + * + * @property {string} frontendToken + */ + +/** + * @typedef disableRequest + * @memberof module:types + * + * @property {string} identity + */ + +/** + * @typedef enableRequest + * @memberof module:types + * + * @property {string} description + * @property {string} host + */ + +/** + * @typedef enableResponse + * @memberof module:types + * + * @property {string} identity + * @property {string} cfg + */ + +/** + * @typedef environment + * @memberof module:types + * + * @property {string} description + * @property {string} host + * @property {string} address + * @property {string} zId + * @property {module:types.sparkData} activity + * @property {boolean} limited + * @property {number} createdAt + * @property {number} updatedAt + */ + +/** + * @typedef environmentAndResources + * @memberof module:types + * + * @property {module:types.environment} environment + * @property {module:types.frontends} frontends + * @property {module:types.shares} shares + */ + +/** + * @typedef frontend + * @memberof module:types + * + * @property {number} id + * @property {string} token + * @property {string} shrToken + * @property {string} zId + * @property {number} createdAt + * @property {number} updatedAt + */ + +/** + * @typedef inviteTokenGenerateRequest + * @memberof module:types + * + * @property {string[]} tokens + */ + +/** + * @typedef inviteRequest + * @memberof module:types + * + * @property {string} email + * @property {string} token + */ + +/** + * @typedef loginRequest + * @memberof module:types + * + * @property {string} email + * @property {string} password + */ + +/** + * @typedef metrics + * @memberof module:types + * + * @property {string} scope + * @property {string} id + * @property {number} period + * @property {module:types.metricsSample[]} samples + */ + +/** + * @typedef metricsSample + * @memberof module:types + * + * @property {number} rx + * @property {number} tx + * @property {number} timestamp + */ + +/** + * @typedef overview + * @memberof module:types + * + * @property {boolean} accountLimited + * @property {module:types.environmentAndResources[]} environments + */ + +/** + * @typedef passwordRequirements + * @memberof module:types + * + * @property {number} length + * @property {boolean} requireCapital + * @property {boolean} requireNumeric + * @property {boolean} requireSpecial + * @property {string} validSpecialCharacters + */ + +/** + * @typedef principal + * @memberof module:types + * + * @property {number} id + * @property {string} email + * @property {string} token + * @property {boolean} limitless + * @property {boolean} admin + */ + +/** + * @typedef publicFrontend + * @memberof module:types + * + * @property {string} token + * @property {string} zId + * @property {string} urlTemplate + * @property {string} publicName + * @property {number} createdAt + * @property {number} updatedAt + */ + +/** + * @typedef registerRequest + * @memberof module:types + * + * @property {string} token + * @property {string} password + */ + +/** + * @typedef registerResponse + * @memberof module:types + 
* + * @property {string} token + */ + +/** + * @typedef resetPasswordRequest + * @memberof module:types + * + * @property {string} token + * @property {string} password + */ + +/** + * @typedef share + * @memberof module:types + * + * @property {string} token + * @property {string} zId + * @property {string} shareMode + * @property {string} backendMode + * @property {string} frontendSelection + * @property {string} frontendEndpoint + * @property {string} backendProxyEndpoint + * @property {boolean} reserved + * @property {module:types.sparkData} activity + * @property {boolean} limited + * @property {number} createdAt + * @property {number} updatedAt + */ + +/** + * @typedef shareRequest + * @memberof module:types + * + * @property {string} envZId + * @property {string} shareMode + * @property {string[]} frontendSelection + * @property {string} backendMode + * @property {string} backendProxyEndpoint + * @property {string} authScheme + * @property {module:types.authUser[]} authUsers + * @property {string} oauthProvider + * @property {string[]} oauthEmailDomains + * @property {string} oauthAuthorizationCheckInterval + * @property {boolean} reserved + * @property {string} uniqueName + */ + +/** + * @typedef shareResponse + * @memberof module:types + * + * @property {string[]} frontendProxyEndpoints + * @property {string} shrToken + */ + +/** + * @typedef sparkDataSample + * @memberof module:types + * + * @property {number} rx + * @property {number} tx + */ + +/** + * @typedef unaccessRequest + * @memberof module:types + * + * @property {string} frontendToken + * @property {string} envZId + * @property {string} shrToken + */ + +/** + * @typedef unshareRequest + * @memberof module:types + * + * @property {string} envZId + * @property {string} shrToken + * @property {boolean} reserved + */ + +/** + * @typedef updateFrontendRequest + * @memberof module:types + * + * @property {string} frontendToken + * @property {string} publicName + * @property {string} urlTemplate + */ + +/** + * @typedef updateShareRequest + * @memberof module:types + * + * @property {string} shrToken + * @property {string} backendProxyEndpoint + */ + +/** + * @typedef verifyRequest + * @memberof module:types + * + * @property {string} token + */ + +/** + * @typedef verifyResponse + * @memberof module:types + * + * @property {string} email + */ diff --git a/ui/src/components/password.js b/ui/src/components/password.js index 5e83d251..2051433e 100644 --- a/ui/src/components/password.js +++ b/ui/src/components/password.js @@ -45,28 +45,28 @@ const PasswordForm = (props) => { { (props.passwordLength > 0 || props.passwordRequireCapital || props.passwordRequireNumeric || props.passwordRequireSpecial) && -

Password Requirements + Password Requirements
} { props.passwordLength > 0 && - Minimum Length of {props.passwordLength} + Minimum Length of {props.passwordLength} } { props.passwordRequireCapital && - Requires at least 1 Capital Letter + Requires at least 1 Capital Letter } { props.passwordRequireNumeric && - Requires at least 1 Digit + Requires at least 1 Digit } { props.passwordRequireSpecial && - Requires at least 1 Special Character - {props.passwordValidSpecialCharacters.split("").join(" ")} + Requires at least 1 Special Character + {props.passwordValidSpecialCharacters.split("").join(" ")} } - + { if(row.property.endsWith("At")) { return new Date(row.value).toLocaleString(); } - return row.value; + return row.value.toString(); }; const PropertyTable = (props) => { diff --git a/ui/src/console/detail/access/AccessDetail.js b/ui/src/console/detail/access/AccessDetail.js index 0d2a031e..c2b3e5da 100644 --- a/ui/src/console/detail/access/AccessDetail.js +++ b/ui/src/console/detail/access/AccessDetail.js @@ -4,6 +4,7 @@ import {useEffect, useState} from "react"; import {MetadataApi} from "../../../api/src"; import {Tab, Tabs} from "react-bootstrap"; import DetailTab from "./DetailTab"; +import ActionsTab from "./ActionsTab"; const AccessDetail = (props) => { const [detail, setDetail] = useState({}); @@ -18,11 +19,14 @@ const AccessDetail = (props) => { return (
-

{" "}{detail.shrToken} ({detail.id}) + {" "}{detail.token} + + +
); diff --git a/ui/src/console/detail/access/ActionsTab.js b/ui/src/console/detail/access/ActionsTab.js new file mode 100644 index 00000000..37b978a5 --- /dev/null +++ b/ui/src/console/detail/access/ActionsTab.js @@ -0,0 +1,27 @@ +import * as share from "../../../api/share"; +import {Button} from "react-bootstrap"; + +const ActionsTab = (props) => { + const deleteFrontend = (feToken, shrToken, envZId) => { + if(window.confirm("Really delete access frontend '" + feToken + "' for share '" + shrToken + "'?")) { + share.unaccess({body: {frontendToken: feToken, shrToken: shrToken, envZId: envZId}}).then(resp => { + console.log(resp); + }); + } + } + + return ( +
+ Delete your access frontend '{props.frontend.token}' for share '{props.frontend.shrToken}'? + + This will remove your zrok access frontend from this environment. You will still need to + terminate the corresponding zrok access process in your local environment. + +
+ ); +}; + +export default ActionsTab; \ No newline at end of file diff --git a/ui/src/console/detail/account/AccountDetail.js b/ui/src/console/detail/account/AccountDetail.js index 5ca50391..07736825 100644 --- a/ui/src/console/detail/account/AccountDetail.js +++ b/ui/src/console/detail/account/AccountDetail.js @@ -6,6 +6,7 @@ import SecretToggle from "../../SecretToggle"; import React from "react"; import MetricsTab from "./MetricsTab"; import EnvironmentsTab from "./EnvironmentsTab"; +import ActionsTab from "./ActionsTab"; const AccountDetail = (props) => { const customProperties = { @@ -25,6 +26,9 @@ const AccountDetail = (props) => { + + + ); diff --git a/ui/src/console/detail/account/ActionsTab.js b/ui/src/console/detail/account/ActionsTab.js new file mode 100644 index 00000000..cf825dbe --- /dev/null +++ b/ui/src/console/detail/account/ActionsTab.js @@ -0,0 +1,50 @@ +import React, {useState} from "react"; +import ChangePassword from "./actions/ChangePassword"; +import {Button} from "react-bootstrap"; +import RegenerateToken from "./actions/RegenerateToken"; + +const ActionsTab = (props) => { + const [showRegenerateTokenModal, setShowRegenerateTokenModal] = useState(false); + const openRegenerateTokenModal = () => setShowRegenerateTokenModal(true); + const closeRegenerateTokenModal = () => setShowRegenerateTokenModal(false); + + const [showChangePasswordModal, setShowChangePasswordModal] = useState(false); + const openChangePasswordModal = () => setShowChangePasswordModal(true); + const closeChangePasswordModal = () => setShowChangePasswordModal(false); + + return ( +
+ + Change Password? + Change your password here. Note that this will not log you out of any already logged in sessions. + + + + + + + + Regenerate your account token (DANGER!)? + + Regenerating your account token will stop all environments and shares from operating properly! + + You will need to manually edit your + ${HOME}/.zrok/environment.json files (in each environment) to use the new + zrok_token. Updating these files will restore the functionality of your environments. + + Alternatively, you can just zrok disable any enabled environments and re-enable using the + new account token. Running zrok disable will delete your environments and + any shares they contain (including reserved shares). So if you have environments and reserved shares you + need to preserve, your best bet is to update the zrok_token in those environments as + described above. + + + + +
+ ) +} + +export default ActionsTab; \ No newline at end of file diff --git a/ui/src/console/detail/account/actions/ChangePassword.js b/ui/src/console/detail/account/actions/ChangePassword.js new file mode 100644 index 00000000..d0dab991 --- /dev/null +++ b/ui/src/console/detail/account/actions/ChangePassword.js @@ -0,0 +1,142 @@ +import React, {useEffect, useState} from "react"; +import * as account from "../../../../api/account"; +import {Button, Container, Form, Row} from "react-bootstrap"; +import Modal from "react-bootstrap/Modal"; +import * as metadata from "../../../../api/metadata"; + +const validatePassword = (password, l, rc, rn, rs, spc, cb) => { + if(password.length < l) { + cb(false, "Entered password is too short! (" + l + " characters minimum)!"); + return; + } + if(rc && !/[A-Z]/.test(password)) { + cb(false, "Entered password requires a capital letter!"); + return; + } + if(rn && !/\d/.test(password)) { + cb(false, "Entered password requires a digit!"); + return; + } + if(rs) { + if(!spc.split("").some(v => password.includes(v))) { + cb(false, "Entered password requires a special character!"); + return; + } + } + return cb(true, ""); +} + +const ChangePassword = (props) => { + const [oldPassword, setOldPassword] = useState(''); + const [newPassword, setNewPassword] = useState(''); + const [confirmPassword, setConfirmPassword] = useState(''); + const [message, setMessage] = useState(''); + + const [passwordLength, setPasswordLength] = useState(8); + const [passwordRequireCapital, setPasswordRequireCapital] = useState(true); + const [passwordRequireNumeric, setPasswordRequireNumeric] = useState(true); + const [passwordRequireSpecial, setPasswordRequireSpecial] = useState(true); + const [passwordValidSpecialCharacters, setPasswordValidSpecialCharacters] = useState(""); + + useEffect(() => { + metadata.configuration().then(resp => { + if (!resp.error) { + setPasswordLength(resp.data.passwordRequirements.length) + setPasswordRequireCapital(resp.data.passwordRequirements.requireCapital) + setPasswordRequireNumeric(resp.data.passwordRequirements.requireNumeric) + setPasswordRequireSpecial(resp.data.passwordRequirements.requireSpecial) + setPasswordValidSpecialCharacters(resp.data.passwordRequirements.validSpecialCharacters) + } + }).catch(err => { + console.log("error getting configuration", err); + }); + }, []) + + const handleSubmit = async e => { + e.preventDefault(); + + let ok = false; + validatePassword(newPassword, + passwordLength, + passwordRequireCapital, + passwordRequireNumeric, + passwordRequireSpecial, + passwordValidSpecialCharacters, (isOk, msg) => { ok = isOk; setMessage(msg); }) + if(!ok) { + return; + } + + if(confirmPassword !== newPassword) { + setMessage("New password and confirmation do not match!"); + return; + } + + account.changePassword({ body: { oldPassword: oldPassword, newPassword: newPassword, email: props.user.email } }) + .then(resp => { + if (!resp.error) { + console.log("resp", resp) + setMessage("Password successfully changed!"); + } else { + setMessage("Failure changing password! Is old password correct?"); + } + }).catch(resp => { + console.log("resp", resp) + setMessage("Failure changing password! Is old password correct?") + }) + + } + + let hide = () => { + props.onHide(); + setMessage(""); + setOldPassword(""); + setNewPassword(""); + setConfirmPassword(""); + } + + return ( + + Change Password + +
+ + + { setMessage(''); setOldPassword(t.target.value); }} + value={oldPassword} + style={{ marginBottom: "1em" }} + /> + + + { setMessage(''); setNewPassword(t.target.value); }} + value={newPassword} + style={{ marginBottom: "1em" }} + /> + + + { setMessage(''); setConfirmPassword(t.target.value); }} + value={confirmPassword} + /> + + +

{message}

+
+ + + +
+
+
+
+ ); +} + +export default ChangePassword; \ No newline at end of file diff --git a/ui/src/console/detail/account/actions/RegenerateToken.js b/ui/src/console/detail/account/actions/RegenerateToken.js new file mode 100644 index 00000000..e74af0c6 --- /dev/null +++ b/ui/src/console/detail/account/actions/RegenerateToken.js @@ -0,0 +1,82 @@ +import Modal from "react-bootstrap/Modal"; +import {Button, Container, Form, Row} from "react-bootstrap"; +import React, {useState} from "react"; +import * as account from "../../../../api/account"; + +const RegenerateToken = (props) => { + const [confirmEmail, setConfirmEmail] = useState(''); + const [message, setMessage] = useState(''); + + const hide = () => { + props.onHide(); + setConfirmEmail(''); + setMessage(''); + }; + + const handleSubmit = (event) => { + event.preventDefault(); + + if(confirmEmail !== props.user.email) { + setMessage("Email address confirmation does not match!"); + return; + } + + account.resetToken({body: {emailAddress: props.user.email}}) + .then(resp => { + console.log(resp); + let user = JSON.parse(localStorage.getItem('user')); + localStorage.setItem('user', JSON.stringify({ + email: user.email, + token: resp.data.token + })); + document.dispatchEvent(new Event('storage')); + setMessage("Your new account token is: " + resp.data.token); + }).catch(err => { + setMessage("Account token regeneration failed!"); + console.log("account token regeneration failed", err); + }); + }; + + return ( + + Are you very sure? + +
+ +

+ Did you read the warning on the previous screen? This action will reset all of your active + environments and shares! +

+

+ You will need to update each of + your ${HOME}/.zrok/environments.yml files + with your new token to allow them to continue working! +

+

+ Hit Escape or click the 'X' to abort! +

+ + { + setMessage(''); + setConfirmEmail(t.target.value); + }} + value={confirmEmail} + style={{marginBottom: "1em"}} + /> + + +

{message}

+
+ + + +
+
+
+
+ ); +}; + +export default RegenerateToken; \ No newline at end of file diff --git a/ui/src/console/login/Login.js b/ui/src/console/login/Login.js index a107197f..768aa722 100644 --- a/ui/src/console/login/Login.js +++ b/ui/src/console/login/Login.js @@ -33,9 +33,19 @@ const Login = (props) => { account.login({body: {"email": email, "password": password}}) .then(resp => { - let user = { - "email": email, - "token": resp + if (!resp.error) { + let user = { + "email": email.toLowerCase(), + "token": resp.data + } + props.loginSuccess(user) + localStorage.setItem('user', JSON.stringify(user)) + console.log(user) + console.log('login succeeded', resp) + document.dispatchEvent(new Event('storage')) + } else { + console.log('login failed') + setMessage(errorMessage); } props.loginSuccess(user) localStorage.setItem('user', JSON.stringify(user)) diff --git a/ui/src/console/visualizer/graph.js b/ui/src/console/visualizer/graph.js index 3f1c28f2..bc450378 100644 --- a/ui/src/console/visualizer/graph.js +++ b/ui/src/console/visualizer/graph.js @@ -78,7 +78,7 @@ export const mergeGraph = (oldGraph, user, accountLimited, newOverview) => { id: 'ac:' + fe.id, feId: fe.id, target: fe.shrToken, - label: fe.shrToken, + label: fe.token, type: "frontend", val: 50 } diff --git a/util/uniqueName.go b/util/uniqueName.go new file mode 100644 index 00000000..419f8f1d --- /dev/null +++ b/util/uniqueName.go @@ -0,0 +1,18 @@ +package util + +import ( + goaway "github.com/TwiN/go-away" + "regexp" +) + +// IsValidUniqueName ensures that the string represents a valid unique name. Lowercase alphanumeric only. 4-32 characters. +func IsValidUniqueName(uniqueName string) bool { + match, err := regexp.Match("^[a-z0-9]{4,32}$", []byte(uniqueName)) + if err != nil { + return false + } + if match && goaway.IsProfane(uniqueName) { + return false + } + return match +} diff --git a/website/algolia-crawler-config.js b/website/algolia-crawler-config.js index 799c3679..766de4a2 100644 --- a/website/algolia-crawler-config.js +++ b/website/algolia-crawler-config.js @@ -1,43 +1,44 @@ // this is not part of the Docusaurus site, but a copy of the active config in the Algolia Crawler new Crawler({ - appId: 'CO73R59OLO', - apiKey: 'ALGOLIA_CRAWLER_API_KEY', + appId: "CO73R59OLO", + apiKey: "ALGOLIA_CRAWLER_API_KEY", rateLimit: 8, maxDepth: 10, - startUrls: ['https://docs.zrok.io/'], - sitemaps: ['https://docs.zrok.io/sitemap.xml'], - ignoreCanonicalTo: true, - discoveryPatterns: ['https://docs.zrok.io/**'], + startUrls: ["https://docs.zrok.io/"], + sitemaps: ["https://docs.zrok.io/sitemap.xml"], + ignoreCanonicalTo: false, + discoveryPatterns: ["https://docs.zrok.io/**"], + schedule: "every 1 day at 2:22 pm", actions: [ { - indexName: 'zrok', - pathsToMatch: ['https://docs.zrok.io/**'], + indexName: "zrok", + pathsToMatch: ["https://docs.zrok.io/**"], recordExtractor: ({ $, helpers }) => { // priority order: deepest active sub list header -> navbar active item -> 'Documentation' const lvl0 = $( - '.menu__link.menu__link--sublist.menu__link--active, .navbar__item.navbar__link--active' + ".menu__link.menu__link--sublist.menu__link--active, .navbar__item.navbar__link--active", ) .last() - .text() || 'Documentation'; + .text() || "Documentation"; return helpers.docsearch({ recordProps: { lvl0: { - selectors: '', + selectors: "", defaultValue: lvl0, }, - lvl1: ['header h1', 'article h1'], - lvl2: 'article h2', - lvl3: 'article h3', - lvl4: 'article h4', - lvl5: 'article h5, article td:first-child', - lvl6: 'article h6', - content: 'article p, 
article li, article td:last-child', + lvl1: ["header h1", "article h1"], + lvl2: "article h2", + lvl3: "article h3", + lvl4: "article h4", + lvl5: "article h5, article td:first-child", + lvl6: "article h6", + content: "article p, article li, article td:last-child", }, indexHeadings: true, aggregateContent: true, - recordVersion: 'v3', + recordVersion: "v3", }); }, }, @@ -45,51 +46,51 @@ new Crawler({ initialIndexSettings: { zrok: { attributesForFaceting: [ - 'type', - 'lang', - 'language', - 'version', - 'docusaurus_tag', + "type", + "lang", + "language", + "version", + "docusaurus_tag", ], attributesToRetrieve: [ - 'hierarchy', - 'content', - 'anchor', - 'url', - 'url_without_anchor', - 'type', + "hierarchy", + "content", + "anchor", + "url", + "url_without_anchor", + "type", ], - attributesToHighlight: ['hierarchy', 'content'], - attributesToSnippet: ['content:10'], - camelCaseAttributes: ['hierarchy', 'content'], + attributesToHighlight: ["hierarchy", "content"], + attributesToSnippet: ["content:10"], + camelCaseAttributes: ["hierarchy", "content"], searchableAttributes: [ - 'unordered(hierarchy.lvl0)', - 'unordered(hierarchy.lvl1)', - 'unordered(hierarchy.lvl2)', - 'unordered(hierarchy.lvl3)', - 'unordered(hierarchy.lvl4)', - 'unordered(hierarchy.lvl5)', - 'unordered(hierarchy.lvl6)', - 'content', + "unordered(hierarchy.lvl0)", + "unordered(hierarchy.lvl1)", + "unordered(hierarchy.lvl2)", + "unordered(hierarchy.lvl3)", + "unordered(hierarchy.lvl4)", + "unordered(hierarchy.lvl5)", + "unordered(hierarchy.lvl6)", + "content", ], distinct: true, - attributeForDistinct: 'url', + attributeForDistinct: "url", customRanking: [ - 'desc(weight.pageRank)', - 'desc(weight.level)', - 'asc(weight.position)', + "desc(weight.pageRank)", + "desc(weight.level)", + "asc(weight.position)", ], ranking: [ - 'words', - 'filters', - 'typo', - 'attribute', - 'proximity', - 'exact', - 'custom', + "words", + "filters", + "typo", + "attribute", + "proximity", + "exact", + "custom", ], highlightPreTag: '', - highlightPostTag: '', + highlightPostTag: "", minWordSizefor1Typo: 3, minWordSizefor2Typos: 7, allowTyposOnNumericTokens: false, @@ -97,8 +98,8 @@ new Crawler({ ignorePlurals: true, advancedSyntax: true, attributeCriteriaComputedByMinProximity: true, - removeWordsIfNoResults: 'allOptional', - separatorsToIndex: '_', + removeWordsIfNoResults: "allOptional", + separatorsToIndex: "_", }, }, }); \ No newline at end of file diff --git a/website/docusaurus.config.js b/website/docusaurus.config.js index 8af391a7..d13d5abd 100644 --- a/website/docusaurus.config.js +++ b/website/docusaurus.config.js @@ -82,12 +82,8 @@ const config = { pages: { path: './src/pages' }, - // googleAnalytics: { - // - // }, - gtag: { - trackingID: 'G-V2KMEXWJ10', - anonymizeIP: true, + googleTagManager: { + containerId: 'GTM-MDFLZPK8', }, sitemap: { diff --git a/website/package-lock.json b/website/package-lock.json index 13462a7a..326f0606 100644 --- a/website/package-lock.json +++ b/website/package-lock.json @@ -8,9 +8,9 @@ "name": "website", "version": "0.0.0", "dependencies": { - "@docusaurus/core": "^3.0.1", - "@docusaurus/plugin-client-redirects": "^3.0.1", - "@docusaurus/preset-classic": "^3.0.1", + "@docusaurus/core": "^3.1.0", + "@docusaurus/plugin-client-redirects": "^3.1.0", + "@docusaurus/preset-classic": "^3.1.0", "@mdx-js/react": "^3.0.0", "clsx": "^1.2.1", "prism-react-renderer": "^1.3.5", @@ -20,7 +20,7 @@ "remark-math": "^5.1.1" }, "devDependencies": { - "@docusaurus/module-type-aliases": "^3.0.1", + 
"@docusaurus/module-type-aliases": "^3.1.0", "yaml-loader": "^0.8.0" }, "engines": { @@ -69,74 +69,74 @@ } }, "node_modules/@algolia/cache-browser-local-storage": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.20.0.tgz", - "integrity": "sha512-uujahcBt4DxduBTvYdwO3sBfHuJvJokiC3BP1+O70fglmE1ShkH8lpXqZBac1rrU3FnNYSUs4pL9lBdTKeRPOQ==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-browser-local-storage/-/cache-browser-local-storage-4.22.1.tgz", + "integrity": "sha512-Sw6IAmOCvvP6QNgY9j+Hv09mvkvEIDKjYW8ow0UDDAxSXy664RBNQk3i/0nt7gvceOJ6jGmOTimaZoY1THmU7g==", "dependencies": { - "@algolia/cache-common": "4.20.0" + "@algolia/cache-common": "4.22.1" } }, "node_modules/@algolia/cache-common": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.20.0.tgz", - "integrity": "sha512-vCfxauaZutL3NImzB2G9LjLt36vKAckc6DhMp05An14kVo8F1Yofb6SIl6U3SaEz8pG2QOB9ptwM5c+zGevwIQ==" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-common/-/cache-common-4.22.1.tgz", + "integrity": "sha512-TJMBKqZNKYB9TptRRjSUtevJeQVXRmg6rk9qgFKWvOy8jhCPdyNZV1nB3SKGufzvTVbomAukFR8guu/8NRKBTA==" }, "node_modules/@algolia/cache-in-memory": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.20.0.tgz", - "integrity": "sha512-Wm9ak/IaacAZXS4mB3+qF/KCoVSBV6aLgIGFEtQtJwjv64g4ePMapORGmCyulCFwfePaRAtcaTbMcJF+voc/bg==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/cache-in-memory/-/cache-in-memory-4.22.1.tgz", + "integrity": "sha512-ve+6Ac2LhwpufuWavM/aHjLoNz/Z/sYSgNIXsinGofWOysPilQZPUetqLj8vbvi+DHZZaYSEP9H5SRVXnpsNNw==", "dependencies": { - "@algolia/cache-common": "4.20.0" + "@algolia/cache-common": "4.22.1" } }, "node_modules/@algolia/client-account": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.20.0.tgz", - "integrity": "sha512-GGToLQvrwo7am4zVkZTnKa72pheQeez/16sURDWm7Seyz+HUxKi3BM6fthVVPUEBhtJ0reyVtuK9ArmnaKl10Q==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-account/-/client-account-4.22.1.tgz", + "integrity": "sha512-k8m+oegM2zlns/TwZyi4YgCtyToackkOpE+xCaKCYfBfDtdGOaVZCM5YvGPtK+HGaJMIN/DoTL8asbM3NzHonw==", "dependencies": { - "@algolia/client-common": "4.20.0", - "@algolia/client-search": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/client-common": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-analytics": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.20.0.tgz", - "integrity": "sha512-EIr+PdFMOallRdBTHHdKI3CstslgLORQG7844Mq84ib5oVFRVASuuPmG4bXBgiDbcsMLUeOC6zRVJhv1KWI0ug==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-analytics/-/client-analytics-4.22.1.tgz", + "integrity": "sha512-1ssi9pyxyQNN4a7Ji9R50nSdISIumMFDwKNuwZipB6TkauJ8J7ha/uO60sPJFqQyqvvI+px7RSNRQT3Zrvzieg==", "dependencies": { - "@algolia/client-common": "4.20.0", - "@algolia/client-search": "4.20.0", - "@algolia/requester-common": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/client-common": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-common": { - "version": "4.20.0", - 
"resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.20.0.tgz", - "integrity": "sha512-P3WgMdEss915p+knMMSd/fwiHRHKvDu4DYRrCRaBrsfFw7EQHon+EbRSm4QisS9NYdxbS04kcvNoavVGthyfqQ==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-common/-/client-common-4.22.1.tgz", + "integrity": "sha512-IvaL5v9mZtm4k4QHbBGDmU3wa/mKokmqNBqPj0K7lcR8ZDKzUorhcGp/u8PkPC/e0zoHSTvRh7TRkGX3Lm7iOQ==", "dependencies": { - "@algolia/requester-common": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-personalization": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.20.0.tgz", - "integrity": "sha512-N9+zx0tWOQsLc3K4PVRDV8GUeOLAY0i445En79Pr3zWB+m67V+n/8w4Kw1C5LlbHDDJcyhMMIlqezh6BEk7xAQ==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-personalization/-/client-personalization-4.22.1.tgz", + "integrity": "sha512-sl+/klQJ93+4yaqZ7ezOttMQ/nczly/3GmgZXJ1xmoewP5jmdP/X/nV5U7EHHH3hCUEHeN7X1nsIhGPVt9E1cQ==", "dependencies": { - "@algolia/client-common": "4.20.0", - "@algolia/requester-common": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/client-common": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/client-search": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.20.0.tgz", - "integrity": "sha512-zgwqnMvhWLdpzKTpd3sGmMlr4c+iS7eyyLGiaO51zDZWGMkpgoNVmltkzdBwxOVXz0RsFMznIxB9zuarUv4TZg==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/client-search/-/client-search-4.22.1.tgz", + "integrity": "sha512-yb05NA4tNaOgx3+rOxAmFztgMTtGBi97X7PC3jyNeGiwkAjOZc2QrdZBYyIdcDLoI09N0gjtpClcackoTN0gPA==", "dependencies": { - "@algolia/client-common": "4.20.0", - "@algolia/requester-common": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/client-common": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/@algolia/events": { @@ -145,47 +145,47 @@ "integrity": "sha512-FQzvOCgoFXAbf5Y6mYozw2aj5KCJoA3m4heImceldzPSMbdyS4atVjJzXKMsfX3wnZTFYwkkt8/z8UesLHlSBQ==" }, "node_modules/@algolia/logger-common": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.20.0.tgz", - "integrity": "sha512-xouigCMB5WJYEwvoWW5XDv7Z9f0A8VoXJc3VKwlHJw/je+3p2RcDXfksLI4G4lIVncFUYMZx30tP/rsdlvvzHQ==" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-common/-/logger-common-4.22.1.tgz", + "integrity": "sha512-OnTFymd2odHSO39r4DSWRFETkBufnY2iGUZNrMXpIhF5cmFE8pGoINNPzwg02QLBlGSaLqdKy0bM8S0GyqPLBg==" }, "node_modules/@algolia/logger-console": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.20.0.tgz", - "integrity": "sha512-THlIGG1g/FS63z0StQqDhT6bprUczBI8wnLT3JWvfAQDZX5P6fCg7dG+pIrUBpDIHGszgkqYEqECaKKsdNKOUA==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/logger-console/-/logger-console-4.22.1.tgz", + "integrity": "sha512-O99rcqpVPKN1RlpgD6H3khUWylU24OXlzkavUAMy6QZd1776QAcauE3oP8CmD43nbaTjBexZj2nGsBH9Tc0FVA==", "dependencies": { - "@algolia/logger-common": "4.20.0" + "@algolia/logger-common": "4.22.1" } }, "node_modules/@algolia/requester-browser-xhr": { - "version": "4.20.0", - "resolved": 
"https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.20.0.tgz", - "integrity": "sha512-HbzoSjcjuUmYOkcHECkVTwAelmvTlgs48N6Owt4FnTOQdwn0b8pdht9eMgishvk8+F8bal354nhx/xOoTfwiAw==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-browser-xhr/-/requester-browser-xhr-4.22.1.tgz", + "integrity": "sha512-dtQGYIg6MteqT1Uay3J/0NDqD+UciHy3QgRbk7bNddOJu+p3hzjTRYESqEnoX/DpEkaNYdRHUKNylsqMpgwaEw==", "dependencies": { - "@algolia/requester-common": "4.20.0" + "@algolia/requester-common": "4.22.1" } }, "node_modules/@algolia/requester-common": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.20.0.tgz", - "integrity": "sha512-9h6ye6RY/BkfmeJp7Z8gyyeMrmmWsMOCRBXQDs4mZKKsyVlfIVICpcSibbeYcuUdurLhIlrOUkH3rQEgZzonng==" + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-common/-/requester-common-4.22.1.tgz", + "integrity": "sha512-dgvhSAtg2MJnR+BxrIFqlLtkLlVVhas9HgYKMk2Uxiy5m6/8HZBL40JVAMb2LovoPFs9I/EWIoFVjOrFwzn5Qg==" }, "node_modules/@algolia/requester-node-http": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.20.0.tgz", - "integrity": "sha512-ocJ66L60ABSSTRFnCHIEZpNHv6qTxsBwJEPfYaSBsLQodm0F9ptvalFkHMpvj5DfE22oZrcrLbOYM2bdPJRHng==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/requester-node-http/-/requester-node-http-4.22.1.tgz", + "integrity": "sha512-JfmZ3MVFQkAU+zug8H3s8rZ6h0ahHZL/SpMaSasTCGYR5EEJsCc8SI5UZ6raPN2tjxa5bxS13BRpGSBUens7EA==", "dependencies": { - "@algolia/requester-common": "4.20.0" + "@algolia/requester-common": "4.22.1" } }, "node_modules/@algolia/transporter": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.20.0.tgz", - "integrity": "sha512-Lsii1pGWOAISbzeyuf+r/GPhvHMPHSPrTDWNcIzOE1SG1inlJHICaVe2ikuoRjcpgxZNU54Jl+if15SUCsaTUg==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/@algolia/transporter/-/transporter-4.22.1.tgz", + "integrity": "sha512-kzWgc2c9IdxMa3YqA6TN0NW5VrKYYW/BELIn7vnLyn+U/RFdZ4lxxt9/8yq3DKV5snvoDzzO4ClyejZRdV3lMQ==", "dependencies": { - "@algolia/cache-common": "4.20.0", - "@algolia/logger-common": "4.20.0", - "@algolia/requester-common": "4.20.0" + "@algolia/cache-common": "4.22.1", + "@algolia/logger-common": "4.22.1", + "@algolia/requester-common": "4.22.1" } }, "node_modules/@ampproject/remapping": { @@ -2165,9 +2165,9 @@ } }, "node_modules/@docusaurus/core": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.0.1.tgz", - "integrity": "sha512-CXrLpOnW+dJdSv8M5FAJ3JBwXtL6mhUWxFA8aS0ozK6jBG/wgxERk5uvH28fCeFxOGbAT9v1e9dOMo1X2IEVhQ==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/core/-/core-3.1.0.tgz", + "integrity": "sha512-GWudMGYA9v26ssbAWJNfgeDZk+lrudUTclLPRsmxiknEBk7UMp7Rglonhqbsf3IKHOyHkMU4Fr5jFyg5SBx9jQ==", "dependencies": { "@babel/core": "^7.23.3", "@babel/generator": "^7.23.3", @@ -2179,13 +2179,13 @@ "@babel/runtime": "^7.22.6", "@babel/runtime-corejs3": "^7.22.6", "@babel/traverse": "^7.22.8", - "@docusaurus/cssnano-preset": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", + "@docusaurus/cssnano-preset": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/mdx-loader": "3.1.0", "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": 
"3.0.1", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "@slorber/static-site-generator-webpack-plugin": "^4.0.7", "@svgr/webpack": "^6.5.1", "autoprefixer": "^10.4.14", @@ -2251,9 +2251,9 @@ } }, "node_modules/@docusaurus/cssnano-preset": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.0.1.tgz", - "integrity": "sha512-wjuXzkHMW+ig4BD6Ya1Yevx9UJadO4smNZCEljqBoQfIQrQskTswBs7lZ8InHP7mCt273a/y/rm36EZhqJhknQ==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/cssnano-preset/-/cssnano-preset-3.1.0.tgz", + "integrity": "sha512-ned7qsgCqSv/e7KyugFNroAfiszuxLwnvMW7gmT2Ywxb/Nyt61yIw7KHyAZCMKglOalrqnYA4gMhLUCK/mVePA==", "dependencies": { "cssnano-preset-advanced": "^5.3.10", "postcss": "^8.4.26", @@ -2265,9 +2265,9 @@ } }, "node_modules/@docusaurus/logger": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.0.1.tgz", - "integrity": "sha512-I5L6Nk8OJzkVA91O2uftmo71LBSxe1vmOn9AMR6JRCzYeEBrqneWMH02AqMvjJ2NpMiviO+t0CyPjyYV7nxCWQ==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/logger/-/logger-3.1.0.tgz", + "integrity": "sha512-p740M+HCst1VnKKzL60Hru9xfG4EUYJDarjlEC4hHeBy9+afPmY3BNPoSHx9/8zxuYfUlv/psf7I9NvRVdmdvg==", "dependencies": { "chalk": "^4.1.2", "tslib": "^2.6.0" @@ -2277,15 +2277,15 @@ } }, "node_modules/@docusaurus/mdx-loader": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.0.1.tgz", - "integrity": "sha512-ldnTmvnvlrONUq45oKESrpy+lXtbnTcTsFkOTIDswe5xx5iWJjt6eSa0f99ZaWlnm24mlojcIGoUWNCS53qVlQ==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/mdx-loader/-/mdx-loader-3.1.0.tgz", + "integrity": "sha512-D7onDz/3mgBonexWoQXPw3V2E5Bc4+jYRf9gGUUK+KoQwU8xMDaDkUUfsr7t6UBa/xox9p5+/3zwLuXOYMzGSg==", "dependencies": { "@babel/parser": "^7.22.7", "@babel/traverse": "^7.22.8", - "@docusaurus/logger": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/logger": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "@mdx-js/mdx": "^3.0.0", "@slorber/remark-comment": "^1.0.0", "escape-html": "^1.0.3", @@ -2317,12 +2317,12 @@ } }, "node_modules/@docusaurus/module-type-aliases": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.0.1.tgz", - "integrity": "sha512-DEHpeqUDsLynl3AhQQiO7AbC7/z/lBra34jTcdYuvp9eGm01pfH1wTVq8YqWZq6Jyx0BgcVl/VJqtE9StRd9Ag==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/module-type-aliases/-/module-type-aliases-3.1.0.tgz", + "integrity": "sha512-XUl7Z4PWlKg4l6KF05JQ3iDHQxnPxbQUqTNKvviHyuHdlalOFv6qeDAm7IbzyQPJD5VA6y4dpRbTWSqP9ClwPg==", "dependencies": { "@docusaurus/react-loadable": "5.5.2", - "@docusaurus/types": "3.0.1", + "@docusaurus/types": "3.1.0", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2336,15 +2336,15 @@ } }, "node_modules/@docusaurus/plugin-client-redirects": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.0.1.tgz", - "integrity": "sha512-CoZapnHbV3j5jsHCa/zmKaa8+H+oagHBgg91dN5I8/3kFit/xtZPfRaznvDX49cHg2nSoV74B3VMAT+bvCmzFQ==", + "version": "3.1.0", + "resolved": 
"https://registry.npmjs.org/@docusaurus/plugin-client-redirects/-/plugin-client-redirects-3.1.0.tgz", + "integrity": "sha512-CuFbdciMGvtGYiIPSOpj5idsHOQUcqZWTLCmZV3ePhviekm4dRZm1+QK/BxigmSTL5ICJMGbtOQnz7bgFSWHqg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "eta": "^2.2.0", "fs-extra": "^11.1.1", "lodash": "^4.17.21", @@ -2359,17 +2359,17 @@ } }, "node_modules/@docusaurus/plugin-content-blog": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.0.1.tgz", - "integrity": "sha512-cLOvtvAyaMQFLI8vm4j26svg3ktxMPSXpuUJ7EERKoGbfpJSsgtowNHcRsaBVmfuCsRSk1HZ/yHBsUkTmHFEsg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-blog/-/plugin-content-blog-3.1.0.tgz", + "integrity": "sha512-iMa6WBaaEdYuxckvJtLcq/HQdlA4oEbCXf/OFfsYJCCULcDX7GDZpKxLF3X1fLsax3sSm5bmsU+CA0WD+R1g3A==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "cheerio": "^1.0.0-rc.12", "feed": "^4.2.2", "fs-extra": "^11.1.1", @@ -2390,17 +2390,17 @@ } }, "node_modules/@docusaurus/plugin-content-docs": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.0.1.tgz", - "integrity": "sha512-dRfAOA5Ivo+sdzzJGXEu33yAtvGg8dlZkvt/NEJ7nwi1F2j4LEdsxtfX2GKeETB2fP6XoGNSQnFXqa2NYGrHFg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-docs/-/plugin-content-docs-3.1.0.tgz", + "integrity": "sha512-el5GxhT8BLrsWD0qGa8Rq+Ttb/Ni6V3DGT2oAPio0qcs/mUAxeyXEAmihkvmLCnAgp6xD27Ce7dISZ5c6BXeqA==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/module-type-aliases": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "@types/react-router-config": "^5.0.7", "combine-promises": "^1.1.0", "fs-extra": "^11.1.1", @@ -2419,15 +2419,15 @@ } }, "node_modules/@docusaurus/plugin-content-pages": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.0.1.tgz", - "integrity": "sha512-oP7PoYizKAXyEttcvVzfX3OoBIXEmXTMzCdfmC4oSwjG4SPcJsRge3mmI6O8jcZBgUPjIzXD21bVGWEE1iu8gg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-content-pages/-/plugin-content-pages-3.1.0.tgz", + "integrity": 
"sha512-9gntYQFpk+93+Xl7gYczJu8I9uWoyRLnRwS0+NUFcs9iZtHKsdqKWPRrONC9elfN3wJ9ORwTbcVzsTiB8jvYlg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "fs-extra": "^11.1.1", "tslib": "^2.6.0", "webpack": "^5.88.1" @@ -2441,13 +2441,13 @@ } }, "node_modules/@docusaurus/plugin-debug": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.0.1.tgz", - "integrity": "sha512-09dxZMdATky4qdsZGzhzlUvvC+ilQ2hKbYF+wez+cM2mGo4qHbv8+qKXqxq0CQZyimwlAOWQLoSozIXU0g0i7g==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-debug/-/plugin-debug-3.1.0.tgz", + "integrity": "sha512-AbvJwCVRbmQ8w9d8QXbF4Iq/ui0bjPZNYFIhtducGFnm2YQRN1mraK8mCEQb0Aq0T8SqRRvSfC/far4n/s531w==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", "fs-extra": "^11.1.1", "react-json-view-lite": "^1.2.0", "tslib": "^2.6.0" @@ -2461,13 +2461,13 @@ } }, "node_modules/@docusaurus/plugin-google-analytics": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.0.1.tgz", - "integrity": "sha512-jwseSz1E+g9rXQwDdr0ZdYNjn8leZBnKPjjQhMBEiwDoenL3JYFcNW0+p0sWoVF/f2z5t7HkKA+cYObrUh18gg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-analytics/-/plugin-google-analytics-3.1.0.tgz", + "integrity": "sha512-zvUOMzu9Uhz0ciqnSbtnp/5i1zEYlzarQrOXG90P3Is3efQI43p2YLW/rzSGdLb5MfQo2HvKT6Q5+tioMO045Q==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "tslib": "^2.6.0" }, "engines": { @@ -2479,13 +2479,13 @@ } }, "node_modules/@docusaurus/plugin-google-gtag": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.0.1.tgz", - "integrity": "sha512-UFTDvXniAWrajsulKUJ1DB6qplui1BlKLQZjX4F7qS/qfJ+qkKqSkhJ/F4VuGQ2JYeZstYb+KaUzUzvaPK1aRQ==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-gtag/-/plugin-google-gtag-3.1.0.tgz", + "integrity": "sha512-0txshvaY8qIBdkk2UATdVcfiCLGq3KAUfuRQD2cRNgO39iIf4/ihQxH9NXcRTwKs4Q5d9yYHoix3xT6pFuEYOg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "@types/gtag.js": "^0.0.12", "tslib": "^2.6.0" }, @@ -2498,13 +2498,13 @@ } }, "node_modules/@docusaurus/plugin-google-tag-manager": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.0.1.tgz", - "integrity": "sha512-IPFvuz83aFuheZcWpTlAdiiX1RqWIHM+OH8wS66JgwAKOiQMR3+nLywGjkLV4bp52x7nCnwhNk1rE85Cpy/CIw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-google-tag-manager/-/plugin-google-tag-manager-3.1.0.tgz", + "integrity": 
"sha512-zOWPEi8kMyyPtwG0vhyXrdbLs8fIZmY5vlbi9lUU+v8VsroO5iHmfR2V3SMsrsfOanw5oV/ciWqbxezY00qEZg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "tslib": "^2.6.0" }, "engines": { @@ -2516,16 +2516,16 @@ } }, "node_modules/@docusaurus/plugin-sitemap": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.0.1.tgz", - "integrity": "sha512-xARiWnjtVvoEniZudlCq5T9ifnhCu/GAZ5nA7XgyLfPcNpHQa241HZdsTlLtVcecEVVdllevBKOp7qknBBaMGw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/plugin-sitemap/-/plugin-sitemap-3.1.0.tgz", + "integrity": "sha512-TkR5vGBpUooEB9SoW42thahqqwKzfHrQQhkB+JrEGERsl4bKODSuJNle4aA4h6LSkg4IyfXOW8XOI0NIPWb9Cg==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "fs-extra": "^11.1.1", "sitemap": "^7.1.1", "tslib": "^2.6.0" @@ -2539,23 +2539,23 @@ } }, "node_modules/@docusaurus/preset-classic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.0.1.tgz", - "integrity": "sha512-il9m9xZKKjoXn6h0cRcdnt6wce0Pv1y5t4xk2Wx7zBGhKG1idu4IFHtikHlD0QPuZ9fizpXspXcTzjL5FXc1Gw==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/preset-classic/-/preset-classic-3.1.0.tgz", + "integrity": "sha512-xGLQRFmmT9IinAGUDVRYZ54Ys28USNbA3OTXQXnSJLPr1rCY7CYnHI4XoOnKWrNnDiAI4ruMzunXWyaElUYCKQ==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/plugin-debug": "3.0.1", - "@docusaurus/plugin-google-analytics": "3.0.1", - "@docusaurus/plugin-google-gtag": "3.0.1", - "@docusaurus/plugin-google-tag-manager": "3.0.1", - "@docusaurus/plugin-sitemap": "3.0.1", - "@docusaurus/theme-classic": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-search-algolia": "3.0.1", - "@docusaurus/types": "3.0.1" + "@docusaurus/core": "3.1.0", + "@docusaurus/plugin-content-blog": "3.1.0", + "@docusaurus/plugin-content-docs": "3.1.0", + "@docusaurus/plugin-content-pages": "3.1.0", + "@docusaurus/plugin-debug": "3.1.0", + "@docusaurus/plugin-google-analytics": "3.1.0", + "@docusaurus/plugin-google-gtag": "3.1.0", + "@docusaurus/plugin-google-tag-manager": "3.1.0", + "@docusaurus/plugin-sitemap": "3.1.0", + "@docusaurus/theme-classic": "3.1.0", + "@docusaurus/theme-common": "3.1.0", + "@docusaurus/theme-search-algolia": "3.1.0", + "@docusaurus/types": "3.1.0" }, "engines": { "node": ">=18.0" @@ -2578,22 +2578,22 @@ } }, "node_modules/@docusaurus/theme-classic": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.0.1.tgz", - "integrity": "sha512-XD1FRXaJiDlmYaiHHdm27PNhhPboUah9rqIH0lMpBt5kYtsGjJzhqa27KuZvHLzOP2OEpqd2+GZ5b6YPq7Q05Q==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-classic/-/theme-classic-3.1.0.tgz", + "integrity": 
"sha512-/+jMl2Z9O8QQxves5AtHdt91gWsEZFgOV3La/6eyKEd7QLqQUtM5fxEJ40rq9NKYjqCd1HzZ9egIMeJoWwillw==", "dependencies": { - "@docusaurus/core": "3.0.1", - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-translations": "3.0.1", - "@docusaurus/types": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/module-type-aliases": "3.1.0", + "@docusaurus/plugin-content-blog": "3.1.0", + "@docusaurus/plugin-content-docs": "3.1.0", + "@docusaurus/plugin-content-pages": "3.1.0", + "@docusaurus/theme-common": "3.1.0", + "@docusaurus/theme-translations": "3.1.0", + "@docusaurus/types": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "@mdx-js/react": "^3.0.0", "clsx": "^2.0.0", "copy-text-to-clipboard": "^3.2.0", @@ -2617,17 +2617,17 @@ } }, "node_modules/@docusaurus/theme-classic/node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } }, "node_modules/@docusaurus/theme-classic/node_modules/prism-react-renderer": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.0.tgz", - "integrity": "sha512-UYRg2TkVIaI6tRVHC5OJ4/BxqPUxJkJvq/odLT/ykpt1zGYXooNperUxQcCvi87LyRnR4nCh81ceOA+e7nrydg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", "dependencies": { "@types/prismjs": "^1.26.0", "clsx": "^2.0.0" @@ -2637,17 +2637,17 @@ } }, "node_modules/@docusaurus/theme-common": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.0.1.tgz", - "integrity": "sha512-cr9TOWXuIOL0PUfuXv6L5lPlTgaphKP+22NdVBOYah5jSq5XAAulJTjfe+IfLsEG4L7lJttLbhW7LXDFSAI7Ag==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-common/-/theme-common-3.1.0.tgz", + "integrity": "sha512-YGwEFALLIbF5ocW/Fy6Ae7tFWUOugEN3iwxTx8UkLAcLqYUboDSadesYtVBmRCEB4FVA2qoP7YaW3lu3apUPPw==", "dependencies": { - "@docusaurus/mdx-loader": "3.0.1", - "@docusaurus/module-type-aliases": "3.0.1", - "@docusaurus/plugin-content-blog": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/plugin-content-pages": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-common": "3.0.1", + "@docusaurus/mdx-loader": "3.1.0", + "@docusaurus/module-type-aliases": "3.1.0", + "@docusaurus/plugin-content-blog": "3.1.0", + "@docusaurus/plugin-content-docs": "3.1.0", + "@docusaurus/plugin-content-pages": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-common": "3.1.0", "@types/history": "^4.7.11", "@types/react": "*", "@types/react-router-config": "*", @@ -2666,17 +2666,17 @@ } }, 
"node_modules/@docusaurus/theme-common/node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } }, "node_modules/@docusaurus/theme-common/node_modules/prism-react-renderer": { - "version": "2.3.0", - "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.0.tgz", - "integrity": "sha512-UYRg2TkVIaI6tRVHC5OJ4/BxqPUxJkJvq/odLT/ykpt1zGYXooNperUxQcCvi87LyRnR4nCh81ceOA+e7nrydg==", + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/prism-react-renderer/-/prism-react-renderer-2.3.1.tgz", + "integrity": "sha512-Rdf+HzBLR7KYjzpJ1rSoxT9ioO85nZngQEoFIhL07XhtJHlCU3SOz0GJ6+qvMyQe0Se+BV3qpe6Yd/NmQF5Juw==", "dependencies": { "@types/prismjs": "^1.26.0", "clsx": "^2.0.0" @@ -2686,18 +2686,18 @@ } }, "node_modules/@docusaurus/theme-search-algolia": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.0.1.tgz", - "integrity": "sha512-DDiPc0/xmKSEdwFkXNf1/vH1SzJPzuJBar8kMcBbDAZk/SAmo/4lf6GU2drou4Ae60lN2waix+jYWTWcJRahSA==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-search-algolia/-/theme-search-algolia-3.1.0.tgz", + "integrity": "sha512-8cJH0ZhPsEDjq3jR3I+wHmWzVY2bXMQJ59v2QxUmsTZxbWA4u+IzccJMIJx4ooFl9J6iYynwYsFuHxyx/KUmfQ==", "dependencies": { "@docsearch/react": "^3.5.2", - "@docusaurus/core": "3.0.1", - "@docusaurus/logger": "3.0.1", - "@docusaurus/plugin-content-docs": "3.0.1", - "@docusaurus/theme-common": "3.0.1", - "@docusaurus/theme-translations": "3.0.1", - "@docusaurus/utils": "3.0.1", - "@docusaurus/utils-validation": "3.0.1", + "@docusaurus/core": "3.1.0", + "@docusaurus/logger": "3.1.0", + "@docusaurus/plugin-content-docs": "3.1.0", + "@docusaurus/theme-common": "3.1.0", + "@docusaurus/theme-translations": "3.1.0", + "@docusaurus/utils": "3.1.0", + "@docusaurus/utils-validation": "3.1.0", "algoliasearch": "^4.18.0", "algoliasearch-helper": "^3.13.3", "clsx": "^2.0.0", @@ -2716,17 +2716,17 @@ } }, "node_modules/@docusaurus/theme-search-algolia/node_modules/clsx": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.0.0.tgz", - "integrity": "sha512-rQ1+kcj+ttHG0MKVGBUXwayCCF1oh39BF5COIpRzuCEv8Mwjv0XucrI2ExNTOn9IlLifGClWQcU9BrZORvtw6Q==", + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/clsx/-/clsx-2.1.0.tgz", + "integrity": "sha512-m3iNNWpd9rl3jvvcBnu70ylMdrXt8Vlq4HYadnU5fwcOtvkSQWPmj7amUcDT2qYI7risszBjI5AUIUox9D16pg==", "engines": { "node": ">=6" } }, "node_modules/@docusaurus/theme-translations": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.0.1.tgz", - "integrity": "sha512-6UrbpzCTN6NIJnAtZ6Ne9492vmPVX+7Fsz4kmp+yor3KQwA1+MCzQP7ItDNkP38UmVLnvB/cYk/IvehCUqS3dg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/theme-translations/-/theme-translations-3.1.0.tgz", + "integrity": "sha512-DApE4AbDI+WBajihxB54L4scWQhVGNZAochlC9fkbciPuFAgdRBD3NREb0rgfbKexDC/rioppu/WJA0u8tS+yA==", "dependencies": { "fs-extra": "^11.1.1", "tslib": "^2.6.0" @@ -2736,10 +2736,11 @@ } }, "node_modules/@docusaurus/types": { - "version": 
"3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.0.1.tgz", - "integrity": "sha512-plyX2iU1tcUsF46uQ01pAd4JhexR7n0iiQ5MSnBFX6M6NSJgDYdru/i1/YNPKOnQHBoXGLHv0dNT6OAlDWNjrg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/types/-/types-3.1.0.tgz", + "integrity": "sha512-VaczOZf7+re8aFBIWnex1XENomwHdsSTkrdX43zyor7G/FY4OIsP6X28Xc3o0jiY0YdNuvIDyA5TNwOtpgkCVw==", "dependencies": { + "@mdx-js/mdx": "^3.0.0", "@types/history": "^4.7.11", "@types/react": "*", "commander": "^5.1.0", @@ -2755,11 +2756,11 @@ } }, "node_modules/@docusaurus/utils": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.0.1.tgz", - "integrity": "sha512-TwZ33Am0q4IIbvjhUOs+zpjtD/mXNmLmEgeTGuRq01QzulLHuPhaBTTAC/DHu6kFx3wDgmgpAlaRuCHfTcXv8g==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils/-/utils-3.1.0.tgz", + "integrity": "sha512-LgZfp0D+UBqAh7PZ//MUNSFBMavmAPku6Si9x8x3V+S318IGCNJ6hUr2O29UO0oLybEWUjD5Jnj9IUN6XyZeeg==", "dependencies": { - "@docusaurus/logger": "3.0.1", + "@docusaurus/logger": "3.1.0", "@svgr/webpack": "^6.5.1", "escape-string-regexp": "^4.0.0", "file-loader": "^6.2.0", @@ -2790,9 +2791,9 @@ } }, "node_modules/@docusaurus/utils-common": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.0.1.tgz", - "integrity": "sha512-W0AxD6w6T8g6bNro8nBRWf7PeZ/nn7geEWM335qHU2DDDjHuV4UZjgUGP1AQsdcSikPrlIqTJJbKzer1lRSlIg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-common/-/utils-common-3.1.0.tgz", + "integrity": "sha512-SfvnRLHoZ9bwTw67knkSs7IcUR0GY2SaGkpdB/J9pChrDiGhwzKNUhcieoPyPYrOWGRPk3rVNYtoy+Bc7psPAw==", "dependencies": { "tslib": "^2.6.0" }, @@ -2809,12 +2810,12 @@ } }, "node_modules/@docusaurus/utils-validation": { - "version": "3.0.1", - "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.0.1.tgz", - "integrity": "sha512-ujTnqSfyGQ7/4iZdB4RRuHKY/Nwm58IIb+41s5tCXOv/MBU2wGAjOHq3U+AEyJ8aKQcHbxvTKJaRchNHYUVUQg==", + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/@docusaurus/utils-validation/-/utils-validation-3.1.0.tgz", + "integrity": "sha512-dFxhs1NLxPOSzmcTk/eeKxLY5R+U4cua22g9MsAMiRWcwFKStZ2W3/GDY0GmnJGqNS8QAQepJrxQoyxXkJNDeg==", "dependencies": { - "@docusaurus/logger": "3.0.1", - "@docusaurus/utils": "3.0.1", + "@docusaurus/logger": "3.1.0", + "@docusaurus/utils": "3.1.0", "joi": "^17.9.2", "js-yaml": "^4.1.0", "tslib": "^2.6.0" @@ -3995,30 +3996,30 @@ } }, "node_modules/algoliasearch": { - "version": "4.20.0", - "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.20.0.tgz", - "integrity": "sha512-y+UHEjnOItoNy0bYO+WWmLWBlPwDjKHW6mNHrPi0NkuhpQOOEbrkwQH/wgKFDLh7qlKjzoKeiRtlpewDPDG23g==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/algoliasearch/-/algoliasearch-4.22.1.tgz", + "integrity": "sha512-jwydKFQJKIx9kIZ8Jm44SdpigFwRGPESaxZBaHSV0XWN2yBJAOT4mT7ppvlrpA4UGzz92pqFnVKr/kaZXrcreg==", "dependencies": { - "@algolia/cache-browser-local-storage": "4.20.0", - "@algolia/cache-common": "4.20.0", - "@algolia/cache-in-memory": "4.20.0", - "@algolia/client-account": "4.20.0", - "@algolia/client-analytics": "4.20.0", - "@algolia/client-common": "4.20.0", - "@algolia/client-personalization": "4.20.0", - "@algolia/client-search": "4.20.0", - "@algolia/logger-common": "4.20.0", - "@algolia/logger-console": "4.20.0", - "@algolia/requester-browser-xhr": "4.20.0", - "@algolia/requester-common": "4.20.0", - 
"@algolia/requester-node-http": "4.20.0", - "@algolia/transporter": "4.20.0" + "@algolia/cache-browser-local-storage": "4.22.1", + "@algolia/cache-common": "4.22.1", + "@algolia/cache-in-memory": "4.22.1", + "@algolia/client-account": "4.22.1", + "@algolia/client-analytics": "4.22.1", + "@algolia/client-common": "4.22.1", + "@algolia/client-personalization": "4.22.1", + "@algolia/client-search": "4.22.1", + "@algolia/logger-common": "4.22.1", + "@algolia/logger-console": "4.22.1", + "@algolia/requester-browser-xhr": "4.22.1", + "@algolia/requester-common": "4.22.1", + "@algolia/requester-node-http": "4.22.1", + "@algolia/transporter": "4.22.1" } }, "node_modules/algoliasearch-helper": { - "version": "3.16.0", - "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.16.0.tgz", - "integrity": "sha512-RxOtBafSQwyqD5BLO/q9VsVw/zuNz8kjb51OZhCIWLr33uvKB+vrRis+QK+JFlNQXbXf+w28fsTWiBupc1pHew==", + "version": "3.16.1", + "resolved": "https://registry.npmjs.org/algoliasearch-helper/-/algoliasearch-helper-3.16.1.tgz", + "integrity": "sha512-qxAHVjjmT7USVvrM8q6gZGaJlCK1fl4APfdAA7o8O6iXEc68G0xMNrzRkxoB/HmhhvyHnoteS/iMTiHiTcQQcg==", "dependencies": { "@algolia/events": "^4.0.1" }, @@ -7476,9 +7477,9 @@ } }, "node_modules/image-size": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.0.2.tgz", - "integrity": "sha512-xfOoWjceHntRb3qFCrh5ZFORYH8XCdYpASltMhZ/Q0KZiOwjdE/Yl2QCiWdwD+lygV5bMCvauzgu5PxBX/Yerg==", + "version": "1.1.1", + "resolved": "https://registry.npmjs.org/image-size/-/image-size-1.1.1.tgz", + "integrity": "sha512-541xKlUw6jr/6gGuk92F+mYM5zaFAc5ahphvkqvNe2bQ6gVBkd6bfrmVJ2t4KDAfikAYZyIqTnktX3i6/aQDrQ==", "dependencies": { "queue": "6.0.2" }, @@ -7486,7 +7487,7 @@ "image-size": "bin/image-size.js" }, "engines": { - "node": ">=14.0.0" + "node": ">=16.x" } }, "node_modules/immer": { @@ -8720,9 +8721,9 @@ } }, "node_modules/mdast-util-to-hast": { - "version": "13.0.2", - "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.0.2.tgz", - "integrity": "sha512-U5I+500EOOw9e3ZrclN3Is3fRpw8c19SMyNZlZ2IS+7vLsNzb2Om11VpIVOR+/0137GhZsFEF6YiKD5+0Hr2Og==", + "version": "13.1.0", + "resolved": "https://registry.npmjs.org/mdast-util-to-hast/-/mdast-util-to-hast-13.1.0.tgz", + "integrity": "sha512-/e2l/6+OdGp/FB+ctrJ9Avz71AN/GRH3oi/3KAx/kMnoUsD6q0woXlDT8lLEeViVKE7oZxE7RXzvO3T8kF2/sA==", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -8731,7 +8732,8 @@ "micromark-util-sanitize-uri": "^2.0.0", "trim-lines": "^3.0.0", "unist-util-position": "^5.0.0", - "unist-util-visit": "^5.0.0" + "unist-util-visit": "^5.0.0", + "vfile": "^6.0.0" }, "funding": { "type": "opencollective", @@ -12666,9 +12668,9 @@ } }, "node_modules/remark-rehype": { - "version": "11.0.0", - "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.0.0.tgz", - "integrity": "sha512-vx8x2MDMcxuE4lBmQ46zYUDfcFMmvg80WYX+UNLeG6ixjdCCLcw1lrgAukwBTuOFsS78eoAedHGn9sNM0w7TPw==", + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/remark-rehype/-/remark-rehype-11.1.0.tgz", + "integrity": "sha512-z3tJrAs2kIs1AqIIy6pzHmAHlF1hWQ+OdY4/hv+Wxe35EhyLKcajL33iUEn3ScxtFox9nUvRufR/Zre8Q08H/g==", "dependencies": { "@types/hast": "^3.0.0", "@types/mdast": "^4.0.0", @@ -12999,9 +13001,9 @@ } }, "node_modules/search-insights": { - "version": "2.11.0", - "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.11.0.tgz", - "integrity": 
"sha512-Uin2J8Bpm3xaZi9Y8QibSys6uJOFZ+REMrf42v20AA3FUDUrshKkMEP6liJbMAHCm71wO6ls4mwAf7a3gFVxLw==", + "version": "2.13.0", + "resolved": "https://registry.npmjs.org/search-insights/-/search-insights-2.13.0.tgz", + "integrity": "sha512-Orrsjf9trHHxFRuo9/rzm0KIWmgzE8RMlZMzuhZOJ01Rnz3D0YBAe+V6473t6/H6c7irs6Lt48brULAiRWb3Vw==", "peer": true }, "node_modules/section-matter": { diff --git a/website/package.json b/website/package.json index 794000ae..c33685ef 100644 --- a/website/package.json +++ b/website/package.json @@ -14,9 +14,9 @@ "write-heading-ids": "docusaurus write-heading-ids" }, "dependencies": { - "@docusaurus/core": "^3.0.1", - "@docusaurus/plugin-client-redirects": "^3.0.1", - "@docusaurus/preset-classic": "^3.0.1", + "@docusaurus/core": "^3.1.0", + "@docusaurus/plugin-client-redirects": "^3.1.0", + "@docusaurus/preset-classic": "^3.1.0", "@mdx-js/react": "^3.0.0", "clsx": "^1.2.1", "prism-react-renderer": "^1.3.5", @@ -26,7 +26,7 @@ "remark-math": "^5.1.1" }, "devDependencies": { - "@docusaurus/module-type-aliases": "^3.0.1", + "@docusaurus/module-type-aliases": "^3.1.0", "yaml-loader": "^0.8.0" }, "browserslist": { diff --git a/website/static/img/logo-apple.svg b/website/static/img/logo-apple.svg index edad61a9..f372d9d2 100644 --- a/website/static/img/logo-apple.svg +++ b/website/static/img/logo-apple.svg @@ -1,47 +1,47 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file